Mirror of https://github.com/valitydev/salt.git (synced 2024-11-07 00:55:19 +00:00)

Merge pull request #25695 from stanislavb/expose-aws-region-config-and-fetch-region-from-metadata

Configurable AWS region & region from IAM metadata

Commit d330ef0d81
salt/fileclient.py

@@ -563,7 +563,9 @@ class Client(object):
                     service_url=self.opts.get('s3.service_url',
                                               None),
                     verify_ssl=self.opts.get('s3.verify_ssl',
-                                             True))
+                                             True),
+                    location=self.opts.get('s3.location',
+                                           None))
             return dest
         except Exception:
             raise MinionError('Could not fetch from {0}'.format(url))
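The hunk above threads a new s3.location option from the minion configuration into the S3 query helper, alongside the existing s3.* options. A minimal sketch of the lookup pattern, assuming the options object behaves like a plain dict (the dict literal below is illustrative, not taken from the commit):

    # Illustrative minion options; the keys mirror the opts.get() calls above.
    opts = {
        's3.service_url': 's3.amazonaws.com',
        's3.verify_ssl': True,
        's3.location': 'eu-central-1',   # new in this change; may be absent
    }

    service_url = opts.get('s3.service_url', None)
    verify_ssl = opts.get('s3.verify_ssl', True)
    location = opts.get('s3.location', None)   # forwarded to salt.utils.s3.query()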
salt/fileserver/s3fs.py

@@ -320,8 +320,11 @@ def _get_s3_key():
     verify_ssl = __opts__['s3.verify_ssl'] \
         if 's3.verify_ssl' in __opts__ \
         else None
+    location = __opts__['s3.location'] \
+        if 's3.location' in __opts__ \
+        else None
 
-    return key, keyid, service_url, verify_ssl
+    return key, keyid, service_url, verify_ssl, location
 
 
 def _init():

@@ -391,7 +394,7 @@ def _refresh_buckets_cache_file(cache_file):
 
     log.debug('Refreshing buckets cache file')
 
-    key, keyid, service_url, verify_ssl = _get_s3_key()
+    key, keyid, service_url, verify_ssl, location = _get_s3_key()
     metadata = {}
 
     # helper s3 query function

@@ -402,6 +405,7 @@ def _refresh_buckets_cache_file(cache_file):
                             bucket=bucket,
                             service_url=service_url,
                             verify_ssl=verify_ssl,
+                            location=location,
                             return_bin=False)
 
     if _is_env_per_bucket():

@@ -582,7 +586,7 @@ def _get_file_from_s3(metadata, saltenv, bucket_name, path, cached_file_path):
     Checks the local cache for the file, if it's old or missing go grab the
     file from S3 and update the cache
     '''
-    key, keyid, service_url, verify_ssl = _get_s3_key()
+    key, keyid, service_url, verify_ssl, location = _get_s3_key()
 
     # check the local cache...
     if os.path.isfile(cached_file_path):

@@ -617,6 +621,7 @@ def _get_file_from_s3(metadata, saltenv, bucket_name, path, cached_file_path):
                 bucket=bucket_name,
                 service_url=service_url,
                 verify_ssl=verify_ssl,
+                location=location,
                 path=_quote(path),
                 local_file=cached_file_path,
                 full_headers=True

@@ -645,6 +650,7 @@ def _get_file_from_s3(metadata, saltenv, bucket_name, path, cached_file_path):
         bucket=bucket_name,
         service_url=service_url,
         verify_ssl=verify_ssl,
+        location=location,
         path=_quote(path),
         local_file=cached_file_path
     )
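With this change _get_s3_key() returns a five-element tuple, so every caller in the fileserver unpacks location as well and forwards it to salt.utils.s3.query(). A hedged sketch of the lookup pattern, with __opts__ modelled as a plain dict and the function renamed to mark it as illustrative:

    def _get_s3_key_sketch(__opts__):
        # Each setting is taken from __opts__ when present, otherwise left as
        # None so salt.utils.s3.query() can apply its own fallbacks.
        key = __opts__['s3.key'] if 's3.key' in __opts__ else None
        keyid = __opts__['s3.keyid'] if 's3.keyid' in __opts__ else None
        service_url = __opts__['s3.service_url'] if 's3.service_url' in __opts__ else None
        verify_ssl = __opts__['s3.verify_ssl'] if 's3.verify_ssl' in __opts__ else None
        location = __opts__['s3.location'] if 's3.location' in __opts__ else None
        return key, keyid, service_url, verify_ssl, location

    # Callers now unpack five values instead of four:
    key, keyid, service_url, verify_ssl, location = _get_s3_key_sketch(
        {'s3.location': 'eu-central-1'})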
salt/modules/s3.py

@@ -30,12 +30,18 @@ Connection module for Amazon S3
 
     SSL verification may also be turned off in the configuration:
 
         s3.verify_ssl: False
 
     This is required if using S3 bucket names that contain a period, as
     these will not match Amazon's S3 wildcard certificates. Certificate
     verification is enabled by default.
 
+    AWS region may be specified in the configuration:
+
+        s3.location: eu-central-1
+
+    Default is us-east-1.
+
     This module should be usable to query other S3-like services, such as
     Eucalyptus.
 

@@ -61,7 +67,7 @@ def __virtual__():
 
 
 def delete(bucket, path=None, action=None, key=None, keyid=None,
-           service_url=None, verify_ssl=None):
+           service_url=None, verify_ssl=None, location=None):
     '''
     Delete a bucket, or delete an object from a bucket.
 

@@ -73,8 +79,8 @@ def delete(bucket, path=None, action=None, key=None, keyid=None,
 
         salt myminion s3.delete mybucket remoteobject
     '''
-    key, keyid, service_url, verify_ssl = _get_key(key, keyid, service_url,
-                                                   verify_ssl)
+    key, keyid, service_url, verify_ssl, location = _get_key(
+        key, keyid, service_url, verify_ssl, location)
 
     return salt.utils.s3.query(method='DELETE',
                                bucket=bucket,

@@ -83,12 +89,13 @@
                                key=key,
                                keyid=keyid,
                                service_url=service_url,
-                               verify_ssl=verify_ssl)
+                               verify_ssl=verify_ssl,
+                               location=location)
 
 
 def get(bucket=None, path=None, return_bin=False, action=None,
         local_file=None, key=None, keyid=None, service_url=None,
-        verify_ssl=None):
+        verify_ssl=None, location=None):
     '''
     List the contents of a bucket, or return an object from a bucket. Set
     return_bin to True in order to retrieve an object wholesale. Otherwise,

@@ -140,8 +147,8 @@ def get(bucket=None, path=None, return_bin=False, action=None,
 
         salt myminion s3.get mybucket myfile.png action=acl
     '''
-    key, keyid, service_url, verify_ssl = _get_key(key, keyid, service_url,
-                                                   verify_ssl)
+    key, keyid, service_url, verify_ssl, location = _get_key(
+        key, keyid, service_url, verify_ssl, location)
 
     return salt.utils.s3.query(method='GET',
                                bucket=bucket,

@@ -152,11 +159,12 @@
                                key=key,
                                keyid=keyid,
                                service_url=service_url,
-                               verify_ssl=verify_ssl)
+                               verify_ssl=verify_ssl,
+                               location=location)
 
 
 def head(bucket, path=None, key=None, keyid=None, service_url=None,
-         verify_ssl=None):
+         verify_ssl=None, location=None):
     '''
     Return the metadata for a bucket, or an object in a bucket.
 

@@ -167,8 +175,8 @@ def head(bucket, path=None, key=None, keyid=None, service_url=None,
 
         salt myminion s3.head mybucket
         salt myminion s3.head mybucket myfile.png
     '''
-    key, keyid, service_url, verify_ssl = _get_key(key, keyid, service_url,
-                                                   verify_ssl)
+    key, keyid, service_url, verify_ssl, location = _get_key(
+        key, keyid, service_url, verify_ssl, location)
 
     return salt.utils.s3.query(method='HEAD',
                                bucket=bucket,

@@ -177,11 +185,12 @@
                                keyid=keyid,
                                service_url=service_url,
                                verify_ssl=verify_ssl,
+                               location=location,
                                full_headers=True)
 
 
 def put(bucket, path=None, return_bin=False, action=None, local_file=None,
-        key=None, keyid=None, service_url=None, verify_ssl=None):
+        key=None, keyid=None, service_url=None, verify_ssl=None, location=None):
     '''
     Create a new bucket, or upload an object to a bucket.
 

@@ -197,8 +206,8 @@ def put(bucket, path=None, return_bin=False, action=None, local_file=None,
 
         salt myminion s3.put mybucket remotepath local_file=/path/to/file
     '''
-    key, keyid, service_url, verify_ssl = _get_key(key, keyid, service_url,
-                                                   verify_ssl)
+    key, keyid, service_url, verify_ssl, location = _get_key(
+        key, keyid, service_url, verify_ssl, location)
 
     return salt.utils.s3.query(method='PUT',
                                bucket=bucket,

@@ -209,10 +218,11 @@
                                key=key,
                                keyid=keyid,
                                service_url=service_url,
-                               verify_ssl=verify_ssl)
+                               verify_ssl=verify_ssl,
+                               location=location)
 
 
-def _get_key(key, keyid, service_url, verify_ssl):
+def _get_key(key, keyid, service_url, verify_ssl, location):
     '''
     Examine the keys, and populate as necessary
     '''

@@ -234,4 +244,7 @@ def _get_key(key, keyid, service_url, verify_ssl):
     if verify_ssl is None:
         verify_ssl = True
 
-    return key, keyid, service_url, verify_ssl
+    if location is None and __salt__['config.option']('s3.location') is not None:
+        location = __salt__['config.option']('s3.location')
+
+    return key, keyid, service_url, verify_ssl, location
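In the execution module the resolution of location lives at the end of _get_key(): an explicitly passed value wins, otherwise the s3.location minion option is consulted through config.option, and None is returned if neither is set. A minimal sketch of that order, with config_option standing in for __salt__['config.option'] (the helper name and the example values are illustrative):

    def _resolve_location(location, config_option):
        # Mirrors the tail of _get_key(): explicit argument first, then the
        # s3.location config option, otherwise None (query() then decides).
        if location is None and config_option('s3.location') is not None:
            location = config_option('s3.location')
        return location

    _resolve_location('eu-west-1', lambda opt: 'eu-central-1')   # -> 'eu-west-1'
    _resolve_location(None, lambda opt: 'eu-central-1')          # -> 'eu-central-1'
    _resolve_location(None, lambda opt: None)                    # -> None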
salt/pillar/s3.py

@@ -96,12 +96,14 @@ _s3_sync_on_update = True  # sync cache on update rather than jit
 
 
 class S3Credentials(object):
-    def __init__(self, key, keyid, bucket, service_url, verify_ssl=True):
+    def __init__(self, key, keyid, bucket, service_url, verify_ssl,
+                 location):
         self.key = key
         self.keyid = keyid
         self.bucket = bucket
         self.service_url = service_url
         self.verify_ssl = verify_ssl
+        self.location = location
 
 
 def ext_pillar(minion_id,

@@ -110,6 +112,7 @@ def ext_pillar(minion_id,
                key,
                keyid,
                verify_ssl=True,
+               location=None,
                multiple_env=False,
                environment='base',
                prefix='',

@@ -118,7 +121,8 @@
     Execute a command and read the output as YAML
     '''
 
-    s3_creds = S3Credentials(key, keyid, bucket, service_url, verify_ssl)
+    s3_creds = S3Credentials(key, keyid, bucket, service_url, verify_ssl,
+                             location)
 
     # normpath is needed to remove appended '/' if root is empty string.
     pillar_dir = os.path.normpath(os.path.join(_get_cache_dir(), environment,

@@ -230,6 +234,7 @@ def _refresh_buckets_cache_file(creds, cache_file, multiple_env, environment, pr
                             bucket=creds.bucket,
                             service_url=creds.service_url,
                             verify_ssl=creds.verify_ssl,
+                            location=creds.location,
                             return_bin=False,
                             params={'prefix': prefix})
 

@@ -368,5 +373,6 @@ def _get_file_from_s3(creds, metadata, saltenv, bucket, path,
         service_url=creds.service_url,
         path=_quote(path),
         local_file=cached_file_path,
-        verify_ssl=creds.verify_ssl
+        verify_ssl=creds.verify_ssl,
+        location=creds.location
     )
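The s3 ext_pillar gains a location keyword that is stored on S3Credentials and forwarded on every bucket listing and file fetch. A hedged sketch of the extended container, with field names taken from the hunk above and the example values made up:

    class S3CredentialsSketch(object):
        # Illustrative mirror of the extended S3Credentials container.
        def __init__(self, key, keyid, bucket, service_url, verify_ssl,
                     location):
            self.key = key
            self.keyid = keyid
            self.bucket = bucket
            self.service_url = service_url
            self.verify_ssl = verify_ssl
            self.location = location   # new: region passed to every S3 query

    creds = S3CredentialsSketch(None, None, 'my-pillar-bucket',
                                's3.amazonaws.com', True, 'eu-central-1')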
salt/utils/iam.py

@@ -63,8 +63,25 @@ def _convert_key_to_str(key):
     return key
 
 
+def get_iam_region(version='latest', url='http://169.254.169.254',
+                   timeout=None, num_retries=5):
+    '''
+    Gets instance identity document and returns region
+    '''
+    instance_identity_url = '{0}/{1}/latest/dynamic/instance-identity/document'.format(url, version)
+
+    region = None
+    try:
+        document = _retry_get_url(instance_identity_url, num_retries, timeout)
+        region = json.loads(document)['region']
+    except (ValueError, TypeError, KeyError):
+        # JSON failed to decode
+        log.error('Failed to read region from instance metadata. Giving up.')
+    return region
+
+
 def get_iam_metadata(version='latest', url='http://169.254.169.254',
                      timeout=None, num_retries=5):
     '''
     Grabs the first IAM role from this instances metadata if it exists.
     '''
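get_iam_region() fetches the EC2 instance identity document from the link-local metadata endpoint and pulls out its region field, returning None if the document cannot be retrieved or parsed. A sketch of the parsing step on a trimmed, made-up document (the real document carries many more fields):

    import json

    # Assumed shape of the instance identity document, trimmed for illustration.
    document = '{"region": "eu-central-1", "instanceId": "i-0123456789abcdef0"}'

    try:
        region = json.loads(document)['region']   # -> 'eu-central-1'
    except (ValueError, TypeError, KeyError):
        region = None   # same failure path that get_iam_region() logs and falls back on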
salt/utils/s3.py

@@ -30,7 +30,7 @@ DEFAULT_LOCATION = 'us-east-1'
 def query(key, keyid, method='GET', params=None, headers=None,
           requesturl=None, return_url=False, bucket=None, service_url=None,
           path='', return_bin=False, action=None, local_file=None,
-          verify_ssl=True, location=DEFAULT_LOCATION, full_headers=False):
+          verify_ssl=True, location=None, full_headers=False):
     '''
     Perform a query against an S3-like API. This function requires that a
     secret key and the id for that key are passed in. For instance:

@@ -38,7 +38,10 @@ def query(key, keyid, method='GET', params=None, headers=None,
         s3.keyid: GKTADJGHEIQSXMKKRBJ08H
         s3.key: askdjghsdfjkghWupUjasdflkdfklgjsdfjajkghs
 
-    A service_url may also be specified in the configuration::
+    If keyid or key is not specified, an attempt to fetch them from EC2 IAM
+    metadata service will be made.
+
+    A service_url may also be specified in the configuration:
 
         s3.service_url: s3.amazonaws.com
 

@@ -58,6 +61,13 @@ def query(key, keyid, method='GET', params=None, headers=None,
     This is required if using S3 bucket names that contain a period, as
     these will not match Amazon's S3 wildcard certificates. Certificate
     verification is enabled by default.
+
+    A region may be specified:
+
+        s3.location: eu-central-1
+
+    If region is not specified, an attempt to fetch the region from EC2 IAM
+    metadata service will be made. Failing that, default is us-east-1
     '''
     if not HAS_REQUESTS:
         log.error('There was an error: requests is required for s3 access')

@@ -77,12 +87,15 @@ def query(key, keyid, method='GET', params=None, headers=None,
         endpoint = service_url
 
     # Try grabbing the credentials from the EC2 instance IAM metadata if available
     token = None
     if not key or not keyid:
         iam_creds = iam.get_iam_metadata()
         key = iam_creds['secret_key']
         keyid = iam_creds['access_key']
         token = iam_creds['security_token']
 
+    if not location:
+        location = iam.get_iam_region()
+    if not location:
+        location = DEFAULT_LOCATION
+
     data = ''
     if method == 'PUT':
|
||||
'''
|
||||
with patch.object(s3, '_get_key',
|
||||
return_value=('key', 'keyid', 'service_url',
|
||||
'verify_ssl')):
|
||||
'verify_ssl', 'location')):
|
||||
with patch.object(salt.utils.s3, 'query', return_value='A'):
|
||||
self.assertEqual(s3.delete('bucket'), 'A')
|
||||
|
||||
@ -44,7 +44,7 @@ class S3TestCase(TestCase):
|
||||
'''
|
||||
with patch.object(s3, '_get_key',
|
||||
return_value=('key', 'keyid', 'service_url',
|
||||
'verify_ssl')):
|
||||
'verify_ssl', 'location')):
|
||||
with patch.object(salt.utils.s3, 'query', return_value='A'):
|
||||
self.assertEqual(s3.get(), 'A')
|
||||
|
||||
@ -54,7 +54,7 @@ class S3TestCase(TestCase):
|
||||
'''
|
||||
with patch.object(s3, '_get_key',
|
||||
return_value=('key', 'keyid', 'service_url',
|
||||
'verify_ssl')):
|
||||
'verify_ssl', 'location')):
|
||||
with patch.object(salt.utils.s3, 'query', return_value='A'):
|
||||
self.assertEqual(s3.head('bucket'), 'A')
|
||||
|
||||
@ -64,7 +64,7 @@ class S3TestCase(TestCase):
|
||||
'''
|
||||
with patch.object(s3, '_get_key',
|
||||
return_value=('key', 'keyid', 'service_url',
|
||||
'verify_ssl')):
|
||||
'verify_ssl', 'location')):
|
||||
with patch.object(salt.utils.s3, 'query', return_value='A'):
|
||||
self.assertEqual(s3.put('bucket'), 'A')
|
||||
|
||||
|