Merge pull request #38295 from porunov/develop

Add path style requests and http protocol support to the s3 module
This commit is contained in:
Mike Place 2016-12-18 05:57:37 -07:00 committed by GitHub
commit 39eed71eca
5 changed files with 122 additions and 37 deletions

View File

@ -327,8 +327,14 @@ def _get_s3_key():
location = __opts__['s3.location'] \
if 's3.location' in __opts__ \
else None
path_style = __opts__['s3.path_style'] \
if 's3.path_style' in __opts__ \
else None
https_enable = __opts__['s3.https_enable'] \
if 's3.https_enable' in __opts__ \
else None
return key, keyid, service_url, verify_ssl, kms_keyid, location
return key, keyid, service_url, verify_ssl, kms_keyid, location, path_style, https_enable
def _init():
@ -398,7 +404,7 @@ def _refresh_buckets_cache_file(cache_file):
log.debug('Refreshing buckets cache file')
key, keyid, service_url, verify_ssl, kms_keyid, location = _get_s3_key()
key, keyid, service_url, verify_ssl, kms_keyid, location, path_style, https_enable = _get_s3_key()
metadata = {}
# helper s3 query function
@ -411,7 +417,9 @@ def _refresh_buckets_cache_file(cache_file):
service_url=service_url,
verify_ssl=verify_ssl,
location=location,
return_bin=False)
return_bin=False,
path_style=path_style,
https_enable=https_enable)
if _is_env_per_bucket():
# Single environment per bucket
@ -609,7 +617,7 @@ def _get_file_from_s3(metadata, saltenv, bucket_name, path, cached_file_path):
Checks the local cache for the file, if it's old or missing go grab the
file from S3 and update the cache
'''
key, keyid, service_url, verify_ssl, kms_keyid, location = _get_s3_key()
key, keyid, service_url, verify_ssl, kms_keyid, location, path_style, https_enable = _get_s3_key()
# check the local cache...
if os.path.isfile(cached_file_path):
@ -648,7 +656,9 @@ def _get_file_from_s3(metadata, saltenv, bucket_name, path, cached_file_path):
location=location,
path=_quote(path),
local_file=cached_file_path,
full_headers=True
full_headers=True,
path_style=path_style,
https_enable=https_enable
)
if ret is not None:
for header_name, header_value in ret['headers'].items():
@ -677,7 +687,9 @@ def _get_file_from_s3(metadata, saltenv, bucket_name, path, cached_file_path):
verify_ssl=verify_ssl,
location=location,
path=_quote(path),
local_file=cached_file_path
local_file=cached_file_path,
path_style=path_style,
https_enable=https_enable,
)

View File

@ -32,6 +32,16 @@ Connection module for Amazon S3
The service_url will form the basis for the final endpoint that is used to
query the service.
Path style can be enabled:
s3.path_style: True
This can be useful if you need to use Salt with a proxy for an S3-compatible storage service.
You can use either the HTTPS protocol or the HTTP protocol:
s3.https_enable: True
SSL verification may also be turned off in the configuration:
s3.verify_ssl: False
@ -68,7 +78,7 @@ def __virtual__():
def delete(bucket, path=None, action=None, key=None, keyid=None,
service_url=None, verify_ssl=None, kms_keyid=None, location=None,
role_arn=None):
role_arn=None, path_style=None, https_enable=None):
'''
Delete a bucket, or delete an object from a bucket.
@ -80,7 +90,7 @@ def delete(bucket, path=None, action=None, key=None, keyid=None,
salt myminion s3.delete mybucket remoteobject
'''
key, keyid, service_url, verify_ssl, kms_keyid, location, role_arn = _get_key(
key, keyid, service_url, verify_ssl, kms_keyid, location, role_arn, path_style, https_enable = _get_key(
key,
keyid,
service_url,
@ -88,6 +98,8 @@ def delete(bucket, path=None, action=None, key=None, keyid=None,
kms_keyid,
location,
role_arn,
path_style,
https_enable,
)
return __utils__['s3.query'](method='DELETE',
@ -100,12 +112,15 @@ def delete(bucket, path=None, action=None, key=None, keyid=None,
service_url=service_url,
verify_ssl=verify_ssl,
location=location,
role_arn=role_arn)
role_arn=role_arn,
path_style=path_style,
https_enable=https_enable)
def get(bucket=None, path=None, return_bin=False, action=None,
local_file=None, key=None, keyid=None, service_url=None,
verify_ssl=None, kms_keyid=None, location=None, role_arn=None):
verify_ssl=None, kms_keyid=None, location=None, role_arn=None,
path_style=None, https_enable=None):
'''
List the contents of a bucket, or return an object from a bucket. Set
return_bin to True in order to retrieve an object wholesale. Otherwise,
@ -157,7 +172,7 @@ def get(bucket=None, path=None, return_bin=False, action=None,
salt myminion s3.get mybucket myfile.png action=acl
'''
key, keyid, service_url, verify_ssl, kms_keyid, location, role_arn = _get_key(
key, keyid, service_url, verify_ssl, kms_keyid, location, role_arn, path_style, https_enable = _get_key(
key,
keyid,
service_url,
@ -165,6 +180,8 @@ def get(bucket=None, path=None, return_bin=False, action=None,
kms_keyid,
location,
role_arn,
path_style,
https_enable,
)
return __utils__['s3.query'](method='GET',
@ -179,11 +196,14 @@ def get(bucket=None, path=None, return_bin=False, action=None,
service_url=service_url,
verify_ssl=verify_ssl,
location=location,
role_arn=role_arn)
role_arn=role_arn,
path_style=path_style,
https_enable=https_enable)
def head(bucket, path=None, key=None, keyid=None, service_url=None,
verify_ssl=None, kms_keyid=None, location=None, role_arn=None):
verify_ssl=None, kms_keyid=None, location=None, role_arn=None,
path_style=None, https_enable=None):
'''
Return the metadata for a bucket, or an object in a bucket.
@ -194,7 +214,7 @@ def head(bucket, path=None, key=None, keyid=None, service_url=None,
salt myminion s3.head mybucket
salt myminion s3.head mybucket myfile.png
'''
key, keyid, service_url, verify_ssl, kms_keyid, location, role_arn = _get_key(
key, keyid, service_url, verify_ssl, kms_keyid, location, role_arn, path_style, https_enable = _get_key(
key,
keyid,
service_url,
@ -202,6 +222,8 @@ def head(bucket, path=None, key=None, keyid=None, service_url=None,
kms_keyid,
location,
role_arn,
path_style,
https_enable,
)
return __utils__['s3.query'](method='HEAD',
@ -214,12 +236,15 @@ def head(bucket, path=None, key=None, keyid=None, service_url=None,
verify_ssl=verify_ssl,
location=location,
full_headers=True,
role_arn=role_arn)
role_arn=role_arn,
path_style=path_style,
https_enable=https_enable)
def put(bucket, path=None, return_bin=False, action=None, local_file=None,
key=None, keyid=None, service_url=None, verify_ssl=None,
kms_keyid=None, location=None, role_arn=None):
kms_keyid=None, location=None, role_arn=None, path_style=None,
https_enable=None):
'''
Create a new bucket, or upload an object to a bucket.
@ -235,7 +260,7 @@ def put(bucket, path=None, return_bin=False, action=None, local_file=None,
salt myminion s3.put mybucket remotepath local_file=/path/to/file
'''
key, keyid, service_url, verify_ssl, kms_keyid, location, role_arn = _get_key(
key, keyid, service_url, verify_ssl, kms_keyid, location, role_arn, path_style, https_enable = _get_key(
key,
keyid,
service_url,
@ -243,6 +268,8 @@ def put(bucket, path=None, return_bin=False, action=None, local_file=None,
kms_keyid,
location,
role_arn,
path_style,
https_enable,
)
return __utils__['s3.query'](method='PUT',
@ -257,10 +284,12 @@ def put(bucket, path=None, return_bin=False, action=None, local_file=None,
service_url=service_url,
verify_ssl=verify_ssl,
location=location,
role_arn=role_arn)
role_arn=role_arn,
path_style=path_style,
https_enable=https_enable)
def _get_key(key, keyid, service_url, verify_ssl, kms_keyid, location, role_arn):
def _get_key(key, keyid, service_url, verify_ssl, kms_keyid, location, role_arn, path_style, https_enable):
'''
Examine the keys, and populate as necessary
'''
@ -291,4 +320,16 @@ def _get_key(key, keyid, service_url, verify_ssl, kms_keyid, location, role_arn)
if role_arn is None and __salt__['config.option']('s3.role_arn'):
role_arn = __salt__['config.option']('s3.role_arn')
return key, keyid, service_url, verify_ssl, kms_keyid, location, role_arn
if path_style is None and __salt__['config.option']('s3.path_style') is not None:
path_style = __salt__['config.option']('s3.path_style')
if path_style is None:
path_style = False
if https_enable is None and __salt__['config.option']('s3.https_enable') is not None:
https_enable = __salt__['config.option']('s3.https_enable')
if https_enable is None:
https_enable = True
return key, keyid, service_url, verify_ssl, kms_keyid, location, role_arn, path_style, https_enable

View File

@ -20,6 +20,8 @@ options
kms_keyid: 01234567-89ab-cdef-0123-4567890abcde
s3_cache_expire: 30
s3_sync_on_update: True
path_style: False
https_enable: True
The ``bucket`` parameter specifies the target S3 bucket. It is required.
@ -59,6 +61,12 @@ time of S3 metadata cache file.
The ``s3_sync_on_update`` parameter defaults to True. It specifies if cache
is synced on update rather than jit.
The ``path_style`` parameter defaults to False. It specifies whether to use
path-style requests or DNS-style (virtual-hosted) requests.
The ``https_enable`` parameter defaults to True. It specifies whether to use
the HTTPS protocol or the HTTP protocol.
This pillar can operate in two modes, single environment per bucket or multiple
environments per bucket.
@ -105,7 +113,7 @@ log = logging.getLogger(__name__)
class S3Credentials(object):
def __init__(self, key, keyid, bucket, service_url, verify_ssl=True,
kms_keyid=None, location=None):
kms_keyid=None, location=None, path_style=False, https_enable=True):
self.key = key
self.keyid = keyid
self.kms_keyid = kms_keyid
@ -113,6 +121,8 @@ class S3Credentials(object):
self.service_url = service_url
self.verify_ssl = verify_ssl
self.location = location
self.path_style = path_style
self.https_enable = https_enable
def ext_pillar(minion_id,
@ -128,14 +138,16 @@ def ext_pillar(minion_id,
service_url=None,
kms_keyid=None,
s3_cache_expire=30, # cache for 30 seconds
s3_sync_on_update=True): # sync cache on update rather than jit
s3_sync_on_update=True, # sync cache on update rather than jit
path_style=False,
https_enable=True):
'''
Execute a command and read the output as YAML
'''
s3_creds = S3Credentials(key, keyid, bucket, service_url, verify_ssl,
kms_keyid, location)
kms_keyid, location, path_style, https_enable)
# normpath is needed to remove appended '/' if root is empty string.
pillar_dir = os.path.normpath(os.path.join(_get_cache_dir(), environment,
@ -265,7 +277,9 @@ def _refresh_buckets_cache_file(creds, cache_file, multiple_env, environment, pr
verify_ssl=creds.verify_ssl,
location=creds.location,
return_bin=False,
params={'prefix': prefix})
params={'prefix': prefix},
path_style=creds.path_style,
https_enable=creds.https_enable)
# grab only the files/dirs in the bucket
def __get_pillar_files_from_s3_meta(s3_meta):
@ -407,5 +421,7 @@ def _get_file_from_s3(creds, metadata, saltenv, bucket, path,
path=_quote(path),
local_file=cached_file_path,
verify_ssl=creds.verify_ssl,
location=creds.location
location=creds.location,
path_style=creds.path_style,
https_enable=creds.https_enable
)

View File

@ -30,7 +30,8 @@ def query(key, keyid, method='GET', params=None, headers=None,
requesturl=None, return_url=False, bucket=None, service_url=None,
path='', return_bin=False, action=None, local_file=None,
verify_ssl=True, full_headers=False, kms_keyid=None,
location=None, role_arn=None, chunk_size=16384):
location=None, role_arn=None, chunk_size=16384, path_style=False,
https_enable=True):
'''
Perform a query against an S3-like API. This function requires that a
secret key and the id for that key are passed in. For instance:
@ -54,9 +55,19 @@ def query(key, keyid, method='GET', params=None, headers=None,
The service_url will form the basis for the final endpoint that is used to
query the service.
Path style can be enabled:
s3.path_style: True
This can be useful if you need to use Salt with a proxy for an S3-compatible storage service.
You can use either the HTTPS protocol or the HTTP protocol:
s3.https_enable: True
SSL verification may also be turned off in the configuration:
s3.verify_ssl: False
s3.verify_ssl: False
This is required if using S3 bucket names that contain a period, as
these will not match Amazon's S3 wildcard certificates. Certificate
@ -81,10 +92,13 @@ def query(key, keyid, method='GET', params=None, headers=None,
if not service_url:
service_url = 's3.amazonaws.com'
if bucket:
endpoint = '{0}.{1}'.format(bucket, service_url)
else:
if not bucket or path_style:
endpoint = service_url
else:
endpoint = '{0}.{1}'.format(bucket, service_url)
if path_style and bucket:
path = '{0}/{1}'.format(bucket, path)
# Try grabbing the credentials from the EC2 instance IAM metadata if available
if not key:
@ -110,7 +124,7 @@ def query(key, keyid, method='GET', params=None, headers=None,
path = ''
if not requesturl:
requesturl = 'https://{0}/{1}'.format(endpoint, path)
requesturl = (('https' if https_enable else 'http')+'://{0}/{1}').format(endpoint, path)
headers, requesturl = salt.utils.aws.sig4(
method,
endpoint,

View File

@ -30,14 +30,16 @@ class S3TestCase(TestCase):
def test__get_key_defaults(self):
mock = MagicMock(return_value='')
with patch.dict(s3.__salt__, {'config.option': mock}):
key, keyid, service_url, verify_ssl, kms_keyid, location, role_arn = (
s3._get_key(None, None, None, None, None, None, None))
key, keyid, service_url, verify_ssl, kms_keyid, location, role_arn, path_style, https_enable = (
s3._get_key(None, None, None, None, None, None, None, None, None))
self.assertEqual(None, role_arn)
self.assertEqual(None, key)
self.assertEqual(None, keyid)
self.assertEqual('s3.amazonaws.com', service_url)
self.assertEqual('', verify_ssl)
self.assertEqual('', location)
self.assertEqual('', path_style)
self.assertEqual('', https_enable)
def test_delete(self):
'''
@ -46,7 +48,7 @@ class S3TestCase(TestCase):
with patch.object(s3, '_get_key',
return_value=('key', 'keyid', 'service_url',
'verify_ssl', 'kms_keyid', 'location',
'role_arn')):
'role_arn', 'path_style', 'https_enable')):
self.assertEqual(s3.delete('bucket'), 'A')
def test_get(self):
@ -57,7 +59,7 @@ class S3TestCase(TestCase):
with patch.object(s3, '_get_key',
return_value=('key', 'keyid', 'service_url',
'verify_ssl', 'kms_keyid', 'location',
'role_arn')):
'role_arn', 'path_style', 'https_enable')):
self.assertEqual(s3.get(), 'A')
def test_head(self):
@ -67,7 +69,7 @@ class S3TestCase(TestCase):
with patch.object(s3, '_get_key',
return_value=('key', 'keyid', 'service_url',
'verify_ssl', 'kms_keyid', 'location',
'role_arn')):
'role_arn', 'path_style', 'https_enable')):
self.assertEqual(s3.head('bucket'), 'A')
def test_put(self):
@ -77,7 +79,7 @@ class S3TestCase(TestCase):
with patch.object(s3, '_get_key',
return_value=('key', 'keyid', 'service_url',
'verify_ssl', 'kms_keyid', 'location',
'role_arn')):
'role_arn', 'path_style', 'https_enable')):
self.assertEqual(s3.put('bucket'), 'A')