Merge pull request #25767 from techhat/awskms

Add AWS KMS support to S3
This commit is contained in:
Mike Place 2015-07-28 10:00:41 -06:00
commit fefb4eb79b
6 changed files with 84 additions and 30 deletions

View File

@ -320,8 +320,9 @@ def _get_s3_key():
verify_ssl = __opts__['s3.verify_ssl'] \
if 's3.verify_ssl' in __opts__ \
else None
kms_keyid = __opts__['aws.kms.keyid'] if 'aws.kms.keyid' in __opts__ else None
return key, keyid, service_url, verify_ssl
return key, keyid, service_url, verify_ssl, kms_keyid
def _init():
@ -391,7 +392,7 @@ def _refresh_buckets_cache_file(cache_file):
log.debug('Refreshing buckets cache file')
key, keyid, service_url, verify_ssl = _get_s3_key()
key, keyid, service_url, verify_ssl, kms_keyid = _get_s3_key()
metadata = {}
# helper s3 query function
@ -399,6 +400,7 @@ def _refresh_buckets_cache_file(cache_file):
return s3.query(
key=key,
keyid=keyid,
kms_keyid=kms_keyid,
bucket=bucket,
service_url=service_url,
verify_ssl=verify_ssl,
@ -582,7 +584,7 @@ def _get_file_from_s3(metadata, saltenv, bucket_name, path, cached_file_path):
Checks the local cache for the file, if it's old or missing go grab the
file from S3 and update the cache
'''
key, keyid, service_url, verify_ssl = _get_s3_key()
key, keyid, service_url, verify_ssl, kms_keyid = _get_s3_key()
# check the local cache...
if os.path.isfile(cached_file_path):
@ -613,6 +615,7 @@ def _get_file_from_s3(metadata, saltenv, bucket_name, path, cached_file_path):
ret = s3.query(
key=key,
keyid=keyid,
kms_keyid=kms_keyid,
method='HEAD',
bucket=bucket_name,
service_url=service_url,
@ -642,6 +645,7 @@ def _get_file_from_s3(metadata, saltenv, bucket_name, path, cached_file_path):
s3.query(
key=key,
keyid=keyid,
kms_keyid=kms_keyid,
bucket=bucket_name,
service_url=service_url,
verify_ssl=verify_ssl,

View File

@ -61,7 +61,7 @@ def __virtual__():
def delete(bucket, path=None, action=None, key=None, keyid=None,
service_url=None, verify_ssl=None):
service_url=None, verify_ssl=None, kms_keyid=None):
'''
Delete a bucket, or delete an object from a bucket.
@ -73,8 +73,13 @@ def delete(bucket, path=None, action=None, key=None, keyid=None,
salt myminion s3.delete mybucket remoteobject
'''
key, keyid, service_url, verify_ssl = _get_key(key, keyid, service_url,
verify_ssl)
key, keyid, service_url, verify_ssl, kms_keyid = _get_key(
key,
keyid,
service_url,
verify_ssl,
kms_keyid,
)
return salt.utils.s3.query(method='DELETE',
bucket=bucket,
@ -82,13 +87,14 @@ def delete(bucket, path=None, action=None, key=None, keyid=None,
action=action,
key=key,
keyid=keyid,
kms_keyid=kms_keyid,
service_url=service_url,
verify_ssl=verify_ssl)
def get(bucket=None, path=None, return_bin=False, action=None,
local_file=None, key=None, keyid=None, service_url=None,
verify_ssl=None):
verify_ssl=None, kms_keyid=None):
'''
List the contents of a bucket, or return an object from a bucket. Set
return_bin to True in order to retrieve an object wholesale. Otherwise,
@ -140,8 +146,13 @@ def get(bucket=None, path=None, return_bin=False, action=None,
salt myminion s3.get mybucket myfile.png action=acl
'''
key, keyid, service_url, verify_ssl = _get_key(key, keyid, service_url,
verify_ssl)
key, keyid, service_url, verify_ssl, kms_keyid = _get_key(
key,
keyid,
service_url,
verify_ssl,
kms_keyid,
)
return salt.utils.s3.query(method='GET',
bucket=bucket,
@ -151,12 +162,13 @@ def get(bucket=None, path=None, return_bin=False, action=None,
action=action,
key=key,
keyid=keyid,
kms_keyid=kms_keyid,
service_url=service_url,
verify_ssl=verify_ssl)
def head(bucket, path=None, key=None, keyid=None, service_url=None,
verify_ssl=None):
verify_ssl=None, kms_keyid=None):
'''
Return the metadata for a bucket, or an object in a bucket.
@ -167,21 +179,27 @@ def head(bucket, path=None, key=None, keyid=None, service_url=None,
salt myminion s3.head mybucket
salt myminion s3.head mybucket myfile.png
'''
key, keyid, service_url, verify_ssl = _get_key(key, keyid, service_url,
verify_ssl)
key, keyid, service_url, verify_ssl, kms_keyid = _get_key(
key,
keyid,
service_url,
verify_ssl,
kms_keyid,
)
return salt.utils.s3.query(method='HEAD',
bucket=bucket,
path=path,
key=key,
keyid=keyid,
kms_keyid=kms_keyid,
service_url=service_url,
verify_ssl=verify_ssl,
full_headers=True)
def put(bucket, path=None, return_bin=False, action=None, local_file=None,
key=None, keyid=None, service_url=None, verify_ssl=None):
key=None, keyid=None, service_url=None, verify_ssl=None, kms_keyid=None):
'''
Create a new bucket, or upload an object to a bucket.
@ -197,8 +215,13 @@ def put(bucket, path=None, return_bin=False, action=None, local_file=None,
salt myminion s3.put mybucket remotepath local_file=/path/to/file
'''
key, keyid, service_url, verify_ssl = _get_key(key, keyid, service_url,
verify_ssl)
key, keyid, service_url, verify_ssl, kms_keyid = _get_key(
key,
keyid,
service_url,
verify_ssl,
kms_keyid,
)
return salt.utils.s3.query(method='PUT',
bucket=bucket,
@ -208,11 +231,12 @@ def put(bucket, path=None, return_bin=False, action=None, local_file=None,
action=action,
key=key,
keyid=keyid,
kms_keyid=kms_keyid,
service_url=service_url,
verify_ssl=verify_ssl)
def _get_key(key, keyid, service_url, verify_ssl):
def _get_key(key, keyid, service_url, verify_ssl, kms_keyid):
'''
Examine the keys, and populate as necessary
'''
@ -222,6 +246,9 @@ def _get_key(key, keyid, service_url, verify_ssl):
if not keyid and __salt__['config.option']('s3.keyid'):
keyid = __salt__['config.option']('s3.keyid')
if not kms_keyid and __salt__['config.option']('aws.kms.keyid'):
kms_keyid = __salt__['config.option']('aws.kms.keyid')
if not service_url and __salt__['config.option']('s3.service_url'):
service_url = __salt__['config.option']('s3.service_url')
@ -234,4 +261,4 @@ def _get_key(key, keyid, service_url, verify_ssl):
if verify_ssl is None:
verify_ssl = True
return key, keyid, service_url, verify_ssl
return key, keyid, service_url, verify_ssl, kms_keyid

View File

@ -17,6 +17,7 @@ options
prefix: somewhere/overthere
verify_ssl: True
service_url: s3.amazonaws.com
kms_keyid: 01234567-89ab-cdef-0123-4567890abcde
The ``bucket`` parameter specifies the target S3 bucket. It is required.
@ -48,6 +49,9 @@ must be set to False else an invalid certificate error will be thrown (issue
The ``service_url`` parameter defaults to 's3.amazonaws.com'. It specifies the
base url to use for accessing S3.
The ``kms_keyid`` parameter is optional. It specifies the ID of the Key
Management Service (KMS) master key that was used to encrypt the object.
This pillar can operate in two modes, single environment per bucket or multiple
environments per bucket.
@ -98,9 +102,10 @@ _s3_sync_on_update = True # sync cache on update rather than jit
class S3Credentials(object):
def __init__(self, key, keyid, bucket, service_url, verify_ssl=True):
def __init__(self, key, keyid, bucket, service_url, verify_ssl=True, kms_keyid=None):
self.key = key
self.keyid = keyid
self.kms_keyid = kms_keyid
self.bucket = bucket
self.service_url = service_url
self.verify_ssl = verify_ssl
@ -115,12 +120,13 @@ def ext_pillar(minion_id,
multiple_env=False,
environment='base',
prefix='',
service_url=None):
service_url=None,
kms_keyid=None):
'''
Execute a command and read the output as YAML
'''
s3_creds = S3Credentials(key, keyid, bucket, service_url, verify_ssl)
s3_creds = S3Credentials(key, keyid, bucket, service_url, verify_ssl, kms_keyid)
# normpath is needed to remove appended '/' if root is empty string.
pillar_dir = os.path.normpath(os.path.join(_get_cache_dir(), environment,
@ -232,6 +238,7 @@ def _refresh_buckets_cache_file(creds, cache_file, multiple_env, environment, pr
return s3.query(
key=creds.key,
keyid=creds.keyid,
kms_keyid=creds.kms_keyid,
bucket=creds.bucket,
service_url=creds.service_url,
verify_ssl=creds.verify_ssl,
@ -369,6 +376,7 @@ def _get_file_from_s3(creds, metadata, saltenv, bucket, path,
s3.query(
key=creds.key,
keyid=creds.keyid,
kms_keyid=creds.kms_keyid,
bucket=bucket,
service_url=creds.service_url,
path=_quote(path),

View File

@ -141,7 +141,7 @@ def sig2(method, endpoint, params, provider, aws_api_version):
def sig4(method, endpoint, params, prov_dict,
aws_api_version=DEFAULT_AWS_API_VERSION, location=DEFAULT_LOCATION,
product='ec2', uri='/', requesturl=None, data=''):
product='ec2', uri='/', requesturl=None, data='', headers=None):
'''
Sign a query against AWS services using Signature Version 4 Signing
Process. This is documented at:
@ -165,12 +165,18 @@ def sig4(method, endpoint, params, prov_dict,
amzdate = timenow.strftime('%Y%m%dT%H%M%SZ')
datestamp = timenow.strftime('%Y%m%d')
canonical_headers = 'host:{0}\nx-amz-date:{1}\n'.format(
canonical_headers = 'host:{0}\nx-amz-date:{1}'.format(
endpoint,
amzdate,
)
signed_headers = 'host;x-amz-date'
if isinstance(headers, dict):
for header in sorted(headers.keys()):
canonical_headers += '\n{0}:{1}'.format(header, headers[header])
signed_headers += ';{0}'.format(header)
canonical_headers += '\n'
algorithm = 'AWS4-HMAC-SHA256'
# Create payload hash (hash of the request body content). For GET
@ -223,18 +229,21 @@ def sig4(method, endpoint, params, prov_dict,
signature,
)
headers = {
new_headers = {
'x-amz-date': amzdate,
'x-amz-content-sha256': payload_hash,
'Authorization': authorization_header,
}
if isinstance(headers, dict):
for header in sorted(headers.keys()):
new_headers[header] = headers[header]
# Add in security token if we have one
if token != '':
headers['X-Amz-Security-Token'] = token
new_headers['X-Amz-Security-Token'] = token
requesturl = '{0}?{1}'.format(requesturl, querystring)
return headers, requesturl
return new_headers, requesturl
def _sign(key, msg):

View File

@ -30,7 +30,8 @@ DEFAULT_LOCATION = 'us-east-1'
def query(key, keyid, method='GET', params=None, headers=None,
requesturl=None, return_url=False, bucket=None, service_url=None,
path='', return_bin=False, action=None, local_file=None,
verify_ssl=True, location=DEFAULT_LOCATION, full_headers=False):
verify_ssl=True, location=DEFAULT_LOCATION, full_headers=False,
kms_keyid=None):
'''
Perform a query against an S3-like API. This function requires that a
secret key and the id for that key are passed in. For instance:
@ -84,6 +85,10 @@ def query(key, keyid, method='GET', params=None, headers=None,
keyid = iam_creds['access_key']
token = iam_creds['security_token']
if kms_keyid is not None and method in ('PUT', 'POST'):
headers['x-amz-server-side-encryption'] = 'aws:kms'
headers['x-amz-server-side-encryption-aws-kms-key-id'] = kms_keyid
data = ''
if method == 'PUT':
if local_file:
@ -102,6 +107,7 @@ def query(key, keyid, method='GET', params=None, headers=None,
location=location,
product='s3',
requesturl=requesturl,
headers=headers,
)
log.debug('S3 Request: {0}'.format(requesturl))

View File

@ -33,7 +33,7 @@ class S3TestCase(TestCase):
'''
with patch.object(s3, '_get_key',
return_value=('key', 'keyid', 'service_url',
'verify_ssl')):
'verify_ssl', 'kms_keyid')):
with patch.object(salt.utils.s3, 'query', return_value='A'):
self.assertEqual(s3.delete('bucket'), 'A')
@ -44,7 +44,7 @@ class S3TestCase(TestCase):
'''
with patch.object(s3, '_get_key',
return_value=('key', 'keyid', 'service_url',
'verify_ssl')):
'verify_ssl', 'kms_keyid')):
with patch.object(salt.utils.s3, 'query', return_value='A'):
self.assertEqual(s3.get(), 'A')
@ -54,7 +54,7 @@ class S3TestCase(TestCase):
'''
with patch.object(s3, '_get_key',
return_value=('key', 'keyid', 'service_url',
'verify_ssl')):
'verify_ssl', 'kms_keyid')):
with patch.object(salt.utils.s3, 'query', return_value='A'):
self.assertEqual(s3.head('bucket'), 'A')
@ -64,7 +64,7 @@ class S3TestCase(TestCase):
'''
with patch.object(s3, '_get_key',
return_value=('key', 'keyid', 'service_url',
'verify_ssl')):
'verify_ssl', 'kms_keyid')):
with patch.object(salt.utils.s3, 'query', return_value='A'):
self.assertEqual(s3.put('bucket'), 'A')