Merge pull request #6974 from terminalmage/fileserver-fix

Fix exception caught when mtime_map parent dir does not exist
Pedro Algarvio committed 2013-08-31 18:23:22 -07:00
commit a6a8424a9f
3 changed files with 28 additions and 5 deletions
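For context, a rough sketch of the failure the commit title describes (illustrative paths only, not code from this PR): on a freshly set up master, the cache subdirectory that should hold the mtime_map has never been created, so opening the map for writing raises an uncaught exception.

    import os

    # Hypothetical cachedir for illustration; the real value comes from the
    # master configuration (__opts__['cachedir']).
    cachedir = '/var/cache/salt/master'
    mtime_map_path = os.path.join(cachedir, 'roots/mtime_map')

    # If '.../roots/' does not exist yet, this raises IOError/OSError with
    # errno ENOENT instead of creating the file. That is the exception this
    # PR guards against.
    with open(mtime_map_path, 'w') as fp_:
        fp_.write('top.sls:1377993600.0\n')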

@@ -159,7 +159,7 @@ def update():
'{0}'.format(exc))
try:
os.remove(lk_fn)
except (OSError, IOError):
except (IOError, OSError):
pass
# if there is a change, fire an event
@@ -170,7 +170,7 @@ def update():
os.path.join(__opts__['cachedir'], 'gitfs/hash'),
find_file
)
except os.error:
except (IOError, OSError):
# Hash file won't exist if no files have yet been served up
pass
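In the gitfs hunks above, the second change swaps `except os.error:` for `except (IOError, OSError):` (the first appears to just reorder an existing tuple to match). `os.error` is only an alias for `OSError`, and under Python 2, which this code targeted at the time, `IOError` is a separate class, so file operations that raised `IOError` previously escaped the handler. A minimal sketch of the resulting pattern, with a hypothetical helper name:

    import os

    def _remove_quietly(path):
        # Hypothetical helper mirroring the pattern above: treat a missing or
        # inaccessible path as already cleaned up. Catching both classes
        # matters on Python 2, where IOError and OSError are distinct; since
        # Python 3.3 they are aliases of one another.
        try:
            os.remove(path)
        except (IOError, OSError):
            pass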

@@ -85,11 +85,10 @@ def update():
os.path.join(__opts__['cachedir'], 'roots/hash'),
find_file
)
except os.error:
except (IOError, OSError):
# Hash file won't exist if no files have yet been served up
pass
mtime_map_path = os.path.join(__opts__['cachedir'], 'roots/mtime_map')
# data to send on event
data = {'changed': False,
@@ -109,8 +108,10 @@ def update():
# compare the maps, set changed to the return value
data['changed'] = salt.fileserver.diff_mtime_map(old_mtime_map, new_mtime_map)
# write out the new map
mtime_map_path_dir = os.path.dirname(mtime_map_path)
if not os.path.exists(mtime_map_path_dir):
os.makedirs(mtime_map_path_dir)
with salt.utils.fopen(mtime_map_path, 'w') as fp_:
for file_path, mtime in new_mtime_map.iteritems():
fp_.write('{file_path}:{mtime}\n'.format(file_path=file_path,
@@ -120,6 +121,7 @@ def update():
event = salt.utils.event.MasterEvent(__opts__['sock_dir'])
event.fire_event(data, 'salt.fileserver.roots.update')
def file_hash(load, fnd):
'''
Return a file hash, the hash type is set in the master config file
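The roots backend hunks above carry the actual fix named in the title: before the mtime_map is opened for writing, its parent directory is created if it does not exist, and the same `(IOError, OSError)` broadening is applied around the hash-cache cleanup. One caveat worth noting: an `os.path.exists()` check followed by `os.makedirs()` can race if two processes update the cache at the same time. A race-tolerant variant (a sketch of an alternative, not what this PR does) swallows only the EEXIST error:

    import errno
    import os

    def _ensure_dir(path):
        # Create the directory tree, ignoring the error raised when another
        # process created it first; re-raise anything else (permissions, etc.).
        try:
            os.makedirs(path)
        except OSError as exc:
            if exc.errno != errno.EEXIST:
                raise

On Python 3.2 and later the same effect is available as `os.makedirs(path, exist_ok=True)`.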

@@ -62,6 +62,7 @@ log = logging.getLogger(__name__)
_s3_cache_expire = 30 # cache for 30 seconds
_s3_sync_on_update = True # sync cache on update rather than jit
def envs():
'''
Return a list of directories within the bucket that can be
@@ -72,6 +73,7 @@ def envs():
metadata = _init()
return metadata.keys()
def update():
'''
Update the cache file for the bucket.
@@ -95,6 +97,7 @@ def update():
log.info('Sync local cache from S3 completed.')
def find_file(path, env='base', **kwargs):
'''
Look through the buckets cache file for a match.
@@ -130,6 +133,7 @@ def find_file(path, env='base', **kwargs):
return fnd
def file_hash(load, fnd):
'''
Return an MD5 file hash
@@ -154,6 +158,7 @@ def file_hash(load, fnd):
return ret
def serve_file(load, fnd):
'''
Return a chunk from a file based on the data received
@@ -187,6 +192,7 @@ def serve_file(load, fnd):
ret['data'] = data
return ret
def file_list(load):
'''
Return a list of all files on the file server in a specified environment
@@ -209,6 +215,7 @@ def file_list(load):
return ret
def file_list_emptydirs(load):
'''
Return a list of all empty directories on the master
@@ -218,6 +225,7 @@ def file_list_emptydirs(load):
return []
def dir_list(load):
'''
Return a list of all directories on the master
@@ -243,6 +251,7 @@ def dir_list(load):
return ret
def _get_s3_key():
'''
Get AWS keys from pillar or config
@@ -253,6 +262,7 @@ def _get_s3_key():
return key, keyid
def _init():
'''
Connect to S3 and download the metadata for each file in all buckets
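`_get_s3_key`, whose tail appears above, resolves the AWS credentials the backend uses. A minimal sketch of that lookup, assuming the conventional `s3.key` and `s3.keyid` option names (the exact names and any pillar fallback are assumptions, not shown in this diff):

    def _get_s3_key(opts):
        # Illustrative standalone version: pull the credentials from the
        # master configuration dict; both may be None if the options are unset.
        key = opts.get('s3.key')
        keyid = opts.get('s3.keyid')
        return key, keyid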
@@ -269,6 +279,7 @@ def _init():
# bucket files cache expired
return _refresh_buckets_cache_file(cache_file)
def _get_cache_dir():
'''
Return the path to the s3cache dir
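The `_init` hunk above shows the expiry decision for the buckets metadata cache: when the cache file is stale it is rebuilt via `_refresh_buckets_cache_file`, otherwise the cached copy is read back (per `_read_buckets_cache_file` further down). A rough standalone sketch of that decision, using the module's 30-second `_s3_cache_expire` window (the exact staleness test is an assumption; only the refresh branch is visible in this hunk):

    import os
    import time

    _s3_cache_expire = 30  # seconds, mirroring the module-level constant

    def _metadata_cache_is_stale(cache_file):
        # A missing cache file counts as stale; otherwise compare its age
        # against the expiry window to decide between refresh and reuse.
        try:
            age = time.time() - os.path.getmtime(cache_file)
        except OSError:
            return True
        return age > _s3_cache_expire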
@@ -277,6 +288,7 @@ def _get_cache_dir():
# Or is that making too many assumptions?
return os.path.join(__opts__['cachedir'], 's3cache')
def _get_cached_file_name(bucket_name, env, path):
'''
Return the cached file name for a bucket path file
@@ -290,6 +302,7 @@ def _get_cached_file_name(bucket_name, env, path):
return file_path
def _get_buckets_cache_filename():
'''
Return the filename of the cache for bucket contents.
@@ -302,6 +315,7 @@ def _get_buckets_cache_filename():
return os.path.join(cache_dir, 'buckets_files.cache')
def _refresh_buckets_cache_file(cache_file):
'''
Retrieve the content of all buckets and cache the metadata to the buckets
@@ -375,6 +389,7 @@ def _refresh_buckets_cache_file(cache_file):
return metadata
def _read_buckets_cache_file(cache_file):
'''
Return the contents of the buckets cache file
@@ -387,6 +402,7 @@ def _read_buckets_cache_file(cache_file):
return data
def _find_files(metadata, dirs_only=False):
'''
Looks for all the files in the S3 bucket cache metadata
@@ -405,6 +421,7 @@ def _find_files(metadata, dirs_only=False):
return ret
def _find_file_meta(metadata, bucket_name, env, path):
'''
Looks for a file's metadata in the S3 bucket cache file
@@ -418,6 +435,7 @@ def _find_file_meta(metadata, bucket_name, env, path):
if 'Key' in item_meta and item_meta['Key'] == path:
return item_meta
def _get_buckets():
'''
Return the configuration buckets
@@ -425,6 +443,7 @@ def _get_buckets():
return __opts__['s3.buckets'] if 's3.buckets' in __opts__ else {}
def _get_file_from_s3(metadata, env, bucket_name, path, cached_file_path):
'''
Checks the local cache for the file, if it's old or missing go grab the
@@ -453,6 +472,7 @@ def _get_file_from_s3(metadata, env, bucket_name, path, cached_file_path):
path=urllib.quote(path),
local_file=cached_file_path)
def _trim_env_off_path(paths, env, trim_slash=False):
'''
Return a list of file paths with the env directory removed
@@ -462,6 +482,7 @@ def _trim_env_off_path(paths, env, trim_slash=False):
return map(lambda d: d[env_len:slash_len], paths)
def _is_env_per_bucket():
'''
Return the configuration mode, either buckets per environment or a list of
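The final hunk introduces `_is_env_per_bucket`, whose docstring (cut off above) distinguishes the two shapes the `s3.buckets` option can take, as returned by `_get_buckets` earlier in the diff. A sketch of how that distinction might be expressed (the dict-versus-list test is an assumption based on the docstring, not code shown here):

    def _is_env_per_bucket(buckets_config):
        # A mapping (environment name -> list of buckets) means each
        # environment gets its own buckets; a flat list means every bucket
        # serves all environments.
        if isinstance(buckets_config, dict):
            return True
        if isinstance(buckets_config, list):
            return False
        raise ValueError('Invalid s3.buckets configuration')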