"Fossies" - the Fresh Open Source Software Archive  

Source code changes of the file "salt/fileserver/hgfs.py" between
salt-3002.1.tar.gz and salt-3002.2.tar.gz

About: SaltStack is systems-management software for data-center automation, cloud orchestration, server provisioning, configuration management, and more. Community version.

hgfs.py (salt-3002.1) vs. hgfs.py (salt-3002.2)
-# -*- coding: utf-8 -*-
 """
 Mercurial Fileserver Backend

 To enable, add ``hgfs`` to the :conf_master:`fileserver_backend` option in the
 Master config file.

 .. code-block:: yaml

     fileserver_backend:
       - hgfs

skipping to change at line 37 (salt-3002.1) / line 36 (salt-3002.2)
 .. versionchanged:: 2014.1.0
     The :conf_master:`hgfs_base` master config parameter was added, allowing
     for a branch other than ``default`` to be used for the ``base``
     environment, and allowing for a ``base`` environment to be specified when
     using an :conf_master:`hgfs_branch_method` of ``bookmarks``.

 :depends: - mercurial
           - python bindings for mercurial (``python-hglib``)
 """
-# Import python libs
-from __future__ import absolute_import, print_function, unicode_literals
 import copy
 import errno
 import fnmatch
 import glob
 import hashlib
 import logging
 import os
 import shutil
 from datetime import datetime

 import salt.fileserver
-# Import salt libs
 import salt.utils.data
 import salt.utils.files
 import salt.utils.gzip_util
 import salt.utils.hashutils
 import salt.utils.stringutils
 import salt.utils.url
 import salt.utils.versions
 from salt.exceptions import FileserverConfigError
-# Import third party libs
 from salt.ext import six
 from salt.utils.event import tagify

 VALID_BRANCH_METHODS = ("branches", "bookmarks", "mixed")
 PER_REMOTE_OVERRIDES = ("base", "branch_method", "mountpoint", "root")

 # pylint: disable=import-error
 try:
     import hglib
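
The diff skips ahead at this point, past the rest of the import guard. For orientation, this try block opens the standard optional-dependency pattern; the flag name below (HAS_HG) is an assumption, since the lines that set it are elided from this view:

    # Sketch of the guard opened above; HAS_HG is an assumed name, as the
    # lines that set it are skipped in this diff view.
    try:
        import hglib

        HAS_HG = True
    except ImportError:
        HAS_HG = False
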
skipping to change at line 197 (salt-3002.1) / line 189 (salt-3002.2)

 def init():
     """
     Return a list of hglib objects for the various hgfs remotes
     """
     bp_ = os.path.join(__opts__["cachedir"], "hgfs")
     new_remote = False
     repos = []

     per_remote_defaults = {}
     for param in PER_REMOTE_OVERRIDES:
-        per_remote_defaults[param] = six.text_type(__opts__["hgfs_{0}".format(param)])
+        per_remote_defaults[param] = str(__opts__["hgfs_{}".format(param)])

     for remote in __opts__["hgfs_remotes"]:
         repo_conf = copy.deepcopy(per_remote_defaults)
         if isinstance(remote, dict):
             repo_url = next(iter(remote))
-            per_remote_conf = dict(
-                [
-                    (key, six.text_type(val))
-                    for key, val in six.iteritems(
-                        salt.utils.data.repack_dictlist(remote[repo_url])
-                    )
-                ]
-            )
+            per_remote_conf = {
+                key: str(val)
+                for key, val in salt.utils.data.repack_dictlist(
+                    remote[repo_url]
+                ).items()
+            }
             if not per_remote_conf:
                 log.error(
                     "Invalid per-remote configuration for hgfs remote %s. If "
                     "no per-remote parameters are being specified, there may "
                     "be a trailing colon after the URL, which should be "
                     "removed. Check the master configuration file.",
                     repo_url,
                 )
                 _failhard()
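
The change above swaps a Python 2 idiom, dict() over a list of (key, value) tuples walked with six.iteritems(), for a native dict comprehension. A minimal standalone sketch of the equivalence (the sample parameters are hypothetical; the module itself uses Salt's vendored copy, salt.ext.six):

    import six

    conf = {"base": "default", "root": "states"}  # hypothetical per-remote params
    # Old spelling, portable across Python 2 and 3:
    old = dict([(key, six.text_type(val)) for key, val in six.iteritems(conf)])
    # New spelling from the diff, Python 3 only:
    new = {key: str(val) for key, val in conf.items()}
    assert old == new  # identical result; only the construction changed
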
skipping to change at line 252 (salt-3002.1) / line 242 (salt-3002.2)

                     ", ".join(PER_REMOTE_OVERRIDES),
                 )
                 per_remote_errors = True
             if per_remote_errors:
                 _failhard()
             repo_conf.update(per_remote_conf)
         else:
             repo_url = remote
-        if not isinstance(repo_url, six.string_types):
+        if not isinstance(repo_url, str):
             log.error(
                 "Invalid hgfs remote %s. Remotes must be strings, you may "
                 "need to enclose the URL in quotes",
                 repo_url,
             )
             _failhard()
         try:
             repo_conf["mountpoint"] = salt.utils.url.strip_proto(
                 repo_conf["mountpoint"]
             )
         except TypeError:
             # mountpoint not specified
             pass
         hash_type = getattr(hashlib, __opts__.get("hash_type", "md5"))
-        repo_hash = hash_type(repo_url).hexdigest()
+        repo_hash = hash_type(repo_url.encode("utf-8")).hexdigest()
         rp_ = os.path.join(bp_, repo_hash)
         if not os.path.isdir(rp_):
             os.makedirs(rp_)
         if not os.listdir(rp_):
             # Only init if the directory is empty.
             hglib.init(rp_)
             new_remote = True
         try:
             repo = hglib.open(rp_)
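
The added .encode("utf-8") is the substantive fix in this hunk: on Python 3, hashlib digests operate on bytes, so hashing the URL as str raises a TypeError. A quick sketch (the URL is hypothetical):

    import hashlib

    repo_url = "https://hg.example.com/repo"  # hypothetical hgfs remote
    repo_hash = hashlib.md5(repo_url.encode("utf-8")).hexdigest()  # works
    # hashlib.md5(repo_url)  # raises TypeError on Python 3: str must be encoded first
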
skipping to change at line 299 (salt-3002.1) / line 289 (salt-3002.2)

             _failhard()
         except Exception as exc:  # pylint: disable=broad-except
             log.error(
                 "Exception '%s' encountered while initializing hgfs "
                 "remote %s",
                 exc,
                 repo_url,
             )
             _failhard()
         try:
-            refs = repo.config(names="paths")
+            refs = repo.config(names=b"paths")
         except hglib.error.CommandError:
             refs = None
         # Do NOT put this if statement inside the except block above. Earlier
         # versions of hglib did not raise an exception, so we need to do it
         # this way to support both older and newer hglib.
         if not refs:
             # Write an hgrc defining the remote URL
             hgconfpath = os.path.join(rp_, ".hg", "hgrc")
             with salt.utils.files.fopen(hgconfpath, "w+") as hgconfig:
                 hgconfig.write("[paths]\n")
                 hgconfig.write(
-                    salt.utils.stringutils.to_str("default = {0}\n".format(repo_url))
+                    salt.utils.stringutils.to_str("default = {}\n".format(repo_url))
                 )
         repo_conf.update(
             {
                 "repo": repo,
                 "url": repo_url,
                 "hash": repo_hash,
                 "cachedir": rp_,
                 "lockfile": os.path.join(
-                    __opts__["cachedir"], "hgfs", "{0}.update.lk".format(repo_hash)
+                    __opts__["cachedir"], "hgfs", "{}.update.lk".format(repo_hash)
                 ),
             }
         )
         repos.append(repo_conf)
         repo.close()
     if new_remote:
         remote_map = os.path.join(__opts__["cachedir"], "hgfs/remote_map.txt")
         try:
             with salt.utils.files.fopen(remote_map, "w+") as fp_:
                 timestamp = datetime.now().strftime("%d %b %Y %H:%M:%S.%f")
-                fp_.write("# hgfs_remote map as of {0}\n".format(timestamp))
+                fp_.write("# hgfs_remote map as of {}\n".format(timestamp))
                 for repo in repos:
                     fp_.write(
                         salt.utils.stringutils.to_str(
-                            "{0} = {1}\n".format(repo["hash"], repo["url"])
+                            "{} = {}\n".format(repo["hash"], repo["url"])
                         )
                     )
         except OSError:
             pass
         else:
             log.info("Wrote new hgfs_remote map to %s", remote_map)
     return repos
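
The other behavioral change in this hunk is names="paths" becoming names=b"paths". hglib drives Mercurial through its command server and traffics in bytes, so on Python 3 the section name must be passed as bytes. A hedged sketch of the call (the clone path is hypothetical; per the comment in the diff, older hglib returned an empty result here rather than raising):

    import hglib

    repo = hglib.open("/var/cache/salt/master/hgfs/abc123")  # hypothetical clone
    try:
        # config() yields (section, key, value) tuples, all as bytes.
        refs = repo.config(names=b"paths")
    except hglib.error.CommandError:
        refs = None  # newer hglib raises when the section is absent
    finally:
        repo.close()
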
 def _clear_old_remotes():

skipping to change at line 397 (salt-3002.1) / line 387 (salt-3002.2)

     Completely clear hgfs cache
     """
     fsb_cachedir = os.path.join(__opts__["cachedir"], "hgfs")
     list_cachedir = os.path.join(__opts__["cachedir"], "file_lists/hgfs")
     errors = []
     for rdir in (fsb_cachedir, list_cachedir):
         if os.path.exists(rdir):
             try:
                 shutil.rmtree(rdir)
             except OSError as exc:
-                errors.append("Unable to delete {0}: {1}".format(rdir, exc))
+                errors.append("Unable to delete {}: {}".format(rdir, exc))
     return errors

 def clear_lock(remote=None):
     """
     Clear update.lk

     ``remote`` can either be a dictionary containing repo configuration
     information, or a pattern. If the latter, then remotes for which the URL
     matches the pattern will be locked.
     """

     def _do_clear_lock(repo):
         def _add_error(errlist, repo, exc):
-            msg = "Unable to remove update lock for {0} ({1}): {2}".format(
+            msg = "Unable to remove update lock for {} ({}): {}".format(
                 repo["url"], repo["lockfile"], exc
             )
             log.debug(msg)
             errlist.append(msg)

         success = []
         failed = []
         if os.path.exists(repo["lockfile"]):
             try:
                 os.remove(repo["lockfile"])

skipping to change at line 434 (salt-3002.1) / line 424 (salt-3002.2)

                     # Somehow this path is a directory. Should never happen
                     # unless some wiseguy manually creates a directory at this
                     # path, but just in case, handle it.
                     try:
                         shutil.rmtree(repo["lockfile"])
                     except OSError as exc:
                         _add_error(failed, repo, exc)
                 else:
                     _add_error(failed, repo, exc)
             else:
-                msg = "Removed lock for {0}".format(repo["url"])
+                msg = "Removed lock for {}".format(repo["url"])
                 log.debug(msg)
                 success.append(msg)
         return success, failed

     if isinstance(remote, dict):
         return _do_clear_lock(remote)
     cleared = []
     errors = []
     for repo in init():
         if remote:
             try:
                 if not fnmatch.fnmatch(repo["url"], remote):
                     continue
             except TypeError:
                 # remote was non-string, try again
-                if not fnmatch.fnmatch(repo["url"], six.text_type(remote)):
+                if not fnmatch.fnmatch(repo["url"], str(remote)):
                     continue
         success, failed = _do_clear_lock(repo)
         cleared.extend(success)
         errors.extend(failed)
     return cleared, errors
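
As the loop above shows, a string ``remote`` is treated as a shell-style glob and compared against each configured repo URL via fnmatch. A standalone sketch (the URLs are hypothetical):

    import fnmatch

    urls = [
        "https://hg.example.com/states",  # hypothetical hgfs remotes
        "https://hg.example.com/pillar",
    ]
    print([u for u in urls if fnmatch.fnmatch(u, "*/states")])
    # ['https://hg.example.com/states']
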
 def lock(remote=None):
     """
     Place an update.lk

skipping to change at line 474 (salt-3002.1) / line 464 (salt-3002.2)

     matches the pattern will be locked.
     """

     def _do_lock(repo):
         success = []
         failed = []
         if not os.path.exists(repo["lockfile"]):
             try:
                 with salt.utils.files.fopen(repo["lockfile"], "w"):
                     pass
-            except (IOError, OSError) as exc:
-                msg = "Unable to set update lock for {0} ({1}): {2}".format(
+            except OSError as exc:
+                msg = "Unable to set update lock for {} ({}): {}".format(
                     repo["url"], repo["lockfile"], exc
                 )
                 log.debug(msg)
                 failed.append(msg)
             else:
-                msg = "Set lock for {0}".format(repo["url"])
+                msg = "Set lock for {}".format(repo["url"])
                 log.debug(msg)
                 success.append(msg)
         return success, failed
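
Replacing except (IOError, OSError) with except OSError, here and below, relies on PEP 3151: since Python 3.3, IOError is an alias of OSError, so the single clause catches exactly what the tuple did. A quick demonstration against a hypothetical lock path:

    import os

    assert IOError is OSError  # true on Python 3.3+

    lockfile = "/tmp/demo.update.lk"  # hypothetical lock path
    if not os.path.exists(lockfile):
        try:
            with open(lockfile, "w"):
                pass  # an empty file marks an update in progress
        except OSError as exc:
            print("Unable to set update lock: {}".format(exc))
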
     if isinstance(remote, dict):
         return _do_lock(remote)
     locked = []
     errors = []
     for repo in init():
         if remote:
             try:
                 if not fnmatch.fnmatch(repo["url"], remote):
                     continue
             except TypeError:
                 # remote was non-string, try again
-                if not fnmatch.fnmatch(repo["url"], six.text_type(remote)):
+                if not fnmatch.fnmatch(repo["url"], str(remote)):
                     continue
         success, failed = _do_lock(repo)
         locked.extend(success)
         errors.extend(failed)
     return locked, errors

 def update():
     """
     Execute an hg pull on all of the repos

skipping to change at line 577 (salt-3002.1) / line 567 (salt-3002.2)

         __opts__["sock_dir"],
         __opts__["transport"],
         opts=__opts__,
         listen=False,
     ) as event:
         event.fire_event(data, tagify(["hgfs", "update"], prefix="fileserver"))
     try:
         salt.fileserver.reap_fileserver_cache_dir(
             os.path.join(__opts__["cachedir"], "hgfs/hash"), find_file
         )
-    except (IOError, OSError):
+    except OSError:
         # Hash file won't exist if no files have yet been served up
         pass
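
For reference, tagify joins its arguments into a slash-delimited event tag, prepending Salt's default base; the fire_event call above should therefore publish under the tag sketched below (requires a Salt installation to run):

    from salt.utils.event import tagify

    tag = tagify(["hgfs", "update"], prefix="fileserver")
    print(tag)  # expected: salt/fileserver/hgfs/update
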
 def _env_is_exposed(env):
     """
     Check if an environment is exposed by comparing it against a whitelist and
     blacklist.
     """
     return salt.utils.stringutils.check_whitelist_blacklist(
         env,

skipping to change at line 631 (salt-3002.1) / line 621 (salt-3002.2)

     """
     Find the first file to match the path and ref, read the file out of hg
     and send the path to the newly cached file
     """
     fnd = {"path": "", "rel": ""}
     if os.path.isabs(path) or tgt_env not in envs():
         return fnd
     dest = os.path.join(__opts__["cachedir"], "hgfs/refs", tgt_env, path)
     hashes_glob = os.path.join(
-        __opts__["cachedir"], "hgfs/hash", tgt_env, "{0}.hash.*".format(path)
+        __opts__["cachedir"], "hgfs/hash", tgt_env, "{}.hash.*".format(path)
     )
     blobshadest = os.path.join(
-        __opts__["cachedir"], "hgfs/hash", tgt_env, "{0}.hash.blob_sha1".format(path)
+        __opts__["cachedir"], "hgfs/hash", tgt_env, "{}.hash.blob_sha1".format(path)
     )
     lk_fn = os.path.join(
-        __opts__["cachedir"], "hgfs/hash", tgt_env, "{0}.lk".format(path)
+        __opts__["cachedir"], "hgfs/hash", tgt_env, "{}.lk".format(path)
     )
     destdir = os.path.dirname(dest)
     hashdir = os.path.dirname(blobshadest)
     if not os.path.isdir(destdir):
         try:
             os.makedirs(destdir)
         except OSError:
             # Path exists and is a file, remove it and retry
             os.remove(destdir)
             os.makedirs(destdir)
skipping to change at line 679 (salt-3002.1) / line 669 (salt-3002.2)

         salt.fileserver.wait_lock(lk_fn, dest)
         if os.path.isfile(blobshadest) and os.path.isfile(dest):
             with salt.utils.files.fopen(blobshadest, "r") as fp_:
                 sha = fp_.read()
                 if sha == ref[2]:
                     fnd["rel"] = path
                     fnd["path"] = dest
                     repo["repo"].close()
                     return fnd
         try:
-            repo["repo"].cat(["path:{0}".format(repo_path)], rev=ref[2], output=dest)
+            repo["repo"].cat(["path:{}".format(repo_path)], rev=ref[2], output=dest)
         except hglib.error.CommandError:
             repo["repo"].close()
             continue
         with salt.utils.files.fopen(lk_fn, "w"):
             pass
         for filename in glob.glob(hashes_glob):
             try:
                 os.remove(filename)
             except Exception:  # pylint: disable=broad-except
                 pass
         with salt.utils.files.fopen(blobshadest, "w+") as fp_:
             fp_.write(ref[2])
         try:
             os.remove(lk_fn)
-        except (OSError, IOError):
+        except OSError:
             pass
         fnd["rel"] = path
         fnd["path"] = dest
         try:
             # Converting the stat result to a list, the elements of the
             # list correspond to the following stat_result params:
             # 0 => st_mode=33188
             # 1 => st_ino=10227377
             # 2 => st_dev=65026
             # 3 => st_nlink=1
skipping to change at line 762 (salt-3002.1) / line 752 (salt-3002.2)

     if not all(x in load for x in ("path", "saltenv")):
         return ""
     ret = {"hash_type": __opts__["hash_type"]}
     relpath = fnd["rel"]
     path = fnd["path"]
     hashdest = os.path.join(
         __opts__["cachedir"],
         "hgfs/hash",
         load["saltenv"],
-        "{0}.hash.{1}".format(relpath, __opts__["hash_type"]),
+        "{}.hash.{}".format(relpath, __opts__["hash_type"]),
     )
     if not os.path.isfile(hashdest):
         ret["hsum"] = salt.utils.hashutils.get_hash(path, __opts__["hash_type"])
         with salt.utils.files.fopen(hashdest, "w+") as fp_:
             fp_.write(ret["hsum"])
         return ret
     else:
         with salt.utils.files.fopen(hashdest, "rb") as fp_:
             ret["hsum"] = salt.utils.stringutils.to_unicode(fp_.read())
         return ret
skipping to change at line 789 (salt-3002.1) / line 779 (salt-3002.2)

         # "env" is not supported; Use "saltenv".
         load.pop("env")
     list_cachedir = os.path.join(__opts__["cachedir"], "file_lists/hgfs")
     if not os.path.isdir(list_cachedir):
         try:
             os.makedirs(list_cachedir)
         except os.error:
             log.critical("Unable to make cachedir %s", list_cachedir)
             return []
-    list_cache = os.path.join(list_cachedir, "{0}.p".format(load["saltenv"]))
-    w_lock = os.path.join(list_cachedir, ".{0}.w".format(load["saltenv"]))
+    list_cache = os.path.join(list_cachedir, "{}.p".format(load["saltenv"]))
+    w_lock = os.path.join(list_cachedir, ".{}.w".format(load["saltenv"]))
     cache_match, refresh_cache, save_cache = salt.fileserver.check_file_list_cache(
         __opts__, form, list_cache, w_lock
     )
     if cache_match is not None:
         return cache_match
     if refresh_cache:
         ret = {}
         ret["files"] = _get_file_list(load)
         ret["dirs"] = _get_dir_list(load)
         if save_cache:
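
Most of the remaining hunks, including this one, simply drop explicit indexes from str.format() fields; empty braces auto-number left to right, so the two spellings render identically:

    saltenv = "base"  # hypothetical environment name
    assert "{0}.p".format(saltenv) == "{}.p".format(saltenv)
    assert ".{0}.w".format(saltenv) == ".{}.w".format(saltenv)
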
End of changes: 28 change blocks; 41 lines changed or deleted, 31 lines changed or added.
