"Fossies" - the Fresh Open Source Software Archive 
Member "salt-3002.2/salt/fileserver/hgfs.py" (18 Nov 2020, 29247 Bytes) of package /linux/misc/salt-3002.2.tar.gz:
As a special service "Fossies" has tried to format the requested source page into HTML format using (guessed) Python source code syntax highlighting (style:
standard) with prefixed line numbers.
Alternatively you can view or download the uninterpreted source code file here.
For more information about "hgfs.py" see the
Fossies "Dox" file reference documentation and the latest
Fossies "Diffs" side-by-side code changes report:
3002.1_vs_3002.2.
1 """
2 Mercurial Fileserver Backend
3
4 To enable, add ``hgfs`` to the :conf_master:`fileserver_backend` option in the
5 Master config file.
6
7 .. code-block:: yaml
8
9 fileserver_backend:
10 - hgfs
11
12 .. note::
13 ``hg`` also works here. Prior to the 2018.3.0 release, *only* ``hg`` would
14 work.
15
16 After enabling this backend, branches, bookmarks, and tags in a remote
17 mercurial repository are exposed to salt as different environments. This
18 feature is managed by the :conf_master:`fileserver_backend` option in the salt
19 master config file.
20
21 This fileserver has an additional option :conf_master:`hgfs_branch_method` that
22 will set the desired branch method. Possible values are: ``branches``,
23 ``bookmarks``, or ``mixed``. If using ``branches`` or ``mixed``, the
24 ``default`` branch will be mapped to ``base``.
25
26
27 .. versionchanged:: 2014.1.0
28 The :conf_master:`hgfs_base` master config parameter was added, allowing
29 for a branch other than ``default`` to be used for the ``base``
30 environment, and allowing for a ``base`` environment to be specified when
31 using an :conf_master:`hgfs_branch_method` of ``bookmarks``.
32
33
34 :depends: - mercurial
35 - python bindings for mercurial (``python-hglib``)
36 """
37
38
import copy
import errno
import fnmatch
import glob
import hashlib
import logging
import os
import shutil
from datetime import datetime

import salt.fileserver
import salt.payload
import salt.utils.data
import salt.utils.event
import salt.utils.files
import salt.utils.gzip_util
import salt.utils.hashutils
import salt.utils.stringutils
import salt.utils.url
import salt.utils.versions
from salt.exceptions import FileserverConfigError
from salt.ext import six
from salt.utils.event import tagify
60
61 VALID_BRANCH_METHODS = ("branches", "bookmarks", "mixed")
62 PER_REMOTE_OVERRIDES = ("base", "branch_method", "mountpoint", "root")
63
64
65 # pylint: disable=import-error
66 try:
67 import hglib
68
69 HAS_HG = True
70 except ImportError:
71 HAS_HG = False
72 # pylint: enable=import-error
73
74
75 log = logging.getLogger(__name__)
76
77 # Define the module's virtual name
78 __virtualname__ = "hgfs"
79 __virtual_aliases__ = ("hg",)
80
81
def __virtual__():
    """
    Only load if mercurial is available.

    Returns the virtual module name when hgfs is listed in
    ``fileserver_backend``, hglib is importable, and the configured
    ``hgfs_branch_method`` is valid; otherwise returns False.
    """
    if __virtualname__ not in __opts__["fileserver_backend"]:
        return False
    if not HAS_HG:
        log.error(
            "Mercurial fileserver backend is enabled in configuration "
            "but could not be loaded, is hglib installed?"
        )
        return False
    branch_method = __opts__["hgfs_branch_method"]
    if branch_method not in VALID_BRANCH_METHODS:
        log.error(
            "Invalid hgfs_branch_method '%s'. Valid methods are: %s",
            branch_method,
            VALID_BRANCH_METHODS,
        )
        return False
    return __virtualname__
102
103
104 def _all_branches(repo):
105 """
106 Returns all branches for the specified repo
107 """
108 # repo.branches() returns a list of 3-tuples consisting of
109 # (branch name, rev #, nodeid)
110 # Example: [('default', 4, '7c96229269fa')]
111 return repo.branches()
112
113
114 def _get_branch(repo, name):
115 """
116 Find the requested branch in the specified repo
117 """
118 try:
119 return [x for x in _all_branches(repo) if x[0] == name][0]
120 except IndexError:
121 return False
122
123
124 def _all_bookmarks(repo):
125 """
126 Returns all bookmarks for the specified repo
127 """
128 # repo.bookmarks() returns a tuple containing the following:
129 # 1. A list of 3-tuples consisting of (bookmark name, rev #, nodeid)
130 # 2. The index of the current bookmark (-1 if no current one)
131 # Example: ([('mymark', 4, '7c96229269fa')], -1)
132 return repo.bookmarks()[0]
133
134
135 def _get_bookmark(repo, name):
136 """
137 Find the requested bookmark in the specified repo
138 """
139 try:
140 return [x for x in _all_bookmarks(repo) if x[0] == name][0]
141 except IndexError:
142 return False
143
144
145 def _all_tags(repo):
146 """
147 Returns all tags for the specified repo
148 """
149 # repo.tags() returns a list of 4-tuples consisting of
150 # (tag name, rev #, nodeid, islocal)
151 # Example: [('1.0', 3, '3be15e71b31a', False),
152 # ('tip', 4, '7c96229269fa', False)]
153 # Avoid returning the special 'tip' tag.
154 return [x for x in repo.tags() if x[0] != "tip"]
155
156
157 def _get_tag(repo, name):
158 """
159 Find the requested tag in the specified repo
160 """
161 try:
162 return [x for x in _all_tags(repo) if x[0] == name][0]
163 except IndexError:
164 return False
165
166
def _get_ref(repo, name):
    """
    Return the ref tuple for environment ``name`` in the hgfs remote config
    dict ``repo``, or False when the ref cannot be resolved.

    'base' is translated to the remote's configured base ref, and the lookup
    order is determined by the remote's ``branch_method``.
    """
    if name == "base":
        name = repo["base"]
    # Only resolve names that map to the base ref or to an exposed env
    if name != repo["base"] and name not in envs():
        return False
    hgrepo = repo["repo"]
    method = repo["branch_method"]
    if method == "branches":
        lookups = (_get_branch, _get_tag)
    elif method == "bookmarks":
        lookups = (_get_bookmark, _get_tag)
    elif method == "mixed":
        lookups = (_get_branch, _get_bookmark, _get_tag)
    else:
        return False
    for lookup in lookups:
        ref = lookup(hgrepo, name)
        if ref:
            return ref
    return False
185
186
def _failhard():
    """
    Fatal fileserver configuration issue, raise an exception

    Called by init() whenever a remote's configuration is unusable; the
    FileserverConfigError aborts fileserver backend loading entirely.
    """
    raise FileserverConfigError("Failed to load hg fileserver backend")
192
193
def init():
    """
    Return a list of hglib objects for the various hgfs remotes

    For each remote in ``hgfs_remotes``, merges per-remote overrides onto the
    global hgfs defaults, ensures a local cache repo exists under
    ``<cachedir>/hgfs/<hash-of-url>``, and returns the resulting list of
    remote-config dicts (each carrying the open hglib handle, URL, hash,
    cachedir, and lockfile path). Raises FileserverConfigError (via
    _failhard) on any unusable remote configuration.
    """
    # Base directory that holds one cache repo per configured remote
    bp_ = os.path.join(__opts__["cachedir"], "hgfs")
    new_remote = False
    repos = []

    # Global defaults for the per-remote-overridable parameters
    per_remote_defaults = {}
    for param in PER_REMOTE_OVERRIDES:
        per_remote_defaults[param] = str(__opts__["hgfs_{}".format(param)])

    for remote in __opts__["hgfs_remotes"]:
        repo_conf = copy.deepcopy(per_remote_defaults)
        if isinstance(remote, dict):
            # Remote was configured as "url: [{param: value}, ...]"
            repo_url = next(iter(remote))
            per_remote_conf = {
                key: str(val)
                for key, val in salt.utils.data.repack_dictlist(
                    remote[repo_url]
                ).items()
            }
            if not per_remote_conf:
                log.error(
                    "Invalid per-remote configuration for hgfs remote %s. If "
                    "no per-remote parameters are being specified, there may "
                    "be a trailing colon after the URL, which should be "
                    "removed. Check the master configuration file.",
                    repo_url,
                )
                _failhard()

            # Validate branch_method before accepting the override
            branch_method = per_remote_conf.get(
                "branch_method", per_remote_defaults["branch_method"]
            )
            if branch_method not in VALID_BRANCH_METHODS:
                log.error(
                    "Invalid branch_method '%s' for remote %s. Valid "
                    "branch methods are: %s. This remote will be ignored.",
                    branch_method,
                    repo_url,
                    ", ".join(VALID_BRANCH_METHODS),
                )
                _failhard()

            # Reject any per-remote parameter that is not overridable
            per_remote_errors = False
            for param in (x for x in per_remote_conf if x not in PER_REMOTE_OVERRIDES):
                log.error(
                    "Invalid configuration parameter '%s' for remote %s. "
                    "Valid parameters are: %s. See the documentation for "
                    "further information.",
                    param,
                    repo_url,
                    ", ".join(PER_REMOTE_OVERRIDES),
                )
                per_remote_errors = True
            if per_remote_errors:
                _failhard()

            repo_conf.update(per_remote_conf)
        else:
            repo_url = remote

        if not isinstance(repo_url, str):
            log.error(
                "Invalid hgfs remote %s. Remotes must be strings, you may "
                "need to enclose the URL in quotes",
                repo_url,
            )
            _failhard()

        try:
            repo_conf["mountpoint"] = salt.utils.url.strip_proto(
                repo_conf["mountpoint"]
            )
        except TypeError:
            # mountpoint not specified
            pass

        # Cache directory name is a hash of the remote URL
        hash_type = getattr(hashlib, __opts__.get("hash_type", "md5"))
        repo_hash = hash_type(repo_url.encode("utf-8")).hexdigest()
        rp_ = os.path.join(bp_, repo_hash)
        if not os.path.isdir(rp_):
            os.makedirs(rp_)

        if not os.listdir(rp_):
            # Only init if the directory is empty.
            hglib.init(rp_)
            new_remote = True
        try:
            repo = hglib.open(rp_)
        except hglib.error.ServerError:
            log.error(
                "Cache path %s (corresponding remote: %s) exists but is not "
                "a valid mercurial repository. You will need to manually "
                "delete this directory on the master to continue to use this "
                "hgfs remote.",
                rp_,
                repo_url,
            )
            _failhard()
        except Exception as exc:  # pylint: disable=broad-except
            log.error(
                "Exception '%s' encountered while initializing hgfs " "remote %s",
                exc,
                repo_url,
            )
            _failhard()

        try:
            refs = repo.config(names=b"paths")
        except hglib.error.CommandError:
            refs = None

        # Do NOT put this if statement inside the except block above. Earlier
        # versions of hglib did not raise an exception, so we need to do it
        # this way to support both older and newer hglib.
        if not refs:
            # Write an hgrc defining the remote URL
            hgconfpath = os.path.join(rp_, ".hg", "hgrc")
            with salt.utils.files.fopen(hgconfpath, "w+") as hgconfig:
                hgconfig.write("[paths]\n")
                hgconfig.write(
                    salt.utils.stringutils.to_str("default = {}\n".format(repo_url))
                )

        # Attach runtime bookkeeping data to the per-remote config
        repo_conf.update(
            {
                "repo": repo,
                "url": repo_url,
                "hash": repo_hash,
                "cachedir": rp_,
                "lockfile": os.path.join(
                    __opts__["cachedir"], "hgfs", "{}.update.lk".format(repo_hash)
                ),
            }
        )
        repos.append(repo_conf)
        repo.close()

    if new_remote:
        # Record which cache hash belongs to which remote URL, for operators
        remote_map = os.path.join(__opts__["cachedir"], "hgfs/remote_map.txt")
        try:
            with salt.utils.files.fopen(remote_map, "w+") as fp_:
                timestamp = datetime.now().strftime("%d %b %Y %H:%M:%S.%f")
                fp_.write("# hgfs_remote map as of {}\n".format(timestamp))
                for repo in repos:
                    fp_.write(
                        salt.utils.stringutils.to_str(
                            "{} = {}\n".format(repo["hash"], repo["url"])
                        )
                    )
        except OSError:
            pass
        else:
            log.info("Wrote new hgfs_remote map to %s", remote_map)

    return repos
352
353
def _clear_old_remotes():
    """
    Remove cache directories for remotes no longer configured.

    Returns a 2-tuple: (True if any stale cachedir was removed, the list of
    remote configs from init()).
    """
    basedir = os.path.join(__opts__["cachedir"], "hgfs")
    try:
        cached_entries = os.listdir(basedir)
    except OSError:
        cached_entries = []
    repos = init()
    # Anything whose name matches an active remote's hash is still in use
    active_hashes = {repo["hash"] for repo in repos}
    removal_candidates = []
    for entry in cached_entries:
        if entry in active_hashes:
            continue
        if entry in ("hash", "refs"):
            # These hold cached file data, not per-remote clones
            continue
        full_path = os.path.join(basedir, entry)
        if os.path.isdir(full_path):
            removal_candidates.append(full_path)
    removed = []
    for rdir in removal_candidates:
        try:
            shutil.rmtree(rdir)
        except OSError as exc:
            log.error("Unable to remove old hgfs remote cachedir %s: %s", rdir, exc)
        else:
            log.debug("hgfs removed old cachedir %s", rdir)
            removed.append(rdir)
    return bool(removed), repos
390
391
def clear_cache():
    """
    Completely clear hgfs cache

    Removes both the clone cache and the file-list cache; returns a list of
    error messages (empty on full success).
    """
    errors = []
    repo_cachedir = os.path.join(__opts__["cachedir"], "hgfs")
    list_cachedir = os.path.join(__opts__["cachedir"], "file_lists/hgfs")
    for cachedir in (repo_cachedir, list_cachedir):
        if not os.path.exists(cachedir):
            continue
        try:
            shutil.rmtree(cachedir)
        except OSError as exc:
            errors.append("Unable to delete {}: {}".format(cachedir, exc))
    return errors
406
407
def clear_lock(remote=None):
    """
    Clear update.lk

    ``remote`` can either be a dictionary containing repo configuration
    information, or a pattern. If the latter, then remotes for which the URL
    matches the pattern will be locked.

    Returns a 2-tuple of (success messages, error messages).
    """

    def _do_clear_lock(repo):
        # Remove a single remote's lockfile; returns (success, failed) lists
        def _add_error(errlist, repo, exc):
            msg = "Unable to remove update lock for {} ({}): {} ".format(
                repo["url"], repo["lockfile"], exc
            )
            log.debug(msg)
            errlist.append(msg)

        success = []
        failed = []
        if os.path.exists(repo["lockfile"]):
            try:
                os.remove(repo["lockfile"])
            except OSError as exc:
                if exc.errno == errno.EISDIR:
                    # Somehow this path is a directory. Should never happen
                    # unless some wiseguy manually creates a directory at this
                    # path, but just in case, handle it.
                    try:
                        shutil.rmtree(repo["lockfile"])
                    except OSError as exc:
                        _add_error(failed, repo, exc)
                else:
                    _add_error(failed, repo, exc)
            else:
                msg = "Removed lock for {}".format(repo["url"])
                log.debug(msg)
                success.append(msg)
        return success, failed

    # A dict argument is a single already-resolved remote config
    if isinstance(remote, dict):
        return _do_clear_lock(remote)

    cleared = []
    errors = []
    for repo in init():
        if remote:
            try:
                if not fnmatch.fnmatch(repo["url"], remote):
                    continue
            except TypeError:
                # remote was non-string, try again
                if not fnmatch.fnmatch(repo["url"], str(remote)):
                    continue
        success, failed = _do_clear_lock(repo)
        cleared.extend(success)
        errors.extend(failed)
    return cleared, errors
465
466
def lock(remote=None):
    """
    Place an update.lk

    ``remote`` can either be a dictionary containing repo configuration
    information, or a pattern. If the latter, then remotes for which the URL
    matches the pattern will be locked.

    Returns a 2-tuple of (success messages, error messages).
    """

    def _do_lock(repo_conf):
        # Create a single remote's lockfile; returns (success, failed) lists
        succeeded = []
        failures = []
        if os.path.exists(repo_conf["lockfile"]):
            # Lock already present; nothing to do for this remote
            return succeeded, failures
        try:
            with salt.utils.files.fopen(repo_conf["lockfile"], "w"):
                pass
        except OSError as exc:
            msg = "Unable to set update lock for {} ({}): {} ".format(
                repo_conf["url"], repo_conf["lockfile"], exc
            )
            log.debug(msg)
            failures.append(msg)
        else:
            msg = "Set lock for {}".format(repo_conf["url"])
            log.debug(msg)
            succeeded.append(msg)
        return succeeded, failures

    # A dict argument is a single already-resolved remote config
    if isinstance(remote, dict):
        return _do_lock(remote)

    locked = []
    errors = []
    for repo_conf in init():
        if remote:
            try:
                matches = fnmatch.fnmatch(repo_conf["url"], remote)
            except TypeError:
                # remote was non-string, try again
                matches = fnmatch.fnmatch(repo_conf["url"], str(remote))
            if not matches:
                continue
        success, failed = _do_lock(repo_conf)
        locked.extend(success)
        errors.extend(failed)

    return locked, errors
514
515
def update():
    """
    Execute an hg pull on all of the repos

    Skips remotes that are locked, refreshes the env cache when anything
    changed, fires a fileserver event when configured, and reaps stale hash
    cache entries. This function relies on ``import salt.payload`` and
    ``import salt.utils.event`` at the top of this module; previously it
    depended on those modules being imported transitively elsewhere.
    """
    # data for the fileserver event
    data = {"changed": False, "backend": "hgfs"}
    # _clear_old_remotes runs init(), so use the value from there to avoid a
    # second init()
    data["changed"], repos = _clear_old_remotes()
    for repo in repos:
        if os.path.exists(repo["lockfile"]):
            log.warning(
                "Update lockfile is present for hgfs remote %s, skipping. "
                "If this warning persists, it is possible that the update "
                "process was interrupted. Removing %s or running "
                "'salt-run fileserver.clear_lock hgfs' will allow updates "
                "to continue for this remote.",
                repo["url"],
                repo["lockfile"],
            )
            continue
        _, errors = lock(repo)
        if errors:
            log.error(
                "Unable to set update lock for hgfs remote %s, skipping.", repo["url"]
            )
            continue
        log.debug("hgfs is fetching from %s", repo["url"])
        repo["repo"].open()
        curtip = repo["repo"].tip()
        try:
            repo["repo"].pull()
        except Exception as exc:  # pylint: disable=broad-except
            log.error(
                "Exception %s caught while updating hgfs remote %s",
                exc,
                repo["url"],
                exc_info_on_loglevel=logging.DEBUG,
            )
        else:
            # tip() returns (rev #, ...); a changed rev number means new csets
            newtip = repo["repo"].tip()
            if curtip[1] != newtip[1]:
                data["changed"] = True
        repo["repo"].close()
        clear_lock(repo)

    # Refresh the serialized env cache if anything changed (or it is missing)
    env_cache = os.path.join(__opts__["cachedir"], "hgfs/envs.p")
    if data.get("changed", False) is True or not os.path.isfile(env_cache):
        env_cachedir = os.path.dirname(env_cache)
        if not os.path.exists(env_cachedir):
            os.makedirs(env_cachedir)
        new_envs = envs(ignore_cache=True)
        serial = salt.payload.Serial(__opts__)
        with salt.utils.files.fopen(env_cache, "wb+") as fp_:
            fp_.write(serial.dumps(new_envs))
            log.trace("Wrote env cache data to %s", env_cache)

    # if there is a change, fire an event
    if __opts__.get("fileserver_events", False):
        with salt.utils.event.get_event(
            "master",
            __opts__["sock_dir"],
            __opts__["transport"],
            opts=__opts__,
            listen=False,
        ) as event:
            event.fire_event(data, tagify(["hgfs", "update"], prefix="fileserver"))
    try:
        salt.fileserver.reap_fileserver_cache_dir(
            os.path.join(__opts__["cachedir"], "hgfs/hash"), find_file
        )
    except OSError:
        # Hash file won't exist if no files have yet been served up
        pass
590
591
def _env_is_exposed(env):
    """
    Check if an environment is exposed by comparing it against a whitelist and
    blacklist.
    """
    whitelist = __opts__["hgfs_saltenv_whitelist"]
    blacklist = __opts__["hgfs_saltenv_blacklist"]
    return salt.utils.stringutils.check_whitelist_blacklist(
        env, whitelist=whitelist, blacklist=blacklist
    )
602
603
def envs(ignore_cache=False):
    """
    Return a list of refs that can be used as environments.

    Branch/bookmark names matching a remote's configured base ref are exposed
    as 'base'; tags are always included. Results are filtered through the
    hgfs whitelist/blacklist.
    """
    if not ignore_cache:
        env_cache = os.path.join(__opts__["cachedir"], "hgfs/envs.p")
        cache_match = salt.fileserver.check_env_cache(__opts__, env_cache)
        if cache_match is not None:
            return cache_match
    found = set()
    for repo in init():
        hgrepo = repo["repo"]
        hgrepo.open()
        method = repo["branch_method"]
        if method in ("branches", "mixed"):
            for branch in _all_branches(hgrepo):
                name = branch[0]
                found.add("base" if name == repo["base"] else name)
        if method in ("bookmarks", "mixed"):
            for mark in _all_bookmarks(hgrepo):
                name = mark[0]
                found.add("base" if name == repo["base"] else name)
        found.update(tag[0] for tag in _all_tags(hgrepo))
        hgrepo.close()
    return [env for env in sorted(found) if _env_is_exposed(env)]
631
632
def find_file(path, tgt_env="base", **kwargs):  # pylint: disable=W0613
    """
    Find the first file to match the path and ref, read the file out of hg
    and send the path to the newly cached file

    Returns a dict with 'path' (local cache location), 'rel' (relative path),
    and — when the file was found — 'stat'. Both keys are empty strings when
    no remote provides the file.
    """
    fnd = {"path": "", "rel": ""}
    if os.path.isabs(path) or tgt_env not in envs():
        return fnd

    # Local cache destination and per-file bookkeeping paths
    dest = os.path.join(__opts__["cachedir"], "hgfs/refs", tgt_env, path)
    hashes_glob = os.path.join(
        __opts__["cachedir"], "hgfs/hash", tgt_env, "{}.hash.*".format(path)
    )
    blobshadest = os.path.join(
        __opts__["cachedir"], "hgfs/hash", tgt_env, "{}.hash.blob_sha1".format(path)
    )
    lk_fn = os.path.join(
        __opts__["cachedir"], "hgfs/hash", tgt_env, "{}.lk".format(path)
    )
    destdir = os.path.dirname(dest)
    hashdir = os.path.dirname(blobshadest)
    if not os.path.isdir(destdir):
        try:
            os.makedirs(destdir)
        except OSError:
            # Path exists and is a file, remove it and retry
            os.remove(destdir)
            os.makedirs(destdir)
    if not os.path.isdir(hashdir):
        try:
            os.makedirs(hashdir)
        except OSError:
            # Path exists and is a file, remove it and retry
            os.remove(hashdir)
            os.makedirs(hashdir)

    for repo in init():
        # Skip remotes whose mountpoint does not prefix the requested path
        if repo["mountpoint"] and not path.startswith(repo["mountpoint"] + os.path.sep):
            continue
        # Translate the salt path into a path within the hg repo
        repo_path = path[len(repo["mountpoint"]) :].lstrip(os.path.sep)
        if repo["root"]:
            repo_path = os.path.join(repo["root"], repo_path)

        repo["repo"].open()
        ref = _get_ref(repo, tgt_env)
        if not ref:
            # Branch or tag not found in repo, try the next
            repo["repo"].close()
            continue
        salt.fileserver.wait_lock(lk_fn, dest)
        # Cache hit: stored nodeid matches the ref's nodeid (ref[2])
        if os.path.isfile(blobshadest) and os.path.isfile(dest):
            with salt.utils.files.fopen(blobshadest, "r") as fp_:
                sha = fp_.read()
                if sha == ref[2]:
                    fnd["rel"] = path
                    fnd["path"] = dest
                    repo["repo"].close()
                    return fnd
        try:
            repo["repo"].cat(["path:{}".format(repo_path)], rev=ref[2], output=dest)
        except hglib.error.CommandError:
            # File does not exist at this rev in this remote; try the next
            repo["repo"].close()
            continue
        # Hold the lock while refreshing the cached hash data
        with salt.utils.files.fopen(lk_fn, "w"):
            pass
        for filename in glob.glob(hashes_glob):
            try:
                os.remove(filename)
            except Exception:  # pylint: disable=broad-except
                pass
        with salt.utils.files.fopen(blobshadest, "w+") as fp_:
            fp_.write(ref[2])
        try:
            os.remove(lk_fn)
        except OSError:
            pass
        fnd["rel"] = path
        fnd["path"] = dest
        try:
            # Converting the stat result to a list, the elements of the
            # list correspond to the following stat_result params:
            # 0 => st_mode=33188
            # 1 => st_ino=10227377
            # 2 => st_dev=65026
            # 3 => st_nlink=1
            # 4 => st_uid=1000
            # 5 => st_gid=1000
            # 6 => st_size=1056233
            # 7 => st_atime=1468284229
            # 8 => st_mtime=1456338235
            # 9 => st_ctime=1456338235
            fnd["stat"] = list(os.stat(dest))
        except Exception:  # pylint: disable=broad-except
            pass
        repo["repo"].close()
        return fnd
    return fnd
730
731
def serve_file(load, fnd):
    """
    Return a chunk from a file based on the data received

    Reads ``file_buffer_size`` bytes starting at ``load['loc']`` from the
    locally-cached copy referenced by ``fnd``.
    """
    if "env" in load:
        # "env" is not supported; Use "saltenv".
        load.pop("env")

    ret = {"data": "", "dest": ""}
    required_keys = ("path", "loc", "saltenv")
    if not all(key in load for key in required_keys):
        return ret
    if not fnd["path"]:
        return ret
    ret["dest"] = fnd["rel"]
    gzip = load.get("gzip", None)
    fpath = os.path.normpath(fnd["path"])
    with salt.utils.files.fopen(fpath, "rb") as fp_:
        fp_.seek(load["loc"])
        data = fp_.read(__opts__["file_buffer_size"])
    # Text files are decoded so the wire format matches the file contents
    if data and six.PY3 and not salt.utils.files.is_binary(fpath):
        data = data.decode(__salt_system_encoding__)
    if gzip and data:
        data = salt.utils.gzip_util.compress(data, gzip)
        ret["gzip"] = gzip
    ret["data"] = data
    return ret
758
759
def file_hash(load, fnd):
    """
    Return a file hash, the hash type is set in the master config file

    The hash is computed once and cached alongside the file; subsequent calls
    read the cached value. Returns an empty string when required load keys
    are missing, otherwise a dict with 'hash_type' and 'hsum'.
    """
    if "env" in load:
        # "env" is not supported; Use "saltenv".
        load.pop("env")

    if not all(x in load for x in ("path", "saltenv")):
        return ""
    ret = {"hash_type": __opts__["hash_type"]}
    relpath = fnd["rel"]
    path = fnd["path"]
    hashdest = os.path.join(
        __opts__["cachedir"],
        "hgfs/hash",
        load["saltenv"],
        "{}.hash.{}".format(relpath, __opts__["hash_type"]),
    )
    if not os.path.isfile(hashdest):
        # The hash directory is normally created by find_file(), but guard
        # against it being absent (e.g. removed by a cache cleanup) so the
        # write below cannot fail with a missing parent directory.
        hashdir = os.path.dirname(hashdest)
        if not os.path.isdir(hashdir):
            os.makedirs(hashdir)
        ret["hsum"] = salt.utils.hashutils.get_hash(path, __opts__["hash_type"])
        with salt.utils.files.fopen(hashdest, "w+") as fp_:
            fp_.write(ret["hsum"])
        return ret
    else:
        with salt.utils.files.fopen(hashdest, "rb") as fp_:
            ret["hsum"] = salt.utils.stringutils.to_unicode(fp_.read())
        return ret
788
789
def _file_lists(load, form):
    """
    Return a dict containing the file lists for files and dirs

    ``form`` selects which list ('files' or 'dirs') is returned; results are
    cached per saltenv under ``<cachedir>/file_lists/hgfs``.
    """
    if "env" in load:
        # "env" is not supported; Use "saltenv".
        load.pop("env")

    list_cachedir = os.path.join(__opts__["cachedir"], "file_lists/hgfs")
    if not os.path.isdir(list_cachedir):
        try:
            os.makedirs(list_cachedir)
        # OSError is the canonical exception here; os.error is just a legacy
        # alias for it.
        except OSError:
            log.critical("Unable to make cachedir %s", list_cachedir)
            return []
    list_cache = os.path.join(list_cachedir, "{}.p".format(load["saltenv"]))
    w_lock = os.path.join(list_cachedir, ".{}.w".format(load["saltenv"]))
    cache_match, refresh_cache, save_cache = salt.fileserver.check_file_list_cache(
        __opts__, form, list_cache, w_lock
    )
    if cache_match is not None:
        return cache_match
    if refresh_cache:
        ret = {}
        ret["files"] = _get_file_list(load)
        ret["dirs"] = _get_dir_list(load)
        if save_cache:
            salt.fileserver.write_file_list_cache(__opts__, ret, list_cache, w_lock)
        return ret.get(form, [])
    # Shouldn't get here, but if we do, this prevents a TypeError
    return []
821
822
def file_list(load):
    """
    Return a list of all files on the file server in a specified environment
    """
    # Delegate to the shared, cached list builder
    return _file_lists(load, form="files")
828
829
def _get_file_list(load):
    """
    Get a list of all files on the file server in a specified environment.

    Walks each remote's manifest at the resolved ref, maps repo paths onto
    the remote's mountpoint, and excludes anything outside hgfs_root.
    """
    if "env" in load:
        # "env" is not supported; Use "saltenv".
        load.pop("env")

    if "saltenv" not in load or load["saltenv"] not in envs():
        return []
    files = set()
    for repo in init():
        hgrepo = repo["repo"]
        hgrepo.open()
        ref = _get_ref(repo, load["saltenv"])
        if ref:
            # manifest() entries are tuples; index 4 is the file path
            for entry in hgrepo.manifest(rev=ref[1]):
                rel = os.path.relpath(entry[4], repo["root"])
                # Don't add files outside the hgfs_root
                if rel.startswith("../"):
                    continue
                files.add(os.path.join(repo["mountpoint"], rel))
        hgrepo.close()
    return sorted(files)
853
854
def file_list_emptydirs(load):  # pylint: disable=W0613
    """
    Return a list of all empty directories on the master

    Mercurial does not track empty directories, so this is always empty.
    """
    return []
861
862
def dir_list(load):
    """
    Return a list of all directories on the master
    """
    # Delegate to the shared, cached list builder
    return _file_lists(load, form="dirs")
868
869
def _get_dir_list(load):
    """
    Get a list of all directories on the master

    For every file in each remote's manifest, walks the path upward one
    component at a time so every ancestor directory is included, mapped onto
    the remote's mountpoint and restricted to hgfs_root.
    """
    if "env" in load:
        # "env" is not supported; Use "saltenv".
        load.pop("env")

    if "saltenv" not in load or load["saltenv"] not in envs():
        return []
    ret = set()
    for repo in init():
        repo["repo"].open()
        ref = _get_ref(repo, load["saltenv"])
        if ref:
            manifest = repo["repo"].manifest(rev=ref[1])
            for tup in manifest:
                # tup[4] is the file's path within the repo
                filepath = tup[4]
                split = filepath.rsplit("/", 1)
                # Peel one path component per iteration until only the
                # filename (len 1) remains, collecting each parent dir
                while len(split) > 1:
                    relpath = os.path.relpath(split[0], repo["root"])
                    # Don't add '.'
                    if relpath != ".":
                        # Don't add files outside the hgfs_root
                        if not relpath.startswith("../"):
                            ret.add(os.path.join(repo["mountpoint"], relpath))
                    split = split[0].rsplit("/", 1)
        repo["repo"].close()
        if repo["mountpoint"]:
            ret.add(repo["mountpoint"])
    return sorted(ret)