"Fossies" - the Fresh Open Source Software Archive 
Member "salt-3002.2/salt/states/zfs.py" (18 Nov 2020, 34619 Bytes) of package /linux/misc/salt-3002.2.tar.gz:
As a special service "Fossies" has tried to format the requested source page into HTML format using (guessed) Python source code syntax highlighting (style:
standard) with prefixed line numbers.
Alternatively you can view or download the uninterpreted source code file here.
For more information about "zfs.py" see the
Fossies "Dox" file reference documentation and the latest
Fossies "Diffs" side-by-side code changes report:
3002.1_vs_3002.2.
1 """
2 States for managing zfs datasets
3
4 :maintainer: Jorge Schrauwen <sjorge@blackdot.be>
5 :maturity: new
6 :depends: salt.utils.zfs, salt.modules.zfs
7 :platform: smartos, illumos, solaris, freebsd, linux
8
9 .. versionadded:: 2016.3.0
10 .. versionchanged:: 2018.3.1
11 Big refactor to remove duplicate code, better type conversions and improved
12 consistency in output.
13
14 .. code-block:: yaml
15
16 test/shares/yuki:
17 zfs.filesystem_present:
18 - create_parent: true
19 - properties:
20 quota: 16G
21
22 test/iscsi/haruhi:
23 zfs.volume_present:
24 - create_parent: true
25 - volume_size: 16M
26 - sparse: true
27 - properties:
28 readonly: on
29
30 test/shares/yuki@frozen:
31 zfs.snapshot_present
32
33 moka_origin:
34 zfs.hold_present:
35 - snapshot: test/shares/yuki@frozen
36
37 test/shares/moka:
38 zfs.filesystem_present:
39 - cloned_from: test/shares/yuki@frozen
40
41 test/shares/moka@tsukune:
42 zfs.snapshot_absent
43
44 """
45
46 import logging
47 from datetime import datetime
48
49 from salt.utils.odict import OrderedDict
50
51 log = logging.getLogger(__name__)
52
# Define the state's virtual name
__virtualname__ = "zfs"

# Compare modifiers for zfs.schedule_snapshot
# Each dict zeroes out the datetime fields *below* a given granularity, so two
# timestamps can be compared at that granularity via datetime.replace(**comp_*)
# (e.g. comp_day drops minutes and hours to compare calendar days).
comp_hour = {"minute": 0}
comp_day = {"minute": 0, "hour": 0}
comp_month = {"minute": 0, "hour": 0, "day": 1}
comp_year = {"minute": 0, "hour": 0, "day": 1, "month": 1}
61
62
def __virtual__():
    """
    Provides zfs state
    """
    if __grains__.get("zfs_support"):
        return __virtualname__
    return False, "The zfs state cannot be loaded: zfs not supported"
70
71
def _absent(name, dataset_type, force=False, recursive=False):
    """
    internal shared function for *_absent

    name : string
        name of dataset
    dataset_type : string [filesystem, volume, snapshot, or bookmark]
        type of dataset to remove
    force : boolean
        try harder to destroy the dataset
    recursive : boolean
        also destroy all the child datasets

    """
    ret = {"name": name, "changes": {}, "result": True, "comment": ""}

    ## log configuration
    dataset_type = dataset_type.lower()
    log.debug("zfs.%s_absent::%s::config::force = %s", dataset_type, name, force)
    log.debug(
        "zfs.%s_absent::%s::config::recursive = %s", dataset_type, name, recursive
    )

    ## NOTE: nothing to do if no dataset of this type exists under that name
    if not __salt__["zfs.exists"](name, **{"type": dataset_type}):
        ret["comment"] = "{} {} is absent".format(dataset_type, name)
        return ret

    ## NOTE: dataset found, destroy it (pretend success in test mode)
    if __opts__["test"]:
        destroy_res = OrderedDict([("destroyed", True)])
    else:
        destroy_res = __salt__["zfs.destroy"](
            name, **{"force": force, "recursive": recursive}
        )

    ret["result"] = destroy_res["destroyed"]
    if ret["result"]:
        ret["changes"][name] = "destroyed"
        ret["comment"] = "{} {} was destroyed".format(dataset_type, name)
    else:
        # prefer the backend's error text when it supplies one
        ret["comment"] = destroy_res.get(
            "error", "failed to destroy {} {}".format(dataset_type, name)
        )

    return ret
118
119
def filesystem_absent(name, force=False, recursive=False):
    """
    ensure filesystem is absent on the system

    name : string
        name of filesystem
    force : boolean
        try harder to destroy the dataset (zfs destroy -f)
    recursive : boolean
        also destroy all the child datasets (zfs destroy -r)

    .. warning::

        If a volume with ``name`` exists, this state will succeed without
        destroying the volume specified by ``name``. This module is dataset type sensitive.

    """
    # reject syntactically invalid names up front; _absent does the real work
    if __utils__["zfs.is_dataset"](name):
        return _absent(name, "filesystem", force, recursive)
    return {
        "name": name,
        "changes": {},
        "result": False,
        "comment": "invalid dataset name: {}".format(name),
    }
147
148
def volume_absent(name, force=False, recursive=False):
    """
    ensure volume is absent on the system

    name : string
        name of volume
    force : boolean
        try harder to destroy the dataset (zfs destroy -f)
    recursive : boolean
        also destroy all the child datasets (zfs destroy -r)

    .. warning::

        If a filesystem with ``name`` exists, this state will succeed without
        destroying the filesystem specified by ``name``. This module is dataset type sensitive.

    """
    # reject syntactically invalid names up front; _absent does the real work
    if __utils__["zfs.is_dataset"](name):
        return _absent(name, "volume", force, recursive)
    return {
        "name": name,
        "changes": {},
        "result": False,
        "comment": "invalid dataset name: {}".format(name),
    }
176
177
def snapshot_absent(name, force=False, recursive=False):
    """
    ensure snapshot is absent on the system

    name : string
        name of snapshot
    force : boolean
        try harder to destroy the dataset (zfs destroy -f)
    recursive : boolean
        also destroy all the child datasets (zfs destroy -r)

    """
    # reject syntactically invalid snapshot names up front
    if __utils__["zfs.is_snapshot"](name):
        return _absent(name, "snapshot", force, recursive)
    return {
        "name": name,
        "changes": {},
        "result": False,
        "comment": "invalid snapshot name: {}".format(name),
    }
200
201
def bookmark_absent(name, force=False, recursive=False):
    """
    ensure bookmark is absent on the system

    name : string
        name of bookmark
    force : boolean
        try harder to destroy the dataset (zfs destroy -f)
    recursive : boolean
        also destroy all the child datasets (zfs destroy -r)

    """
    # reject syntactically invalid bookmark names up front
    if __utils__["zfs.is_bookmark"](name):
        return _absent(name, "bookmark", force, recursive)
    return {
        "name": name,
        "changes": {},
        "result": False,
        "comment": "invalid bookmark name: {}".format(name),
    }
224
225
def hold_absent(name, snapshot, recursive=False):
    """
    ensure hold is absent on the system

    name : string
        name of hold
    snapshot : string
        name of snapshot
    recursive : boolean
        recursively releases a hold with the given tag on the snapshots of all descendent file systems.

    """
    ret = {"name": name, "changes": {}, "result": True, "comment": ""}

    ## log configuration
    log.debug("zfs.hold_absent::%s::config::snapshot = %s", name, snapshot)
    log.debug("zfs.hold_absent::%s::config::recursive = %s", name, recursive)

    ## check we have a snapshot/tag name
    if not __utils__["zfs.is_snapshot"](snapshot):
        ret["result"] = False
        ret["comment"] = "invalid snapshot name: {}".format(snapshot)
        return ret

    # a tag must not itself look like a snapshot or bookmark, and "error" is
    # reserved because zfs.holds uses that key to report failures
    tag_is_invalid = (
        name == "error"
        or __utils__["zfs.is_snapshot"](name)
        or __utils__["zfs.is_bookmark"](name)
    )
    if tag_is_invalid:
        ret["result"] = False
        ret["comment"] = "invalid tag name: {}".format(name)
        return ret

    ## release hold if required
    holds = __salt__["zfs.holds"](snapshot)
    if name in holds:
        ## NOTE: hold found for snapshot, release it
        if __opts__["test"]:
            release_res = OrderedDict([("released", True)])
        else:
            release_res = __salt__["zfs.release"](
                name, snapshot, **{"recursive": recursive}
            )

        ret["result"] = release_res["released"]
        if ret["result"]:
            ret["changes"] = {snapshot: {name: "released"}}
            ret["comment"] = "hold {} released".format(name)
        else:
            # prefer the backend's error text when it supplies one
            ret["comment"] = release_res.get(
                "error", "failed to release hold {}".format(name)
            )
    elif "error" in holds:
        ## NOTE: zfs.holds itself failed
        ret["result"] = False
        ret["comment"] = holds["error"]
    else:
        ## NOTE: no hold found with name for snapshot
        ret["comment"] = "hold {} is absent".format(name)

    return ret
287
288
def hold_present(name, snapshot, recursive=False):
    """
    ensure hold is present on the system

    name : string
        name of hold
    snapshot : string
        name of snapshot
    recursive : boolean
        recursively add hold with the given tag on the snapshots of all descendent file systems.

    """
    ret = {"name": name, "changes": {}, "result": True, "comment": ""}

    ## log configuration
    log.debug("zfs.hold_present::%s::config::snapshot = %s", name, snapshot)
    log.debug("zfs.hold_present::%s::config::recursive = %s", name, recursive)

    ## check we have a snapshot/tag name
    if not __utils__["zfs.is_snapshot"](snapshot):
        ret["result"] = False
        ret["comment"] = "invalid snapshot name: {}".format(snapshot)
        return ret

    # a tag must not itself look like a snapshot or bookmark, and "error" is
    # reserved because zfs.holds uses that key to report failures
    tag_is_invalid = (
        name == "error"
        or __utils__["zfs.is_snapshot"](name)
        or __utils__["zfs.is_bookmark"](name)
    )
    if tag_is_invalid:
        ret["result"] = False
        ret["comment"] = "invalid tag name: {}".format(name)
        return ret

    ## place hold if required
    if name in __salt__["zfs.holds"](snapshot):
        ## NOTE: hold with name already exists for snapshot
        ret["comment"] = "hold {} is present for {}".format(name, snapshot)
        return ret

    ## NOTE: no hold found with name for snapshot, add it
    if __opts__["test"]:
        hold_res = OrderedDict([("held", True)])
    else:
        hold_res = __salt__["zfs.hold"](name, snapshot, **{"recursive": recursive})

    ret["result"] = hold_res["held"]
    if ret["result"]:
        ret["changes"] = OrderedDict([(snapshot, OrderedDict([(name, "held")]))])
        ret["comment"] = "hold {} added to {}".format(name, snapshot)
    else:
        # prefer the backend's error text when it supplies one
        ret["comment"] = hold_res.get(
            "error", "failed to add hold {} to {}".format(name, snapshot)
        )

    return ret
344
345
def _dataset_present(
    dataset_type,
    name,
    volume_size=None,
    sparse=False,
    create_parent=False,
    properties=None,
    cloned_from=None,
):
    """
    internal handler for filesystem_present/volume_present

    dataset_type : string
        volume or filesystem
    name : string
        name of volume
    volume_size : string
        size of volume
    sparse : boolean
        create sparse volume
    create_parent : boolean
        creates all the non-existing parent datasets.
        any property specified on the command line using the -o option is ignored.
    cloned_from : string
        name of snapshot to clone
    properties : dict
        additional zfs properties (-o)

    .. note::
        ``cloned_from`` is only used if the volume does not exist yet,
        when ``cloned_from`` is set after the volume exists it will be ignored.

    .. note::
        Properties do not get cloned, if you specify the properties in the state file
        they will be applied on a subsequent run.

        ``volume_size`` is considered a property, so the volume's size will be
        corrected when the properties get updated if it differs from the
        original volume.

        The sparse parameter is ignored when using ``cloned_from``.

    """
    ret = {"name": name, "changes": {}, "result": True, "comment": ""}

    ## fallback dataset_type to filesystem if out of range
    if dataset_type not in ["filesystem", "volume"]:
        dataset_type = "filesystem"

    ## ensure properties are zfs values
    # from_size/from_auto_dict normalize human-readable input (e.g. "16G")
    # into parsable zfs values so comparisons below are apples-to-apples
    if volume_size:
        volume_size = __utils__["zfs.from_size"](volume_size)
    if properties:
        properties = __utils__["zfs.from_auto_dict"](properties)
    elif properties is None:
        properties = {}

    ## log configuration
    log.debug(
        "zfs.%s_present::%s::config::volume_size = %s", dataset_type, name, volume_size
    )
    log.debug("zfs.%s_present::%s::config::sparse = %s", dataset_type, name, sparse)
    log.debug(
        "zfs.%s_present::%s::config::create_parent = %s",
        dataset_type,
        name,
        create_parent,
    )
    log.debug(
        "zfs.%s_present::%s::config::cloned_from = %s", dataset_type, name, cloned_from
    )
    log.debug(
        "zfs.%s_present::%s::config::properties = %s", dataset_type, name, properties
    )

    ## check we have valid filesystem name/volume name/clone snapshot
    if not __utils__["zfs.is_dataset"](name):
        ret["result"] = False
        ret["comment"] = "invalid dataset name: {}".format(name)
        return ret

    if cloned_from and not __utils__["zfs.is_snapshot"](cloned_from):
        ret["result"] = False
        ret["comment"] = "{} is not a snapshot".format(cloned_from)
        return ret

    ## ensure dataset is in correct state
    ## NOTE: update the dataset
    if __salt__["zfs.exists"](name, **{"type": dataset_type}):
        ## NOTE: fetch current volume properties
        properties_current = __salt__["zfs.get"](
            name, type=dataset_type, fields="value", depth=0, parsable=True,
        ).get(name, OrderedDict())

        ## NOTE: add volsize to properties
        # volume_size is treated as just another property ("volsize") so the
        # generic update loop below corrects a resized volume
        if volume_size:
            properties["volsize"] = volume_size

        ## NOTE: build list of properties to update
        properties_update = []
        for prop in properties:
            ## NOTE: skip unexisting properties
            if prop not in properties_current:
                log.warning(
                    "zfs.%s_present::%s::update - unknown property: %s",
                    dataset_type,
                    name,
                    prop,
                )
                continue

            ## NOTE: compare current and wanted value
            if properties_current[prop]["value"] != properties[prop]:
                properties_update.append(prop)

        ## NOTE: update pool properties
        # each property is set individually; a single failure marks the whole
        # state failed but the remaining properties are still attempted
        for prop in properties_update:
            if not __opts__["test"]:
                mod_res = __salt__["zfs.set"](name, **{prop: properties[prop]})
            else:
                mod_res = OrderedDict([("set", True)])

            if mod_res["set"]:
                if name not in ret["changes"]:
                    ret["changes"][name] = {}
                ret["changes"][name][prop] = properties[prop]
            else:
                ret["result"] = False
                if ret["comment"] == "":
                    ret["comment"] = "The following properties were not updated:"
                ret["comment"] = "{} {}".format(ret["comment"], prop)

        ## NOTE: update comment
        if ret["result"] and name in ret["changes"]:
            ret["comment"] = "{} {} was updated".format(dataset_type, name)
        elif ret["result"]:
            ret["comment"] = "{} {} is uptodate".format(dataset_type, name)
        else:
            ret["comment"] = "{} {} failed to be updated".format(dataset_type, name)

    ## NOTE: create or clone the dataset
    else:
        # mod_res_action doubles as the result key returned by
        # zfs.clone ("cloned") / zfs.create ("created")
        mod_res_action = "cloned" if cloned_from else "created"
        if __opts__["test"]:
            ## NOTE: pretend to create/clone
            mod_res = OrderedDict([(mod_res_action, True)])
        elif cloned_from:
            ## NOTE: add volsize to properties
            if volume_size:
                properties["volsize"] = volume_size

            ## NOTE: clone the dataset
            mod_res = __salt__["zfs.clone"](
                cloned_from,
                name,
                **{"create_parent": create_parent, "properties": properties}
            )
        else:
            ## NOTE: create the dataset
            mod_res = __salt__["zfs.create"](
                name,
                **{
                    "create_parent": create_parent,
                    "properties": properties,
                    "volume_size": volume_size,
                    "sparse": sparse,
                }
            )

        ret["result"] = mod_res[mod_res_action]
        if ret["result"]:
            ret["changes"][name] = mod_res_action
            # NOTE(review): when properties are given they replace the
            # "created"/"cloned" marker in changes — looks intentional upstream,
            # but verify before relying on the changes payload shape
            if properties:
                ret["changes"][name] = properties
            ret["comment"] = "{} {} was {}".format(dataset_type, name, mod_res_action,)
        else:
            # [:-1] turns "created"/"cloned" back into the verb "create"/"clone"
            ret["comment"] = "failed to {} {} {}".format(
                mod_res_action[:-1], dataset_type, name,
            )
            if "error" in mod_res:
                ret["comment"] = mod_res["error"]

    return ret
529
530
def filesystem_present(name, create_parent=False, properties=None, cloned_from=None):
    """
    ensure filesystem exists and has properties set

    name : string
        name of filesystem
    create_parent : boolean
        creates all the non-existing parent datasets.
        any property specified on the command line using the -o option is ignored.
    cloned_from : string
        name of snapshot to clone
    properties : dict
        additional zfs properties (-o)

    .. note::
        ``cloned_from`` is only used if the filesystem does not exist yet,
        when ``cloned_from`` is set after the filesystem exists it will be ignored.

    .. note::
        Properties do not get cloned, if you specify the properties in the
        state file they will be applied on a subsequent run.

    """
    # thin wrapper: all of the real work happens in the shared handler
    return _dataset_present(
        dataset_type="filesystem",
        name=name,
        create_parent=create_parent,
        properties=properties,
        cloned_from=cloned_from,
    )
561
562
def volume_present(
    name,
    volume_size,
    sparse=False,
    create_parent=False,
    properties=None,
    cloned_from=None,
):
    """
    ensure volume exists and has properties set

    name : string
        name of volume
    volume_size : string
        size of volume
    sparse : boolean
        create sparse volume
    create_parent : boolean
        creates all the non-existing parent datasets.
        any property specified on the command line using the -o option is ignored.
    cloned_from : string
        name of snapshot to clone
    properties : dict
        additional zfs properties (-o)

    .. note::
        ``cloned_from`` is only used if the volume does not exist yet,
        when ``cloned_from`` is set after the volume exists it will be ignored.

    .. note::
        Properties do not get cloned, if you specify the properties in the state file
        they will be applied on a subsequent run.

        ``volume_size`` is considered a property, so the volume's size will be
        corrected when the properties get updated if it differs from the
        original volume.

        The sparse parameter is ignored when using ``cloned_from``.

    """
    # thin wrapper: all of the real work happens in the shared handler
    return _dataset_present(
        dataset_type="volume",
        name=name,
        volume_size=volume_size,
        sparse=sparse,
        create_parent=create_parent,
        properties=properties,
        cloned_from=cloned_from,
    )
612
613
def bookmark_present(name, snapshot):
    """
    ensure bookmark exists

    name : string
        name of bookmark
    snapshot : string
        name of snapshot

    """
    ret = {"name": name, "changes": {}, "result": True, "comment": ""}

    ## log configuration
    log.debug("zfs.bookmark_present::%s::config::snapshot = %s", name, snapshot)

    ## check we have valid snapshot/bookmark name
    if not __utils__["zfs.is_snapshot"](snapshot):
        ret["result"] = False
        # BUG FIX: report the offending snapshot; previously this formatted
        # the bookmark name into the message instead of the snapshot name
        ret["comment"] = "invalid snapshot name: {}".format(snapshot)
        return ret

    if "#" not in name and "/" not in name:
        ## NOTE: simple snapshot name
        # take the snapshot name and replace the snapshot part with the simple name
        # e.g. pool/fs@snap + bm --> pool/fs#bm
        name = "{}#{}".format(snapshot[: snapshot.index("@")], name)
        ret["name"] = name

    if not __utils__["zfs.is_bookmark"](name):
        ret["result"] = False
        ret["comment"] = "invalid bookmark name: {}".format(name)
        return ret

    ## ensure bookmark exists
    if not __salt__["zfs.exists"](name, **{"type": "bookmark"}):
        ## NOTE: bookmark the snapshot
        if not __opts__["test"]:
            mod_res = __salt__["zfs.bookmark"](snapshot, name)
        else:
            mod_res = OrderedDict([("bookmarked", True)])

        ret["result"] = mod_res["bookmarked"]
        if ret["result"]:
            ret["changes"][name] = snapshot
            ret["comment"] = "{} bookmarked as {}".format(snapshot, name)
        else:
            ret["comment"] = "failed to bookmark {}".format(snapshot)
            if "error" in mod_res:
                ret["comment"] = mod_res["error"]
    else:
        ## NOTE: bookmark already exists
        ret["comment"] = "bookmark is present"

    return ret
668
669
def snapshot_present(name, recursive=False, properties=None):
    """
    ensure snapshot exists and has properties set

    name : string
        name of snapshot
    recursive : boolean
        recursively create snapshots of all descendent datasets
    properties : dict
        additional zfs properties (-o)

    .. note:
        Properties are only set at creation time

    """
    ret = {"name": name, "changes": {}, "result": True, "comment": ""}

    ## log configuration
    log.debug("zfs.snapshot_present::%s::config::recursive = %s", name, recursive)
    log.debug("zfs.snapshot_present::%s::config::properties = %s", name, properties)

    ## ensure properties are zfs values
    if properties:
        properties = __utils__["zfs.from_auto_dict"](properties)

    ## check we have valid snapshot name
    if not __utils__["zfs.is_snapshot"](name):
        ret["result"] = False
        ret["comment"] = "invalid snapshot name: {}".format(name)
        return ret

    ## ensure snapshot exists
    if __salt__["zfs.exists"](name, **{"type": "snapshot"}):
        ## NOTE: snapshot already exists
        ret["comment"] = "snapshot is present"
        return ret

    ## NOTE: create the snapshot (pretend success in test mode)
    if __opts__["test"]:
        snap_res = OrderedDict([("snapshotted", True)])
    else:
        snap_res = __salt__["zfs.snapshot"](
            name, **{"recursive": recursive, "properties": properties}
        )

    ret["result"] = snap_res["snapshotted"]
    if ret["result"]:
        # report the applied properties when given, otherwise just the action
        ret["changes"][name] = properties if properties else "snapshotted"
        ret["comment"] = "snapshot {} was created".format(name)
    else:
        # prefer the backend's error text when it supplies one
        ret["comment"] = snap_res.get(
            "error", "failed to create snapshot {}".format(name)
        )

    return ret
726
727
def promoted(name):
    """
    ensure a dataset is not a clone

    name : string
        name of fileset or volume

    .. warning::

        only one dataset can be the origin,
        if you promote a clone the original will now point to the promoted dataset

    """
    ret = {"name": name, "changes": {}, "result": True, "comment": ""}

    ## check if we have a valid dataset name
    if not __utils__["zfs.is_dataset"](name):
        ret["result"] = False
        ret["comment"] = "invalid dataset name: {}".format(name)
        return ret

    ## the dataset must exist before it can be promoted
    if not __salt__["zfs.exists"](name, **{"type": "filesystem,volume"}):
        ret["result"] = False
        ret["comment"] = "dataset {} does not exist".format(name)
        return ret

    ## NOTE: an origin of "-" means the dataset is not a clone
    origin = __salt__["zfs.get"](
        name, **{"properties": "origin", "fields": "value", "parsable": True}
    )[name]["origin"]["value"]
    if origin == "-":
        ret["comment"] = "{} already promoted".format(name)
        return ret

    ## NOTE: promote dataset (pretend success in test mode)
    if __opts__["test"]:
        promote_res = OrderedDict([("promoted", True)])
    else:
        promote_res = __salt__["zfs.promote"](name)

    ret["result"] = promote_res["promoted"]
    if ret["result"]:
        ret["changes"][name] = "promoted"
        ret["comment"] = "{} promoted".format(name)
    else:
        # prefer the backend's error text when it supplies one
        ret["comment"] = promote_res.get("error", "failed to promote {}".format(name))

    return ret
781
782
def _schedule_snapshot_retrieve(dataset, prefix, snapshots):
    """
    Update snapshots dict with current snapshots

    dataset: string
        name of filesystem or volume
    prefix : string
        prefix for the snapshots
        e.g. 'test' will result in snapshots being named 'test-yyyymmdd_hhmm'
    snapshots : OrderedDict
        preseeded OrderedDict with configuration

    """
    ## NOTE: retrieve all snapshots for the dataset
    dataset_snapshots = __salt__["zfs.list"](
        dataset, **{"recursive": True, "depth": 1, "type": "snapshot"}
    )
    snap_prefix = "{}-".format(prefix)

    for snap in sorted(dataset_snapshots):
        ## NOTE: we only want the actual name
        ## myzpool/data@zbck-20171201_000248 -> zbck-20171201_000248
        snap_name = snap[snap.index("@") + 1 :]

        ## NOTE: we only want snapshots matching our prefix
        if not snap_name.startswith(snap_prefix):
            continue

        ## NOTE: retrieve the holds for this snapshot
        snap_holds = __salt__["zfs.holds"](snap)

        ## NOTE: a snapshot without holds is eligible for pruning
        if not snap_holds:
            snapshots["_prunable"].append(snap)

        ## NOTE: track the snapshot under each hold from our schedule;
        ## holds we do not manage are skipped
        for hold in snap_holds:
            if hold in snapshots["_schedule"]:
                snapshots[hold].append(snap)

    return snapshots
825
826
def _schedule_snapshot_prepare(dataset, prefix, snapshots):
    """
    Update snapshots dict with info for a new snapshot

    dataset: string
        name of filesystem or volume
    prefix : string
        prefix for the snapshots
        e.g. 'test' will result in snapshots being named 'test-yyyymmdd_hhmm'
    snapshots : OrderedDict
        preseeded OrderedDict with configuration

    """
    ## NOTE: generate new snapshot name
    snapshot_create_name = "{dataset}@{prefix}-{timestamp}".format(
        dataset=dataset,
        prefix=prefix,
        timestamp=datetime.now().strftime("%Y%m%d_%H%M%S"),
    )

    ## NOTE: figure out if we need to create the snapshot
    # seconds/microseconds are zeroed so comparisons happen at minute granularity
    timestamp_now = datetime.now().replace(second=0, microsecond=0)
    snapshots["_create"][snapshot_create_name] = []
    for hold, hold_count in snapshots["_schedule"].items():
        ## NOTE: skip hold if we don't keep snapshots for it
        if hold_count == 0:
            continue

        ## NOTE: figure out if we need the current hold on the new snapshot
        if snapshots[hold]:
            ## NOTE: extract datetime from snapshot name
            # NOTE(review): assumes snapshots[hold] is ordered oldest->newest
            # (the retrieve helper appends in sorted order), so [-1] is the
            # most recent snapshot for this hold — confirm if the dict is
            # ever populated elsewhere
            timestamp = datetime.strptime(
                snapshots[hold][-1], "{}@{}-%Y%m%d_%H%M%S".format(dataset, prefix),
            ).replace(second=0, microsecond=0)

            ## NOTE: compare current timestamp to timestamp from snapshot
            # the comp_* dicts zero out the finer-grained datetime fields so
            # each comparison happens at the granularity of the hold itself;
            # "continue" means the latest snapshot is still current and no
            # hold of this type is needed on the new snapshot
            if hold == "minute" and timestamp_now <= timestamp:
                continue
            elif hold == "hour" and timestamp_now.replace(
                **comp_hour
            ) <= timestamp.replace(**comp_hour):
                continue
            elif hold == "day" and timestamp_now.replace(
                **comp_day
            ) <= timestamp.replace(**comp_day):
                continue
            elif hold == "month" and timestamp_now.replace(
                **comp_month
            ) <= timestamp.replace(**comp_month):
                continue
            elif hold == "year" and timestamp_now.replace(
                **comp_year
            ) <= timestamp.replace(**comp_year):
                continue

        ## NOTE: add hold entry for snapshot
        snapshots["_create"][snapshot_create_name].append(hold)

    return snapshots
886
887
def scheduled_snapshot(name, prefix, recursive=True, schedule=None):
    """
    maintain a set of snapshots based on a schedule

    name : string
        name of filesystem or volume
    prefix : string
        prefix for the snapshots
        e.g. 'test' will result in snapshots being named 'test-yyyymmdd_hhmm'
    recursive : boolean
        create snapshots for all children also
    schedule : dict
        dict holding the schedule, the following keys are available (minute, hour,
        day, month, and year) by default all are set to 0 the value indicated the
        number of snapshots of that type to keep around.

    .. warning::

        snapshots will only be created and pruned every time the state runs.
        a schedule must be setup to automatically run the state. this means that if
        you run the state daily the hourly snapshot will only be made once per day!

    .. versionchanged:: 2018.3.0

        switched to localtime from gmtime so times now take into account timezones.

    """
    ret = {"name": name, "changes": {}, "result": True, "comment": ""}

    ## initialize defaults
    schedule_holds = ["minute", "hour", "day", "month", "year"]
    snapshots = OrderedDict(
        [("_create", OrderedDict()), ("_prunable", []), ("_schedule", OrderedDict())]
    )
    # BUG FIX: treat a missing schedule as an empty one; previously the
    # membership tests below raised TypeError when schedule was left at None
    if schedule is None:
        schedule = {}

    ## strict configuration validation
    ## NOTE: we need a valid dataset
    if not __utils__["zfs.is_dataset"](name):
        ret["result"] = False
        ret["comment"] = "invalid dataset name: {}".format(name)

    if not __salt__["zfs.exists"](name, **{"type": "filesystem,volume"}):
        ret["comment"] = "dataset {} does not exist".format(name)
        ret["result"] = False

    ## NOTE: prefix must be 4 or longer
    if not prefix or len(prefix) < 4:
        ret["comment"] = "prefix ({}) must be at least 4 long".format(prefix)
        ret["result"] = False

    ## NOTE: validate schedule
    total_count = 0
    for hold in schedule_holds:
        snapshots[hold] = []
        if hold not in schedule:
            snapshots["_schedule"][hold] = 0
        elif isinstance(schedule[hold], int):
            snapshots["_schedule"][hold] = schedule[hold]
        else:
            ret["result"] = False
            ret["comment"] = "schedule value for {} is not an integer".format(hold)
            break
        total_count += snapshots["_schedule"][hold]
    if ret["result"] and total_count == 0:
        ret["result"] = False
        ret["comment"] = "schedule is not valid, you need to keep atleast 1 snapshot"

    ## NOTE: return if configuration is not valid
    if not ret["result"]:
        return ret

    ## retrieve existing snapshots
    snapshots = _schedule_snapshot_retrieve(name, prefix, snapshots)

    ## prepare snapshot
    snapshots = _schedule_snapshot_prepare(name, prefix, snapshots)

    ## log configuration
    log.debug("zfs.scheduled_snapshot::%s::config::recursive = %s", name, recursive)
    log.debug("zfs.scheduled_snapshot::%s::config::prefix = %s", name, prefix)
    log.debug("zfs.scheduled_snapshot::%s::snapshots = %s", name, snapshots)

    ## create snapshot(s)
    for snapshot_name, snapshot_holds in snapshots["_create"].items():
        ## NOTE: skip if new snapshot has no holds
        if not snapshot_holds:
            continue

        ## NOTE: create snapshot
        if not __opts__["test"]:
            mod_res = __salt__["zfs.snapshot"](
                snapshot_name, **{"recursive": recursive}
            )
        else:
            mod_res = OrderedDict([("snapshotted", True)])

        if not mod_res["snapshotted"]:
            ret["result"] = False
            ret["comment"] = "error creating snapshot ({})".format(snapshot_name)
        else:
            ## NOTE: create holds (if we have a snapshot)
            for hold in snapshot_holds:
                if not __opts__["test"]:
                    mod_res = __salt__["zfs.hold"](
                        hold, snapshot_name, **{"recursive": recursive}
                    )
                else:
                    mod_res = OrderedDict([("held", True)])

                if not mod_res["held"]:
                    ret["result"] = False
                    ret["comment"] = "error adding hold ({}) to snapshot ({})".format(
                        hold, snapshot_name
                    )
                    break

                snapshots[hold].append(snapshot_name)

        if ret["result"]:
            ret["comment"] = "scheduled snapshots updated"
            if "created" not in ret["changes"]:
                ret["changes"]["created"] = []
            ret["changes"]["created"].append(snapshot_name)

    ## prune hold(s)
    for hold, hold_count in snapshots["_schedule"].items():
        while ret["result"] and len(snapshots[hold]) > hold_count:
            ## NOTE: pop oldest snapshot
            snapshot_name = snapshots[hold].pop(0)

            ## NOTE: release hold for snapshot
            if not __opts__["test"]:
                mod_res = __salt__["zfs.release"](
                    hold, snapshot_name, **{"recursive": recursive}
                )
            else:
                mod_res = OrderedDict([("released", True)])

            if not mod_res["released"]:
                ret["result"] = False
                # BUG FIX: this is the release path; the old message wrongly
                # said "error adding hold"
                ret["comment"] = "error releasing hold ({}) from snapshot ({})".format(
                    hold, snapshot_name
                )

            ## NOTE: mark as prunable once no holds remain
            if not __salt__["zfs.holds"](snapshot_name):
                snapshots["_prunable"].append(snapshot_name)

    ## prune snapshot(s)
    for snapshot_name in snapshots["_prunable"]:
        ## NOTE: destroy snapshot
        if not __opts__["test"]:
            mod_res = __salt__["zfs.destroy"](snapshot_name, **{"recursive": recursive})
        else:
            mod_res = OrderedDict([("destroyed", True)])

        if not mod_res["destroyed"]:
            ret["result"] = False
            # BUG FIX: '"... ({1})".format(snapshot_name)' raised IndexError
            # (manual field 1 with a single positional argument) exactly when
            # a prune failure needed to be reported; also fixes the
            # "prunding" typo
            ret["comment"] = "error pruning snapshot ({})".format(snapshot_name)
            break

    if ret["result"] and snapshots["_prunable"]:
        ret["comment"] = "scheduled snapshots updated"
        ret["changes"]["pruned"] = snapshots["_prunable"]

    if ret["result"] and not ret["changes"]:
        ret["comment"] = "scheduled snapshots are up to date"

    return ret
1057
1058
1059 # vim: tabstop=4 expandtab shiftwidth=4 softtabstop=4