Merge previous changes
Jonathan Carter
3 years ago
0 | 0 | language: python |
1 | 1 | python: |
2 | - 2.7 | |
3 | - 3.5 | |
4 | 2 | - 3.6 |
5 | 3 | - 3.7 |
6 | 4 | - 3.8 |
7 | dist: bionic | |
8 | services: | |
9 | - postgresql | |
5 | dist: focal | |
6 | addons: | |
7 | postgresql: "12" | |
8 | apt: | |
9 | packages: | |
10 | - postgresql-12 | |
10 | 11 | install: |
11 | 12 | - pip install . |
12 | 13 | before_script: |
0 | # 3.10.0 | |
1 | ||
2 | 2020-05-17 | |
3 | ||
4 | * added metadata defaults and reactors | |
0 | # 4.0.0 | |
1 | ||
2 | 2020-06-22 | |
3 | ||
4 | * new metadata processor API (BACKWARDS INCOMPATIBLE) | |
5 | * removed `template_node` node attribute (BACKWARDS INCOMPATIBLE) | |
6 | * removed support for Python 2.7 (BACKWARDS INCOMPATIBLE) | |
7 | * removed support for Python 3.4 (BACKWARDS INCOMPATIBLE) | |
8 | * removed support for Python 3.5 (BACKWARDS INCOMPATIBLE) | |
9 | * removed `members_add/remove` attribute for groups (BACKWARDS INCOMPATIBLE) | |
10 | * removed `bw --adhoc-nodes` (BACKWARDS INCOMPATIBLE) | |
11 | * added `locking_node` node attribute | |
5 | 12 | * added `bw diff` |
13 | * added `bw metadata -b` | |
14 | * added `bw metadata --hide-defaults` | |
15 | * added `bw metadata --hide-reactors` | |
16 | * added `bw metadata --hide-groups` | |
17 | * added `bw metadata --hide-node` | |
18 | * added `git_deploy` items (formerly a plugin) | |
19 | * added paging and color-coding for metadata sources to `bw metadata` | |
20 | * removed `bw metadata --table`, now done automatically (BACKWARDS INCOMPATIBLE) | |
21 | * removed `bw repo plugin` (BACKWARDS INCOMPATIBLE) | |
22 | * removed `bw test --secret-rotation` (BACKWARDS INCOMPATIBLE) | |
23 | * renamed `bw test --metadata-collisions` to `bw test --metadata-conflicts` (BACKWARDS INCOMPATIBLE) | |
24 | * reworked passing multi-value options on CLI (BACKWARDS INCOMPATIBLE) | |
25 | * `bw apply` will now exit with return code 1 if even a single item fails | |
6 | 26 | * `items/` is now searched recursively |
27 | * failed items will now show what commands they ran and what their output was | |
7 | 28 | |
8 | 29 | |
9 | 30 | # 3.9.0 |
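
The headline change in 4.0.0 is the new metadata API: a static `defaults` dict plus `metadata_reactor` functions replace the old metadata processors. A minimal sketch of what a bundle's `metadata.py` might look like under the new API — the reactor signature and the merge-on-return behavior are assumptions inferred from the `bundle.py` changes further down, not spelled out in this diff:

    # metadata.py (sketch; `metadata_reactor` and `node` are injected
    # into this file's namespace by Bundle, see base_env in bundle.py)
    defaults = {
        "myapp": {
            "port": 8080,
        },
    }

    @metadata_reactor
    def url(metadata):
        # assumption: reactors receive partial metadata and return a
        # dict that BundleWrap merges into the node's metadata
        return {
            "myapp": {
                "url": "http://{}:8080".format(node.hostname),
            },
        }

Note that a reactor may not be named `defaults` and reactor names must be unique per bundle; both rules are enforced in `bundle.py` below.
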
25 | 25 | |
26 | 26 | ------------------------------------------------------------------------ |
27 | 27 | |
28 | BundleWrap is © 2013 - 2019 [Torsten Rehn](mailto:torsten@rehn.email) | |
28 | BundleWrap is © 2013 - 2020 [Torsten Rehn](mailto:torsten@rehn.email) |
0 | # -*- coding: utf-8 -*- | |
1 | from __future__ import unicode_literals | |
2 | ||
3 | VERSION = (3, 10, 0) | |
0 | VERSION = (4, 0, 0) | |
4 | 1 | VERSION_STRING = ".".join([str(v) for v in VERSION]) |
0 | # -*- coding: utf-8 -*- | |
1 | from __future__ import unicode_literals | |
2 | ||
3 | 0 | from os.path import exists, join |
4 | 1 | |
5 | 2 | from .exceptions import BundleError, NoSuchBundle, RepositoryError |
6 | from .metadata import DEFAULTS, DONE, RUN_ME_AGAIN, OVERWRITE, DoNotRunAgain | |
3 | from .metadata import DoNotRunAgain | |
7 | 4 | from .utils import cached_property |
8 | 5 | from .utils.text import bold, mark_for_translation as _ |
9 | 6 | from .utils.text import validate_name |
14 | 11 | FILENAME_METADATA = "metadata.py" |
15 | 12 | |
16 | 13 | |
17 | def metadata_processor_classic(func): | |
18 | """ | |
19 | Decorator that tags metadata processors. | |
20 | """ | |
21 | func._is_metadata_processor = True | |
22 | func._is_classic_metadata_processor = True | |
23 | return func | |
14 | def metadata_reactor_for_bundle(bundle_name): | |
15 | reactor_names = set() | |
16 | ||
17 | def metadata_reactor(func): | |
18 | """ | |
19 | Decorator that tags metadata reactors. | |
20 | """ | |
21 | if func.__name__ == "defaults": | |
22 | raise ValueError(_( | |
23 | "metadata reactor in bundle '{}' cannot be named 'defaults'" | |
24 | ).format(bundle_name)) | |
25 | if func.__name__ in reactor_names: | |
26 | raise ValueError(_( | |
27 | "duplicate metadata reactor '{reactor}' in bundle '{bundle}'" | |
28 | ).format(bundle=bundle_name, reactor=func.__name__)) | |
29 | reactor_names.add(func.__name__) | |
30 | func._is_metadata_reactor = True | |
31 | return func | |
32 | ||
33 | return metadata_reactor | |
24 | 34 | |
25 | 35 | |
26 | def metadata_reactor(func): | |
27 | """ | |
28 | Decorator that tags metadata reactors. | |
29 | """ | |
30 | func._is_metadata_processor = True | |
31 | func._is_metadata_reactor = True | |
32 | return func | |
33 | ||
34 | ||
35 | class Bundle(object): | |
36 | class Bundle: | |
36 | 37 | """ |
37 | 38 | A collection of config items, bound to a node. |
38 | 39 | """ |
61 | 62 | if not exists(self.bundle_file): |
62 | 63 | return {} |
63 | 64 | else: |
65 | base_env = { |
66 | 'node': self.node, | |
67 | 'repo': self.repo, | |
68 | } | |
69 | for item_class in self.repo.item_classes: | |
70 | base_env[item_class.BUNDLE_ATTRIBUTE_NAME] = {} | |
71 | ||
64 | 72 | return self.repo.get_all_attrs_from_file( |
65 | 73 | self.bundle_file, |
66 | base_env={ | |
67 | 'node': self.node, | |
68 | 'repo': self.repo, | |
69 | }, | |
74 | base_env=base_env, | |
70 | 75 | ) |
71 | 76 | |
72 | 77 | @cached_property |
97 | 102 | ) |
98 | 103 | |
99 | 104 | @cached_property |
100 | def _metadata_processors(self): | |
101 | with io.job(_("{node} {bundle} collecting metadata processors").format( | |
105 | def _metadata_defaults_and_reactors(self): | |
106 | with io.job(_("{node} {bundle} collecting metadata reactors").format( | |
102 | 107 | node=bold(self.node.name), |
103 | 108 | bundle=bold(self.name), |
104 | 109 | )): |
105 | 110 | if not exists(self.metadata_file): |
106 | return {}, set(), set() | |
111 | return {}, set() | |
112 | ||
107 | 113 | defaults = {} |
108 | 114 | reactors = set() |
109 | classic_processors = set() | |
110 | 115 | internal_names = set() |
111 | 116 | for name, attr in self.repo.get_all_attrs_from_file( |
112 | 117 | self.metadata_file, |
113 | 118 | base_env={ |
114 | 'DEFAULTS': DEFAULTS, | |
115 | 'DONE': DONE, | |
116 | 'OVERWRITE': OVERWRITE, | |
117 | 'RUN_ME_AGAIN': RUN_ME_AGAIN, | |
118 | 119 | 'DoNotRunAgain': DoNotRunAgain, |
119 | 'metadata_processor': metadata_processor_classic, | |
120 | 'metadata_reactor': metadata_reactor, | |
120 | 'metadata_reactor': metadata_reactor_for_bundle(self.name), | |
121 | 121 | 'node': self.node, |
122 | 122 | 'repo': self.repo, |
123 | 123 | }, |
124 | 124 | ).items(): |
125 | 125 | if name == "defaults": |
126 | 126 | defaults = attr |
127 | elif getattr(attr, '_is_metadata_processor', False): | |
127 | elif getattr(attr, '_is_metadata_reactor', False): | |
128 | 128 | internal_name = getattr(attr, '__name__', name) |
129 | 129 | if internal_name in internal_names: |
130 | 130 | raise BundleError(_( |
131 | "Metadata processor '{name}' in bundle {bundle} for node {node} has " | |
131 | "Metadata reactor '{name}' in bundle {bundle} for node {node} has " | |
132 | 132 | "__name__ '{internal_name}', which was previously used by another " |
133 | "metadata processor in the same metadata.py. BundleWrap uses __name__ " | |
134 | "internally to tell metadata processors apart, so this is a problem. " | |
135 | "Perhaps you used a decorator on your metadata processors that " | |
133 | "metadata reactor in the same metadata.py. BundleWrap uses __name__ " | |
134 | "internally to tell metadata reactors apart, so this is a problem. " | |
135 | "Perhaps you used a decorator on your metadata reactors that " | |
136 | 136 | "doesn't use functools.wraps? You should use that." |
137 | 137 | ).format( |
138 | 138 | bundle=self.name, |
141 | 141 | name=name, |
142 | 142 | )) |
143 | 143 | internal_names.add(internal_name) |
144 | if getattr(attr, '_is_metadata_reactor', False): | |
145 | reactors.add(attr) | |
146 | elif getattr(attr, '_is_classic_metadata_processor', False): | |
147 | classic_processors.add(attr) | |
148 | else: | |
149 | # this should never happen | |
150 | raise AssertionError | |
151 | return defaults, reactors, classic_processors | |
144 | reactors.add(attr) | |
145 | return defaults, reactors |
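
The `__name__` bookkeeping above is also why the error message recommends `functools.wraps`: a decorator that fails to copy `__name__` makes every wrapped reactor look identical to BundleWrap. A minimal sketch of a well-behaved (hypothetical) reactor-wrapping decorator:

    from functools import wraps

    def log_calls(func):
        # without @wraps, every wrapped reactor would report
        # __name__ == "wrapper" and trip the duplicate-name check
        @wraps(func)
        def wrapper(*args, **kwargs):
            print("running", func.__name__)
            return func(*args, **kwargs)
        return wrapper
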
0 | # -*- coding: utf-8 -*- | |
1 | from __future__ import unicode_literals | |
2 | ||
3 | 0 | from cProfile import Profile |
4 | 1 | from functools import wraps |
5 | 2 | from os import environ |
6 | 3 | from os.path import abspath |
7 | from pipes import quote | |
4 | from shlex import quote | |
8 | 5 | from sys import argv, exit, stderr, stdout |
9 | 6 | from traceback import format_exc, print_exc |
10 | 7 |
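
With Python 2 support gone, the undocumented `pipes.quote` can be swapped for its documented equivalent `shlex.quote` (available since Python 3.3); both shell-escape a single token:

    from shlex import quote

    print(quote("safe-token"))     # safe-token (nothing to escape)
    print(quote("file name.txt"))  # 'file name.txt'
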
0 | # -*- coding: utf-8 -*- | |
1 | from __future__ import unicode_literals | |
2 | ||
3 | 0 | from datetime import datetime |
4 | 1 | from sys import exit |
5 | 2 | |
27 | 24 | |
28 | 25 | def bw_apply(repo, args): |
29 | 26 | errors = [] |
30 | target_nodes = get_target_nodes(repo, args['target'], adhoc_nodes=args['adhoc_nodes']) | |
31 | pending_nodes = target_nodes[:] | |
27 | target_nodes = get_target_nodes(repo, args['targets']) | |
28 | pending_nodes = target_nodes.copy() | |
32 | 29 | |
33 | 30 | io.progress_set_total(count_items(pending_nodes)) |
34 | 31 | |
35 | 32 | repo.hooks.apply_start( |
36 | 33 | repo, |
37 | args['target'], | |
34 | args['targets'], | |
38 | 35 | target_nodes, |
39 | 36 | interactive=args['interactive'], |
40 | 37 | ) |
97 | 94 | worker_pool.run() |
98 | 95 | |
99 | 96 | total_duration = datetime.now() - start_time |
97 | totals = stats(results) | |
100 | 98 | |
101 | 99 | if args['summary'] and results: |
102 | stats_summary(results, total_duration) | |
100 | stats_summary(results, totals, total_duration) | |
103 | 101 | error_summary(errors) |
104 | 102 | |
105 | 103 | repo.hooks.apply_end( |
106 | 104 | repo, |
107 | args['target'], | |
105 | args['targets'], | |
108 | 106 | target_nodes, |
109 | 107 | duration=total_duration, |
110 | 108 | ) |
111 | 109 | |
112 | exit(1 if errors else 0) | |
110 | exit(1 if errors or totals['failed'] else 0) | |
113 | 111 | |
114 | 112 | |
115 | def stats_summary(results, total_duration): | |
113 | def stats(results): | |
116 | 114 | totals = { |
117 | 115 | 'items': 0, |
118 | 116 | 'correct': 0, |
120 | 118 | 'skipped': 0, |
121 | 119 | 'failed': 0, |
122 | 120 | } |
121 | for result in results: | |
122 | totals['items'] += result.total | |
123 | for metric in ('correct', 'fixed', 'skipped', 'failed'): | |
124 | totals[metric] += getattr(result, metric) | |
125 | return totals | |
123 | 126 | |
127 | ||
128 | def stats_summary(results, totals, total_duration): | |
124 | 129 | rows = [[ |
125 | 130 | bold(_("node")), |
126 | 131 | _("items"), |
132 | 137 | ], ROW_SEPARATOR] |
133 | 138 | |
134 | 139 | for result in results: |
135 | totals['items'] += result.total | |
136 | for metric in ('correct', 'fixed', 'skipped', 'failed'): | |
137 | totals[metric] += getattr(result, metric) | |
138 | 140 | rows.append([ |
139 | 141 | result.node_name, |
140 | 142 | str(result.total), |
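
Net effect of the apply changes: the per-node results are now aggregated once by `stats()`, and `bw apply` exits non-zero when any item failed, even if no hard errors occurred. A rough illustration, using a namedtuple as a stand-in for the real result objects (hypothetical, for clarity only):

    from collections import namedtuple

    Result = namedtuple("Result", "node_name total correct fixed skipped failed")
    results = [
        Result("node1", 10, 8, 1, 0, 1),
        Result("node2", 5, 5, 0, 0, 0),
    ]
    totals = stats(results)
    # {'items': 15, 'correct': 13, 'fixed': 1, 'skipped': 0, 'failed': 1}
    # exit(1 if errors or totals['failed'] else 0) -> exits 1 here
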
0 | # -*- coding: utf-8 -*- | |
1 | from __future__ import unicode_literals | |
2 | ||
3 | 0 | from code import interact |
4 | 1 | |
5 | 2 | from .. import VERSION_STRING |
245 | 245 | ).format(x=red("!!!"))) |
246 | 246 | exit(1) |
247 | 247 | |
248 | target_nodes = get_target_nodes(repo, args['target'], adhoc_nodes=args['adhoc_nodes']) | |
248 | target_nodes = get_target_nodes(repo, args['targets']) | |
249 | 249 | |
250 | 250 | if args['branch'] or args['cmd_change'] or args['cmd_reset'] or args['prompt']: |
251 | 251 | intermissions = [] |
0 | # -*- coding: utf-8 -*- | |
1 | from __future__ import unicode_literals | |
2 | ||
3 | 0 | from ..group import GROUP_ATTR_DEFAULTS |
4 | 1 | from ..utils.text import bold, mark_for_translation as _ |
5 | 2 | from ..utils.ui import io |
12 | 9 | |
13 | 10 | def bw_groups(repo, args): |
14 | 11 | if not args['groups']: |
15 | for group in repo.groups: | |
12 | for group in sorted(repo.groups): | |
16 | 13 | io.stdout(group.name) |
17 | 14 | else: |
18 | groups = [repo.get_group(group.strip()) for group in args['groups'].split(",")] | |
15 | groups = {repo.get_group(group.strip()) for group in args['groups']} | |
19 | 16 | if not args['attrs']: |
20 | subgroups = set(groups) | |
17 | subgroups = groups.copy() | |
21 | 18 | for group in groups: |
22 | subgroups = subgroups.union(group.subgroups) | |
19 | subgroups.update(group.subgroups) | |
23 | 20 | for subgroup in sorted(subgroups): |
24 | 21 | io.stdout(subgroup.name) |
25 | 22 | else: |
0 | # -*- coding: utf-8 -*- | |
1 | from __future__ import unicode_literals | |
2 | ||
3 | 0 | from sys import exit |
4 | 1 | |
5 | 2 | from ..exceptions import NoSuchGroup, NoSuchNode |
30 | 27 | target = repo.get_group(args['node_or_group']) |
31 | 28 | target_type = 'group' |
32 | 29 | except NoSuchGroup: |
33 | if args['adhoc_nodes']: | |
34 | target = repo.create_node(args['node_or_group']) | |
35 | target_type = 'node' | |
36 | else: | |
37 | io.stderr(_("{x} No such node or group: {node_or_group}").format( | |
38 | node_or_group=args['node_or_group'], | |
39 | x=red("!!!"), | |
40 | )) | |
41 | exit(1) | |
30 | io.stderr(_("{x} No such node or group: {node_or_group}").format( | |
31 | node_or_group=args['node_or_group'], | |
32 | x=red("!!!"), | |
33 | )) | |
34 | exit(1) | |
42 | 35 | else: |
43 | 36 | if args['item']: |
44 | 37 | target = get_item(target, args['item']) |
57 | 50 | if args['dict']: |
58 | 51 | if args['group_membership']: |
59 | 52 | if target_type in ('node', 'repo'): |
60 | for group in target.groups: | |
53 | for group in sorted(target.groups): | |
61 | 54 | io.stdout(group.name) |
62 | 55 | else: |
63 | for node in target.nodes: | |
56 | for node in sorted(target.nodes): | |
64 | 57 | io.stdout(node.name) |
65 | 58 | elif args['metadata']: |
66 | for node in target.nodes: | |
59 | for node in sorted(target.nodes): | |
67 | 60 | io.stdout("{}\t{}".format(node.name, node.metadata_hash())) |
68 | 61 | else: |
69 | 62 | cdict = target.cached_cdict if args['item'] else target.cdict |
71 | 64 | io.stdout("REMOVE") |
72 | 65 | else: |
73 | 66 | for key, value in sorted(cdict.items()): |
74 | io.stdout("{}\t{}".format(key, value) if args['item'] else "{} {}".format(value, key)) | |
67 | io.stdout( | |
68 | "{}\t{}".format(key, value) if args['item'] | |
69 | else "{} {}".format(value, key) | |
70 | ) | |
75 | 71 | else: |
76 | 72 | if args['group_membership']: |
77 | 73 | io.stdout(target.group_membership_hash()) |
0 | # -*- coding: utf-8 -*- | |
1 | from __future__ import unicode_literals | |
2 | ||
3 | 0 | from os import makedirs |
4 | 1 | from os.path import dirname, exists, join |
5 | 2 | from sys import exit |
26 | 23 | |
27 | 24 | |
28 | 25 | def bw_items(repo, args): |
29 | node = get_node(repo, args['node'], adhoc_nodes=args['adhoc_nodes']) | |
26 | node = get_node(repo, args['node']) | |
30 | 27 | if args['preview'] and not args['item']: |
31 | 28 | io.stderr(_("{x} no ITEM given for preview").format(x=red("!!!"))) |
32 | 29 | exit(1) |
0 | # -*- coding: utf-8 -*- | |
1 | from __future__ import unicode_literals | |
2 | ||
3 | 0 | from os import environ |
4 | 1 | |
5 | 2 | from ..concurrency import WorkerPool |
39 | 36 | |
40 | 37 | def bw_lock_add(repo, args): |
41 | 38 | errors = [] |
42 | target_nodes = get_target_nodes(repo, args['target'], adhoc_nodes=args['adhoc_nodes']) | |
39 | target_nodes = get_target_nodes(repo, args['targets']) | |
43 | 40 | target_nodes = remove_dummy_nodes(target_nodes) |
44 | 41 | pending_nodes = target_nodes[:] |
45 | 42 | max_node_name_length = max([len(node.name) for node in target_nodes]) |
58 | 55 | 'kwargs': { |
59 | 56 | 'comment': args['comment'], |
60 | 57 | 'expiry': args['expiry'], |
61 | 'item_selectors': args['items'].split(","), | |
58 | 'item_selectors': args['items'], | |
62 | 59 | }, |
63 | 60 | } |
64 | 61 | |
93 | 90 | |
94 | 91 | def bw_lock_remove(repo, args): |
95 | 92 | errors = [] |
96 | target_nodes = get_target_nodes(repo, args['target'], adhoc_nodes=args['adhoc_nodes']) | |
93 | target_nodes = get_target_nodes(repo, args['targets']) | |
97 | 94 | target_nodes = remove_dummy_nodes(target_nodes) |
98 | 95 | pending_nodes = target_nodes[:] |
99 | 96 | max_node_name_length = max([len(node.name) for node in target_nodes]) |
149 | 146 | |
150 | 147 | def bw_lock_show(repo, args): |
151 | 148 | errors = [] |
152 | target_nodes = get_target_nodes(repo, args['target'], adhoc_nodes=args['adhoc_nodes']) | |
149 | target_nodes = get_target_nodes(repo, args['targets']) | |
153 | 150 | target_nodes = remove_dummy_nodes(target_nodes) |
154 | 151 | pending_nodes = target_nodes[:] |
155 | 152 | locks_on_node = {} |
0 | # -*- coding: utf-8 -*- | |
1 | from __future__ import unicode_literals | |
0 | from collections import OrderedDict | |
1 | from decimal import Decimal | |
2 | from sys import version_info | |
2 | 3 | |
3 | from decimal import Decimal | |
4 | ||
5 | from ..metadata import metadata_to_json | |
4 | from ..metadata import deepcopy_metadata, metadata_to_json | |
6 | 5 | from ..utils import Fault |
7 | from ..utils.cmdline import get_node, get_target_nodes | |
8 | from ..utils.dicts import value_at_key_path | |
6 | from ..utils.cmdline import get_target_nodes | |
7 | from ..utils.dicts import ( | |
8 | delete_key_at_path, | |
9 | replace_key_at_path, | |
10 | value_at_key_path, | |
11 | ) | |
9 | 12 | from ..utils.table import ROW_SEPARATOR, render_table |
10 | from ..utils.text import bold, force_text, mark_for_translation as _, red | |
13 | from ..utils.text import ( | |
14 | ansi_clean, | |
15 | blue, | |
16 | bold, | |
17 | force_text, | |
18 | green, | |
19 | mark_for_translation as _, | |
20 | red, | |
21 | yellow, | |
22 | ) | |
11 | 23 | from ..utils.ui import io, page_lines |
12 | 24 | |
13 | 25 | |
26 | def _color_for_source(key, source): | |
27 | if source.startswith("metadata_defaults:"): | |
28 | return blue(key) | |
29 | elif source.startswith("metadata_reactor:"): | |
30 | return green(key) | |
31 | elif source.startswith("group:"): | |
32 | return yellow(key) | |
33 | elif source.startswith("node:"): | |
34 | return red(key) | |
35 | else: | |
36 | return key | |
37 | ||
38 | ||
39 | def _colorize_path( | |
40 | metadata, | |
41 | path, | |
42 | sources, | |
43 | hide_defaults, | |
44 | hide_reactors, | |
45 | hide_groups, | |
46 | hide_node, | |
47 | ): | |
48 | if not isinstance(value_at_key_path(metadata, path), (dict, list, tuple, set)): | |
49 | # only last source relevant for atomic types | |
50 | sources = [sources[-1]] | |
51 | sources_filtered = False | |
52 | for src in sources.copy(): | |
53 | if ( | |
54 | (src.startswith("metadata_defaults:") and hide_defaults) or | |
55 | (src.startswith("metadata_reactor:") and hide_reactors) or | |
56 | (src.startswith("group:") and hide_groups) or | |
57 | (src.startswith("node:") and hide_node) | |
58 | ): | |
59 | sources.remove(src) | |
60 | sources_filtered = True | |
61 | if not sources: | |
62 | delete_key_at_path(metadata, path) | |
63 | return None | |
64 | elif len(sources) == 1: | |
65 | if sources_filtered: | |
66 | # do not colorize if a key is really mixed-source | |
67 | colorized_key = path[-1] | |
68 | else: | |
69 | colorized_key = _color_for_source(path[-1], sources[0]) | |
70 | replace_key_at_path( | |
71 | metadata, | |
72 | path, | |
73 | colorized_key, | |
74 | ) | |
75 | return colorized_key | |
76 | ||
77 | ||
78 | def _sort_dict_colorblind(old_dict): | |
79 | if version_info < (3, 7): | |
80 | new_dict = OrderedDict() | |
81 | else: | |
82 | new_dict = {} | |
83 | ||
84 | for key in sorted(old_dict.keys(), key=lambda k: ansi_clean(k)): | |
85 | if isinstance(old_dict[key], dict): | |
86 | new_dict[key] = _sort_dict_colorblind(old_dict[key]) | |
87 | else: | |
88 | new_dict[key] = old_dict[key] | |
89 | ||
90 | return new_dict | |
91 | ||
92 | ||
93 | def _list_starts_with(list_a, list_b): | |
94 | """ | |
95 | Returns True if list_a starts with list_b. | |
96 | """ | |
97 | list_a = tuple(list_a) | |
98 | list_b = tuple(list_b) | |
99 | try: | |
100 | return list_a[:len(list_b)] == list_b | |
101 | except IndexError: | |
102 | return False | |
103 | ||
104 | ||
14 | 105 | def bw_metadata(repo, args): |
15 | if args['table']: | |
16 | if not args['keys']: | |
17 | io.stdout(_("{x} at least one key is required with --table").format(x=red("!!!"))) | |
106 | target_nodes = get_target_nodes(repo, args['targets']) | |
107 | key_paths = sorted([path.strip().split("/") for path in args['keys']]) | |
108 | if len(target_nodes) > 1: | |
109 | if not key_paths: | |
110 | io.stdout(_("{x} at least one key is required when viewing multiple nodes").format(x=red("!!!"))) | |
18 | 111 | exit(1) |
19 | target_nodes = get_target_nodes(repo, args['target'], adhoc_nodes=args['adhoc_nodes']) | |
20 | key_paths = [path.strip().split(" ") for path in " ".join(args['keys']).split(",")] | |
21 | table = [[bold(_("node"))] + [bold(" ".join(path)) for path in key_paths], ROW_SEPARATOR] | |
22 | for node in target_nodes: | |
112 | if args['blame']: | |
113 | io.stdout(_("{x} blame information can only be shown for a single node").format(x=red("!!!"))) | |
114 | exit(1) | |
115 | ||
116 | table = [[bold(_("node"))] + [bold("/".join(path)) for path in key_paths], ROW_SEPARATOR] | |
117 | for node in sorted(target_nodes): | |
23 | 118 | values = [] |
24 | 119 | for key_path in key_paths: |
25 | 120 | metadata = node.metadata |
37 | 132 | table.append([bold(node.name)] + values) |
38 | 133 | page_lines(render_table(table)) |
39 | 134 | else: |
40 | node = get_node(repo, args['target'], adhoc_nodes=args['adhoc_nodes']) | |
135 | node = target_nodes.pop() | |
41 | 136 | if args['blame']: |
42 | key_paths = [path.strip() for path in " ".join(args['keys']).split(",")] | |
43 | 137 | table = [[bold(_("path")), bold(_("source"))], ROW_SEPARATOR] |
44 | 138 | for path, blamed in sorted(node.metadata_blame.items()): |
45 | joined_path = " ".join(path) | |
139 | joined_path = "/".join(path) | |
46 | 140 | for key_path in key_paths: |
47 | 141 | if joined_path.startswith(key_path): |
48 | 142 | table.append([joined_path, ", ".join(blamed)]) |
49 | 143 | break |
50 | 144 | page_lines(render_table(table)) |
51 | 145 | else: |
52 | for line in metadata_to_json( | |
53 | value_at_key_path(node.metadata, args['keys']), | |
54 | ).splitlines(): | |
55 | io.stdout(force_text(line)) | |
146 | metadata = deepcopy_metadata(node.metadata) | |
147 | blame = list(node.metadata_blame.items()) | |
148 | # sort descending by key path length since we will be replacing | |
149 | # the keys and can't access paths beneath replaced keys anymore | |
150 | blame.sort(key=lambda e: len(e[0]), reverse=True) | |
151 | ||
152 | for path, blamed in blame: | |
153 | if key_paths: | |
154 | # remove all paths we did not ask to see | |
155 | path_seen = False | |
156 | for filtered_path in key_paths: | |
157 | if ( | |
158 | _list_starts_with(path, filtered_path) or | |
159 | _list_starts_with(filtered_path, path) | |
160 | ): | |
161 | path_seen = True | |
162 | break | |
163 | if not path_seen: | |
164 | delete_key_at_path(metadata, path) | |
165 | continue | |
166 | ||
167 | colorized_key = _colorize_path( | |
168 | metadata, | |
169 | path, | |
170 | blamed, | |
171 | args['hide_defaults'], | |
172 | args['hide_reactors'], | |
173 | args['hide_groups'], | |
174 | args['hide_node'], | |
175 | ) | |
176 | for key_path in key_paths: | |
177 | if colorized_key and list(path) == key_path[:len(path)]: | |
178 | # we just replaced a key in the filtered path | |
179 | key_path[len(path) - 1] = colorized_key | |
180 | ||
181 | # now we need to recreate the dict, sorting the keys as if | |
182 | # they were not colored (otherwise we'd end up sorted by | |
183 | # color) | |
184 | metadata_sorted = _sort_dict_colorblind(metadata) | |
185 | ||
186 | page_lines([ | |
187 | force_text(line).replace("\\u001b", "\033") | |
188 | for line in metadata_to_json( | |
189 | metadata_sorted, | |
190 | sort_keys=False, | |
191 | ).splitlines() | |
192 | ]) |
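
Putting the helpers above together: `bw metadata` walks `node.metadata_blame`, colors each key by its source prefix, and drops keys whose sources are all hidden by the `--hide-*` flags. A self-contained illustration of the two pure helpers (the part after each source prefix is an assumption; the code only checks the prefixes):

    assert _color_for_source("port", "metadata_defaults:myapp") == blue("port")
    assert _color_for_source("port", "metadata_reactor:myapp.url") == green("port")
    assert _color_for_source("port", "group:webservers") == yellow("port")
    assert _color_for_source("port", "node:node1") == red("port")

    assert _list_starts_with(("users", "jdoe", "shell"), ("users",))
    assert not _list_starts_with(("users",), ("groups",))
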
0 | # -*- coding: utf-8 -*- | |
1 | from __future__ import unicode_literals | |
2 | ||
3 | 0 | from os import environ |
4 | 1 | from sys import exit |
5 | 2 | |
11 | 8 | from ..group import GROUP_ATTR_DEFAULTS |
12 | 9 | |
13 | 10 | |
14 | NODE_ATTRS = sorted(list(GROUP_ATTR_DEFAULTS) + ['bundles', 'groups', 'hostname']) | |
11 | NODE_ATTRS = sorted(list(GROUP_ATTR_DEFAULTS) + ['bundles', 'file_path', 'groups', 'hostname']) | |
15 | 12 | NODE_ATTRS_LISTS = ('bundles', 'groups') |
16 | 13 | |
17 | 14 | |
24 | 21 | inline, |
25 | 22 | ): |
26 | 23 | rows = [[entity_label], ROW_SEPARATOR] |
27 | selected_attrs = [attr.strip() for attr in selected_attrs.split(",")] | |
28 | if selected_attrs == ['all']: | |
24 | selected_attrs = {attr.strip() for attr in selected_attrs} | |
25 | ||
26 | if selected_attrs == {'all'}: | |
29 | 27 | selected_attrs = available_attrs |
28 | elif 'all' in selected_attrs: | |
29 | io.stderr(_( | |
30 | "{x} invalid attribute list requested ('all' and extraneous): {attr}" | |
31 | ).format(x=red("!!!"), attr=", ".join(sorted(selected_attrs)))) | |
32 | exit(1) | |
33 | ||
30 | 34 | for attr in selected_attrs: |
31 | 35 | if attr not in available_attrs: |
32 | 36 | io.stderr(_("{x} unknown attribute: {attr}").format(x=red("!!!"), attr=attr)) |
33 | 37 | exit(1) |
34 | 38 | rows[0].append(bold(attr)) |
39 | ||
35 | 40 | has_list_attrs = False |
36 | for entity in entities: | |
41 | for entity in sorted(entities): | |
37 | 42 | attr_values = [[entity.name]] |
38 | 43 | for attr in selected_attrs: |
39 | 44 | if attr in available_attrs_lists: |
66 | 71 | |
67 | 72 | |
68 | 73 | def bw_nodes(repo, args): |
69 | if args['target'] is not None: | |
70 | nodes = get_target_nodes(repo, args['target'], adhoc_nodes=args['adhoc_nodes']) | |
74 | if args['targets']: | |
75 | nodes = get_target_nodes(repo, args['targets']) | |
71 | 76 | else: |
72 | 77 | nodes = repo.nodes |
73 | 78 | if not args['attrs']: |
74 | for node in nodes: | |
79 | for node in sorted(nodes): | |
75 | 80 | io.stdout(node.name) |
76 | 81 | else: |
77 | 82 | _attribute_table( |
0 | # -*- coding: utf-8 -*- | |
1 | from __future__ import unicode_literals | |
2 | ||
3 | from argparse import ArgumentParser, SUPPRESS | |
0 | from argparse import ArgumentParser, RawTextHelpFormatter, SUPPRESS | |
4 | 1 | from os import environ, getcwd |
5 | 2 | |
6 | 3 | from .. import VERSION_STRING |
16 | 13 | from .metadata import bw_metadata |
17 | 14 | from .nodes import bw_nodes |
18 | 15 | from .plot import bw_plot_group, bw_plot_node, bw_plot_node_groups |
19 | from .repo import bw_repo_bundle_create, bw_repo_create, bw_repo_plugin_install, \ | |
20 | bw_repo_plugin_list, bw_repo_plugin_search, bw_repo_plugin_remove, bw_repo_plugin_update | |
16 | from .repo import bw_repo_bundle_create, bw_repo_create | |
21 | 17 | from .run import bw_run |
22 | 18 | from .stats import bw_stats |
23 | 19 | from .test import bw_test |
37 | 33 | default=False, |
38 | 34 | dest='add_ssh_host_keys', |
39 | 35 | help=_("set StrictHostKeyChecking=no instead of yes for SSH"), |
40 | ) | |
41 | parser.add_argument( | |
42 | "-A", | |
43 | "--adhoc-nodes", | |
44 | action='store_true', | |
45 | default=False, | |
46 | dest='adhoc_nodes', | |
47 | help=_( | |
48 | "treat unknown node names as adhoc 'virtual' nodes that receive configuration only " | |
49 | "through groups whose member_patterns match the node name given on the command line " | |
50 | "(which also has to be a resolvable hostname)"), | |
51 | 36 | ) |
52 | 37 | parser.add_argument( |
53 | 38 | "-d", |
88 | 73 | |
89 | 74 | # bw apply |
90 | 75 | help_apply = _("Applies the configuration defined in your repository to your nodes") |
91 | parser_apply = subparsers.add_parser("apply", description=help_apply, help=help_apply) | |
76 | parser_apply = subparsers.add_parser( | |
77 | "apply", | |
78 | description=help_apply, | |
79 | help=help_apply, | |
80 | formatter_class=RawTextHelpFormatter, # for HELP_get_target_nodes | |
81 | ) | |
92 | 82 | parser_apply.set_defaults(func=bw_apply) |
93 | 83 | parser_apply.add_argument( |
94 | 'target', | |
95 | metavar=_("TARGETS"), | |
84 | 'targets', | |
85 | metavar=_("TARGET"), | |
86 | nargs='+', | |
96 | 87 | type=str, |
97 | 88 | help=HELP_get_target_nodes, |
98 | 89 | ) |
115 | 106 | parser_apply.add_argument( |
116 | 107 | "-o", |
117 | 108 | "--only", |
118 | default="", | |
109 | default=[], | |
119 | 110 | dest='autoonly', |
120 | help=_( | |
121 | "e.g. 'file:/foo,tag:foo,bundle:bar' " | |
122 | "to skip EVERYTHING BUT all instances of file:/foo " | |
123 | "and items with tag 'foo', " | |
124 | "or in bundle 'bar', " | |
125 | "or a dependency of any of these" | |
126 | ), | |
111 | help=_("""skip all items not matching any SELECTOR: | |
112 | ||
113 | file:/my_path # this specific item | |
114 | tag:my_tag # items with this tag | |
115 | bundle:my_bundle # items in this bundle | |
116 | ||
117 | dependencies of selected items will NOT be skipped | |
118 | """), | |
127 | 119 | metavar=_("SELECTOR"), |
120 | nargs='+', | |
128 | 121 | type=str, |
129 | 122 | ) |
130 | 123 | bw_apply_p_default = int(environ.get("BW_NODE_WORKERS", "4")) |
150 | 143 | parser_apply.add_argument( |
151 | 144 | "-s", |
152 | 145 | "--skip", |
153 | default="", | |
146 | default=[], | |
154 | 147 | dest='autoskip', |
155 | help=_( | |
156 | "e.g. 'file:/foo,tag:foo,bundle:bar' " | |
157 | "to skip all instances of file:/foo " | |
158 | "and items with tag 'foo', " | |
159 | "or in bundle 'bar'" | |
160 | ), | |
148 | help=_("""skip items matching any SELECTOR: | |
149 | ||
150 | file:/my_path # this specific item | |
151 | tag:my_tag # items with this tag | |
152 | bundle:my_bundle # items in this bundle | |
153 | """), | |
161 | 154 | metavar=_("SELECTOR"), |
155 | nargs='+', | |
162 | 156 | type=str, |
163 | 157 | ) |
164 | 158 | parser_apply.add_argument( |
208 | 202 | |
209 | 203 | # bw diff |
210 | 204 | help_diff = _("Show differences between nodes") |
211 | parser_diff = subparsers.add_parser("diff", description=help_diff, help=help_diff) | |
205 | parser_diff = subparsers.add_parser( | |
206 | "diff", | |
207 | description=help_diff, | |
208 | help=help_diff, | |
209 | formatter_class=RawTextHelpFormatter, # for HELP_get_target_nodes | |
210 | ) | |
212 | 211 | parser_diff.set_defaults(func=bw_diff) |
213 | 212 | parser_diff.add_argument( |
214 | 213 | "-b", |
267 | 266 | help=_("compare metadata instead of configuration"), |
268 | 267 | ) |
269 | 268 | parser_diff.add_argument( |
270 | 'target', | |
271 | metavar=_("TARGETS"), | |
269 | 'targets', | |
270 | metavar=_("TARGET"), | |
271 | nargs='+', | |
272 | 272 | type=str, |
273 | 273 | help=HELP_get_target_nodes, |
274 | 274 | ) |
278 | 278 | parser_groups = subparsers.add_parser("groups", description=help_groups, help=help_groups) |
279 | 279 | parser_groups.set_defaults(func=bw_groups) |
280 | 280 | parser_groups.add_argument( |
281 | "-a", "--attrs", | |
282 | dest='attrs', | |
283 | metavar=_("ATTR"), | |
284 | nargs='+', | |
285 | type=str, | |
286 | help=_("show table with the given attributes for each group " | |
287 | "(e.g. 'all', 'members', 'os', ...)"), | |
288 | ) | |
289 | parser_groups.add_argument( | |
281 | 290 | "-i", |
282 | 291 | "--inline", |
283 | 292 | action='store_true', |
287 | 296 | parser_groups.add_argument( |
288 | 297 | 'groups', |
289 | 298 | default=None, |
290 | metavar=_("GROUP1,GROUP2..."), | |
291 | nargs='?', | |
292 | type=str, | |
293 | help=_("show the given groups and their subgroups"), | |
294 | ) | |
295 | parser_groups.add_argument( | |
296 | 'attrs', | |
297 | default=None, | |
298 | metavar=_("ATTR1,ATTR2..."), | |
299 | nargs='?', | |
300 | type=str, | |
301 | help=_("show table with the given attributes for each group " | |
302 | "(e.g. 'all', 'members', 'os', ...)"), | |
299 | metavar=_("GROUP"), | |
300 | nargs='*', | |
301 | type=str, | |
302 | help=_("show the given groups (and their subgroups, unless --attrs is used)"), | |
303 | 303 | ) |
304 | 304 | |
305 | 305 | # bw hash |
402 | 402 | |
403 | 403 | # bw lock |
404 | 404 | help_lock = _("Manage locks on nodes used to prevent collisions between BundleWrap users") |
405 | parser_lock = subparsers.add_parser("lock", description=help_lock, help=help_lock) | |
405 | parser_lock = subparsers.add_parser( | |
406 | "lock", | |
407 | description=help_lock, | |
408 | help=help_lock, | |
409 | ) | |
406 | 410 | parser_lock_subparsers = parser_lock.add_subparsers() |
407 | 411 | |
408 | 412 | # bw lock add |
411 | 415 | "add", |
412 | 416 | description=help_lock_add, |
413 | 417 | help=help_lock_add, |
418 | formatter_class=RawTextHelpFormatter, # for HELP_get_target_nodes | |
414 | 419 | ) |
415 | 420 | parser_lock_add.set_defaults(func=bw_lock_add) |
416 | 421 | parser_lock_add.add_argument( |
417 | 'target', | |
418 | metavar=_("TARGETS"), | |
422 | 'targets', | |
423 | metavar=_("TARGET"), | |
424 | nargs='+', | |
419 | 425 | type=str, |
420 | 426 | help=HELP_get_target_nodes, |
421 | 427 | ) |
440 | 446 | parser_lock_add.add_argument( |
441 | 447 | "-i", |
442 | 448 | "--items", |
443 | default="*", | |
449 | default=["*"], | |
444 | 450 | dest='items', |
445 | help=_("comma-separated list of item selectors the lock applies to " | |
446 | "(defaults to \"*\" meaning all)"), | |
451 | help=_("""lock only items matching any SELECTOR: | |
452 | ||
453 | file:/my_path # this specific item | |
454 | tag:my_tag # items with this tag | |
455 | bundle:my_bundle # items in this bundle | |
456 | """), | |
457 | metavar=_("SELECTOR"), | |
458 | nargs='+', | |
447 | 459 | type=str, |
448 | 460 | ) |
449 | 461 | bw_lock_add_p_default = int(environ.get("BW_NODE_WORKERS", "4")) |
463 | 475 | "remove", |
464 | 476 | description=help_lock_remove, |
465 | 477 | help=help_lock_remove, |
478 | formatter_class=RawTextHelpFormatter, # for HELP_get_target_nodes | |
466 | 479 | ) |
467 | 480 | parser_lock_remove.set_defaults(func=bw_lock_remove) |
468 | 481 | parser_lock_remove.add_argument( |
469 | 'target', | |
470 | metavar=_("TARGETS"), | |
482 | 'targets', | |
483 | metavar=_("TARGET"), | |
484 | nargs='+', | |
471 | 485 | type=str, |
472 | 486 | help=HELP_get_target_nodes, |
473 | 487 | ) |
494 | 508 | "show", |
495 | 509 | description=help_lock_show, |
496 | 510 | help=help_lock_show, |
511 | formatter_class=RawTextHelpFormatter, # for HELP_get_target_nodes | |
497 | 512 | ) |
498 | 513 | parser_lock_show.set_defaults(func=bw_lock_show) |
499 | 514 | parser_lock_show.add_argument( |
500 | 'target', | |
515 | 'targets', | |
501 | 516 | metavar=_("TARGETS"), |
517 | nargs='+', | |
502 | 518 | type=str, |
503 | 519 | help=HELP_get_target_nodes, |
504 | 520 | ) |
514 | 530 | ) |
515 | 531 | |
516 | 532 | # bw metadata |
517 | help_metadata = ("View a JSON representation of a node's metadata") | |
533 | help_metadata = ("View a JSON representation of a node's metadata (defaults blue, reactors green, groups yellow, node red) or a table of selected metadata keys from multiple nodes") | |
518 | 534 | parser_metadata = subparsers.add_parser( |
519 | 535 | "metadata", |
520 | 536 | description=help_metadata, |
521 | 537 | help=help_metadata, |
538 | formatter_class=RawTextHelpFormatter, | |
522 | 539 | ) |
523 | 540 | parser_metadata.set_defaults(func=bw_metadata) |
524 | 541 | parser_metadata.add_argument( |
525 | 'target', | |
526 | metavar=_("NODE"), | |
527 | type=str, | |
528 | help=_("node to print JSON-formatted metadata for"), | |
542 | 'targets', | |
543 | metavar=_("TARGET"), | |
544 | nargs='+', | |
545 | type=str, | |
546 | help=HELP_get_target_nodes, | |
529 | 547 | ) |
530 | 548 | parser_metadata.add_argument( |
531 | 'keys', | |
549 | "-k", "--keys", | |
532 | 550 | default=[], |
551 | dest='keys', | |
533 | 552 | metavar=_("KEY"), |
534 | 553 | nargs='*', |
535 | 554 | type=str, |
536 | help=_("print only partial metadata from the given space-separated key path (e.g. `bw metadata mynode users jdoe` to show `mynode.metadata['users']['jdoe']`)"), | |
555 | help=_("show only partial metadata from the given key paths (e.g. `bw metadata mynode -k users/jdoe` to show `mynode.metadata['users']['jdoe']`)"), | |
537 | 556 | ) |
538 | 557 | parser_metadata.add_argument( |
539 | "--blame", | |
558 | "-b", "--blame", | |
540 | 559 | action='store_true', |
541 | 560 | dest='blame', |
542 | 561 | help=_("show where each piece of metadata comes from"), |
543 | 562 | ) |
544 | 563 | parser_metadata.add_argument( |
545 | "-t", | |
546 | "--table", | |
547 | action='store_true', | |
548 | dest='table', | |
549 | help=_( | |
550 | "show a table of selected metadata values from multiple nodes instead; " | |
551 | "allows for multiple comma-separated paths in KEY; " | |
552 | "allows for node selectors in NODE (e.g. 'NODE1,NODE2,GROUP1,bundle:BUNDLE1...')" | |
553 | ), | |
564 | "-D", "--hide-defaults", | |
565 | action='store_true', | |
566 | dest='hide_defaults', | |
567 | help=_("hide values set by defaults in metadata.py"), | |
568 | ) | |
569 | parser_metadata.add_argument( | |
570 | "-G", "--hide-groups", | |
571 | action='store_true', | |
572 | dest='hide_groups', | |
573 | help=_("hide values set in groups.py"), | |
574 | ) | |
575 | parser_metadata.add_argument( | |
576 | "-N", "--hide-node", | |
577 | action='store_true', | |
578 | dest='hide_node', | |
579 | help=_("hide values set in nodes.py"), | |
580 | ) | |
581 | parser_metadata.add_argument( | |
582 | "-R", "--hide-reactors", | |
583 | action='store_true', | |
584 | dest='hide_reactors', | |
585 | help=_("hide values set by reactors in metadata.py"), | |
554 | 586 | ) |
555 | 587 | |
556 | 588 | # bw nodes |
557 | 589 | help_nodes = _("List nodes in this repository") |
558 | parser_nodes = subparsers.add_parser("nodes", description=help_nodes, help=help_nodes) | |
590 | parser_nodes = subparsers.add_parser( | |
591 | "nodes", | |
592 | description=help_nodes, | |
593 | help=help_nodes, | |
594 | formatter_class=RawTextHelpFormatter, # for HELP_get_target_nodes | |
595 | ) | |
559 | 596 | parser_nodes.set_defaults(func=bw_nodes) |
560 | 597 | parser_nodes.add_argument( |
561 | 598 | "-i", |
565 | 602 | help=_("keep lists on a single line (for grep)"), |
566 | 603 | ) |
567 | 604 | parser_nodes.add_argument( |
568 | 'target', | |
569 | default=None, | |
570 | metavar=_("TARGETS"), | |
571 | nargs='?', | |
605 | 'targets', | |
606 | default=None, | |
607 | metavar=_("TARGET"), | |
608 | nargs='*', | |
572 | 609 | type=str, |
573 | 610 | help=HELP_get_target_nodes, |
574 | 611 | ) |
575 | 612 | parser_nodes.add_argument( |
576 | 'attrs', | |
577 | default=None, | |
578 | metavar=_("ATTR1,ATTR2..."), | |
579 | nargs='?', | |
613 | "-a", | |
614 | "--attrs", | |
615 | default=None, | |
616 | dest='attrs', | |
617 | metavar=_("ATTR"), | |
618 | nargs='+', | |
580 | 619 | type=str, |
581 | 620 | help=_("show table with the given attributes for each node " |
582 | 621 | "(e.g. 'all', 'groups', 'bundles', 'hostname', 'os', ...)"), |
656 | 695 | dest='depends_reverse', |
657 | 696 | help=_("do not show reverse dependencies ('needed_by')"), |
658 | 697 | ) |
659 | parser_plot_subparsers_node.add_argument( | |
660 | "--no-depends-static", | |
661 | action='store_false', | |
662 | dest='depends_static', | |
663 | help=_("do not show static dependencies"), | |
664 | ) | |
665 | 698 | |
666 | 699 | # bw plot groups-for-node |
667 | 700 | help_plot_node_groups = _("Show where a specific node gets its groups from") |
702 | 735 | parser_repo_subparsers_create = parser_repo_subparsers.add_parser("create") |
703 | 736 | parser_repo_subparsers_create.set_defaults(func=bw_repo_create) |
704 | 737 | |
705 | # bw repo plugin | |
706 | parser_repo_subparsers_plugin = parser_repo_subparsers.add_parser("plugin") | |
707 | parser_repo_subparsers_plugin_subparsers = parser_repo_subparsers_plugin.add_subparsers() | |
708 | ||
709 | # bw repo plugin install | |
710 | parser_repo_subparsers_plugin_install = parser_repo_subparsers_plugin_subparsers.add_parser("install") | |
711 | parser_repo_subparsers_plugin_install.set_defaults(func=bw_repo_plugin_install) | |
712 | parser_repo_subparsers_plugin_install.add_argument( | |
713 | 'plugin', | |
714 | metavar=_("PLUGIN_NAME"), | |
715 | type=str, | |
716 | help=_("name of plugin to install"), | |
717 | ) | |
718 | parser_repo_subparsers_plugin_install.add_argument( | |
719 | "-f", | |
720 | "--force", | |
721 | action='store_true', | |
722 | dest='force', | |
723 | help=_("overwrite existing files when installing"), | |
724 | ) | |
725 | ||
726 | # bw repo plugin list | |
727 | parser_repo_subparsers_plugin_list = parser_repo_subparsers_plugin_subparsers.add_parser("list") | |
728 | parser_repo_subparsers_plugin_list.set_defaults(func=bw_repo_plugin_list) | |
729 | ||
730 | # bw repo plugin remove | |
731 | parser_repo_subparsers_plugin_remove = parser_repo_subparsers_plugin_subparsers.add_parser("remove") | |
732 | parser_repo_subparsers_plugin_remove.set_defaults(func=bw_repo_plugin_remove) | |
733 | parser_repo_subparsers_plugin_remove.add_argument( | |
734 | 'plugin', | |
735 | metavar=_("PLUGIN_NAME"), | |
736 | type=str, | |
737 | help=_("name of plugin to remove"), | |
738 | ) | |
739 | parser_repo_subparsers_plugin_remove.add_argument( | |
740 | "-f", | |
741 | "--force", | |
742 | action='store_true', | |
743 | dest='force', | |
744 | help=_("remove files even if locally modified"), | |
745 | ) | |
746 | ||
747 | # bw repo plugin search | |
748 | parser_repo_subparsers_plugin_search = parser_repo_subparsers_plugin_subparsers.add_parser("search") | |
749 | parser_repo_subparsers_plugin_search.set_defaults(func=bw_repo_plugin_search) | |
750 | parser_repo_subparsers_plugin_search.add_argument( | |
751 | 'term', | |
752 | metavar=_("SEARCH_STRING"), | |
753 | nargs='?', | |
754 | type=str, | |
755 | help=_("look for this string in plugin names and descriptions"), | |
756 | ) | |
757 | ||
758 | # bw repo plugin update | |
759 | parser_repo_subparsers_plugin_update = parser_repo_subparsers_plugin_subparsers.add_parser("update") | |
760 | parser_repo_subparsers_plugin_update.set_defaults(func=bw_repo_plugin_update) | |
761 | parser_repo_subparsers_plugin_update.add_argument( | |
762 | 'plugin', | |
763 | default=None, | |
764 | metavar=_("PLUGIN_NAME"), | |
765 | nargs='?', | |
766 | type=str, | |
767 | help=_("name of plugin to update"), | |
768 | ) | |
769 | parser_repo_subparsers_plugin_update.add_argument( | |
770 | "-c", | |
771 | "--check-only", | |
772 | action='store_true', | |
773 | dest='check_only', | |
774 | help=_("only show what would be updated"), | |
775 | ) | |
776 | parser_repo_subparsers_plugin_update.add_argument( | |
777 | "-f", | |
778 | "--force", | |
779 | action='store_true', | |
780 | dest='force', | |
781 | help=_("overwrite local modifications when updating"), | |
782 | ) | |
783 | ||
784 | 738 | # bw run |
785 | 739 | help_run = _("Run a one-off command on a number of nodes") |
786 | parser_run = subparsers.add_parser("run", description=help_run, help=help_run) | |
740 | parser_run = subparsers.add_parser( | |
741 | "run", | |
742 | description=help_run, | |
743 | help=help_run, | |
744 | formatter_class=RawTextHelpFormatter, # for HELP_get_target_nodes | |
745 | ) | |
787 | 746 | parser_run.set_defaults(func=bw_run) |
788 | 747 | parser_run.add_argument( |
789 | 'target', | |
790 | metavar=_("TARGETS"), | |
748 | 'targets', | |
749 | metavar=_("TARGET"), | |
750 | nargs='+', | |
791 | 751 | type=str, |
792 | 752 | help=HELP_get_target_nodes, |
793 | 753 | ) |
853 | 813 | "change in future releases). Currently, the default is -IJKM " |
854 | 814 | "if specific nodes are given and -HIJKMS if testing the " |
855 | 815 | "entire repo.") |
856 | parser_test = subparsers.add_parser("test", description=help_test, help=help_test) | |
816 | parser_test = subparsers.add_parser( | |
817 | "test", | |
818 | description=help_test, | |
819 | help=help_test, | |
820 | formatter_class=RawTextHelpFormatter, # for HELP_get_target_nodes | |
821 | ) | |
857 | 822 | parser_test.set_defaults(func=bw_test) |
858 | 823 | parser_test.add_argument( |
859 | 'target', | |
860 | default=None, | |
861 | metavar=_("TARGETS"), | |
862 | nargs='?', | |
863 | type=str, | |
864 | help=HELP_get_target_nodes + _(" (defaults to all)"), | |
865 | ) | |
866 | parser_test.add_argument( | |
867 | "-c", | |
868 | "--plugin-conflicts", | |
869 | action='store_true', | |
870 | dest='plugin_conflicts', | |
871 | help=_("check for local modifications to files installed by plugins"), | |
824 | 'targets', | |
825 | default=None, | |
826 | metavar=_("TARGET"), | |
827 | nargs='*', | |
828 | type=str, | |
829 | help=HELP_get_target_nodes + _("\n(defaults to all)"), | |
872 | 830 | ) |
873 | 831 | parser_test.add_argument( |
874 | 832 | "-d", |
934 | 892 | ) |
935 | 893 | parser_test.add_argument( |
936 | 894 | "-M", |
937 | "--metadata-collisions", | |
938 | action='store_true', | |
939 | dest='metadata_collisions', | |
940 | help=_("check for conflicting metadata keys in group metadata"), | |
895 | "--metadata-conflicts", | |
896 | action='store_true', | |
897 | dest='metadata_conflicts', | |
898 | help=_("check for conflicting metadata keys in group metadata, reactors, and defaults"), | |
941 | 899 | ) |
942 | 900 | parser_test.add_argument( |
943 | 901 | "-o", |
947 | 905 | help=_("check for bundles not assigned to any node"), |
948 | 906 | ) |
949 | 907 | parser_test.add_argument( |
950 | "-s", | |
951 | "--secret-rotation", | |
952 | default=None, | |
953 | dest='ignore_secret_identifiers', | |
954 | help=_("ensure every string passed to repo.vault.[human_]password_for() is used at least " | |
955 | "twice (using it only once means you're probably managing only one end of an " | |
956 | "authentication, making it dangerous to rotate your .secrets.cfg); PATTERNS is a " | |
957 | "comma-separated list of regex patterns for strings to ignore in this check " | |
958 | "(just pass an empty string if you don't need to ignore anything)"), | |
959 | metavar="PATTERNS", | |
960 | type=str, | |
961 | ) | |
962 | parser_test.add_argument( | |
963 | 908 | "-S", |
964 | 909 | "--subgroup-loops", |
965 | 910 | action='store_true', |
969 | 914 | |
970 | 915 | # bw verify |
971 | 916 | help_verify = _("Inspect the health or 'correctness' of a node without changing it") |
972 | parser_verify = subparsers.add_parser("verify", description=help_verify, help=help_verify) | |
917 | parser_verify = subparsers.add_parser( | |
918 | "verify", | |
919 | description=help_verify, | |
920 | help=help_verify, | |
921 | formatter_class=RawTextHelpFormatter, # for HELP_get_target_nodes | |
922 | ) | |
973 | 923 | parser_verify.set_defaults(func=bw_verify) |
974 | 924 | parser_verify.add_argument( |
975 | 'target', | |
976 | metavar=_("TARGETS"), | |
925 | 'targets', | |
926 | metavar=_("TARGET"), | |
927 | nargs='+', | |
977 | 928 | type=str, |
978 | 929 | help=HELP_get_target_nodes, |
979 | 930 | ) |
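
The recurring pattern throughout this file: options that used to take one comma-separated string now use `nargs='+'`/`nargs='*'`, so handlers receive ready-made lists instead of calling `.split(",")`. A standalone sketch of the new style (the parser and values here are illustrative, mirroring the real option names):

    from argparse import ArgumentParser

    parser = ArgumentParser()
    parser.add_argument('targets', metavar='TARGET', nargs='+', type=str)
    parser.add_argument('-k', '--keys', default=[], dest='keys',
                        metavar='KEY', nargs='*', type=str)

    args = parser.parse_args(['node1', 'group:web', '-k', 'users/jdoe'])
    assert args.targets == ['node1', 'group:web']
    assert args.keys == ['users/jdoe']
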
0 | # -*- coding: utf-8 -*- | |
1 | from __future__ import unicode_literals | |
2 | ||
3 | 0 | from ..deps import prepare_dependencies |
4 | 1 | from ..utils.plot import graph_for_items, plot_group, plot_node_groups |
5 | 2 | from ..utils.cmdline import get_group, get_node |
25 | 22 | |
26 | 23 | |
27 | 24 | def bw_plot_node(repo, args): |
28 | node = get_node(repo, args['node'], adhoc_nodes=args['adhoc_nodes']) | |
25 | node = get_node(repo, args['node']) | |
29 | 26 | for line in graph_for_items( |
30 | 27 | node.name, |
31 | 28 | prepare_dependencies(node.items, node.os, node.os_version), |
32 | 29 | cluster=args['cluster'], |
33 | 30 | concurrency=args['depends_concurrency'], |
34 | static=args['depends_static'], | |
35 | 31 | regular=args['depends_regular'], |
36 | 32 | reverse=args['depends_reverse'], |
37 | 33 | auto=args['depends_auto'], |
40 | 36 | |
41 | 37 | |
42 | 38 | def bw_plot_node_groups(repo, args): |
43 | node = get_node(repo, args['node'], adhoc_nodes=args['adhoc_nodes']) | |
39 | node = get_node(repo, args['node']) | |
44 | 40 | for line in plot_node_groups(node): |
45 | 41 | io.stdout(line) |
0 | # -*- coding: utf-8 -*- | |
1 | from __future__ import unicode_literals | |
2 | ||
3 | from sys import exit | |
4 | ||
5 | from ..exceptions import NoSuchPlugin, PluginLocalConflict | |
6 | from ..plugins import PluginManager | |
7 | 0 | from ..repo import Repository |
8 | from ..utils.text import blue, bold, mark_for_translation as _, red | |
9 | from ..utils.ui import io | |
10 | 1 | |
11 | 2 | |
12 | 3 | def bw_repo_bundle_create(repo, args): |
15 | 6 | |
16 | 7 | def bw_repo_create(path, args): |
17 | 8 | Repository.create(path) |
18 | ||
19 | ||
20 | def bw_repo_plugin_install(repo, args): | |
21 | pm = PluginManager(repo.path) | |
22 | try: | |
23 | manifest = pm.install(args['plugin'], force=args['force']) | |
24 | io.stdout(_("{x} Installed '{plugin}' (v{version})").format( | |
25 | x=blue("i"), | |
26 | plugin=args['plugin'], | |
27 | version=manifest['version'], | |
28 | )) | |
29 | if 'help' in manifest: | |
30 | io.stdout("") | |
31 | for line in manifest['help'].split("\n"): | |
32 | io.stdout(line) | |
33 | except NoSuchPlugin: | |
34 | io.stderr(_("{x} No such plugin: {plugin}").format(x=red("!!!"), plugin=args['plugin'])) | |
35 | exit(1) | |
36 | except PluginLocalConflict as e: | |
37 | io.stderr(_("{x} Plugin installation failed: {reason}").format( | |
38 | reason=e.message, | |
39 | x=red("!!!"), | |
40 | )) | |
41 | exit(1) | |
42 | ||
43 | ||
44 | def bw_repo_plugin_list(repo, args): | |
45 | pm = PluginManager(repo.path) | |
46 | for plugin, version in pm.list(): | |
47 | io.stdout(_("{plugin} (v{version})").format(plugin=plugin, version=version)) | |
48 | ||
49 | ||
50 | def bw_repo_plugin_remove(repo, args): | |
51 | pm = PluginManager(repo.path) | |
52 | try: | |
53 | pm.remove(args['plugin'], force=args['force']) | |
54 | except NoSuchPlugin: | |
55 | io.stdout(_("{x} Plugin '{plugin}' is not installed").format( | |
56 | x=red("!!!"), | |
57 | plugin=args['plugin'], | |
58 | )) | |
59 | exit(1) | |
60 | ||
61 | ||
62 | def bw_repo_plugin_search(repo, args): | |
63 | pm = PluginManager(repo.path) | |
64 | for plugin, desc in pm.search(args['term']): | |
65 | io.stdout(_("{plugin} {desc}").format(desc=desc, plugin=bold(plugin))) | |
66 | ||
67 | ||
68 | def bw_repo_plugin_update(repo, args): | |
69 | pm = PluginManager(repo.path) | |
70 | if args['plugin']: | |
71 | old_version, new_version = pm.update( | |
72 | args['plugin'], | |
73 | check_only=args['check_only'], | |
74 | force=args['force'], | |
75 | ) | |
76 | if old_version != new_version: | |
77 | io.stdout(_("{plugin} {old_version} → {new_version}").format( | |
78 | new_version=new_version, | |
79 | old_version=old_version, | |
80 | plugin=bold(args['plugin']), | |
81 | )) | |
82 | else: | |
83 | for plugin, version in pm.list(): | |
84 | old_version, new_version = pm.update( | |
85 | plugin, | |
86 | check_only=args['check_only'], | |
87 | force=args['force'], | |
88 | ) | |
89 | if old_version != new_version: | |
90 | io.stdout(_("{plugin} {old_version} → {new_version}").format( | |
91 | new_version=new_version, | |
92 | old_version=old_version, | |
93 | plugin=bold(plugin), | |
94 | )) |
0 | # -*- coding: utf-8 -*- | |
1 | from __future__ import unicode_literals | |
2 | ||
3 | 0 | from datetime import datetime |
4 | try: | |
5 | from itertools import zip_longest | |
6 | except ImportError: # Python 2 | |
7 | from itertools import izip_longest as zip_longest | |
1 | from itertools import zip_longest | |
8 | 2 | from sys import exit |
9 | 3 | |
10 | 4 | from ..concurrency import WorkerPool |
115 | 109 | |
116 | 110 | def bw_run(repo, args): |
117 | 111 | errors = [] |
118 | target_nodes = get_target_nodes(repo, args['target'], adhoc_nodes=args['adhoc_nodes']) | |
119 | pending_nodes = target_nodes[:] | |
112 | target_nodes = get_target_nodes(repo, args['targets']) | |
113 | pending_nodes = target_nodes.copy() | |
120 | 114 | io.progress_set_total(len(pending_nodes)) |
121 | 115 | |
122 | 116 | repo.hooks.run_start( |
123 | 117 | repo, |
124 | args['target'], | |
118 | args['targets'], | |
125 | 119 | target_nodes, |
126 | 120 | args['command'], |
127 | 121 | ) |
175 | 169 | |
176 | 170 | repo.hooks.run_end( |
177 | 171 | repo, |
178 | args['target'], | |
172 | args['targets'], | |
179 | 173 | target_nodes, |
180 | 174 | args['command'], |
181 | 175 | duration=datetime.now() - start_time, |
0 | # -*- coding: utf-8 -*- | |
1 | from __future__ import unicode_literals | |
2 | ||
3 | 0 | from operator import itemgetter |
4 | 1 | |
5 | 2 | from ..utils.table import ROW_SEPARATOR, render_table |
10 | 7 | def bw_stats(repo, args): |
11 | 8 | items = {} |
12 | 9 | metadata_defaults = set() |
13 | metadata_processors = set() | |
14 | 10 | metadata_reactors = set() |
15 | 11 | for node in repo.nodes: |
16 | 12 | for metadata_default_name, metadata_default in node.metadata_defaults: |
17 | 13 | metadata_defaults.add(metadata_default_name) |
18 | # TODO remove this in 4.0 | |
19 | for metadata_processor_name, metadata_processor in node._metadata_processors[2]: | |
20 | metadata_processors.add(metadata_processor_name) | |
21 | 14 | for metadata_reactor_name, metadata_reactor in node.metadata_reactors: |
22 | 15 | metadata_reactors.add(metadata_reactor_name) |
23 | 16 | for item in node.items: |
34 | 27 | [str(len(repo.groups)), _("groups")], |
35 | 28 | [str(len(repo.bundle_names)), _("bundles")], |
36 | 29 | [str(len(metadata_defaults)), _("metadata defaults")], |
37 | [str(len(metadata_processors)), _("metadata processors")], | |
38 | 30 | [str(len(metadata_reactors)), _("metadata reactors")], |
39 | 31 | [str(sum([len(list(node.items)) for node in repo.nodes])), _("items")], |
40 | 32 | ROW_SEPARATOR, |
0 | # -*- coding: utf-8 -*- | |
1 | from __future__ import unicode_literals | |
2 | ||
3 | 0 | from copy import copy |
4 | from re import compile as compile_regex | |
5 | 1 | from sys import exit |
6 | 2 | |
7 | 3 | from ..deps import DummyItem |
8 | 4 | from ..exceptions import FaultUnavailable, ItemDependencyLoop |
9 | 5 | from ..itemqueue import ItemTestQueue |
10 | from ..metadata import check_for_unsolvable_metadata_key_conflicts, check_metadata_keys | |
11 | from ..plugins import PluginManager | |
6 | from ..metadata import check_for_metadata_conflicts | |
12 | 7 | from ..repo import Repository |
13 | 8 | from ..utils.cmdline import count_items, get_target_nodes |
14 | 9 | from ..utils.plot import explain_item_dependency_loop |
93 | 88 | )) |
94 | 89 | |
95 | 90 | |
96 | def test_metadata_collisions(node): | |
97 | with io.job(_("{node} checking for metadata collisions").format(node=bold(node.name))): | |
98 | check_for_unsolvable_metadata_key_conflicts(node) | |
99 | io.stdout(_("{x} {node} has no metadata collisions").format( | |
100 | x=green("✓"), | |
101 | node=bold(node.name), | |
102 | )) | |
103 | ||
104 | ||
105 | def test_metadata_keys(node): | |
106 | with io.job(_("{node} checking metadata keys").format(node=bold(node.name))): | |
107 | check_metadata_keys(node) | |
108 | io.stdout(_("{x} {node} has valid metadata keys").format( | |
91 | def test_metadata_conflicts(node): | |
92 | with io.job(_("{node} checking for metadata conflicts").format(node=bold(node.name))): | |
93 | check_for_metadata_conflicts(node) | |
94 | io.stdout(_("{x} {node} has no metadata conflicts").format( | |
109 | 95 | x=green("✓"), |
110 | 96 | node=bold(node.name), |
111 | 97 | )) |
127 | 113 | )) |
128 | 114 | if orphaned_bundles: |
129 | 115 | exit(1) |
130 | ||
131 | ||
132 | def test_secret_identifiers(repo, ignore_patterns): | |
133 | # create a new object to make sure we don't double-count any calls | |
134 | # from previous tests | |
135 | pristine_repo = Repository(repo.path) | |
136 | pristine_repo.hash() # shortest way to create all configuration | |
137 | patterns = set() | |
138 | for raw_pattern in ignore_patterns.split(","): | |
139 | if raw_pattern: | |
140 | patterns.add(compile_regex(raw_pattern)) | |
141 | for identifier, call_count in pristine_repo.vault._call_log.items(): | |
142 | if call_count == 1: | |
143 | for pattern in patterns: | |
144 | if pattern.search(identifier): | |
145 | break | |
146 | else: | |
147 | io.stderr(_( | |
148 | "{x} identifier passed only once to repo.vault.[human_]password_for(): {i}" | |
149 | ).format( | |
150 | i=bold(identifier), | |
151 | x=red("✘"), | |
152 | )) | |
153 | exit(1) | |
154 | io.stdout(_( | |
155 | "{x} all arguments to repo.vault.[human_]password_for() used at least twice" | |
156 | ).format(x=green("✓"))) | |
157 | 116 | |
158 | 117 | |
159 | 118 | def test_empty_groups(repo): |
170 | 129 | )) |
171 | 130 | if empty_groups: |
172 | 131 | exit(1) |
173 | ||
174 | ||
175 | def test_plugin_conflicts(repo): | |
176 | pm = PluginManager(repo.path) | |
177 | for plugin, version in pm.list(): | |
178 | if QUIT_EVENT.is_set(): | |
179 | break | |
180 | local_changes = pm.local_modifications(plugin) | |
181 | if local_changes: | |
182 | io.stderr(_("{x} Plugin '{plugin}' has local modifications:").format( | |
183 | plugin=plugin, | |
184 | x=red("✘"), | |
185 | )) | |
186 | for path, actual_checksum, should_checksum in local_changes: | |
187 | io.stderr(_("\t{path} ({actual_checksum}) should be {should_checksum}").format( | |
188 | actual_checksum=actual_checksum, | |
189 | path=path, | |
190 | should_checksum=should_checksum, | |
191 | )) | |
192 | exit(1) | |
193 | else: | |
194 | io.stdout(_("{x} Plugin '{plugin}' has no local modifications.").format( | |
195 | plugin=plugin, | |
196 | x=green("✓"), | |
197 | )) | |
198 | 132 | |
199 | 133 | |
200 | 134 | def test_determinism_config(repo, nodes, iterations): |
285 | 219 | args['determinism_metadata'] > 1 or |
286 | 220 | args['hooks_node'] or |
287 | 221 | args['hooks_repo'] or |
288 | args['ignore_secret_identifiers'] is not None or | |
289 | 222 | args['items'] or |
290 | args['metadata_keys'] or | |
291 | args['metadata_collisions'] or | |
223 | args['metadata_conflicts'] or | |
292 | 224 | args['orphaned_bundles'] or |
293 | 225 | args['empty_groups'] or |
294 | args['plugin_conflicts'] or | |
295 | 226 | args['subgroup_loops'] |
296 | 227 | ) |
297 | if args['target']: | |
298 | nodes = get_target_nodes(repo, args['target'], adhoc_nodes=args['adhoc_nodes']) | |
228 | if args['targets']: | |
229 | nodes = get_target_nodes(repo, args['targets']) | |
299 | 230 | if not options_selected: |
300 | 231 | args['hooks_node'] = True |
301 | 232 | args['items'] = True |
302 | args['metadata_collisions'] = True | |
233 | args['metadata_conflicts'] = True | |
303 | 234 | args['metadata_keys'] = True |
304 | 235 | else: |
305 | 236 | nodes = copy(list(repo.nodes)) |
307 | 238 | args['hooks_node'] = True |
308 | 239 | args['hooks_repo'] = True |
309 | 240 | args['items'] = True |
310 | args['metadata_collisions'] = True | |
241 | args['metadata_conflicts'] = True | |
311 | 242 | args['metadata_keys'] = True |
312 | 243 | args['subgroup_loops'] = True |
313 | 244 | |
314 | if args['ignore_secret_identifiers'] is not None and not QUIT_EVENT.is_set(): | |
315 | test_secret_identifiers(repo, args['ignore_secret_identifiers']) | |
316 | ||
317 | if args['plugin_conflicts'] and not QUIT_EVENT.is_set(): | |
318 | test_plugin_conflicts(repo) | |
319 | ||
320 | 245 | if args['subgroup_loops'] and not QUIT_EVENT.is_set(): |
321 | 246 | test_subgroup_loops(repo) |
322 | 247 | |
326 | 251 | if args['orphaned_bundles'] and not QUIT_EVENT.is_set(): |
327 | 252 | test_orphaned_bundles(repo) |
328 | 253 | |
329 | if args['metadata_keys'] and not QUIT_EVENT.is_set(): | |
254 | if args['metadata_conflicts'] and not QUIT_EVENT.is_set(): | |
330 | 255 | io.progress_set_total(len(nodes)) |
331 | 256 | for node in nodes: |
332 | 257 | if QUIT_EVENT.is_set(): |
333 | 258 | break |
334 | test_metadata_keys(node) | |
335 | io.progress_advance() | |
336 | io.progress_set_total(0) | |
337 | ||
338 | if args['metadata_collisions'] and not QUIT_EVENT.is_set(): | |
339 | io.progress_set_total(len(nodes)) | |
340 | for node in nodes: | |
341 | if QUIT_EVENT.is_set(): | |
342 | break | |
343 | test_metadata_collisions(node) | |
259 | test_metadata_conflicts(node) | |
344 | 260 | io.progress_advance() |
345 | 261 | io.progress_set_total(0) |
346 | 262 |
0 | # -*- coding: utf-8 -*- | |
1 | from __future__ import unicode_literals | |
2 | ||
3 | 0 | from datetime import datetime |
4 | 1 | from sys import exit |
5 | 2 | |
115 | 112 | def bw_verify(repo, args): |
116 | 113 | errors = [] |
117 | 114 | node_stats = {} |
118 | pending_nodes = get_target_nodes(repo, args['target'], adhoc_nodes=args['adhoc_nodes']) | |
115 | pending_nodes = get_target_nodes(repo, args['targets']) | |
119 | 116 | start_time = datetime.now() |
120 | 117 | io.progress_set_total(count_items(pending_nodes)) |
121 | 118 |
0 | # -*- coding: utf-8 -*- | |
1 | from __future__ import unicode_literals | |
2 | ||
3 | 0 | from ..utils.text import mark_for_translation as _ |
4 | 1 | from ..utils.ui import io |
5 | 2 |
0 | # -*- coding: utf-8 -*- | |
1 | from __future__ import unicode_literals | |
2 | ||
3 | 0 | from concurrent.futures import ThreadPoolExecutor, wait, FIRST_COMPLETED |
4 | 1 | from datetime import datetime |
5 | 2 | from random import randint |
12 | 9 | JOIN_TIMEOUT = 5 # seconds |
13 | 10 | |
14 | 11 | |
15 | class WorkerPool(object): | |
12 | class WorkerPool: | |
16 | 13 | """ |
17 | 14 | Manages a bunch of worker threads. |
18 | 15 | """ |
48 | 45 | io.debug(_("worker pool {pool} waiting for next task to complete").format( |
49 | 46 | pool=self.pool_id, |
50 | 47 | )) |
51 | while True: | |
52 | # we must use a timeout here to allow Python <3.3 to call | |
53 | # its SIGINT handler | |
54 | # see also http://stackoverflow.com/q/25676835 | |
55 | completed, pending = wait( | |
56 | self.pending_futures.keys(), | |
57 | return_when=FIRST_COMPLETED, | |
58 | timeout=0.1, | |
59 | ) | |
60 | if completed: | |
61 | break | |
48 | completed, pending = wait( | |
49 | self.pending_futures.keys(), | |
50 | return_when=FIRST_COMPLETED, | |
51 | ) | |
62 | 52 | future = completed.pop() |
63 | 53 | |
64 | 54 | start_time = self.pending_futures[future]['start_time'] |
78 | 68 | task=task_id, |
79 | 69 | worker=worker_id, |
80 | 70 | )) |
81 | if not hasattr(exception, '__traceback__'): # Python 2 | |
82 | exception.__traceback__ = future.exception_info()[1] | |
83 | 71 | exception.__task_id = task_id |
84 | 72 | raise exception |
85 | 73 | else: |
0 | # -*- coding: utf-8 -*- | |
1 | from __future__ import unicode_literals | |
2 | ||
3 | 0 | from .exceptions import BundleError, ItemDependencyError, NoSuchItem |
4 | 1 | from .items import Item |
5 | 2 | from .items.actions import Action |
7 | 4 | from .utils.ui import io |
8 | 5 | |
9 | 6 | |
10 | class DummyItem(object): | |
7 | class DummyItem: | |
11 | 8 | bundle = None |
12 | 9 | triggered = False |
13 | 10 | |
140 | 137 | """ |
141 | 138 | item._flattened_deps = set(item._deps) |
142 | 139 | |
143 | for dep in item._deps: | |
140 | for dep in item._deps.copy(): | |
144 | 141 | try: |
145 | 142 | dep_item = items[dep] |
146 | 143 | except KeyError: |
147 | raise ItemDependencyError(_( | |
148 | "'{item}' in bundle '{bundle}' has a dependency (needs) " | |
149 | "on '{dep}', which doesn't exist" | |
150 | ).format( | |
151 | item=item.id, | |
152 | bundle=item.bundle.name, | |
153 | dep=dep, | |
154 | )) | |
144 | if dep.startswith("tag:"): | |
145 | # sometimes it is useful to be able to depend on a tag | |
146 | # without having to make sure it actually exists | |
147 | item._deps.remove(dep) | |
148 | continue | |
149 | else: | |
150 | raise ItemDependencyError(_( | |
151 | "'{item}' in bundle '{bundle}' has a dependency (needs) " | |
152 | "on '{dep}', which doesn't exist" | |
153 | ).format( | |
154 | item=item.id, | |
155 | bundle=item.bundle.name, | |
156 | dep=dep, | |
157 | )) | |
155 | 158 | # Don't recurse if we have already resolved nested dependencies |
156 | 159 | # for this item. Also serves as a guard against infinite |
157 | 160 | # recursion when there are loops. |
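
To illustrate the new tag handling (the bundle, file path, and tag name below are hypothetical): an item may now declare a dependency on a tag even when no item on the node carries that tag; the unresolvable `tag:` dependency is simply dropped instead of raising ItemDependencyError.

    # bundles/myapp/items.py (hypothetical bundle)
    files = {
        "/etc/myapp.conf": {
            'content': "key = value\n",
            # ignored if nothing on this node is tagged "database"
            'needs': {"tag:database"},
        },
    }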
0 | # -*- coding: utf-8 -*- | |
1 | from __future__ import unicode_literals | |
2 | ||
3 | from sys import version_info | |
4 | ||
5 | ||
6 | class UnicodeException(Exception): | |
7 | def __init__(self, msg=""): | |
8 | if version_info >= (3, 0): | |
9 | super(UnicodeException, self).__init__(msg) | |
10 | else: | |
11 | super(UnicodeException, self).__init__(msg.encode('utf-8')) | |
12 | ||
13 | ||
14 | class ActionFailure(UnicodeException): | |
0 | class ActionFailure(Exception): | |
15 | 1 | """ |
16 | 2 | Raised when an action fails to meet the expected rcode/output.
17 | 3 | """ |
27 | 13 | self.obj = obj |
28 | 14 | |
29 | 15 | |
30 | class FaultUnavailable(UnicodeException): | |
16 | class FaultUnavailable(Exception): | |
31 | 17 | """ |
32 | 18 | Raised when a Fault object cannot be resolved. |
33 | 19 | """ |
34 | 20 | pass |
35 | 21 | |
36 | 22 | |
37 | class GracefulApplyException(UnicodeException): | |
23 | class GracefulApplyException(Exception): | |
38 | 24 | """ |
39 | 25 | Raised when a problem has been encountered in `bw apply`, but a more |
40 | 26 | verbose error has already been printed. |
42 | 28 | pass |
43 | 29 | |
44 | 30 | |
45 | class NoSuchBundle(UnicodeException): | |
31 | class NoSuchBundle(Exception): | |
46 | 32 | """ |
47 | 33 | Raised when a bundle of unknown name is requested. |
48 | 34 | """ |
49 | 35 | pass |
50 | 36 | |
51 | 37 | |
52 | class NoSuchGroup(UnicodeException): | |
38 | class NoSuchGroup(Exception): | |
53 | 39 | """ |
54 | 40 | Raised when a group of unknown name is requested. |
55 | 41 | """ |
56 | 42 | pass |
57 | 43 | |
58 | 44 | |
59 | class NoSuchItem(UnicodeException): | |
45 | class NoSuchItem(Exception): | |
60 | 46 | """ |
61 | 47 | Raised when an item of unknown name is requested. |
62 | 48 | """ |
63 | 49 | pass |
64 | 50 | |
65 | 51 | |
66 | class NoSuchNode(UnicodeException): | |
52 | class NoSuchNode(Exception): | |
67 | 53 | """ |
68 | 54 | Raised when a node of unknown name is requested. |
69 | 55 | """ |
70 | 56 | pass |
71 | 57 | |
72 | 58 | |
73 | class NoSuchPlugin(UnicodeException): | |
74 | """ | |
75 | Raised when a plugin of unknown name is requested. | |
76 | """ | |
77 | pass | |
78 | ||
79 | ||
80 | class RemoteException(UnicodeException): | |
59 | class RemoteException(Exception): | |
81 | 60 | """ |
82 | 61 | Raised when a shell command on a node fails. |
83 | 62 | """ |
84 | 63 | pass |
85 | 64 | |
86 | 65 | |
87 | class RepositoryError(UnicodeException): | |
66 | class RepositoryError(Exception): | |
88 | 67 | """ |
89 | 68 | Indicates that something is wrong with the current repository.
90 | 69 | """ |
134 | 113 | pass |
135 | 114 | |
136 | 115 | |
137 | class PluginError(RepositoryError): | |
138 | """ | |
139 | Indicates an error related to a plugin. | |
140 | """ | |
141 | pass | |
142 | ||
143 | ||
144 | class PluginLocalConflict(PluginError): | |
145 | """ | |
146 | Raised when a plugin tries to overwrite locally-modified files. | |
147 | """ | |
148 | pass | |
149 | ||
150 | ||
151 | class SkipNode(UnicodeException): | |
116 | class SkipNode(Exception): | |
152 | 117 | """ |
153 | 118 | Can be raised by hooks to skip a node. |
154 | 119 | """ |
162 | 127 | pass |
163 | 128 | |
164 | 129 | |
165 | class UsageException(UnicodeException): | |
130 | class UsageException(Exception): | |
166 | 131 | """ |
167 | 132 | Raised when command line options don't make sense. |
168 | 133 | """ |
0 | # -*- coding: utf-8 -*- | |
1 | from __future__ import unicode_literals | |
2 | ||
0 | from os import mkdir | |
1 | from os.path import exists, join | |
3 | 2 | import re |
4 | 3 | |
4 | from tomlkit import dumps as toml_dump, parse as toml_parse | |
5 | ||
5 | 6 | from .exceptions import NoSuchGroup, NoSuchNode, RepositoryError |
6 | from .utils import cached_property, names | |
7 | from .utils.dicts import hash_statedict | |
8 | from .utils.text import mark_for_translation as _, validate_name | |
7 | from .utils import cached_property, error_context, get_file_contents, names | |
8 | from .utils.dicts import ( | |
9 | dict_to_toml, | |
10 | hash_statedict, | |
11 | set_key_at_path, | |
12 | validate_dict, | |
13 | COLLECTION_OF_STRINGS, | |
14 | TUPLE_OF_INTS, | |
15 | ) | |
16 | from .utils.text import mark_for_translation as _, toml_clean, validate_name | |
9 | 17 | |
10 | 18 | |
11 | 19 | GROUP_ATTR_DEFAULTS = { |
13 | 21 | 'cmd_wrapper_outer': "sudo sh -c {}", |
14 | 22 | 'dummy': False, |
15 | 23 | 'kubectl_context': None, |
24 | 'locking_node': None, | |
16 | 25 | 'os': 'linux', |
17 | 26 | # Setting os_version to 0 by default will probably yield fewer
18 | 27 | # surprises than setting it to max_int. Users will probably |
32 | 41 | 'use_shadow_passwords': True, |
33 | 42 | } |
34 | 43 | |
44 | GROUP_ATTR_TYPES = { | |
45 | 'bundles': COLLECTION_OF_STRINGS, | |
46 | 'cmd_wrapper_inner': str, | |
47 | 'cmd_wrapper_outer': str, | |
48 | 'dummy': bool, | |
49 | 'file_path': str, | |
50 | 'kubectl_context': (str, type(None)), | |
51 | 'locking_node': (str, type(None)), | |
52 | 'member_patterns': COLLECTION_OF_STRINGS, | |
53 | 'members': COLLECTION_OF_STRINGS, | |
54 | 'metadata': dict, | |
55 | 'os': str, | |
56 | 'os_version': TUPLE_OF_INTS, | |
57 | 'subgroups': COLLECTION_OF_STRINGS, | |
58 | 'subgroup_patterns': COLLECTION_OF_STRINGS, | |
59 | 'use_shadow_passwords': bool, | |
60 | } | |
61 | ||
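
As a sketch, a group definition that these types would validate (group, node, and bundle names are illustrative):

    # groups.py (hypothetical group definition)
    groups = {
        "webservers": {
            'members': {"node1", "node2"},          # COLLECTION_OF_STRINGS
            'bundles': {"nginx"},                   # COLLECTION_OF_STRINGS
            'os_version': (20, 4),                  # TUPLE_OF_INTS
            'use_shadow_passwords': True,           # bool
            'metadata': {'nginx': {'workers': 4}},  # dict
        },
    }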
35 | 62 | |
36 | 63 | def _build_error_chain(loop_node, last_node, nodes_in_between): |
37 | 64 | """ |
53 | 80 | return error_chain |
54 | 81 | |
55 | 82 | |
56 | class Group(object): | |
83 | class Group: | |
57 | 84 | """ |
58 | 85 | A group of nodes. |
59 | 86 | """ |
60 | def __init__(self, group_name, infodict=None): | |
61 | if infodict is None: | |
62 | infodict = {} | |
87 | def __init__(self, group_name, attributes=None): | |
88 | if attributes is None: | |
89 | attributes = {} | |
63 | 90 | |
64 | 91 | if not validate_name(group_name): |
65 | 92 | raise RepositoryError(_("'{}' is not a valid group name.").format(group_name)) |
66 | 93 | |
94 | with error_context(group_name=group_name): | |
95 | validate_dict(attributes, GROUP_ATTR_TYPES) | |
96 | ||
97 | self._attributes = attributes | |
98 | self._immediate_subgroup_patterns = { | |
99 | re.compile(pattern) for pattern in | |
100 | set(attributes.get('subgroup_patterns', set())) | |
101 | } | |
102 | self._member_patterns = { | |
103 | re.compile(pattern) for pattern in | |
104 | set(attributes.get('member_patterns', set())) | |
105 | } | |
67 | 106 | self.name = group_name |
68 | self.bundle_names = infodict.get('bundles', []) | |
69 | self.immediate_subgroup_names = infodict.get('subgroups', []) | |
70 | self.immediate_subgroup_patterns = infodict.get('subgroup_patterns', []) | |
71 | self.members_add = infodict.get('members_add', None) | |
72 | self.members_remove = infodict.get('members_remove', None) | |
73 | self.metadata = infodict.get('metadata', {}) | |
74 | self.node_patterns = infodict.get('member_patterns', []) | |
75 | self.static_member_names = infodict.get('members', []) | |
107 | self.file_path = attributes.get('file_path') | |
76 | 108 | |
77 | 109 | for attr in GROUP_ATTR_DEFAULTS: |
78 | 110 | # defaults are applied in node.py |
79 | setattr(self, attr, infodict.get(attr)) | |
111 | setattr(self, attr, attributes.get(attr)) | |
80 | 112 | |
81 | 113 | def __lt__(self, other): |
82 | 114 | return self.name < other.name |
113 | 145 | yield node |
114 | 146 | |
115 | 147 | @cached_property |
116 | def _static_nodes(self): | |
117 | result = set() | |
118 | result.update(self._nodes_from_members) | |
119 | result.update(self._nodes_from_patterns) | |
120 | return result | |
121 | ||
122 | @property | |
123 | def _subgroup_names_from_patterns(self): | |
124 | for pattern in self.immediate_subgroup_patterns: | |
125 | compiled_pattern = re.compile(pattern) | |
126 | for group in self.repo.groups: | |
127 | if compiled_pattern.search(group.name) is not None and group != self: | |
128 | yield group.name | |
129 | ||
130 | @property | |
131 | 148 | def _nodes_from_members(self): |
132 | for node_name in self.static_member_names: | |
149 | for node_name in self._attributes.get('members', set()): | |
133 | 150 | try: |
134 | 151 | yield self.repo.get_node(node_name) |
135 | 152 | except NoSuchNode: |
142 | 159 | )) |
143 | 160 | |
144 | 161 | @property |
145 | def _nodes_from_patterns(self): | |
146 | for pattern in self.node_patterns: | |
147 | compiled_pattern = re.compile(pattern) | |
148 | for node in self.repo.nodes: | |
149 | if not compiled_pattern.search(node.name) is None: | |
150 | yield node | |
162 | def _subgroup_names_from_patterns(self): | |
163 | for pattern in self._immediate_subgroup_patterns: | |
164 | for group in self.repo.groups: | |
165 | if pattern.search(group.name) is not None and group != self: | |
166 | yield group.name | |
151 | 167 | |
152 | 168 | def _check_subgroup_names(self, visited_names): |
153 | 169 | """ |
154 | 170 | Recursively finds subgroups and checks for loops. |
155 | 171 | """ |
156 | 172 | for name in set( |
157 | list(self.immediate_subgroup_names) + | |
173 | list(self._attributes.get('subgroups', set())) + | |
158 | 174 | list(self._subgroup_names_from_patterns) |
159 | 175 | ): |
160 | 176 | if name not in visited_names: |
209 | 225 | yield self.repo.get_group(group_name) |
210 | 226 | |
211 | 227 | @cached_property |
228 | def toml(self): | |
229 | if not self.file_path or not self.file_path.endswith(".toml"): | |
230 | raise ValueError(_("group {} not in TOML format").format(self.name)) | |
231 | return toml_parse(get_file_contents(self.file_path)) | |
232 | ||
233 | def toml_save(self): | |
234 | try: | |
235 | toml_doc = self.toml | |
236 | except ValueError: | |
237 | attributes = self._attributes.copy() | |
238 | del attributes['file_path'] | |
239 | toml_doc = dict_to_toml(attributes) | |
240 | self.file_path = join(self.repo.path, "groups", self.name + ".toml") | |
241 | if not exists(join(self.repo.path, "groups")): | |
242 | mkdir(join(self.repo.path, "groups")) | |
243 | with open(self.file_path, 'w') as f: | |
244 | f.write(toml_clean(toml_dump(toml_doc))) | |
245 | ||
246 | def toml_set(self, path, value): | |
247 | if not isinstance(path, tuple): | |
248 | path = path.split("/") | |
249 | set_key_at_path(self.toml, path, value) | |
250 | ||
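
A hedged usage sketch for these helpers (the group name and key path are placeholders): toml_set() accepts either a tuple or a "/"-separated key path, and toml_save() creates groups/<name>.toml for a group not yet stored as TOML.

    group = repo.get_group("webservers")         # placeholder group name
    group.toml_set("metadata/nginx/workers", 4)  # path is split on "/"
    group.toml_save()                            # writes groups/webservers.toml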
251 | @cached_property | |
212 | 252 | def immediate_subgroups(self): |
213 | 253 | """ |
214 | 254 | Iterator over all immediate subgroups as group objects. |
215 | 255 | """ |
216 | 256 | for group_name in set( |
217 | list(self.immediate_subgroup_names) + | |
257 | list(self._attributes.get('subgroups', set())) + | |
218 | 258 | list(self._subgroup_names_from_patterns) |
219 | 259 | ): |
220 | 260 | try: |
0 | # -*- coding: utf-8 -*- | |
1 | from __future__ import unicode_literals | |
2 | ||
3 | 0 | from .deps import ( |
4 | 1 | DummyItem, |
5 | 2 | find_item, |
13 | 10 | from .utils.ui import io |
14 | 11 | |
15 | 12 | |
16 | class BaseQueue(object): | |
13 | class BaseQueue: | |
17 | 14 | def __init__(self, items, node_os, node_os_version): |
18 | 15 | self.items_with_deps = prepare_dependencies(items, node_os, node_os_version) |
19 | 16 | self.items_without_deps = [] |
0 | # -*- coding: utf-8 -*- | |
1 | 0 | """ |
2 | 1 | Note that modules in this package have to use absolute imports because |
3 | 2 | Repository.item_classes loads them as files. |
4 | 3 | """ |
5 | from __future__ import unicode_literals | |
6 | 4 | from copy import copy |
7 | 5 | from datetime import datetime |
8 | 6 | from inspect import cleandoc |
15 | 13 | from bundlewrap.utils.text import force_text, mark_for_translation as _ |
16 | 14 | from bundlewrap.utils.text import blue, bold, italic, wrap_question |
17 | 15 | from bundlewrap.utils.ui import io |
16 | from bundlewrap.operations import run_local | |
18 | 17 | |
19 | 18 | |
20 | 19 | BUILTIN_ITEM_ATTRIBUTES = { |
49 | 48 | return result |
50 | 49 | |
51 | 50 | |
52 | class ItemStatus(object): | |
51 | class ItemStatus: | |
53 | 52 | """ |
54 | 53 | Holds information on a particular Item such as whether it needs |
55 | 54 | fixing and what's broken. |
89 | 88 | return copy |
90 | 89 | |
91 | 90 | |
92 | class Item(object): | |
91 | class Item: | |
93 | 92 | """ |
94 | 93 | A single piece of configuration (e.g. a file, a package, a service). |
95 | 94 | """ |
96 | BLOCK_CONCURRENT = [] | |
97 | 95 | BUNDLE_ATTRIBUTE_NAME = None |
98 | 96 | ITEM_ATTRIBUTES = {} |
99 | 97 | ITEM_TYPE_NAME = None |
150 | 148 | self.name = name |
151 | 149 | self.node = bundle.node |
152 | 150 | self.when_creating = {} |
151 | self._command_results = [] | |
153 | 152 | self._faults_missing_for_attributes = set() |
154 | 153 | self._precedes_items = [] |
155 | 154 | |
349 | 348 | skipped based on the given set of locks. |
350 | 349 | """ |
351 | 350 | for lock in mine: |
352 | for selector in lock['items']: | |
353 | if self.covered_by_autoskip_selector(selector): | |
354 | io.debug(_("{item} on {node} whitelisted by lock {lock}").format( | |
355 | item=self.id, | |
356 | lock=lock['id'], | |
357 | node=self.node.name, | |
358 | )) | |
359 | return False | |
351 | if self.covered_by_autoskip_selector(lock['items']): | |
352 | io.debug(_("{item} on {node} whitelisted by lock {lock}").format( | |
353 | item=self.id, | |
354 | lock=lock['id'], | |
355 | node=self.node.name, | |
356 | )) | |
357 | return False | |
360 | 358 | for lock in others: |
361 | for selector in lock['items']: | |
362 | if self.covered_by_autoskip_selector(selector): | |
363 | io.debug(_("{item} on {node} blacklisted by lock {lock}").format( | |
364 | item=self.id, | |
365 | lock=lock['id'], | |
366 | node=self.node.name, | |
367 | )) | |
368 | return True | |
359 | if self.covered_by_autoskip_selector(lock['items']): | |
360 | io.debug(_("{item} on {node} blacklisted by lock {lock}").format( | |
361 | item=self.id, | |
362 | lock=lock['id'], | |
363 | node=self.node.name, | |
364 | )) | |
365 | return True | |
369 | 366 | return False |
370 | 367 | |
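
For reference, a sketch of the lock dict shape this simplification assumes (id and selectors are illustrative): lock['items'] is now a flat list of selector strings rather than a list of selector lists.

    lock = {
        'id': "A1B2C3",                        # illustrative lock id
        'items': ["tag:web", "bundle:nginx"],  # flat list of selectors
    }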
371 | 368 | def _test(self): |
424 | 421 | type=cls.ITEM_TYPE_NAME, |
425 | 422 | )) |
426 | 423 | |
424 | @classmethod | |
427 | 425 | def _validate_required_attributes(cls, bundle, item_id, attributes): |
428 | 426 | missing = [] |
429 | 427 | for attrname in cls.REQUIRED_ATTRIBUTES: |
440 | 438 | |
441 | 439 | def apply( |
442 | 440 | self, |
443 | autoskip_selector="", | |
444 | autoonly_selector="", | |
441 | autoskip_selector=[], | |
442 | autoonly_selector=[], | |
445 | 443 | my_soft_locks=(), |
446 | 444 | other_peoples_soft_locks=(), |
447 | 445 | interactive=False, |
616 | 614 | ) |
617 | 615 | return (status_code, details) |
618 | 616 | |
617 | def run_local(self, command, **kwargs): | |
618 | result = run_local(command, **kwargs) | |
619 | self._command_results.append({ | |
620 | 'command': command, | |
621 | 'result': result, | |
622 | }) | |
623 | return result | |
624 | ||
625 | def run(self, command, **kwargs): | |
626 | result = self.node.run(command, **kwargs) | |
627 | self._command_results.append({ | |
628 | 'command': command, | |
629 | 'result': result, | |
630 | }) | |
631 | return result | |
632 | ||
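
A minimal sketch of an item type using these wrappers (the item type and command are hypothetical): routing commands through self.run() instead of self.node.run() records them in _command_results, so failed items can report what they ran.

    from bundlewrap.items import Item

    class MyService(Item):        # hypothetical item type
        ITEM_TYPE_NAME = "myservice"

        def fix(self, status):
            # recorded for the failure report, unlike self.node.run()
            self.run("systemctl restart myservice", may_fail=True)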
619 | 633 | def ask(self, status_should, status_actual, relevant_keys): |
620 | 634 | """ |
621 | 635 | Returns a string asking the user if this item should be |
641 | True if this item should be skipped based on the given selector |
642 | string (e.g. "tag:foo,bundle:bar"). |
655 | True if this item should be skipped based on the given list of
656 | selector strings (e.g. ["tag:foo", "bundle:bar"]).
643 | 657 | """ |
644 | components = [c.strip() for c in autoskip_selector.split(",")] | |
658 | components = [c.strip() for c in autoskip_selector] | |
645 | 659 | if ( |
646 | 660 | "*" in components or |
647 | 661 | self.id in components or |
661 | 675 | """ |
662 | 676 | if not autoonly_selector: |
663 | 677 | return True |
664 | components = [c.strip() for c in autoonly_selector.split(",")] | |
678 | components = [c.strip() for c in autoonly_selector] | |
665 | 679 | if ( |
666 | 680 | self.id in components or |
667 | 681 | "bundle:{}".format(self.bundle.name) in components or |
0 | # -*- coding: utf-8 -*- | |
1 | from __future__ import unicode_literals | |
2 | ||
3 | 0 | from datetime import datetime |
4 | 1 | |
5 | 2 | from bundlewrap.exceptions import ActionFailure, BundleError |
182 | 179 | item=self.id, |
183 | 180 | node=bold(self.node.name), |
184 | 181 | )): |
185 | result = self.bundle.node.run( | |
182 | result = super().run( | |
186 | 183 | self.attributes['command'], |
187 | 184 | data_stdin=data_stdin, |
188 | 185 | may_fail=True, |
0 | # -*- coding: utf-8 -*- | |
1 | from __future__ import unicode_literals | |
2 | ||
3 | 0 | from collections import defaultdict |
4 | 1 | from os.path import normpath |
5 | from pipes import quote | |
2 | from shlex import quote | |
6 | 3 | |
7 | 4 | from bundlewrap.exceptions import BundleError |
8 | 5 | from bundlewrap.items import Item |
90 | 87 | return |
91 | 88 | |
92 | 89 | for path in status.sdict.get('paths_to_purge', []): |
93 | self.node.run("rm -rf -- {}".format(quote(path))) | |
90 | self.run("rm -rf -- {}".format(quote(path))) | |
94 | 91 | |
95 | 92 | for fix_type in ('mode', 'owner', 'group'): |
96 | 93 | if fix_type in status.keys_to_fix: |
104 | 101 | chmod_command = "chmod {} {}" |
105 | 102 | else: |
106 | 103 | chmod_command = "chmod {} -- {}" |
107 | self.node.run(chmod_command.format( | |
104 | self.run(chmod_command.format( | |
108 | 105 | self.attributes['mode'], |
109 | 106 | quote(self.name), |
110 | 107 | )) |
129 | 126 | # one of the two special bits to be set. |
130 | 127 | if status.sdict is not None and int(status.sdict['mode'], 8) & 0o6000: |
131 | 128 | if not int(self.attributes['mode'], 8) & 0o4000: |
132 | self.node.run("chmod u-s {}".format(quote(self.name))) | |
129 | self.run("chmod u-s {}".format(quote(self.name))) | |
133 | 130 | if not int(self.attributes['mode'], 8) & 0o2000: |
134 | self.node.run("chmod g-s {}".format(quote(self.name))) | |
131 | self.run("chmod g-s {}".format(quote(self.name))) | |
135 | 132 | |
136 | 133 | def _fix_owner(self, status): |
137 | 134 | group = self.attributes['group'] or "" |
141 | 138 | command = "chown {}{} {}" |
142 | 139 | else: |
143 | 140 | command = "chown {}{} -- {}" |
144 | self.node.run(command.format( | |
141 | self.run(command.format( | |
145 | 142 | quote(self.attributes['owner'] or ""), |
146 | 143 | group, |
147 | 144 | quote(self.name), |
149 | 146 | _fix_group = _fix_owner |
150 | 147 | |
151 | 148 | def _fix_type(self, status): |
152 | self.node.run("rm -rf -- {}".format(quote(self.name))) | |
153 | self.node.run("mkdir -p -- {}".format(quote(self.name))) | |
149 | self.run("rm -rf -- {}".format(quote(self.name))) | |
150 | self.run("mkdir -p -- {}".format(quote(self.name))) | |
154 | 151 | if self.attributes['mode']: |
155 | 152 | self._fix_mode(status) |
156 | 153 | if self.attributes['owner'] or self.attributes['group']: |
157 | 154 | self._fix_owner(status) |
158 | 155 | |
159 | 156 | def _get_paths_to_purge(self): |
160 | result = self.node.run("find {} -maxdepth 1 -print0".format(quote(self.name))) | |
157 | result = self.run("find {} -maxdepth 1 -print0".format(quote(self.name))) | |
161 | 158 | for line in result.stdout.split(b"\0"): |
162 | 159 | line = line.decode('utf-8') |
163 | 160 | for item_type in ('directory', 'file', 'symlink'): |
0 | # -*- coding: utf-8 -*- | |
1 | from __future__ import unicode_literals | |
2 | ||
3 | 0 | from base64 import b64decode |
4 | 1 | from collections import defaultdict |
5 | 2 | from contextlib import contextmanager |
6 | 3 | from datetime import datetime |
7 | 4 | from os.path import basename, dirname, exists, join, normpath |
8 | from pipes import quote | |
5 | from shlex import quote | |
9 | 6 | from subprocess import call |
10 | 7 | from sys import exc_info |
11 | 8 | from traceback import format_exception |
257 | 254 | command = "chmod {} {}" |
258 | 255 | else: |
259 | 256 | command = "chmod {} -- {}" |
260 | self.node.run(command.format( | |
257 | self.run(command.format( | |
261 | 258 | self.attributes['mode'], |
262 | 259 | quote(self.name), |
263 | 260 | )) |
270 | 267 | command = "chown {}{} {}" |
271 | 268 | else: |
272 | 269 | command = "chown {}{} -- {}" |
273 | self.node.run(command.format( | |
270 | self.run(command.format( | |
274 | 271 | quote(self.attributes['owner'] or ""), |
275 | 272 | group, |
276 | 273 | quote(self.name), |
279 | 276 | |
280 | 277 | def _fix_type(self, status): |
281 | 278 | if status.sdict: |
282 | self.node.run("rm -rf -- {}".format(quote(self.name))) | |
279 | self.run("rm -rf -- {}".format(quote(self.name))) | |
283 | 280 | if not status.must_be_deleted: |
284 | self.node.run("mkdir -p -- {}".format(quote(dirname(self.name)))) | |
281 | self.run("mkdir -p -- {}".format(quote(dirname(self.name)))) | |
285 | 282 | self._fix_content_hash(status) |
286 | 283 | |
287 | 284 | def get_auto_deps(self, items): |
0 | from atexit import register as at_exit | |
1 | from os import remove, setpgrp | |
2 | from os.path import isfile, join | |
3 | from shlex import quote | |
4 | from shutil import rmtree | |
5 | from subprocess import PIPE, Popen | |
6 | from tempfile import mkdtemp, NamedTemporaryFile | |
7 | ||
8 | from bundlewrap.exceptions import BundleError, RepositoryError | |
9 | from bundlewrap.items import Item | |
10 | from bundlewrap.utils import cached_property | |
11 | from bundlewrap.utils.text import is_subdirectory, mark_for_translation as _, randstr | |
12 | from bundlewrap.utils.ui import io | |
13 | ||
14 | ||
15 | REPO_MAP_FILENAME = "git_deploy_repos" | |
16 | REMOTE_STATE_FILENAME = ".bundlewrap_git_deploy" | |
17 | ||
18 | ||
19 | def is_ref(rev): | |
20 | """ | |
21 | Braindead check to see if our rev is a branch or tag name. False | |
22 | negatives are OK since this is only used for optimization. | |
23 | """ | |
24 | for char in rev: | |
25 | if char not in "0123456789abcdef": | |
26 | return True | |
27 | return False | |
28 | ||
29 | ||
30 | def clone_to_dir(remote_url, rev): | |
31 | """ | |
32 | Clones the given URL to a temporary directory, using a shallow clone | |
33 | if the given revision is definitely not a commit hash. | |
34 | ||
35 | Returns the path to the directory. | |
36 | """ | |
37 | tmpdir = mkdtemp() | |
38 | if is_ref(rev): | |
39 | git_cmdline = ["clone", "--bare", "--depth", "1", "--no-single-branch", remote_url, "."] | |
40 | else: | |
41 | git_cmdline = ["clone", "--bare", remote_url, "."] | |
42 | git_command(git_cmdline, tmpdir) | |
43 | return tmpdir | |
44 | ||
45 | ||
46 | def get_local_repo_path(bw_repo_path, repo_name): | |
47 | """ | |
48 | From the given BundleWrap repo, get the filesystem path to the git | |
49 | repo associated with the given internal repo name. | |
50 | """ | |
51 | repo_map_path = join(bw_repo_path, REPO_MAP_FILENAME) | |
52 | if not isfile(repo_map_path): | |
53 | io.stderr(_("missing repo map for git_deploy at {}").format(repo_map_path)) | |
54 | io.stderr(_("you must create this file with the following format:")) | |
55 | io.stderr(_(" <value of repo attribute on git_deploy item>: " | |
56 | "<absolute path to local git repo>")) | |
57 | io.stderr(_("since the path is local, you should also add the " | |
58 | "{} file to your gitignore").format(REPO_MAP_FILENAME)) | |
59 | raise RepositoryError(_("missing repo map for git_deploy")) | |
60 | ||
61 | with open(join(bw_repo_path, REPO_MAP_FILENAME)) as f: | |
62 | repo_map = f.readlines() | |
63 | ||
64 | for line in repo_map: | |
65 | if not line.strip() or line.startswith("#"): | |
66 | continue | |
67 | try: | |
68 | repo, path = line.split(":", 1) | |
69 | except ValueError: |
70 | raise RepositoryError(_("unable to parse line from {path}: '{line}'").format( | |
71 | line=line, | |
72 | path=repo_map_path, | |
73 | )) | |
74 | if repo_name == repo: | |
75 | return path.strip() | |
76 | ||
77 | raise RepositoryError(_("no path found for repo '{repo}' in {path}").format( | |
78 | path=repo_map_path, | |
79 | repo=repo_name, | |
80 | )) | |
81 | ||
82 | ||
83 | def git_command(cmdline, repo_dir): | |
84 | """ | |
85 | Runs the given git command line in the given directory. | |
86 | ||
87 | Returns stdout of the command. | |
88 | """ | |
89 | cmdline = ["git"] + cmdline | |
90 | io.debug(_("running '{}' in {}").format( | |
91 | " ".join(cmdline), | |
92 | repo_dir, | |
93 | )) | |
94 | git_process = Popen( | |
95 | cmdline, | |
96 | cwd=repo_dir, | |
97 | preexec_fn=setpgrp, | |
98 | stderr=PIPE, | |
99 | stdout=PIPE, | |
100 | ) | |
101 | stdout, stderr = git_process.communicate() | |
102 | # FIXME integrate this into Item._command_results | |
103 | if git_process.returncode != 0: | |
104 | io.stderr(_("failed command: {}").format(" ".join(cmdline))) | |
105 | io.stderr(_("stdout:\n{}").format(stdout)) | |
106 | io.stderr(_("stderr:\n{}").format(stderr)) | |
107 | raise RuntimeError(_("`git {command}` failed in {dir}").format( | |
108 | command=cmdline[1], | |
109 | dir=repo_dir, | |
110 | )) | |
111 | return stdout.decode('utf-8').strip() | |
112 | ||
113 | ||
114 | class GitDeploy(Item): | |
115 | """ | |
116 | Facilitates deployment of a given rev from a local git repo to a | |
117 | node. | |
118 | """ | |
119 | BUNDLE_ATTRIBUTE_NAME = "git_deploy" | |
120 | ITEM_ATTRIBUTES = { | |
121 | 'repo': None, | |
122 | 'rev': None, | |
123 | 'use_xattrs': False, | |
124 | } | |
125 | ITEM_TYPE_NAME = "git_deploy" | |
126 | REQUIRED_ATTRIBUTES = ['repo', 'rev'] | |
127 | ||
128 | def __repr__(self): | |
129 | return "<GitDeploy path:{} repo:{} rev:{}>".format( | |
130 | self.name, | |
131 | self.attributes['repo'], | |
132 | self.attributes['rev'], | |
133 | ) | |
134 | ||
135 | @cached_property | |
136 | def _expanded_rev(self): | |
137 | git_cmdline = ["rev-parse", self.attributes['rev']] | |
138 | return git_command( | |
139 | git_cmdline, | |
140 | self._repo_dir, | |
141 | ) | |
142 | ||
143 | @cached_property | |
144 | def _repo_dir(self): | |
145 | if "://" in self.attributes['repo']: | |
146 | repo_dir = clone_to_dir(self.attributes['repo'], self.attributes['rev']) | |
147 | io.debug(_("registering {} for deletion on exit").format(repo_dir)) | |
148 | at_exit(rmtree, repo_dir) | |
149 | else: | |
150 | repo_dir = get_local_repo_path(self.node.repo.path, self.attributes['repo']) | |
151 | return repo_dir | |
152 | ||
153 | def cdict(self): | |
154 | return {'rev': self._expanded_rev} | |
155 | ||
156 | def get_auto_deps(self, items): | |
157 | deps = set() | |
158 | for item in items: | |
159 | if item == self: | |
160 | continue | |
161 | if (( | |
162 | item.ITEM_TYPE_NAME == "file" and | |
163 | is_subdirectory(item.name, self.name) | |
164 | ) or ( | |
165 | item.ITEM_TYPE_NAME in ("file", "symlink") and | |
166 | item.name == self.name | |
167 | )): | |
168 | raise BundleError(_( | |
169 | "{item1} (from bundle '{bundle1}') blocking path to " | |
170 | "{item2} (from bundle '{bundle2}')" | |
171 | ).format( | |
172 | item1=item.id, | |
173 | bundle1=item.bundle.name, | |
174 | item2=self.id, | |
175 | bundle2=self.bundle.name, | |
176 | )) | |
177 | if ( | |
178 | item.ITEM_TYPE_NAME == "directory" and | |
179 | item.name == self.name | |
180 | ): | |
181 | if item.attributes['purge']: | |
182 | raise BundleError(_( | |
183 | "cannot git_deploy into purged directory {}" | |
184 | ).format(item.name)) | |
185 | else: | |
186 | deps.add(item.id) | |
187 | return deps | |
188 | ||
189 | def fix(self, status): | |
190 | archive_local = NamedTemporaryFile(delete=False) | |
191 | try: | |
192 | archive_local.close() | |
193 | git_command( | |
194 | ["archive", "-o", archive_local.name, self._expanded_rev], | |
195 | self._repo_dir, | |
196 | ) | |
197 | temp_filename = ".bundlewrap_tmp_git_deploy_" + randstr() | |
198 | ||
199 | try: | |
200 | self.node.upload( | |
201 | archive_local.name, | |
202 | temp_filename, | |
203 | ) | |
204 | self.run("find {} -mindepth 1 -delete".format(quote(self.name))) | |
205 | self.run("tar -xf {} -C {}".format(temp_filename, quote(self.name))) | |
206 | if self.attributes['use_xattrs']: | |
207 | self.run("attr -q -s bw_git_deploy_rev -V {} {}".format( | |
208 | self._expanded_rev, | |
209 | quote(self.name), | |
210 | )) | |
211 | else: | |
212 | self.run("echo {} > {}".format( | |
213 | self._expanded_rev, | |
214 | quote(join(self.name, REMOTE_STATE_FILENAME)), | |
215 | )) | |
216 | self.run("chmod 400 {}".format( | |
217 | quote(join(self.name, REMOTE_STATE_FILENAME)), | |
218 | )) | |
219 | finally: | |
220 | self.run("rm -f {}".format(temp_filename)) | |
221 | finally: | |
222 | remove(archive_local.name) | |
223 | ||
224 | def sdict(self): | |
225 | if self.attributes['use_xattrs']: | |
226 | status_result = self.run( | |
227 | "attr -q -g bw_git_deploy_rev {}".format(quote(self.name)), | |
228 | may_fail=True, | |
229 | ) | |
230 | else: | |
231 | status_result = self.run( | |
232 | "cat {}".format(quote(join(self.name, REMOTE_STATE_FILENAME))), | |
233 | may_fail=True, | |
234 | ) | |
235 | if status_result.return_code != 0: | |
236 | return None | |
237 | else: | |
238 | return {'rev': status_result.stdout.decode('utf-8').strip()} | |
239 | ||
240 | # FIXME get_auto_deps for dir and ensure dir does not use purge |
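
A hedged bundle sketch for the new git_deploy items (the path, repo key, and rev are placeholders); the 'repo' value is resolved through the git_deploy_repos map described above, and get_auto_deps() makes the item depend on a matching directory item automatically.

    # bundles/website/items.py (hypothetical)
    directories = {
        "/var/www/example.com": {},
    }
    git_deploy = {
        "/var/www/example.com": {
            'repo': "website",    # key in the git_deploy_repos file
            'rev': "main",
            'use_xattrs': False,
        },
    }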
0 | # -*- coding: utf-8 -*- | |
1 | from __future__ import unicode_literals | |
2 | ||
3 | 0 | from bundlewrap.exceptions import BundleError |
4 | 1 | from bundlewrap.items import BUILTIN_ITEM_ATTRIBUTES, Item |
5 | 2 | from bundlewrap.items.users import _USERNAME_VALID_CHARACTERS |
60 | 57 | gid=self.attributes['gid'], |
61 | 58 | groupname=self.name, |
62 | 59 | ) |
63 | self.node.run(command, may_fail=True) | |
60 | self.run(command, may_fail=True) | |
64 | 61 | elif status.must_be_deleted: |
65 | self.node.run("groupdel {}".format(self.name), may_fail=True) | |
62 | self.run("groupdel {}".format(self.name), may_fail=True) | |
66 | 63 | else: |
67 | self.node.run( | |
64 | self.run( | |
68 | 65 | "groupmod -g {gid} {groupname}".format( |
69 | 66 | gid=self.attributes['gid'], |
70 | 67 | groupname=self.name, |
74 | 71 | |
75 | 72 | def sdict(self): |
76 | 73 | # verify content of /etc/group |
77 | grep_result = self.node.run( | |
74 | grep_result = self.run( | |
78 | 75 | "grep -e '^{}:' /etc/group".format(self.name), |
79 | 76 | may_fail=True, |
80 | 77 | ) |
0 | # -*- coding: utf-8 -*- | |
1 | from __future__ import unicode_literals | |
2 | ||
3 | 0 | from abc import ABCMeta |
4 | 1 | import json |
5 | 2 | from os.path import exists, join |
13 | 10 | from bundlewrap.utils.dicts import merge_dict, reduce_dict |
14 | 11 | from bundlewrap.utils.ui import io |
15 | 12 | from bundlewrap.utils.text import force_text, mark_for_translation as _ |
16 | from six import add_metaclass | |
17 | 13 | import yaml |
18 | 14 | |
19 | 15 | |
23 | 19 | io.debug(run_result.stderr.decode('utf-8')) |
24 | 20 | |
25 | 21 | |
26 | @add_metaclass(ABCMeta) | |
27 | class KubernetesItem(Item): | |
22 | class KubernetesItem(Item, metaclass=ABCMeta): | |
28 | 23 | """ |
29 | 24 | A generic Kubernetes item. |
30 | 25 | """ |
37 | 32 | 'context': None, |
38 | 33 | } |
39 | 34 | KIND = None |
40 | KUBERNETES_APIVERSION = "v1" | |
41 | 35 | NAME_REGEX = r"^[a-z0-9-\.]{1,253}/[a-z0-9-\.]{1,253}$" |
42 | 36 | NAME_REGEX_COMPILED = re.compile(NAME_REGEX) |
43 | 37 | |
65 | 59 | |
66 | 60 | def fix(self, status): |
67 | 61 | if status.must_be_deleted: |
68 | result = run_local(self._kubectl + ["delete", self.KIND, self.resource_name]) | |
62 | result = self.run_local(self._kubectl + ["delete", self.KIND, self.resource_name]) | |
69 | 63 | log_error(result) |
70 | 64 | else: |
71 | result = run_local( | |
65 | result = self.run_local( | |
72 | 66 | self._kubectl + ["apply", "-f", "-"], |
73 | 67 | data_stdin=self.manifest.encode('utf-8'), |
74 | 68 | ) |
130 | 124 | |
131 | 125 | merged_manifest = merge_dict( |
132 | 126 | { |
133 | 'apiVersion': self.KUBERNETES_APIVERSION, | |
134 | 127 | 'kind': self.KIND, |
135 | 128 | 'metadata': { |
136 | 129 | 'name': self.name.split("/")[-1], |
177 | 170 | return self._manifest_dict['metadata']['name'] |
178 | 171 | |
179 | 172 | def sdict(self): |
180 | result = run_local(self._kubectl + ["get", "-o", "json", self.KIND, self.resource_name]) | |
173 | result = self.run_local(self._kubectl + ["get", "-o", "json", self.KIND, self.resource_name]) | |
181 | 174 | if result.return_code == 0: |
182 | 175 | full_json_response = json.loads(result.stdout.decode('utf-8')) |
183 | 176 | if full_json_response.get("status", {}).get("phase") == "Terminating": |
231 | 224 | class KubernetesRawItem(KubernetesItem): |
232 | 225 | BUNDLE_ATTRIBUTE_NAME = "k8s_raw" |
233 | 226 | ITEM_TYPE_NAME = "k8s_raw" |
234 | KUBERNETES_APIVERSION = None | |
235 | 227 | NAME_REGEX = r"^([a-z0-9-\.]{1,253})?/[a-zA-Z0-9-\.]{1,253}/[a-z0-9-\.]{1,253}$" |
236 | 228 | NAME_REGEX_COMPILED = re.compile(NAME_REGEX) |
237 | 229 | |
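
With the hardcoded KUBERNETES_APIVERSION defaults removed, the apiVersion has to come from the bundle's manifest instead; a sketch (names are placeholders, and the 'manifest' attribute is assumed to behave as in earlier releases):

    k8s_deployments = {
        "example-ns/example-app": {
            'manifest': {
                'apiVersion': "apps/v1",  # now required explicitly
            },
        },
    }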
270 | 262 | class KubernetesClusterRole(KubernetesItem): |
271 | 263 | BUNDLE_ATTRIBUTE_NAME = "k8s_clusterroles" |
272 | 264 | KIND = "ClusterRole" |
273 | KUBERNETES_APIVERSION = "rbac.authorization.k8s.io/v1" | |
274 | 265 | ITEM_TYPE_NAME = "k8s_clusterrole" |
275 | 266 | NAME_REGEX = r"^[a-z0-9-\.]{1,253}$" |
276 | 267 | NAME_REGEX_COMPILED = re.compile(NAME_REGEX) |
283 | 274 | class KubernetesClusterRoleBinding(KubernetesItem): |
284 | 275 | BUNDLE_ATTRIBUTE_NAME = "k8s_clusterrolebindings" |
285 | 276 | KIND = "ClusterRoleBinding" |
286 | KUBERNETES_APIVERSION = "rbac.authorization.k8s.io/v1" | |
287 | 277 | ITEM_TYPE_NAME = "k8s_clusterrolebinding" |
288 | 278 | NAME_REGEX = r"^[a-z0-9-\.]{1,253}$" |
289 | 279 | NAME_REGEX_COMPILED = re.compile(NAME_REGEX) |
301 | 291 | class KubernetesConfigMap(KubernetesItem): |
302 | 292 | BUNDLE_ATTRIBUTE_NAME = "k8s_configmaps" |
303 | 293 | KIND = "ConfigMap" |
304 | KUBERNETES_APIVERSION = "v1" | |
305 | 294 | ITEM_TYPE_NAME = "k8s_configmap" |
306 | 295 | |
307 | 296 | |
308 | 297 | class KubernetesCronJob(KubernetesItem): |
309 | 298 | BUNDLE_ATTRIBUTE_NAME = "k8s_cronjobs" |
310 | 299 | KIND = "CronJob" |
311 | KUBERNETES_APIVERSION = "batch/v1beta1" | |
312 | 300 | ITEM_TYPE_NAME = "k8s_cronjob" |
313 | 301 | |
314 | 302 | |
315 | 303 | class KubernetesCustomResourceDefinition(KubernetesItem): |
316 | 304 | BUNDLE_ATTRIBUTE_NAME = "k8s_crd" |
317 | 305 | KIND = "CustomResourceDefinition" |
318 | KUBERNETES_APIVERSION = "apiextensions.k8s.io/v1" | |
319 | 306 | ITEM_TYPE_NAME = "k8s_crd" |
320 | 307 | NAME_REGEX = r"^[a-z0-9-\.]{1,253}$" |
321 | 308 | NAME_REGEX_COMPILED = re.compile(NAME_REGEX) |
331 | 318 | class KubernetesDaemonSet(KubernetesItem): |
332 | 319 | BUNDLE_ATTRIBUTE_NAME = "k8s_daemonsets" |
333 | 320 | KIND = "DaemonSet" |
334 | KUBERNETES_APIVERSION = "apps/v1" | |
335 | 321 | ITEM_TYPE_NAME = "k8s_daemonset" |
336 | 322 | |
337 | 323 | def get_auto_deps(self, items): |
348 | 334 | class KubernetesDeployment(KubernetesItem): |
349 | 335 | BUNDLE_ATTRIBUTE_NAME = "k8s_deployments" |
350 | 336 | KIND = "Deployment" |
351 | KUBERNETES_APIVERSION = "apps/v1" | |
352 | 337 | ITEM_TYPE_NAME = "k8s_deployment" |
353 | 338 | |
354 | 339 | def get_auto_deps(self, items): |
365 | 350 | class KubernetesIngress(KubernetesItem): |
366 | 351 | BUNDLE_ATTRIBUTE_NAME = "k8s_ingresses" |
367 | 352 | KIND = "Ingress" |
368 | KUBERNETES_APIVERSION = "networking.k8s.io/v1beta1" | |
369 | 353 | ITEM_TYPE_NAME = "k8s_ingress" |
370 | 354 | |
371 | 355 | def get_auto_deps(self, items): |
382 | 366 | class KubernetesNamespace(KubernetesItem): |
383 | 367 | BUNDLE_ATTRIBUTE_NAME = "k8s_namespaces" |
384 | 368 | KIND = "Namespace" |
385 | KUBERNETES_APIVERSION = "v1" | |
386 | 369 | ITEM_TYPE_NAME = "k8s_namespace" |
387 | 370 | NAME_REGEX = r"^[a-z0-9-\.]{1,253}$" |
388 | 371 | NAME_REGEX_COMPILED = re.compile(NAME_REGEX) |
394 | 377 | class KubernetesNetworkPolicy(KubernetesItem): |
395 | 378 | BUNDLE_ATTRIBUTE_NAME = "k8s_networkpolicies" |
396 | 379 | KIND = "NetworkPolicy" |
397 | KUBERNETES_APIVERSION = "networking.k8s.io/v1" | |
398 | 380 | ITEM_TYPE_NAME = "k8s_networkpolicy" |
399 | 381 | NAME_REGEX = r"^([a-z0-9-\.]{1,253})?/[a-z0-9-\.]{1,253}$" |
400 | 382 | NAME_REGEX_COMPILED = re.compile(NAME_REGEX) |
403 | 385 | class KubernetesPersistentVolumeClain(KubernetesItem): |
404 | 386 | BUNDLE_ATTRIBUTE_NAME = "k8s_pvc" |
405 | 387 | KIND = "PersistentVolumeClaim" |
406 | KUBERNETES_APIVERSION = "v1" | |
407 | 388 | ITEM_TYPE_NAME = "k8s_pvc" |
408 | 389 | |
409 | 390 | |
410 | 391 | class KubernetesRole(KubernetesItem): |
411 | 392 | BUNDLE_ATTRIBUTE_NAME = "k8s_roles" |
412 | 393 | KIND = "Role" |
413 | KUBERNETES_APIVERSION = "rbac.authorization.k8s.io/v1" | |
414 | 394 | ITEM_TYPE_NAME = "k8s_role" |
415 | 395 | |
416 | 396 | |
417 | 397 | class KubernetesRoleBinding(KubernetesItem): |
418 | 398 | BUNDLE_ATTRIBUTE_NAME = "k8s_rolebindings" |
419 | 399 | KIND = "RoleBinding" |
420 | KUBERNETES_APIVERSION = "rbac.authorization.k8s.io/v1" | |
421 | 400 | ITEM_TYPE_NAME = "k8s_rolebinding" |
422 | 401 | |
423 | 402 | def get_auto_deps(self, items): |
429 | 408 | class KubernetesSecret(KubernetesItem): |
430 | 409 | BUNDLE_ATTRIBUTE_NAME = "k8s_secrets" |
431 | 410 | KIND = "Secret" |
432 | KUBERNETES_APIVERSION = "v1" | |
433 | 411 | ITEM_TYPE_NAME = "k8s_secret" |
434 | 412 | |
435 | 413 | def get_auto_deps(self, items): |
439 | 417 | class KubernetesService(KubernetesItem): |
440 | 418 | BUNDLE_ATTRIBUTE_NAME = "k8s_services" |
441 | 419 | KIND = "Service" |
442 | KUBERNETES_APIVERSION = "v1" | |
443 | 420 | ITEM_TYPE_NAME = "k8s_service" |
444 | 421 | |
445 | 422 | |
446 | 423 | class KubernetesServiceAccount(KubernetesItem): |
447 | 424 | BUNDLE_ATTRIBUTE_NAME = "k8s_serviceaccounts" |
448 | 425 | KIND = "ServiceAccount" |
449 | KUBERNETES_APIVERSION = "v1" | |
450 | 426 | ITEM_TYPE_NAME = "k8s_serviceaccount" |
451 | 427 | |
452 | 428 | |
453 | 429 | class KubernetesStatefulSet(KubernetesItem): |
454 | 430 | BUNDLE_ATTRIBUTE_NAME = "k8s_statefulsets" |
455 | 431 | KIND = "StatefulSet" |
456 | KUBERNETES_APIVERSION = "apps/v1" | |
457 | 432 | ITEM_TYPE_NAME = "k8s_statefulset" |
458 | 433 | |
459 | 434 | def get_auto_deps(self, items): |
0 | # -*- coding: utf-8 -*- | |
1 | from __future__ import unicode_literals | |
2 | ||
3 | 0 | from abc import ABCMeta, abstractmethod |
4 | 1 | |
5 | 2 | from bundlewrap.exceptions import BundleError |
6 | 3 | from bundlewrap.items import Item |
7 | 4 | from bundlewrap.utils.text import mark_for_translation as _ |
8 | from six import add_metaclass | |
9 | 5 | |
10 | 6 | |
11 | @add_metaclass(ABCMeta) | |
12 | class Pkg(Item): | |
7 | class Pkg(Item, metaclass=ABCMeta): | |
13 | 8 | """ |
14 | 9 | A generic package. |
15 | 10 | """ |
0 | # -*- coding: utf-8 -*- | |
1 | from __future__ import unicode_literals | |
2 | ||
3 | from pipes import quote | |
0 | from shlex import quote | |
4 | 1 | |
5 | 2 | from bundlewrap.exceptions import BundleError |
6 | 3 | from bundlewrap.items.pkg import Pkg |
18 | 15 | } |
19 | 16 | |
20 | 17 | def pkg_all_installed(self): |
21 | result = self.node.run("dpkg -l | grep '^ii'") | |
18 | result = self.run("dpkg -l | grep '^ii'") | |
22 | 19 | for line in result.stdout.decode('utf-8').strip().split("\n"): |
23 | 20 | pkg_name = line[4:].split()[0].replace(":", "_") |
24 | 21 | yield "{}:{}".format(self.ITEM_TYPE_NAME, pkg_name) |
25 | 22 | |
26 | 23 | def pkg_install(self): |
27 | 24 | runlevel = "" if self.when_creating['start_service'] else "RUNLEVEL=1 " |
28 | self.node.run( | |
25 | self.run( | |
29 | 26 | runlevel + |
30 | 27 | "DEBIAN_FRONTEND=noninteractive " |
31 | 28 | "apt-get -qy -o Dpkg::Options::=--force-confold --no-install-recommends " |
34 | 31 | ) |
35 | 32 | |
36 | 33 | def pkg_installed(self): |
37 | result = self.node.run( | |
34 | result = self.run( | |
38 | 35 | "dpkg -s {} | grep '^Status: '".format(quote(self.name.replace("_", ":"))), |
39 | 36 | may_fail=True, |
40 | 37 | ) |
54 | 51 | return False |
55 | 52 | |
56 | 53 | def pkg_remove(self): |
57 | self.node.run( | |
54 | self.run( | |
58 | 55 | "DEBIAN_FRONTEND=noninteractive " |
59 | 56 | "apt-get -qy purge {}".format(quote(self.name.replace("_", ":"))) |
60 | 57 | ) |
0 | # -*- coding: utf-8 -*- | |
1 | from __future__ import unicode_literals | |
2 | ||
3 | from pipes import quote | |
0 | from shlex import quote | |
4 | 1 | |
5 | 2 | from bundlewrap.items.pkg import Pkg |
6 | 3 | |
17 | 14 | return ["pkg_dnf", "pkg_yum"] |
18 | 15 | |
19 | 16 | def pkg_all_installed(self): |
20 | result = self.node.run("dnf -d0 -e0 list installed") | |
17 | result = self.run("dnf -d0 -e0 list installed") | |
21 | 18 | for line in result.stdout.decode('utf-8').strip().split("\n"): |
22 | 19 | yield "{}:{}".format(self.ITEM_TYPE_NAME, line.split()[0].split(".")[0]) |
23 | 20 | |
24 | 21 | def pkg_install(self): |
25 | self.node.run("dnf -d0 -e0 -y install {}".format(quote(self.name)), may_fail=True) | |
22 | self.run("dnf -d0 -e0 -y install {}".format(quote(self.name)), may_fail=True) | |
26 | 23 | |
27 | 24 | def pkg_installed(self): |
28 | result = self.node.run( | |
25 | result = self.run( | |
29 | 26 | "dnf -d0 -e0 list installed {}".format(quote(self.name)), |
30 | 27 | may_fail=True, |
31 | 28 | ) |
32 | 29 | return result.return_code == 0 |
33 | 30 | |
34 | 31 | def pkg_remove(self): |
35 | self.node.run("dnf -d0 -e0 -y remove {}".format(quote(self.name)), may_fail=True) | |
32 | self.run("dnf -d0 -e0 -y remove {}".format(quote(self.name)), may_fail=True) |
0 | # -*- coding: utf-8 -*- | |
1 | from __future__ import unicode_literals | |
2 | ||
3 | from pipes import quote | |
0 | from shlex import quote | |
4 | 1 | import re |
5 | 2 | |
6 | 3 | from bundlewrap.exceptions import BundleError |
0 | # -*- coding: utf-8 -*- | |
1 | from __future__ import unicode_literals | |
2 | ||
3 | from pipes import quote | |
0 | from shlex import quote | |
4 | 1 | |
5 | 2 | from bundlewrap.items.pkg import Pkg |
6 | 3 | |
17 | 14 | return ["pkg_opkg"] |
18 | 15 | |
19 | 16 | def pkg_all_installed(self): |
20 | result = self.node.run("opkg list-installed") | |
17 | result = self.run("opkg list-installed") | |
21 | 18 | for line in result.stdout.decode('utf-8').strip().split("\n"): |
22 | 19 | if line: |
23 | 20 | yield "{}:{}".format(self.ITEM_TYPE_NAME, line.split()[0]) |
24 | 21 | |
25 | 22 | def pkg_install(self): |
26 | self.node.run("opkg install {}".format(quote(self.name)), may_fail=True) | |
23 | self.run("opkg install {}".format(quote(self.name)), may_fail=True) | |
27 | 24 | |
28 | 25 | def pkg_installed(self): |
29 | result = self.node.run( | |
26 | result = self.run( | |
30 | 27 | "opkg status {} | grep ^Status: | grep installed".format(quote(self.name)), |
31 | 28 | may_fail=True, |
32 | 29 | ) |
33 | 30 | return result.return_code == 0 |
34 | 31 | |
35 | 32 | def pkg_remove(self): |
36 | self.node.run("opkg remove {}".format(quote(self.name)), may_fail=True) | |
33 | self.run("opkg remove {}".format(quote(self.name)), may_fail=True) |
0 | # -*- coding: utf-8 -*- | |
1 | from __future__ import unicode_literals | |
2 | ||
3 | 0 | from os.path import basename, join |
4 | from pipes import quote | |
1 | from shlex import quote | |
5 | 2 | |
6 | 3 | from bundlewrap.items.pkg import Pkg |
7 | 4 | |
24 | 21 | return {'installed': self.attributes['installed']} |
25 | 22 | |
26 | 23 | def pkg_all_installed(self): |
27 | pkgs = self.node.run("pacman -Qq").stdout.decode('utf-8') | |
24 | pkgs = self.run("pacman -Qq").stdout.decode('utf-8') | |
28 | 25 | for line in pkgs.splitlines(): |
29 | 26 | yield "{}:{}".format(self.ITEM_TYPE_NAME, line.split()[0])
30 | 27 | |
33 | 30 | local_file = join(self.item_dir, self.attributes['tarball']) |
34 | 31 | remote_file = "/tmp/{}".format(basename(local_file)) |
35 | 32 | self.node.upload(local_file, remote_file) |
36 | self.node.run("pacman --noconfirm -U {}".format(quote(remote_file)), may_fail=True) | |
37 | self.node.run("rm -- {}".format(quote(remote_file))) | |
33 | self.run("pacman --noconfirm -U {}".format(quote(remote_file)), may_fail=True) | |
34 | self.run("rm -- {}".format(quote(remote_file))) | |
38 | 35 | else: |
39 | self.node.run("pacman --noconfirm -S {}".format(quote(self.name)), may_fail=True) | |
36 | self.run("pacman --noconfirm -S {}".format(quote(self.name)), may_fail=True) | |
40 | 37 | |
41 | 38 | def pkg_installed(self): |
42 | result = self.node.run( | |
39 | result = self.run( | |
43 | 40 | "pacman -Q {}".format(quote(self.name)), |
44 | 41 | may_fail=True, |
45 | 42 | ) |
46 | 43 | return result.return_code == 0 |
47 | 44 | |
48 | 45 | def pkg_remove(self): |
49 | self.node.run("pacman --noconfirm -Rs {}".format(quote(self.name)), may_fail=True) | |
46 | self.run("pacman --noconfirm -Rs {}".format(quote(self.name)), may_fail=True) |
0 | # -*- coding: utf-8 -*- | |
1 | from __future__ import unicode_literals | |
2 | ||
3 | 0 | from os.path import join, split |
4 | from pipes import quote | |
1 | from shlex import quote | |
5 | 2 | |
6 | 3 | from bundlewrap.exceptions import BundleError |
7 | 4 | from bundlewrap.items import Item |
0 | # -*- coding: utf-8 -*- | |
1 | from __future__ import unicode_literals | |
2 | ||
3 | from pipes import quote | |
0 | from shlex import quote | |
4 | 1 | |
5 | 2 | from bundlewrap.items.pkg import Pkg |
6 | 3 | |
13 | 10 | ITEM_TYPE_NAME = "pkg_snap" |
14 | 11 | |
15 | 12 | def pkg_all_installed(self): |
16 | result = self.node.run("snap list") | |
13 | result = self.run("snap list") | |
17 | 14 | for line in result.stdout.decode('utf-8').strip().split("\n"): |
18 | 15 | yield "{}:{}".format(self.ITEM_TYPE_NAME, line.split()[0].split(" ")[0]) |
19 | 16 | |
20 | 17 | def pkg_install(self): |
21 | self.node.run("snap install {}".format(quote(self.name)), may_fail=True) | |
18 | self.run("snap install {}".format(quote(self.name)), may_fail=True) | |
22 | 19 | |
23 | 20 | def pkg_installed(self): |
24 | result = self.node.run( | |
21 | result = self.run( | |
25 | 22 | "snap list {}".format(quote(self.name)), |
26 | 23 | may_fail=True, |
27 | 24 | ) |
28 | 25 | return result.return_code == 0 |
29 | 26 | |
30 | 27 | def pkg_remove(self): |
31 | self.node.run("snap remove {}".format(quote(self.name)), may_fail=True) | |
28 | self.run("snap remove {}".format(quote(self.name)), may_fail=True) |
0 | # -*- coding: utf-8 -*- | |
1 | from __future__ import unicode_literals | |
2 | ||
3 | from pipes import quote | |
0 | from shlex import quote | |
4 | 1 | |
5 | 2 | from bundlewrap.items.pkg import Pkg |
6 | 3 | |
17 | 14 | return ["pkg_dnf", "pkg_yum"] |
18 | 15 | |
19 | 16 | def pkg_all_installed(self): |
20 | result = self.node.run("yum -d0 -e0 list installed") | |
17 | result = self.run("yum -d0 -e0 list installed") | |
21 | 18 | for line in result.stdout.decode('utf-8').strip().split("\n"): |
22 | 19 | yield "{}:{}".format(self.ITEM_TYPE_NAME, line.split()[0].split(".")[0]) |
23 | 20 | |
24 | 21 | def pkg_install(self): |
25 | self.node.run("yum -d0 -e0 -y install {}".format(quote(self.name)), may_fail=True) | |
22 | self.run("yum -d0 -e0 -y install {}".format(quote(self.name)), may_fail=True) | |
26 | 23 | |
27 | 24 | def pkg_installed(self): |
28 | result = self.node.run( | |
25 | result = self.run( | |
29 | 26 | "yum -d0 -e0 list installed {}".format(quote(self.name)), |
30 | 27 | may_fail=True, |
31 | 28 | ) |
32 | 29 | return result.return_code == 0 |
33 | 30 | |
34 | 31 | def pkg_remove(self): |
35 | self.node.run("yum -d0 -e0 -y remove {}".format(quote(self.name)), may_fail=True) | |
32 | self.run("yum -d0 -e0 -y remove {}".format(quote(self.name)), may_fail=True) |
0 | # -*- coding: utf-8 -*- | |
1 | from __future__ import unicode_literals | |
2 | ||
3 | from pipes import quote | |
0 | from shlex import quote | |
4 | 1 | |
5 | 2 | from bundlewrap.exceptions import BundleError |
6 | 3 | from bundlewrap.items import Item |
0 | # -*- coding: utf-8 -*- | |
1 | from __future__ import unicode_literals | |
2 | ||
3 | from pipes import quote | |
0 | from shlex import quote | |
4 | 1 | |
5 | 2 | from bundlewrap.exceptions import BundleError |
6 | 3 | from bundlewrap.items import Item |
0 | # -*- coding: utf-8 -*- | |
1 | from __future__ import unicode_literals | |
2 | ||
3 | 0 | from passlib.apps import postgres_context |
4 | 1 | |
5 | 2 | from bundlewrap.exceptions import BundleError |
0 | # -*- coding: utf-8 -*- | |
1 | from __future__ import unicode_literals | |
2 | ||
3 | from pipes import quote | |
0 | from shlex import quote | |
4 | 1 | |
5 | 2 | from bundlewrap.exceptions import BundleError |
6 | 3 | from bundlewrap.items import Item |
0 | # -*- coding: utf-8 -*- | |
1 | from __future__ import unicode_literals | |
2 | ||
3 | from pipes import quote | |
0 | from shlex import quote | |
4 | 1 | |
5 | 2 | from bundlewrap.exceptions import BundleError |
6 | 3 | from bundlewrap.items import Item |
0 | # -*- coding: utf-8 -*- | |
1 | from __future__ import unicode_literals | |
2 | ||
3 | from pipes import quote | |
0 | from shlex import quote | |
4 | 1 | |
5 | 2 | from bundlewrap.exceptions import BundleError |
6 | 3 | from bundlewrap.items import Item |
0 | # -*- coding: utf-8 -*- | |
1 | from __future__ import unicode_literals | |
2 | ||
3 | from pipes import quote | |
0 | from shlex import quote | |
4 | 1 | |
5 | 2 | from bundlewrap.exceptions import BundleError |
6 | 3 | from bundlewrap.items import Item |
0 | # -*- coding: utf-8 -*- | |
1 | from __future__ import unicode_literals | |
2 | ||
3 | 0 | from collections import defaultdict |
4 | 1 | from os.path import dirname, normpath |
5 | from pipes import quote | |
2 | from shlex import quote | |
6 | 3 | |
7 | 4 | from bundlewrap.exceptions import BundleError |
8 | 5 | from bundlewrap.items import Item |
64 | 61 | command = "chown -h {}{} {}" |
65 | 62 | else: |
66 | 63 | command = "chown -h {}{} -- {}" |
67 | self.node.run(command.format( | |
64 | self.run(command.format( | |
68 | 65 | quote(self.attributes['owner'] or ""), |
69 | 66 | group, |
70 | 67 | quote(self.name), |
73 | 70 | |
74 | 71 | def _fix_target(self, status): |
75 | 72 | if self.node.os in self.node.OS_FAMILY_BSD: |
76 | self.node.run("ln -sfh -- {} {}".format( | |
73 | self.run("ln -sfh -- {} {}".format( | |
77 | 74 | quote(self.attributes['target']), |
78 | 75 | quote(self.name), |
79 | 76 | )) |
80 | 77 | else: |
81 | self.node.run("ln -sfT -- {} {}".format( | |
78 | self.run("ln -sfT -- {} {}".format( | |
82 | 79 | quote(self.attributes['target']), |
83 | 80 | quote(self.name), |
84 | 81 | )) |
85 | 82 | |
86 | 83 | def _fix_type(self, status): |
87 | self.node.run("rm -rf -- {}".format(quote(self.name))) | |
88 | self.node.run("mkdir -p -- {}".format(quote(dirname(self.name)))) | |
89 | self.node.run("ln -s -- {} {}".format( | |
84 | self.run("rm -rf -- {}".format(quote(self.name))) | |
85 | self.run("mkdir -p -- {}".format(quote(dirname(self.name)))) | |
86 | self.run("ln -s -- {} {}".format( | |
90 | 87 | quote(self.attributes['target']), |
91 | 88 | quote(self.name), |
92 | 89 | )) |
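The flag split above exists because plain "ln -sf target name" misbehaves when name is already a symlink to a directory: ln follows it and creates the new link inside that directory instead of replacing the link itself. GNU coreutils suppresses this with -T, BSD ln with -h. A compact restatement of the dispatch (the function name is illustrative, not part of the codebase):

    from shlex import quote

    def symlink_fix_command(on_bsd, target, name):
        # -f replaces an existing link; -h/-T keep ln from descending
        # into a directory the old link points at
        flag = "-sfh" if on_bsd else "-sfT"
        return "ln {} -- {} {}".format(flag, quote(target), quote(name))

    print(symlink_fix_command(False, "/srv/releases/42", "/srv/current"))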
0 | # -*- coding: utf-8 -*- | |
1 | from __future__ import unicode_literals | |
2 | ||
3 | 0 | from logging import ERROR, getLogger |
4 | from pipes import quote | |
1 | from shlex import quote | |
5 | 2 | from string import ascii_lowercase, digits |
6 | 3 | |
7 | 4 | from passlib.hash import bcrypt, md5_crypt, sha256_crypt, sha512_crypt |
136 | 133 | |
137 | 134 | def fix(self, status): |
138 | 135 | if status.must_be_deleted: |
139 | self.node.run("userdel {}".format(self.name), may_fail=True) | |
136 | self.run("userdel {}".format(self.name), may_fail=True) | |
140 | 137 | else: |
141 | 138 | command = "useradd " if status.must_be_created else "usermod " |
142 | 139 | for attr, option in sorted(_ATTRIBUTE_OPTIONS.items()): |
148 | 145 | value = str(self.attributes[attr]) |
149 | 146 | command += "{} {} ".format(option, quote(value)) |
150 | 147 | command += self.name |
151 | self.node.run(command, may_fail=True) | |
148 | self.run(command, may_fail=True) | |
152 | 149 | |
153 | 150 | def display_dicts(self, cdict, sdict, keys): |
154 | 151 | for attr_name, attr_display_name in _ATTRIBUTE_NAMES.items(): |
199 | 196 | password_command = "grep -ae '^{}:' /etc/master.passwd" |
200 | 197 | else: |
201 | 198 | password_command = "grep -ae '^{}:' /etc/passwd" |
202 | passwd_grep_result = self.node.run( | |
199 | passwd_grep_result = self.run( | |
203 | 200 | password_command.format(self.name), |
204 | 201 | may_fail=True, |
205 | 202 | ) |
230 | 227 | if self.attributes['password_hash'] is not None: |
231 | 228 | if self.attributes['use_shadow'] and self.node.os not in self.node.OS_FAMILY_BSD: |
232 | 229 | # verify content of /etc/shadow unless we are on a BSD-family OS
233 | shadow_grep_result = self.node.run( | |
230 | shadow_grep_result = self.run( | |
234 | 231 | "grep -e '^{}:' /etc/shadow".format(self.name), |
235 | 232 | may_fail=True, |
236 | 233 | ) |
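The user item reads the stored hash out of /etc/shadow (or /etc/passwd, or /etc/master.passwd on BSD) with the grep commands above. The passlib hashers imported at the top of this file are what produce and check such hashes; a quick illustration of that API (the password values are made up):

    from passlib.hash import sha512_crypt

    stored = sha512_crypt.hash("hunter2")  # the kind of string found in /etc/shadow
    assert sha512_crypt.verify("hunter2", stored)
    assert not sha512_crypt.verify("wrong", stored)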
0 | # -*- coding: utf-8 -*- | |
1 | from __future__ import unicode_literals | |
2 | ||
3 | 0 | from datetime import datetime |
4 | 1 | from getpass import getuser |
5 | 2 | import json |
6 | 3 | from os import environ |
7 | from pipes import quote | |
4 | from shlex import quote | |
8 | 5 | from socket import gethostname |
9 | 6 | from time import time |
10 | 7 | |
11 | from .exceptions import NodeLockedException, RemoteException | |
8 | from .exceptions import NodeLockedException, NoSuchNode, RemoteException | |
12 | 9 | from .utils import cached_property, tempfile |
13 | 10 | from .utils.text import ( |
14 | 11 | blue, |
23 | 20 | from .utils.ui import io |
24 | 21 | |
25 | 22 | |
26 | HARD_LOCK_PATH = "/tmp/bundlewrap.lock" | |
27 | HARD_LOCK_FILE = HARD_LOCK_PATH + "/info" | |
28 | SOFT_LOCK_PATH = "/tmp/bundlewrap.softlock.d" | |
29 | SOFT_LOCK_FILE = "/tmp/bundlewrap.softlock.d/{id}" | |
30 | ||
31 | ||
32 | def get_hard_lock_info(node, local_path): | |
33 | try: | |
34 | node.download(HARD_LOCK_FILE, local_path) | |
35 | with open(local_path, 'r') as fp: | |
36 | return json.load(fp) | |
37 | except (RemoteException, ValueError): | |
38 | io.stderr(_( | |
39 | "{x} {node_bold} corrupted hard lock: " | |
40 | "unable to read or parse lock file contents " | |
41 | "(clear it with `bw run {node} 'rm -Rf {path}'`)" | |
42 | ).format( | |
43 | node_bold=bold(node.name), | |
44 | node=node.name, | |
45 | path=HARD_LOCK_PATH, | |
46 | x=red("!"), | |
47 | )) | |
48 | return {} | |
23 | LOCK_BASE = "/var/lib/bundlewrap" | |
49 | 24 | |
50 | 25 | |
51 | 26 | def identity(): |
55 | 30 | )) |
56 | 31 | |
57 | 32 | |
58 | class NodeLock(object): | |
33 | class NodeLock: | |
59 | 34 | def __init__(self, node, interactive=False, ignore=False): |
60 | 35 | self.node = node |
61 | 36 | self.ignore = ignore |
62 | 37 | self.interactive = interactive |
38 | self.locking_node = _get_locking_node(node) | |
63 | 39 | |
64 | 40 | def __enter__(self): |
65 | if self.node.os not in self.node.OS_FAMILY_UNIX: | |
41 | if self.locking_node.os not in self.locking_node.OS_FAMILY_UNIX: | |
66 | 42 | # no locking required/possible |
67 | 43 | return self |
68 | 44 | with tempfile() as local_path: |
45 | self.locking_node.run("mkdir -p " + quote(LOCK_BASE)) | |
69 | 46 | if not self.ignore: |
70 | 47 | with io.job(_("{node} checking hard lock status").format(node=bold(self.node.name))): |
71 | result = self.node.run("mkdir " + quote(HARD_LOCK_PATH), may_fail=True) | |
48 | result = self.locking_node.run("mkdir " + quote(self._hard_lock_dir()), may_fail=True) | |
72 | 49 | if result.return_code != 0: |
73 | info = get_hard_lock_info(self.node, local_path) | |
50 | info = self._get_hard_lock_info(local_path) | |
74 | 51 | expired = False |
75 | 52 | try: |
76 | 53 | d = info['date'] |
97 | 74 | |
98 | 75 | with io.job(_("{node} uploading lock file").format(node=bold(self.node.name))): |
99 | 76 | if self.ignore: |
100 | self.node.run("mkdir -p " + quote(HARD_LOCK_PATH)) | |
77 | self.locking_node.run("mkdir -p " + quote(self._hard_lock_dir())) | |
101 | 78 | with open(local_path, 'w') as f: |
102 | 79 | f.write(json.dumps({ |
103 | 80 | 'date': time(), |
104 | 81 | 'user': identity(), |
105 | 82 | })) |
106 | self.node.upload(local_path, HARD_LOCK_FILE) | |
83 | self.locking_node.upload(local_path, self._hard_lock_file()) | |
107 | 84 | |
108 | 85 | return self |
109 | 86 | |
110 | 87 | def __exit__(self, type, value, traceback): |
111 | if self.node.os not in self.node.OS_FAMILY_UNIX: | |
88 | if self.locking_node.os not in self.locking_node.OS_FAMILY_UNIX: | |
112 | 89 | # no locking required/possible |
113 | 90 | return |
114 | 91 | with io.job(_("{node} removing hard lock").format(node=bold(self.node.name))): |
115 | result = self.node.run("rm -R {}".format(quote(HARD_LOCK_PATH)), may_fail=True) | |
92 | result = self.locking_node.run("rm -R {}".format(quote(self._hard_lock_dir())), may_fail=True) | |
116 | 93 | |
117 | 94 | if result.return_code != 0: |
118 | 95 | io.stderr(_("{x} {node} could not release hard lock").format( |
119 | 96 | node=bold(self.node.name), |
120 | 97 | x=red("!"), |
121 | 98 | )) |
99 | ||
100 | def _get_hard_lock_info(self, local_path): | |
101 | try: | |
102 | self.locking_node.download(self._hard_lock_file(), local_path) | |
103 | with open(local_path, 'r') as fp: | |
104 | return json.load(fp) | |
105 | except (RemoteException, ValueError): | |
106 | io.stderr(_( | |
107 | "{x} {node_bold} corrupted hard lock: " | |
108 | "unable to read or parse lock file contents " | |
109 | "(clear it with `bw run {node} 'rm -Rf {path}'`)" | |
110 | ).format( | |
111 | node_bold=bold(self.locking_node.name), | |
112 | node=self.locking_node.name, | |
113 | path=self._hard_lock_dir(), | |
114 | x=red("!"), | |
115 | )) | |
116 | return {} | |
117 | ||
118 | def _hard_lock_dir(self): | |
119 | return LOCK_BASE + "/hard-" + quote(self.node.name) | |
120 | ||
121 | def _hard_lock_file(self): | |
122 | return self._hard_lock_dir() + "/info" | |
122 | 123 | |
123 | 124 | def _warning_message_hard(self, info): |
124 | 125 | return wrap_question( |
155 | 156 | yield lock |
156 | 157 | |
157 | 158 | |
159 | def _get_locking_node(node): | |
160 | if node.locking_node is not None: | |
161 | try: | |
162 | return node.repo.get_node(node.locking_node) | |
163 | except NoSuchNode: | |
164 | raise Exception("Invalid locking_node {} for {}".format( | |
165 | node.locking_node, | |
166 | node.name, | |
167 | )) | |
168 | else: | |
169 | return node | |
170 | ||
171 | ||
172 | def _soft_lock_dir(node_name): | |
173 | return LOCK_BASE + "/soft-" + quote(node_name) | |
174 | ||
175 | ||
176 | def _soft_lock_file(node_name, lock_id): | |
177 | return _soft_lock_dir(node_name) + "/" + lock_id | |
178 | ||
179 | ||
158 | 180 | def softlock_add(node, lock_id, comment="", expiry="8h", item_selectors=None): |
159 | assert node.os in node.OS_FAMILY_UNIX | |
181 | locking_node = _get_locking_node(node) | |
182 | assert locking_node.os in locking_node.OS_FAMILY_UNIX | |
160 | 183 | if "\n" in comment: |
161 | 184 | raise ValueError(_("Lock comments must not contain any newlines")) |
162 | 185 | if not item_selectors: |
178 | 201 | with tempfile() as local_path: |
179 | 202 | with open(local_path, 'w') as f: |
180 | 203 | f.write(content + "\n") |
181 | node.run("mkdir -p " + quote(SOFT_LOCK_PATH)) | |
182 | node.upload(local_path, SOFT_LOCK_FILE.format(id=lock_id), mode='0644') | |
204 | locking_node.run("mkdir -p " + quote(_soft_lock_dir(node.name))) | |
205 | locking_node.upload(local_path, _soft_lock_file(node.name, lock_id), mode='0644') | |
183 | 206 | |
184 | 207 | node.repo.hooks.lock_add(node.repo, node, lock_id, item_selectors, expiry_timestamp, comment) |
185 | 208 | |
187 | 210 | |
188 | 211 | |
189 | 212 | def softlock_list(node): |
190 | if node.os not in node.OS_FAMILY_UNIX: | |
213 | locking_node = _get_locking_node(node) | |
214 | if locking_node.os not in locking_node.OS_FAMILY_UNIX: | |
191 | 215 | return [] |
192 | 216 | with io.job(_("{} checking soft locks").format(bold(node.name))): |
193 | cat = node.run("cat {}".format(SOFT_LOCK_FILE.format(id="*")), may_fail=True) | |
217 | cat = locking_node.run("cat {}".format(_soft_lock_file(node.name, "*")), may_fail=True) | |
194 | 218 | if cat.return_code != 0: |
195 | 219 | return [] |
196 | 220 | result = [] |
217 | 241 | |
218 | 242 | |
219 | 243 | def softlock_remove(node, lock_id): |
220 | assert node.os in node.OS_FAMILY_UNIX | |
244 | locking_node = _get_locking_node(node) | |
245 | assert locking_node.os in locking_node.OS_FAMILY_UNIX | |
221 | 246 | io.debug(_("removing soft lock {id} from node {node}").format( |
222 | 247 | id=lock_id, |
223 | 248 | node=node.name, |
224 | 249 | )) |
225 | node.run("rm {}".format(SOFT_LOCK_FILE.format(id=lock_id))) | |
250 | locking_node.run("rm {}".format(_soft_lock_file(node.name, lock_id))) | |
226 | 251 | node.repo.hooks.lock_remove(node.repo, node, lock_id) |
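The rewritten locking code consolidates all lock state under /var/lib/bundlewrap on the locking node, which, via the new locking_node attribute, may be a machine other than the one being configured (a hypervisor holding locks for its guests would be one use case; that example is an assumption, not something this diff states). The hard lock itself relies on mkdir being atomic: of several concurrent "mkdir hard-<node>" calls, exactly one succeeds, and that caller owns the lock until the directory is removed. A minimal local sketch of the same idea:

    from json import dump
    from os import mkdir
    from time import time

    def acquire_hard_lock(lock_dir, user):
        # mkdir either creates the directory or fails atomically,
        # so at most one concurrent caller gets past this line
        try:
            mkdir(lock_dir)
        except FileExistsError:
            return False  # someone else holds the lock
        # record who locked and when, mirroring the info file above
        with open(lock_dir + "/info", "w") as f:
            dump({'date': time(), 'user': user}, f)
        return True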
0 | # -*- coding: utf-8 -*- | |
1 | from __future__ import unicode_literals | |
2 | ||
3 | 0 | from copy import copy |
4 | 1 | from hashlib import sha1 |
5 | 2 | from json import dumps, JSONEncoder |
10 | 7 | from .utils.text import force_text, mark_for_translation as _ |
11 | 8 | |
12 | 9 | |
13 | try: | |
14 | text_type = unicode | |
15 | byte_type = str | |
16 | except NameError: | |
17 | text_type = str | |
18 | byte_type = bytes | |
19 | ||
20 | METADATA_TYPES = ( | |
10 | METADATA_TYPES = ( # only meant for natively atomic types | |
21 | 11 | bool, |
22 | byte_type, | |
12 | bytes, | |
23 | 13 | Fault, |
24 | 14 | int, |
25 | text_type, | |
15 | str, | |
26 | 16 | type(None), |
27 | 17 | ) |
28 | 18 | |
29 | # constants returned as options by metadata processors | |
30 | DONE = 1 | |
31 | RUN_ME_AGAIN = 2 | |
32 | DEFAULTS = 3 | |
33 | OVERWRITE = 4 | |
34 | ||
35 | 19 | |
36 | 20 | class DoNotRunAgain(Exception): |
37 | 21 | """ |
38 | 22 | Raised from metadata reactors to indicate they can be disregarded. |
39 | 23 | """ |
40 | 24 | pass |
25 | ||
26 | ||
27 | def deepcopy_metadata(obj): | |
28 | """ | |
29 | Our own version of copy.deepcopy that doesn't pickle. |
30 | """ | |
31 | if isinstance(obj, METADATA_TYPES): | |
32 | return obj | |
33 | elif isinstance(obj, dict): | |
34 | if isinstance(obj, ATOMIC_TYPES[dict]): | |
35 | new_obj = atomic({}) | |
36 | else: | |
37 | new_obj = {} | |
38 | for key, value in obj.items(): | |
39 | new_key = copy(key) | |
40 | new_obj[new_key] = deepcopy_metadata(value) | |
41 | elif isinstance(obj, (list, tuple)): | |
42 | if isinstance(obj, (ATOMIC_TYPES[list], ATOMIC_TYPES[tuple])): | |
43 | new_obj = atomic([]) | |
44 | else: | |
45 | new_obj = [] | |
46 | for member in obj: | |
47 | new_obj.append(deepcopy_metadata(member)) | |
48 | elif isinstance(obj, set): | |
49 | if isinstance(obj, ATOMIC_TYPES[set]): | |
50 | new_obj = atomic(set()) | |
51 | else: | |
52 | new_obj = set() | |
53 | for member in obj: | |
54 | new_obj.add(deepcopy_metadata(member)) | |
55 | else: | |
56 | assert False # there should be no other types | |
57 | return new_obj | |
41 | 58 | |
42 | 59 | |
43 | 60 | def validate_metadata(metadata, _top_level=True): |
45 | 62 | raise TypeError(_("metadata must be a dict")) |
46 | 63 | if isinstance(metadata, dict): |
47 | 64 | for key, value in metadata.items(): |
48 | if not isinstance(key, text_type): | |
65 | if not isinstance(key, str): | |
49 | 66 | raise TypeError(_("metadata keys must be str, not: {}").format(repr(key))) |
50 | 67 | validate_metadata(value, _top_level=False) |
51 | 68 | elif isinstance(metadata, (tuple, list, set)): |
70 | 87 | return cls(obj) |
71 | 88 | |
72 | 89 | |
73 | def blame_changed_paths(old_dict, new_dict, blame_dict, blame_name, defaults=False): | |
74 | def is_mergeable(value1, value2): | |
75 | if isinstance(value1, (list, set, tuple)) and isinstance(value2, (list, set, tuple)): | |
76 | return True | |
77 | elif isinstance(value1, dict) and isinstance(value2, dict): | |
78 | return True | |
79 | return False | |
80 | ||
81 | new_paths = map_dict_keys(new_dict) | |
82 | ||
83 | # clean up removed paths from blame_dict | |
84 | for path in list(blame_dict.keys()): | |
85 | if path not in new_paths: | |
86 | del blame_dict[path] | |
87 | ||
88 | for path in new_paths: | |
89 | new_value = value_at_key_path(new_dict, path) | |
90 | try: | |
91 | old_value = value_at_key_path(old_dict, path) | |
92 | except KeyError: | |
93 | blame_dict[path] = (blame_name,) | |
94 | else: | |
95 | if old_value != new_value: | |
96 | if defaults or is_mergeable(old_value, new_value): | |
97 | blame_dict[path] += (blame_name,) | |
98 | else: | |
99 | blame_dict[path] = (blame_name,) | |
100 | return blame_dict | |
101 | ||
102 | ||
103 | def changes_metadata(existing_metadata, new_metadata): | |
104 | """ | |
105 | Returns True if new_metadata contains any keys or values not present | |
106 | in or different from existing_metadata. | |
107 | """ | |
108 | for key, new_value in new_metadata.items(): | |
109 | if key not in existing_metadata: | |
110 | return True | |
111 | if isinstance(new_value, dict): | |
112 | if not isinstance(existing_metadata[key], dict): | |
113 | return True | |
114 | if changes_metadata(existing_metadata[key], new_value): | |
115 | return True | |
116 | if isinstance(existing_metadata[key], Fault) and isinstance(new_value, Fault): | |
117 | # Always consider Faults as equal. It would arguably be more correct to | |
118 | # always assume them to be different, but that would mean that we could | |
119 | # never do change detection between two dicts of metadata. So we have no | |
120 | # choice but to warn users in docs that Faults will always be considered | |
121 | # equal to one another. | |
122 | continue | |
123 | if new_value != existing_metadata[key]: | |
124 | return True | |
125 | return False | |
126 | ||
127 | ||
128 | def check_metadata_keys(node): | |
129 | try: | |
130 | basestring | |
131 | except NameError: # Python 2 | |
132 | basestring = str | |
133 | for path in map_dict_keys(node.metadata): | |
134 | value = path[-1] | |
135 | if not isinstance(value, basestring): | |
136 | raise TypeError(_("metadata key for {node} at path '{path}' is not a string").format( | |
137 | node=node.name, | |
138 | path="'->'".join(path[:-1]), | |
139 | )) | |
140 | ||
141 | ||
142 | def check_metadata_processor_result(input_metadata, result, node_name, metadata_processor_name): | |
143 | """ | |
144 | Validates the return value of a metadata processor and splits it | |
145 | into metadata and options. | |
146 | """ | |
147 | if not isinstance(result, tuple) or not len(result) >= 2: | |
148 | raise ValueError(_( | |
149 | "metadata processor {metaproc} for node {node} did not return " | |
150 | "a tuple of length 2 or greater" | |
151 | ).format( | |
152 | metaproc=metadata_processor_name, | |
153 | node=node_name, | |
154 | )) | |
155 | result_dict, options = result[0], result[1:] | |
156 | if not isinstance(result_dict, dict): | |
157 | raise ValueError(_( | |
158 | "metadata processor {metaproc} for node {node} did not return " | |
159 | "a dict as the first element" | |
160 | ).format( | |
161 | metaproc=metadata_processor_name, | |
162 | node=node_name, | |
163 | )) | |
164 | if ( | |
165 | (DEFAULTS in options or OVERWRITE in options) and | |
166 | id(input_metadata) == id(result_dict) | |
167 | ): | |
168 | raise ValueError(_( | |
169 | "metadata processor {metaproc} for node {node} returned original " | |
170 | "metadata dict plus DEFAULTS or OVERWRITE" | |
171 | ).format( | |
172 | metaproc=metadata_processor_name, | |
173 | node=node_name, | |
174 | )) | |
175 | for option in options: | |
176 | if option not in (DEFAULTS, DONE, RUN_ME_AGAIN, OVERWRITE): | |
177 | raise ValueError(_( | |
178 | "metadata processor {metaproc} for node {node} returned an " | |
179 | "invalid option: {opt}" | |
180 | ).format( | |
181 | metaproc=metadata_processor_name, | |
182 | node=node_name, | |
183 | opt=repr(option), | |
184 | )) | |
185 | if DONE in options and RUN_ME_AGAIN in options: | |
186 | raise ValueError(_( | |
187 | "metadata processor {metaproc} for node {node} cannot return both " | |
188 | "DONE and RUN_ME_AGAIN" | |
189 | ).format( | |
190 | metaproc=metadata_processor_name, | |
191 | node=node_name, | |
192 | )) | |
193 | if DONE not in options and RUN_ME_AGAIN not in options: | |
194 | raise ValueError(_( | |
195 | "metadata processor {metaproc} for node {node} must return either " | |
196 | "DONE or RUN_ME_AGAIN" | |
197 | ).format( | |
198 | metaproc=metadata_processor_name, | |
199 | node=node_name, | |
200 | )) | |
201 | if DEFAULTS in options and OVERWRITE in options: | |
202 | raise ValueError(_( | |
203 | "metadata processor {metaproc} for node {node} cannot return both " | |
204 | "DEFAULTS and OVERWRITE" | |
205 | ).format( | |
206 | metaproc=metadata_processor_name, | |
207 | node=node_name, | |
208 | )) | |
209 | return result_dict, options | |
210 | ||
211 | ||
212 | def check_for_unsolvable_metadata_key_conflicts(node): | |
90 | def check_for_metadata_conflicts(node): | |
91 | check_for_metadata_conflicts_between_groups(node) | |
92 | check_for_metadata_conflicts_between_defaults_and_reactors(node) | |
93 | ||
94 | ||
95 | def check_for_metadata_conflicts_between_defaults_and_reactors(node): | |
96 | """ | |
97 | Finds conflicting metadata keys in bundle defaults and reactors. | |
98 | ||
99 | Dicts can be merged with dicts, sets can be merged with sets, but | |
100 | any other combination is a conflict. | |
101 | """ | |
102 | TYPE_DICT = 1 | |
103 | TYPE_SET = 2 | |
104 | TYPE_OTHER = 3 | |
105 | ||
106 | def paths_with_types(d): | |
107 | for path in map_dict_keys(d): | |
108 | value = value_at_key_path(d, path) | |
109 | if isinstance(value, dict): | |
110 | yield path, TYPE_DICT | |
111 | elif isinstance(value, set): | |
112 | yield path, TYPE_SET | |
113 | else: | |
114 | yield path, TYPE_OTHER | |
115 | ||
116 | for prefix in ("metadata_defaults:", "metadata_reactor:"): | |
117 | paths = {} | |
118 | for identifier, layer in node._metadata_stack._layers.items(): | |
119 | if identifier.startswith(prefix): | |
120 | for path, current_type in paths_with_types(layer): | |
121 | try: | |
122 | prev_type, prev_identifier = paths[path] | |
123 | except KeyError: | |
124 | paths[path] = current_type, identifier | |
125 | else: | |
126 | if ( | |
127 | prev_type == TYPE_DICT | |
128 | and current_type == TYPE_DICT | |
129 | ): | |
130 | pass | |
131 | elif ( | |
132 | prev_type == TYPE_SET | |
133 | and current_type == TYPE_SET | |
134 | ): | |
135 | pass | |
136 | else: | |
137 | raise ValueError(_( | |
138 | "{a} and {b} are clashing over this key path: {path}" | |
139 | ).format( | |
140 | a=identifier, | |
141 | b=prev_identifier, | |
142 | path="/".join(path), | |
143 | )) | |
144 | ||
145 | ||
146 | def check_for_metadata_conflicts_between_groups(node): | |
213 | 147 | """ |
214 | 148 | Finds metadata keys defined by two groups that are not part of a |
215 | 149 | shared subgroup hierarchy. |
279 | 213 | for chain in chains: |
280 | 214 | metadata = {} |
281 | 215 | for group in chain: |
282 | metadata = merge_dict(metadata, group.metadata) | |
216 | metadata = merge_dict(metadata, group._attributes.get('metadata', {})) | |
283 | 217 | chain_metadata.append(metadata) |
284 | 218 | |
285 | 219 | # create a "key path map" for each chain's metadata |
316 | 250 | ) |
317 | 251 | |
318 | 252 | |
319 | def deepcopy_metadata(obj): | |
320 | """ | |
321 | Our own version of deepcopy.copy that doesn't pickle and ensures | |
322 | a limited range of types is used in metadata. | |
323 | """ | |
324 | if isinstance(obj, METADATA_TYPES): | |
325 | return obj | |
326 | elif isinstance(obj, dict): | |
327 | if isinstance(obj, ATOMIC_TYPES[dict]): | |
328 | new_obj = atomic({}) | |
329 | else: | |
330 | new_obj = {} | |
331 | for key, value in obj.items(): | |
332 | if not isinstance(key, METADATA_TYPES): | |
333 | raise ValueError(_("illegal metadata key type: {}").format(repr(key))) | |
334 | new_key = copy(key) | |
335 | new_obj[new_key] = deepcopy_metadata(value) | |
336 | elif isinstance(obj, (list, tuple)): | |
337 | if isinstance(obj, (ATOMIC_TYPES[list], ATOMIC_TYPES[tuple])): | |
338 | new_obj = atomic([]) | |
339 | else: | |
340 | new_obj = [] | |
341 | for member in obj: | |
342 | new_obj.append(deepcopy_metadata(member)) | |
343 | elif isinstance(obj, set): | |
344 | if isinstance(obj, ATOMIC_TYPES[set]): | |
345 | new_obj = atomic(set()) | |
346 | else: | |
347 | new_obj = set() | |
348 | for member in obj: | |
349 | new_obj.add(deepcopy_metadata(member)) | |
350 | else: | |
351 | raise ValueError(_("illegal metadata value type: {}").format(repr(obj))) | |
352 | return new_obj | |
353 | ||
354 | ||
355 | 253 | def find_groups_causing_metadata_conflict(node_name, chain1, chain2, keypath): |
356 | 254 | """ |
357 | 255 | Given two chains (lists of groups), find one group in each chain |
358 | 256 | that has conflicting metadata with the other for the given key path. |
359 | 257 | """ |
360 | chain1_metadata = [list(map_dict_keys(group.metadata)) for group in chain1] | |
361 | chain2_metadata = [list(map_dict_keys(group.metadata)) for group in chain2] | |
258 | chain1_metadata = [ | |
259 | list(map_dict_keys(group._attributes.get('metadata', {}))) for group in chain1 | |
260 | ] | |
261 | chain2_metadata = [ | |
262 | list(map_dict_keys(group._attributes.get('metadata', {}))) for group in chain2 | |
263 | ] | |
362 | 264 | |
363 | 265 | bad_keypath = None |
364 | 266 | |
404 | 306 | raise ValueError(_("illegal metadata value type: {}").format(repr(obj))) |
405 | 307 | |
406 | 308 | |
407 | def metadata_to_json(metadata): | |
309 | def metadata_to_json(metadata, sort_keys=True): | |
408 | 310 | return dumps( |
409 | 311 | metadata, |
410 | 312 | cls=MetadataJSONEncoder, |
411 | 313 | indent=4, |
412 | sort_keys=True, | |
314 | sort_keys=sort_keys, | |
413 | 315 | ) |
414 | 316 | |
415 | 317 |
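The new deepcopy_metadata above stands in for the generic copy.deepcopy: it walks only the container types permitted in metadata, preserves atomic() wrappers (atomic() is defined earlier in this module), and never pickles. A usage sketch, assuming both names are importable from bundlewrap.metadata:

    from bundlewrap.metadata import atomic, deepcopy_metadata

    original = {
        'interfaces': {'eth0': {'ips': {'10.0.0.1'}}},
        'nameservers': atomic(['10.0.0.53']),  # wrapper survives the copy
    }
    copied = deepcopy_metadata(original)
    copied['interfaces']['eth0']['ips'].add('10.0.0.2')
    # containers are not shared, so the original set is untouched
    assert original['interfaces']['eth0']['ips'] == {'10.0.0.1'}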
0 | # -*- coding: utf-8 -*- | |
1 | from __future__ import unicode_literals | |
2 | ||
3 | 0 | from datetime import datetime, timedelta |
4 | 1 | from hashlib import md5 |
5 | from os import environ | |
2 | from os import environ, mkdir | |
3 | from os.path import exists, join | |
6 | 4 | from threading import Lock |
5 | ||
6 | from tomlkit import dumps as toml_dump, parse as toml_parse | |
7 | 7 | |
8 | 8 | from . import operations |
9 | 9 | from .bundle import Bundle |
13 | 13 | find_item, |
14 | 14 | ) |
15 | 15 | from .exceptions import ( |
16 | DontCache, | |
17 | 16 | GracefulApplyException, |
18 | 17 | ItemDependencyLoop, |
19 | 18 | NodeLockedException, |
22 | 21 | RepositoryError, |
23 | 22 | SkipNode, |
24 | 23 | ) |
25 | from .group import GROUP_ATTR_DEFAULTS | |
24 | from .group import GROUP_ATTR_DEFAULTS, GROUP_ATTR_TYPES | |
26 | 25 | from .itemqueue import ItemQueue |
27 | 26 | from .items import Item |
28 | 27 | from .lock import NodeLock |
29 | 28 | from .metadata import hash_metadata |
30 | from .utils import cached_property, names | |
31 | from .utils.dicts import hash_statedict | |
32 | from .utils.metastack import Metastack | |
29 | from .utils import cached_property, error_context, get_file_contents, names, NO_DEFAULT | |
30 | from .utils.dicts import ( | |
31 | dict_to_toml, | |
32 | hash_statedict, | |
33 | set_key_at_path, | |
34 | validate_dict, | |
35 | value_at_key_path, | |
36 | COLLECTION_OF_STRINGS, | |
37 | ) | |
33 | 38 | from .utils.text import ( |
34 | 39 | blue, |
35 | 40 | bold, |
39 | 44 | green, |
40 | 45 | mark_for_translation as _, |
41 | 46 | red, |
47 | toml_clean, | |
42 | 48 | validate_name, |
43 | 49 | yellow, |
44 | 50 | ) |
45 | 51 | from .utils.ui import io |
46 | 52 | |
47 | 53 | |
48 | class ApplyResult(object): | |
54 | NODE_ATTR_TYPES = GROUP_ATTR_TYPES.copy() | |
55 | NODE_ATTR_TYPES['groups'] = COLLECTION_OF_STRINGS | |
56 | NODE_ATTR_TYPES['hostname'] = str | |
57 | ||
58 | ||
59 | class ApplyResult: | |
49 | 60 | """ |
50 | 61 | Holds information about an apply run for a node. |
51 | 62 | """ |
122 | 133 | if formatted_result is not None: |
123 | 134 | if status_code == Item.STATUS_FAILED: |
124 | 135 | io.stderr(formatted_result) |
136 | if item._command_results: | |
137 | io.stderr(format_item_command_results(item._command_results)) | |
138 | # free up memory | |
139 | del item._command_results | |
125 | 140 | else: |
126 | 141 | io.stdout(formatted_result) |
127 | 142 | |
268 | 283 | parent_groups[group].remove(top_level_group) |
269 | 284 | |
270 | 285 | return order |
286 | ||
287 | ||
288 | def format_item_command_results(results): | |
289 | output = "" | |
290 | ||
291 | for command_result in results: |
292 | stdout = command_result['result'].stdout_text.strip() |
293 | stderr = command_result['result'].stderr_text.strip() |
294 | ||
295 | # show command |
296 | output += "\n{b}".format(b=red('│')) |
297 | output += "\n{b} {command} (return code: {code}{no_output})".format( |
298 | b=red('├─'), |
299 | command=bold(command_result['command']), |
300 | code=bold(command_result['result'].return_code), |
301 | no_output='' if stdout or stderr else '; no output' |
302 | ) |
303 | ||
304 | # show output |
305 | lines = [] |
306 | if stdout or stderr: |
307 | output += "\n{b}".format(b=red("│ ")) |
308 | if stdout: |
309 | lines += stdout.split('\n') |
310 | if stderr: |
311 | lines += stderr.split('\n') |
312 | ||
313 | for line in lines: |
314 | output += "\n{b} {line}".format(b=red("│ "), line=line) |
315 | ||
316 | output += red("\n╵ ") | |
317 | return output.lstrip('\n') | |
271 | 318 | |
272 | 319 | |
273 | 320 | def format_item_result(result, node, bundle, item, interactive=False, details=None): |
318 | 365 | ) |
319 | 366 | |
320 | 367 | |
321 | class Node(object): | |
368 | class Node: | |
322 | 369 | OS_FAMILY_BSD = ( |
323 | 370 | 'freebsd', |
324 | 371 | 'macos', |
358 | 405 | if not validate_name(name): |
359 | 406 | raise RepositoryError(_("'{}' is not a valid node name").format(name)) |
360 | 407 | |
408 | with error_context(node_name=name): | |
409 | validate_dict(attributes, NODE_ATTR_TYPES) | |
410 | ||
361 | 411 | self._add_host_keys = environ.get('BW_ADD_HOST_KEYS', False) == "1" |
362 | self._bundles = attributes.get('bundles', []) | |
363 | self._compiling_metadata = Lock() | |
364 | self._dynamic_group_lock = Lock() | |
365 | self._dynamic_groups_resolved = False # None means we're currently doing it | |
366 | self._groups = set(attributes.get('groups', set())) | |
367 | self._metadata_so_far = {} | |
368 | self._node_metadata = attributes.get('metadata', {}) | |
412 | self._attributes = attributes | |
369 | 413 | self._ssh_conn_established = False |
370 | 414 | self._ssh_first_conn_lock = Lock() |
371 | self._template_node_name = attributes.get('template_node') | |
415 | self.file_path = attributes.get('file_path') | |
372 | 416 | self.hostname = attributes.get('hostname', name) |
373 | 417 | self.name = name |
374 | 418 | |
383 | 427 | |
384 | 428 | @cached_property |
385 | 429 | def bundles(self): |
386 | if self._dynamic_group_lock.acquire(False): | |
387 | self._dynamic_group_lock.release() | |
388 | else: | |
389 | raise RepositoryError(_( | |
390 | "node bundles cannot be queried with members_add/remove" | |
391 | )) | |
392 | 430 | with io.job(_("{node} loading bundles").format(node=bold(self.name))): |
393 | added_bundles = [] | |
394 | found_bundles = [] | |
431 | bundle_names = set(self._attributes.get('bundles', set())) | |
432 | ||
395 | 433 | for group in self.groups: |
396 | for bundle_name in group.bundle_names: | |
397 | found_bundles.append(bundle_name) | |
398 | ||
399 | for bundle_name in found_bundles + list(self._bundles): | |
400 | if bundle_name not in added_bundles: | |
401 | added_bundles.append(bundle_name) | |
402 | try: | |
403 | yield Bundle(self, bundle_name) | |
404 | except NoSuchBundle: | |
405 | raise NoSuchBundle(_( | |
406 | "Node '{node}' wants bundle '{bundle}', but it doesn't exist." | |
407 | ).format( | |
408 | bundle=bundle_name, | |
409 | node=self.name, | |
410 | )) | |
434 | for bundle_name in set(group._attributes.get('bundles', set())): | |
435 | bundle_names.add(bundle_name) | |
436 | ||
437 | for bundle_name in bundle_names: | |
438 | try: | |
439 | yield Bundle(self, bundle_name) | |
440 | except NoSuchBundle: | |
441 | raise NoSuchBundle(_( | |
442 | "Node '{node}' wants bundle '{bundle}', but it doesn't exist." | |
443 | ).format( | |
444 | bundle=bundle_name, | |
445 | node=self.name, | |
446 | )) | |
411 | 447 | |
412 | 448 | @cached_property |
413 | 449 | def cdict(self): |
424 | 460 | True if this node should be skipped based on the given selectors
425 | 461 | (e.g. ["node:foo", "group:bar"]).
426 | 462 | """ |
427 | components = [c.strip() for c in autoskip_selector.split(",")] | |
463 | components = [c.strip() for c in autoskip_selector] | |
428 | 464 | if "node:{}".format(self.name) in components: |
429 | 465 | return True |
430 | 466 | for group in self.groups: |
440 | 476 | def groups(self): |
441 | 477 | _groups = set() |
442 | 478 | |
443 | for group_name in self._groups: | |
444 | _groups.add(self.repo.get_group(group_name)) | |
479 | for group_name in set(self._attributes.get('groups', set())): | |
480 | with error_context(node=self.name): | |
481 | _groups.add(self.repo.get_group(group_name)) | |
445 | 482 | |
446 | 483 | for group in self.repo.groups: |
447 | if self in group._static_nodes: | |
484 | if group in _groups: | |
485 | # we're already in this group, no need to check it again | |
486 | continue | |
487 | if self in group._nodes_from_members: | |
448 | 488 | _groups.add(group) |
449 | ||
450 | # lock to avoid infinite recursion when .members_add/remove | |
451 | # use stuff like node.in_group() that in turn calls this function | |
452 | if self._dynamic_group_lock.acquire(False): | |
453 | cache_result = True | |
454 | self._dynamic_groups_resolved = None | |
455 | # first we remove ourselves from all static groups whose | |
456 | # .members_remove matches us | |
457 | for group in list(_groups): | |
458 | if group.members_remove is not None and group.members_remove(self): | |
459 | try: | |
460 | _groups.remove(group) | |
461 | except KeyError: | |
462 | pass | |
463 | # now add all groups whose .members_add (but not .members_remove) | |
464 | # matches us | |
465 | _groups = _groups.union(self._groups_dynamic) | |
466 | self._dynamic_groups_resolved = True | |
467 | self._dynamic_group_lock.release() | |
468 | else: | |
469 | cache_result = False | |
470 | ||
471 | # we have to add parent groups at the very end, since we might | |
472 | # have added or removed subgroups thru .members_add/remove | |
489 | continue | |
490 | for pattern in group._member_patterns: | |
491 | if pattern.search(self.name) is not None: | |
492 | _groups.add(group) | |
493 | ||
473 | 494 | while True: |
474 | 495 | # Since we're only looking at *immediate* parent groups, |
475 | 496 | # we have to keep doing this until we stop adding parent |
477 | 498 | _original_groups = _groups.copy() |
478 | 499 | for group in list(_groups): |
479 | 500 | for parent_group in group.immediate_parent_groups: |
480 | if cache_result: | |
481 | with self._dynamic_group_lock: | |
482 | self._dynamic_groups_resolved = None | |
483 | if ( | |
484 | not parent_group.members_remove or | |
485 | not parent_group.members_remove(self) | |
486 | ): | |
487 | _groups.add(parent_group) | |
488 | self._dynamic_groups_resolved = True | |
489 | else: | |
490 | _groups.add(parent_group) | |
501 | _groups.add(parent_group) | |
491 | 502 | if _groups == _original_groups: |
492 | 503 | # we didn't add any new parent groups, so we can stop |
493 | 504 | break |
494 | 505 | |
495 | if cache_result: | |
496 | return sorted(_groups) | |
497 | else: | |
498 | raise DontCache(sorted(_groups)) | |
499 | ||
500 | @property | |
501 | def _groups_dynamic(self): | |
502 | """ | |
503 | Returns all groups whose members_add matches this node. | |
504 | """ | |
505 | _groups = set() | |
506 | for group in self.repo.groups: | |
507 | if group.members_add is not None and group.members_add(self): | |
508 | _groups.add(group) | |
509 | if group.members_remove is not None and group.members_remove(self): | |
510 | try: | |
511 | _groups.remove(group) | |
512 | except KeyError: | |
513 | pass | |
514 | 506 | return _groups |
515 | 507 | |
516 | 508 | def has_any_bundle(self, bundle_list): |
688 | 680 | Returns full metadata for a node. MUST NOT be used from inside a |
689 | 681 | metadata reactor. Use .partial_metadata instead.
690 | 682 | """ |
691 | if self._dynamic_groups_resolved is None: | |
692 | # return only metadata set directly at the node level if | |
693 | # we're still in the process of figuring out which groups | |
694 | # we belong to | |
695 | return self._node_metadata | |
696 | else: | |
697 | return self.repo._metadata_for_node(self.name, partial=False) | |
683 | return self.repo._metadata_for_node(self.name, partial=False) | |
698 | 684 | |
699 | 685 | @property |
700 | 686 | def metadata_blame(self): |
701 | 687 | return self.repo._metadata_for_node(self.name, partial=False, blame=True) |
702 | 688 | |
689 | @property | |
690 | def _metadata_stack(self): | |
691 | return self.repo._metadata_for_node(self.name, partial=False, stack=True) | |
692 | ||
693 | def metadata_get(self, path, default=NO_DEFAULT): | |
694 | if not isinstance(path, (tuple, list)): | |
695 | path = path.split("/") | |
696 | try: | |
697 | return value_at_key_path(self.metadata, path) | |
698 | except KeyError: | |
699 | if default != NO_DEFAULT: | |
700 | return default | |
701 | else: | |
702 | raise | |
703 | ||
703 | 704 | def metadata_hash(self): |
704 | 705 | return hash_metadata(self.metadata) |
705 | 706 | |
706 | 707 | @property |
707 | 708 | def metadata_defaults(self): |
708 | return self._metadata_processors[0] | |
709 | ||
710 | @property | |
711 | def _metadata_processors(self): | |
712 | def tuple_with_name(kind, bundle, metadata_processor): | |
713 | return ( | |
714 | "{}:{}.{}".format( | |
715 | kind, | |
716 | bundle.name, | |
717 | metadata_processor.__name__, | |
718 | ), | |
719 | metadata_processor, | |
720 | ) | |
721 | ||
722 | defaults = [] | |
723 | reactors = set() | |
724 | classic_metaprocs = set() | |
725 | ||
726 | 709 | for bundle in self.bundles: |
727 | if bundle._metadata_processors[0]: | |
728 | defaults.append(( | |
710 | if bundle._metadata_defaults_and_reactors[0]: | |
711 | yield ( | |
729 | 712 | "metadata_defaults:{}".format(bundle.name), |
730 | bundle._metadata_processors[0], | |
731 | )) | |
732 | for reactor in bundle._metadata_processors[1]: | |
733 | reactors.add(tuple_with_name("metadata_reactor", bundle, reactor)) | |
734 | for classic_metaproc in bundle._metadata_processors[2]: | |
735 | classic_metaprocs.add(tuple_with_name("metadata_processor", bundle, classic_metaproc)) | |
736 | ||
737 | return defaults, reactors, classic_metaprocs | |
713 | bundle._metadata_defaults_and_reactors[0], | |
714 | ) | |
738 | 715 | |
739 | 716 | @property |
740 | 717 | def metadata_reactors(self): |
741 | return self._metadata_processors[1] | |
718 | for bundle in self.bundles: | |
719 | for reactor in bundle._metadata_defaults_and_reactors[1]: | |
720 | yield ( | |
721 | "metadata_reactor:{}.{}".format( | |
722 | bundle.name, | |
723 | reactor.__name__, | |
724 | ), | |
725 | reactor, | |
726 | ) | |
742 | 727 | |
743 | 728 | @property |
744 | 729 | def partial_metadata(self): |
745 | 730 | """ |
746 | Only to be used from inside metadata processors. Can't use the | |
731 | Only to be used from inside metadata reactors. Can't use the | |
747 | 732 | normal .metadata there because it might deadlock when nodes |
748 | 733 | have interdependent metadata. |
749 | 734 | |
750 | It's OK for metadata processors to work with partial metadata | |
735 | It's OK for metadata reactors to work with partial metadata | |
751 | 736 | because they will be fed all metadata updates until no more |
752 | changes are made by any metadata processor. | |
737 | changes are made by any metadata reactor. | |
753 | 738 | """ |
754 | ||
755 | partial = self.repo._metadata_for_node(self.name, partial=True) | |
756 | ||
757 | # TODO remove this mechanism in bw 4.0, always return Metastacks | |
758 | if self.repo._in_new_metareactor: | |
759 | return Metastack(partial) | |
760 | else: | |
761 | return partial | |
739 | return self.repo._metadata_for_node(self.name, partial=True) | |
762 | 740 | |
763 | 741 | def run(self, command, data_stdin=None, may_fail=False, log_output=False): |
764 | 742 | assert self.os in self.OS_FAMILY_UNIX |
804 | 782 | wrapper_outer=self.cmd_wrapper_outer, |
805 | 783 | ) |
806 | 784 | |
807 | @property | |
808 | def template_node(self): | |
809 | if not self._template_node_name: | |
810 | return None | |
811 | else: | |
812 | target_node = self.repo.get_node(self._template_node_name) | |
813 | if target_node._template_node_name: | |
814 | raise RepositoryError(_( | |
815 | "{template_node} cannot use template_node because {node} uses {template_node} " | |
816 | "as template_node" | |
817 | ).format(node=self.name, template_node=target_node.name)) | |
818 | else: | |
819 | return target_node | |
785 | @cached_property | |
786 | def toml(self): | |
787 | if not self.file_path or not self.file_path.endswith(".toml"): | |
788 | raise ValueError(_("node {} not in TOML format").format(self.name)) | |
789 | return toml_parse(get_file_contents(self.file_path)) | |
790 | ||
791 | def toml_save(self): | |
792 | try: | |
793 | toml_doc = self.toml | |
794 | except ValueError: | |
795 | attributes = self._attributes.copy() | |
796 | del attributes['file_path'] | |
797 | toml_doc = dict_to_toml(attributes) | |
798 | self.file_path = join(self.repo.path, "nodes", self.name + ".toml") | |
799 | if not exists(join(self.repo.path, "nodes")): | |
800 | mkdir(join(self.repo.path, "nodes")) | |
801 | with open(self.file_path, 'w') as f: | |
802 | f.write(toml_clean(toml_dump(toml_doc))) | |
803 | ||
804 | def toml_set(self, path, value): | |
805 | if not isinstance(path, tuple): | |
806 | path = path.split("/") | |
807 | set_key_at_path(self.toml, path, value) | |
820 | 808 | |
821 | 809 | def upload(self, local_path, remote_path, mode=None, owner="", group="", may_fail=False): |
822 | 810 | assert self.os in self.OS_FAMILY_UNIX |
864 | 852 | attr_source = "group:{}".format(group.name) |
865 | 853 | attr_value = getattr(group, attr) |
866 | 854 | |
867 | if self.template_node: | |
868 | attr_source = "template_node" | |
869 | attr_value = getattr(self.template_node, attr) | |
870 | ||
871 | 855 | if getattr(self, "_{}".format(attr)) is not None: |
872 | 856 | attr_source = "node" |
873 | 857 | attr_value = getattr(self, "_{}".format(attr)) |
881 | 865 | attr=attr, |
882 | 866 | source=attr_source, |
883 | 867 | )) |
884 | if self._dynamic_groups_resolved: | |
885 | return attr_value | |
886 | else: | |
887 | raise DontCache(attr_value) | |
888 | method.__name__ = str("_group_attr_{}".format(attr)) # required for cached_property | |
889 | # str() for Python 2 compatibility | |
868 | return attr_value | |
869 | method.__name__ = "_group_attr_{}".format(attr) # required for cached_property | |
890 | 870 | return cached_property(method) |
891 | 871 | |
892 | 872 | for attr, default in GROUP_ATTR_DEFAULTS.items(): |
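Two of the Node additions above are small convenience APIs: metadata_get resolves a slash-separated key path with an optional default, and the toml_* methods let tooling edit node definitions that live as TOML files under nodes/. A usage sketch (node name, key paths, and values are invented for illustration):

    node = repo.get_node("web1")

    # key-path lookup with a fallback instead of chained dict indexing
    port = node.metadata_get("nginx/port", 80)

    # change an attribute and write it back to nodes/web1.toml
    node.toml_set("metadata/nginx/port", 8080)
    node.toml_save()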
0 | # -*- coding: utf-8 -*- | |
1 | from __future__ import unicode_literals | |
2 | ||
3 | 0 | from datetime import datetime |
4 | from pipes import quote | |
1 | from shlex import quote | |
5 | 2 | from select import select |
6 | 3 | from shlex import split |
7 | 4 | from subprocess import Popen, PIPE |
71 | 68 | )) |
72 | 69 | |
73 | 70 | |
74 | class RunResult(object): | |
71 | class RunResult: | |
75 | 72 | def __init__(self): |
76 | 73 | self.duration = None |
77 | 74 | self.return_code = None |
0 | # -*- coding: utf-8 -*- | |
1 | from __future__ import unicode_literals | |
2 | ||
3 | from json import dumps, loads | |
4 | from os import chmod, remove | |
5 | from os.path import exists, join | |
6 | from stat import S_IREAD, S_IRGRP, S_IROTH | |
7 | ||
8 | from requests import get | |
9 | ||
10 | from .exceptions import NoSuchPlugin, PluginError, PluginLocalConflict | |
11 | from .utils import download, hash_local_file | |
12 | from .utils.text import mark_for_translation as _ | |
13 | from .utils.ui import io | |
14 | ||
15 | ||
16 | BASE_URL = "https://raw.githubusercontent.com/bundlewrap/plugins/master" | |
17 | ||
18 | ||
19 | class PluginManager(object): | |
20 | def __init__(self, path, base_url=BASE_URL): | |
21 | self.base_url = base_url | |
22 | self.path = path | |
23 | if exists(join(self.path, "plugins.json")): | |
24 | with open(join(self.path, "plugins.json")) as f: | |
25 | self.plugin_db = loads(f.read()) | |
26 | else: | |
27 | self.plugin_db = {} | |
28 | ||
29 | @property | |
30 | def index(self): | |
31 | return get( | |
32 | "{}/index.json".format(self.base_url) | |
33 | ).json() | |
34 | ||
35 | def install(self, plugin, force=False): | |
36 | if plugin in self.plugin_db: | |
37 | raise PluginError(_("plugin '{plugin}' is already installed").format(plugin=plugin)) | |
38 | ||
39 | manifest = self.manifest_for_plugin(plugin) | |
40 | ||
41 | for file in manifest['provides']: | |
42 | target_path = join(self.path, file) | |
43 | if exists(target_path) and not force: | |
44 | raise PluginLocalConflict(_( | |
45 | "cannot install '{plugin}' because it provides " | |
46 | "'{path}' which already exists" | |
47 | ).format(path=target_path, plugin=plugin)) | |
48 | ||
49 | url = "{}/{}/{}".format(self.base_url, plugin, file) | |
50 | download(url, target_path) | |
51 | ||
52 | # make file read-only to discourage users from editing them | |
53 | # which will block future updates of the plugin | |
54 | chmod(target_path, S_IREAD | S_IRGRP | S_IROTH) | |
55 | ||
56 | self.record_as_installed(plugin, manifest) | |
57 | ||
58 | return manifest | |
59 | ||
60 | def list(self): | |
61 | for plugin, info in self.plugin_db.items(): | |
62 | yield (plugin, info['version']) | |
63 | ||
64 | def local_modifications(self, plugin): | |
65 | try: | |
66 | plugin_data = self.plugin_db[plugin] | |
67 | except KeyError: | |
68 | raise NoSuchPlugin(_( | |
69 | "The plugin '{plugin}' is not installed." | |
70 | ).format(plugin=plugin)) | |
71 | local_changes = [] | |
72 | for filename, checksum in plugin_data['files'].items(): | |
73 | target_path = join(self.path, filename) | |
74 | actual_checksum = hash_local_file(target_path) | |
75 | if actual_checksum != checksum: | |
76 | local_changes.append(( | |
77 | target_path, | |
78 | actual_checksum, | |
79 | checksum, | |
80 | )) | |
81 | return local_changes | |
82 | ||
83 | def manifest_for_plugin(self, plugin): | |
84 | r = get( | |
85 | "{}/{}/manifest.json".format(self.base_url, plugin) | |
86 | ) | |
87 | if r.status_code == 404: | |
88 | raise NoSuchPlugin(plugin) | |
89 | else: | |
90 | return r.json() | |
91 | ||
92 | def record_as_installed(self, plugin, manifest): | |
93 | file_hashes = {} | |
94 | ||
95 | for file in manifest['provides']: | |
96 | target_path = join(self.path, file) | |
97 | file_hashes[file] = hash_local_file(target_path) | |
98 | ||
99 | self.plugin_db[plugin] = { | |
100 | 'files': file_hashes, | |
101 | 'version': manifest['version'], | |
102 | } | |
103 | self.write_db() | |
104 | ||
105 | def remove(self, plugin, force=False): | |
106 | if plugin not in self.plugin_db: | |
107 | raise NoSuchPlugin(_("plugin '{plugin}' is not installed").format(plugin=plugin)) | |
108 | ||
109 | for file, db_checksum in self.plugin_db[plugin]['files'].items(): | |
110 | file_path = join(self.path, file) | |
111 | if not exists(file_path): | |
112 | continue | |
113 | ||
114 | current_checksum = hash_local_file(file_path) | |
115 | if db_checksum != current_checksum and not force: | |
116 | io.stderr(_( | |
117 | "not removing '{path}' because it has been modified since installation" | |
118 | ).format(path=file_path)) | |
119 | continue | |
120 | ||
121 | remove(file_path) | |
122 | ||
123 | del self.plugin_db[plugin] | |
124 | self.write_db() | |
125 | ||
126 | def search(self, term): | |
127 | term = term.lower() | |
128 | for plugin_name, plugin_data in self.index.items(): | |
129 | if term in plugin_name.lower() or term in plugin_data['desc'].lower(): | |
130 | yield (plugin_name, plugin_data['desc']) | |
131 | ||
132 | def update(self, plugin, check_only=False, force=False): | |
133 | if plugin not in self.plugin_db: | |
134 | raise PluginError(_("plugin '{plugin}' is not installed").format(plugin=plugin)) | |
135 | ||
136 | # before updating anything, we need to check for local modifications | |
137 | local_changes = self.local_modifications(plugin) | |
138 | if local_changes and not force: | |
139 | files = [path for path, c1, c2 in local_changes] | |
140 | raise PluginLocalConflict(_( | |
141 | "cannot update '{plugin}' because the following files have been modified locally:" | |
142 | "\n{files}" | |
143 | ).format(files="\n".join(files), plugin=plugin)) | |
144 | ||
145 | manifest = self.manifest_for_plugin(plugin) | |
146 | ||
147 | for file in manifest['provides']: | |
148 | file_path = join(self.path, file) | |
149 | if exists(file_path) and file not in self.plugin_db[plugin]['files'] and not force: | |
150 | # new version added a file that already existed locally | |
151 | raise PluginLocalConflict(_( | |
152 | "cannot update '{plugin}' because it would overwrite '{path}'" | |
153 | ).format(path=file, plugin=plugin)) | |
154 | ||
155 | old_version = self.plugin_db[plugin]['version'] | |
156 | new_version = manifest['version'] | |
157 | ||
158 | if not check_only and old_version != new_version: | |
159 | # actually install files | |
160 | for file in manifest['provides']: | |
161 | target_path = join(self.path, file) | |
162 | url = "{}/{}/{}".format(self.base_url, plugin, file) | |
163 | download(url, target_path) | |
164 | ||
165 | # make file read-only to discourage users from editing them | |
166 | # which will block future updates of the plugin | |
167 | chmod(target_path, S_IREAD | S_IRGRP | S_IROTH) | |
168 | ||
169 | # check for files that have been removed in the new version | |
170 | for file, db_checksum in self.plugin_db[plugin]['files'].items(): | |
171 | if file not in manifest['provides']: | |
172 | file_path = join(self.path, file) | |
173 | current_checksum = hash_local_file(file_path) | |
174 | if db_checksum != current_checksum and not force: | |
175 | io.stderr(_( | |
176 | "not removing '{path}' because it has been modified since installation" | |
177 | ).format(path=file_path)) | |
178 | continue | |
179 | remove(file_path) | |
180 | ||
181 | self.record_as_installed(plugin, manifest) | |
182 | ||
183 | return (old_version, new_version) | |
184 | ||
185 | def write_db(self): | |
186 | with open(join(self.path, "plugins.json"), 'w') as f: | |
187 | f.write(dumps(self.plugin_db, indent=4, sort_keys=True)) | |
188 | f.write("\n") |
0 | # -*- coding: utf-8 -*- | |
1 | from __future__ import unicode_literals | |
2 | ||
3 | from imp import load_source | |
0 | from importlib.machinery import SourceFileLoader | |
4 | 1 | from inspect import isabstract |
5 | 2 | from os import environ, listdir, mkdir, walk |
6 | 3 | from os.path import abspath, dirname, isdir, isfile, join |
7 | 4 | from threading import Lock |
8 | 5 | |
9 | 6 | from pkg_resources import DistributionNotFound, require, VersionConflict |
7 | from tomlkit import parse as parse_toml | |
10 | 8 | |
11 | 9 | from . import items, VERSION_STRING |
12 | 10 | from .bundle import FILENAME_BUNDLE |
18 | 16 | RepositoryError, |
19 | 17 | ) |
20 | 18 | from .group import Group |
21 | from .metadata import ( | |
22 | blame_changed_paths, | |
23 | changes_metadata, | |
24 | check_metadata_processor_result, | |
25 | deepcopy_metadata, | |
26 | DEFAULTS, | |
27 | DONE, | |
28 | OVERWRITE, | |
29 | DoNotRunAgain, | |
30 | ) | |
19 | from .metadata import DoNotRunAgain | |
31 | 20 | from .node import _flatten_group_hierarchy, Node |
32 | 21 | from .secrets import FILENAME_SECRETS, generate_initial_secrets_cfg, SecretProxy |
33 | from .utils import cached_property, get_file_contents, names | |
22 | from .utils import ( | |
23 | cached_property, | |
24 | error_context, | |
25 | get_file_contents, | |
26 | names, | |
27 | randomize_order, | |
28 | ) | |
34 | 29 | from .utils.scm import get_git_branch, get_git_clean, get_rev |
35 | from .utils.dicts import hash_statedict, merge_dict | |
30 | from .utils.dicts import hash_statedict | |
36 | 31 | from .utils.metastack import Metastack |
37 | 32 | from .utils.text import bold, mark_for_translation as _, red, validate_name |
38 | 33 | from .utils.ui import io, QUIT_EVENT |
160 | 155 | self.__registered_hooks[filename].append(name) |
161 | 156 | |
162 | 157 | |
163 | class LibsProxy(object): | |
158 | class LibsProxy: | |
164 | 159 | def __init__(self, path): |
165 | 160 | self.__module_cache = {} |
166 | 161 | self.__path = path |
172 | 167 | filename = attrname + ".py" |
173 | 168 | filepath = join(self.__path, filename) |
174 | 169 | try: |
175 | m = load_source('bundlewrap.repo.libs_{}'.format(attrname), filepath) | |
170 | m = SourceFileLoader( | |
171 | 'bundlewrap.repo.libs_{}'.format(attrname), | |
172 | filepath, | |
173 | ).load_module() | |
176 | 174 | except: |
177 | 175 | io.stderr(_("Exception while trying to load {}:").format(filepath)) |
178 | 176 | raise |
180 | 178 | return self.__module_cache[attrname] |
181 | 179 | |
182 | 180 | |
183 | class Repository(object): | |
181 | class Repository: | |
184 | 182 | def __init__(self, repo_path=None): |
185 | 183 | if repo_path is None: |
186 | 184 | self.path = "/dev/null" |
194 | 192 | self.node_dict = {} |
195 | 193 | self._get_all_attr_code_cache = {} |
196 | 194 | self._get_all_attr_result_cache = {} |
197 | self._node_metadata_blame = {} | |
198 | 195 | self._node_metadata_complete = {} |
199 | self._node_metadata_partial = {} | |
200 | self._node_metadata_static_complete = set() | |
201 | 196 | self._node_metadata_lock = Lock() |
202 | 197 | |
203 | 198 | if repo_path is not None: |
302 | 297 | |
303 | 298 | open(join(bundle_dir, FILENAME_BUNDLE), 'a').close() |
304 | 299 | |
305 | def create_node(self, node_name): | |
306 | """ | |
307 | Creates an adhoc node with the given name. | |
308 | """ | |
309 | node = Node(node_name) | |
310 | self.add_node(node) | |
311 | return node | |
312 | ||
313 | 300 | def get_all_attrs_from_file(self, path, base_env=None): |
314 | 301 | """ |
315 | 302 | Reads all 'attributes' (if it were a module) from a source file. |
325 | 312 | |
326 | 313 | if path not in self._get_all_attr_code_cache: |
327 | 314 | source = get_file_contents(path) |
328 | self._get_all_attr_code_cache[path] = \ | |
329 | compile(source, path, mode='exec') | |
315 | with error_context(path=path): | |
316 | self._get_all_attr_code_cache[path] = \ | |
317 | compile(source, path, mode='exec') | |
330 | 318 | |
331 | 319 | code = self._get_all_attr_code_cache[path] |
332 | 320 | env = base_env.copy() |
333 | try: | |
321 | with error_context(path=path): | |
334 | 322 | exec(code, env) |
335 | except: | |
336 | io.stderr("Exception while executing {}".format(path)) | |
337 | raise | |
338 | 323 | |
339 | 324 | if not base_env: |
340 | 325 | self._get_all_attr_result_cache[path] = env |
341 | 326 | |
342 | 327 | return env |
343 | 328 | |
344 | def nodes_or_groups_from_file(self, path, attribute): | |
329 | def nodes_or_groups_from_file(self, path, attribute, preexisting): | |
345 | 330 | try: |
346 | 331 | flat_dict = self.get_all_attrs_from_file( |
347 | 332 | path, |
348 | 333 | base_env={ |
334 | attribute: preexisting, | |
349 | 335 | 'libs': self.libs, |
350 | 336 | 'repo_path': self.path, |
351 | 337 | 'vault': self.vault, |
355 | 341 | raise RepositoryError(_( |
356 | 342 | "{} must define a '{}' variable" |
357 | 343 | ).format(path, attribute)) |
344 | if not isinstance(flat_dict, dict): | |
345 | raise ValueError(_("'{v}' in '{p}' must be a dict").format( | |
346 | v=attribute, | |
347 | p=path, | |
348 | )) | |
358 | 349 | for name, infodict in flat_dict.items(): |
350 | infodict.setdefault('file_path', path) | |
359 | 351 | yield (name, infodict) |
352 | ||
353 | def nodes_or_groups_from_dir(self, directory): | |
354 | path = join(self.path, directory) | |
355 | if not isdir(path): | |
356 | return | |
357 | for root_dir, _dirs, files in walk(path): | |
358 | for filename in files: | |
359 | filepath = join(root_dir, filename) | |
360 | if not filename.endswith(".toml") or \ | |
361 | not isfile(filepath) or \ | |
362 | filename.startswith("_"): | |
363 | continue | |
364 | infodict = dict(parse_toml(get_file_contents(filepath))) | |
365 | infodict['file_path'] = filepath | |
366 | yield filename[:-5], infodict | |
360 | 367 | |
361 | 368 | def items_from_dir(self, path): |
362 | 369 | """ |
452 | 459 | """ |
453 | 460 | return self.nodes_in_all_groups([group_name]) |
454 | 461 | |
455 | def _metadata_for_node(self, node_name, partial=False, blame=False): | |
462 | def _metadata_for_node(self, node_name, partial=False, blame=False, stack=False): | |
456 | 463 | """ |
457 | 464 | Returns full or partial metadata for this node. |
458 | 465 | |
459 | 466 | Partial metadata may only be requested from inside a metadata |
460 | processor. | |
467 | reactor. | |
461 | 468 | |
462 | 469 | If necessary, this method will build complete metadata for this |
463 | 470 | node and all related nodes. Related meaning nodes that this node |
464 | 471 | depends on in one of its metadata processors. |
465 | 472 | """ |
473 | if partial: | |
474 | if node_name in self._node_metadata_complete: | |
475 | # We already completed metadata for this node, but partial must | |
476 | # return a Metastack, so we build a single-layered one just for | |
477 | # the interface. | |
478 | metastack = Metastack() | |
479 | metastack._set_layer( | |
480 | "flattened", | |
481 | self._node_metadata_complete[node_name], | |
482 | ) | |
483 | return metastack | |
484 | else: | |
485 | # Return the WIP Metastack or an empty one if we didn't start | |
486 | # yet. | |
487 | self._nodes_we_need_metadata_for.add(node_name) | |
488 | return self._metastacks.setdefault(node_name, Metastack()) | |
489 | ||
490 | if blame or stack: | |
491 | # cannot return cached result here, force rebuild | |
492 | try: | |
493 | del self._node_metadata_complete[node_name] | |
494 | except KeyError: | |
495 | pass | |
496 | ||
466 | 497 | try: |
467 | 498 | return self._node_metadata_complete[node_name] |
468 | 499 | except KeyError: |
469 | 500 | pass |
470 | 501 | |
471 | if partial: | |
472 | self._node_metadata_partial.setdefault(node_name, {}) | |
473 | return self._node_metadata_partial[node_name] | |
502 | # Different worker threads might request metadata at the same time. | |
503 | # This creates problems for the following variables: | |
504 | # | |
505 | # self._metastacks | |
506 | # self._nodes_we_need_metadata_for | |
507 | # | |
508 | # Chaos would ensue if we allowed multiple instances of | |
509 | # _build_node_metadata() to run in parallel, messing with these | |
510 | # vars. So we use a lock and reset the vars before and after. | |
474 | 511 | |
475 | 512 | with self._node_metadata_lock: |
476 | 513 | try: |
479 | 516 | except KeyError: |
480 | 517 | pass |
481 | 518 | |
482 | self._node_metadata_partial[node_name] = {} | |
483 | self._build_node_metadata(blame=blame) | |
484 | ||
519 | # set up temporary vars | |
520 | self._metastacks = {} | |
521 | self._nodes_we_need_metadata_for = {node_name} | |
522 | ||
523 | self._build_node_metadata() | |
524 | ||
525 | io.debug("completed metadata for {} nodes".format( | |
526 | len(self._nodes_we_need_metadata_for), | |
527 | )) | |
485 | 528 | # now that we have completed all metadata for this |
486 | 529 | # node and all related nodes, copy that data over |
487 | 530 | # to the complete dict |
488 | self._node_metadata_complete.update(self._node_metadata_partial) | |
489 | ||
490 | # reset temporary vars | |
491 | self._node_metadata_partial = {} | |
492 | self._node_metadata_static_complete = set() | |
531 | for node_name in self._nodes_we_need_metadata_for: | |
532 | self._node_metadata_complete[node_name] = \ | |
533 | self._metastacks[node_name]._as_dict() | |
493 | 534 | |
494 | 535 | if blame: |
495 | return self._node_metadata_blame[node_name] | |
536 | blame_result = self._metastacks[node_name]._as_blame() | |
537 | elif stack: | |
538 | stack_result = self._metastacks[node_name] | |
539 | ||
540 | # reset temporary vars (this isn't strictly necessary, but might | |
541 | # free up some memory and avoid confusion) | |
542 | self._metastacks = {} | |
543 | self._nodes_we_need_metadata_for = set() | |
544 | ||
545 | if blame: | |
546 | return blame_result | |
547 | elif stack: | |
548 | return stack_result | |
496 | 549 | else: |
497 | 550 | return self._node_metadata_complete[node_name] |
498 | 551 | |
499 | def _build_node_metadata(self, blame=False): | |
552 | def _build_node_metadata(self): | |
500 | 553 | """ |
501 | 554 | Builds complete metadata for all nodes that appear in |
502 | self._node_metadata_partial.keys(). | |
503 | """ | |
504 | # TODO remove this mechanism in bw 4.0 | |
505 | self._in_new_metareactor = False | |
506 | ||
507 | # these processors have indicated that they do not need to be run again | |
508 | blacklisted_metaprocs = set() | |
509 | ||
555 | self._nodes_we_need_metadata_for. | |
556 | """ | |
557 | # Prevents us from reassembling static metadata needlessly and | |
558 | # helps us detect nodes pulled into self._nodes_we_need_metadata_for | |
559 | # by node.partial_metadata. | |
560 | nodes_with_completed_static_metadata = set() | |
561 | # these reactors have indicated that they do not need to be run again | |
562 | do_not_run_again = set() | |
563 | # these reactors have raised KeyErrors | |
510 | 564 | keyerrors = {} |
511 | ||
565 | # loop detection | |
512 | 566 | iterations = 0 |
513 | reactors_that_returned_something_in_last_iteration = set() | |
567 | reactors_that_changed_something_in_last_iteration = set() | |
568 | ||
514 | 569 | while not QUIT_EVENT.is_set(): |
515 | 570 | iterations += 1 |
516 | 571 | if iterations > MAX_METADATA_ITERATIONS: |
517 | proclist = "" | |
518 | for node, metaproc in sorted(reactors_that_returned_something_in_last_iteration): | |
519 | proclist += node + " " + metaproc + "\n" | |
572 | reactors = "" | |
573 | for node, reactor in sorted(reactors_that_changed_something_in_last_iteration): | |
574 | reactors += node + " " + reactor + "\n" | |
520 | 575 | raise ValueError(_( |
521 | 576 | "Infinite loop detected between these metadata reactors:\n" |
522 | ) + proclist) | |
577 | ) + reactors) | |
523 | 578 | |
524 | 579 | # First, get the static metadata out of the way |
525 | for node_name in list(self._node_metadata_partial): | |
580 | for node_name in list(self._nodes_we_need_metadata_for): | |
526 | 581 | if QUIT_EVENT.is_set(): |
527 | 582 | break |
528 | 583 | node = self.get_node(node_name) |
529 | node_blame = self._node_metadata_blame.setdefault(node_name, {}) | |
530 | 584 | # check if static metadata for this node is already done |
531 | if node_name in self._node_metadata_static_complete: | |
585 | if node_name in nodes_with_completed_static_metadata: | |
532 | 586 | continue |
533 | ||
534 | with io.job(_("{node} building group metadata").format(node=bold(node.name))): | |
587 | self._metastacks[node_name] = Metastack() | |
588 | ||
589 | with io.job(_("{node} adding metadata defaults").format(node=bold(node.name))): | |
590 | # randomize order to increase chance of exposing clashing defaults | |
591 | for defaults_name, defaults in randomize_order(node.metadata_defaults): | |
592 | self._metastacks[node_name]._set_layer( | |
593 | defaults_name, | |
594 | defaults, | |
595 | ) | |
596 | ||
597 | with io.job(_("{node} adding group metadata").format(node=bold(node.name))): | |
535 | 598 | group_order = _flatten_group_hierarchy(node.groups) |
536 | 599 | for group_name in group_order: |
537 | new_metadata = merge_dict( | |
538 | self._node_metadata_partial[node.name], | |
539 | self.get_group(group_name).metadata, | |
600 | self._metastacks[node_name]._set_layer( | |
601 | "group:{}".format(group_name), | |
602 | self.get_group(group_name)._attributes.get('metadata', {}), | |
540 | 603 | ) |
541 | if blame: | |
542 | blame_changed_paths( | |
543 | self._node_metadata_partial[node.name], | |
544 | new_metadata, | |
545 | node_blame, | |
546 | "group:{}".format(group_name), | |
547 | ) | |
548 | self._node_metadata_partial[node.name] = new_metadata | |
549 | ||
550 | with io.job(_("{node} merging node metadata").format(node=bold(node.name))): | |
551 | # deepcopy_metadata is important here because up to this point | |
552 | # different nodes from the same group might still share objects | |
553 | # nested deeply in their metadata. This becomes a problem if we | |
554 | # start messing with these objects in metadata processors. Every | |
555 | # time we would edit one of these objects, the changes would be | |
556 | # shared amongst multiple nodes. | |
557 | for source_node in (node.template_node, node): | |
558 | if not source_node: # template_node might be None | |
559 | continue | |
560 | new_metadata = deepcopy_metadata(merge_dict( | |
561 | self._node_metadata_partial[node.name], | |
562 | source_node._node_metadata, | |
563 | )) | |
564 | if blame: | |
565 | blame_changed_paths( | |
566 | self._node_metadata_partial[node.name], | |
567 | new_metadata, | |
568 | node_blame, | |
569 | "node:{}".format(source_node.name), | |
570 | ) | |
571 | self._node_metadata_partial[node.name] = new_metadata | |
572 | ||
573 | # At this point, static metadata from groups and nodes has been merged. | |
574 | # Next, we look at defaults from metadata.py. | |
575 | ||
576 | for node_name in list(self._node_metadata_partial): | |
577 | # check if static metadata for this node is already done | |
578 | if node_name in self._node_metadata_static_complete: | |
579 | continue | |
580 | ||
581 | node_blame = self._node_metadata_blame[node_name] | |
582 | with io.job(_("{node} running metadata defaults").format(node=bold(node.name))): | |
583 | for defaults_name, defaults in node.metadata_defaults: | |
584 | if blame: | |
585 | blame_changed_paths( | |
586 | self._node_metadata_partial[node.name], | |
587 | defaults, | |
588 | node_blame, | |
589 | defaults_name, | |
590 | defaults=True, | |
591 | ) | |
592 | self._node_metadata_partial[node.name] = merge_dict( | |
593 | defaults, | |
594 | self._node_metadata_partial[node.name], | |
595 | ) | |
604 | ||
605 | with io.job(_("{node} adding node metadata").format(node=bold(node.name))): | |
606 | self._metastacks[node_name]._set_layer( | |
607 | "node:{}".format(node_name), | |
608 | node._attributes.get('metadata', {}), | |
609 | ) | |
596 | 610 | |
597 | 611 | # This will ensure node/group metadata and defaults are |
598 | 612 | # skipped over in future iterations. |
599 | self._node_metadata_static_complete.add(node_name) | |
600 | ||
601 | # TODO remove this in 4.0 | |
602 | # Now for the interesting part: We run all metadata processors | |
603 | # until none of them return DONE anymore (indicating that they're | |
604 | # just waiting for another metaproc to maybe insert new data, | |
605 | # which isn't happening if none return DONE) | |
606 | metaproc_returned_DONE = False | |
613 | nodes_with_completed_static_metadata.add(node_name) | |
607 | 614 | |
608 | 615 | # Now for the interesting part: We run all metadata reactors |
609 | 616 | # until none of them return changed metadata anymore. |
610 | reactor_returned_changed_metadata = False | |
611 | reactors_that_returned_something_in_last_iteration = set() | |
612 | ||
613 | for node_name in list(self._node_metadata_partial): | |
617 | any_reactor_returned_changed_metadata = False | |
618 | reactors_that_changed_something_in_last_iteration = set() | |
619 | ||
620 | # randomize order to increase chance of exposing unintended | |
621 | # non-deterministic effects of execution order | |
622 | for node_name in randomize_order(self._nodes_we_need_metadata_for): | |
614 | 623 | if QUIT_EVENT.is_set(): |
615 | 624 | break |
616 | 625 | node = self.get_node(node_name) |
617 | node_blame = self._node_metadata_blame[node_name] | |
618 | 626 | |
619 | 627 | with io.job(_("{node} running metadata reactors").format(node=bold(node.name))): |
620 | # TODO remove this mechanism in bw 4.0 | |
621 | self._in_new_metareactor = True | |
622 | ||
623 | for metadata_reactor_name, metadata_reactor in node.metadata_reactors: | |
624 | if (node_name, metadata_reactor_name) in blacklisted_metaprocs: | |
628 | for reactor_name, reactor in randomize_order(node.metadata_reactors): | |
629 | if (node_name, reactor_name) in do_not_run_again: | |
625 | 630 | continue |
626 | io.debug(_( | |
627 | "running metadata reactor {metaproc} for node {node}" | |
628 | ).format( | |
629 | metaproc=metadata_reactor_name, | |
630 | node=node.name, | |
631 | )) | |
632 | if blame: | |
633 | # We need to deepcopy here because otherwise we have no chance of | |
634 | # figuring out what changed... | |
635 | input_metadata = deepcopy_metadata( | |
636 | self._node_metadata_partial[node.name] | |
637 | ) | |
638 | else: | |
639 | # ...but we can't always do it for performance reasons. | |
640 | input_metadata = self._node_metadata_partial[node.name] | |
641 | 631 | try: |
642 | stack = Metastack() | |
643 | stack._set_layer("flattened", input_metadata) | |
644 | new_metadata = metadata_reactor(stack) | |
632 | new_metadata = reactor(self._metastacks[node.name]) | |
645 | 633 | except KeyError as exc: |
646 | keyerrors[(node_name, metadata_reactor_name)] = exc | |
634 | keyerrors[(node_name, reactor_name)] = exc | |
647 | 635 | except DoNotRunAgain: |
648 | blacklisted_metaprocs.add((node_name, metadata_reactor_name)) | |
636 | do_not_run_again.add((node_name, reactor_name)) | |
649 | 637 | except Exception as exc: |
650 | 638 | io.stderr(_( |
651 | 639 | "{x} Exception while executing metadata reactor " |
652 | 640 | "{metaproc} for node {node}:" |
653 | 641 | ).format( |
654 | 642 | x=red("!!!"), |
655 | metaproc=metadata_reactor_name, | |
643 | metaproc=reactor_name, | |
656 | 644 | node=node.name, |
657 | 645 | )) |
658 | 646 | raise exc |
659 | 647 | else: |
660 | 648 | # reactor terminated normally, clear any previously stored exception |
661 | 649 | try: |
662 | del keyerrors[(node_name, metadata_reactor_name)] | |
650 | del keyerrors[(node_name, reactor_name)] | |
663 | 651 | except KeyError: |
664 | 652 | pass |
665 | reactors_that_returned_something_in_last_iteration.add( | |
666 | (node_name, metadata_reactor_name), | |
667 | ) | |
668 | if not reactor_returned_changed_metadata: | |
669 | reactor_returned_changed_metadata = changes_metadata( | |
670 | self._node_metadata_partial[node.name], | |
653 | ||
654 | try: | |
655 | this_changed = self._metastacks[node_name]._set_layer( | |
656 | reactor_name, | |
671 | 657 | new_metadata, |
672 | 658 | ) |
673 | ||
674 | if blame: | |
675 | blame_changed_paths( | |
676 | self._node_metadata_partial[node.name], | |
677 | new_metadata, | |
678 | node_blame, | |
679 | "metadata_reactor:{}".format(metadata_reactor_name), | |
659 | except TypeError as exc: | |
660 | # TODO catch validation errors better | |
661 | io.stderr(_( | |
662 | "{x} Exception after executing metadata reactor " | |
663 | "{metaproc} for node {node}:" | |
664 | ).format( | |
665 | x=red("!!!"), | |
666 | metaproc=reactor_name, | |
667 | node=node.name, | |
668 | )) | |
669 | raise exc | |
670 | if this_changed: | |
671 | reactors_that_changed_something_in_last_iteration.add( | |
672 | (node_name, reactor_name), | |
680 | 673 | ) |
681 | self._node_metadata_partial[node.name] = merge_dict( | |
682 | self._node_metadata_partial[node.name], | |
683 | new_metadata, | |
684 | ) | |
685 | ||
686 | # TODO remove this mechanism in bw 4.0 | |
687 | self._in_new_metareactor = False | |
688 | ||
689 | ### TODO remove this block in 4.0 BEGIN | |
690 | with io.job(_("{node} running metadata processors").format(node=bold(node.name))): | |
691 | for metadata_processor_name, metadata_processor in node._metadata_processors[2]: | |
692 | if (node_name, metadata_processor_name) in blacklisted_metaprocs: | |
693 | continue | |
694 | io.debug(_( | |
695 | "running metadata processor {metaproc} for node {node}" | |
696 | ).format( | |
697 | metaproc=metadata_processor_name, | |
698 | node=node.name, | |
699 | )) | |
700 | if blame: | |
701 | # We need to deepcopy here because otherwise we have no chance of | |
702 | # figuring out what changed... | |
703 | input_metadata = deepcopy_metadata(self._node_metadata_partial[node.name]) | |
704 | else: | |
705 | # ...but we can't always do it for performance reasons. | |
706 | input_metadata = self._node_metadata_partial[node.name] | |
707 | try: | |
708 | processed = metadata_processor(input_metadata) | |
709 | except Exception as exc: | |
710 | io.stderr(_( | |
711 | "{x} Exception while executing metadata processor " | |
712 | "{metaproc} for node {node}:" | |
713 | ).format( | |
714 | x=red("!!!"), | |
715 | metaproc=metadata_processor_name, | |
716 | node=node.name, | |
717 | )) | |
718 | raise exc | |
719 | processed_dict, options = check_metadata_processor_result( | |
720 | input_metadata, | |
721 | processed, | |
722 | node.name, | |
723 | metadata_processor_name, | |
724 | ) | |
725 | if DONE in options: | |
726 | io.debug(_( | |
727 | "metadata processor {metaproc} for node {node} " | |
728 | "has indicated that it need NOT be run again" | |
729 | ).format( | |
730 | metaproc=metadata_processor_name, | |
731 | node=node.name, | |
732 | )) | |
733 | blacklisted_metaprocs.add((node_name, metadata_processor_name)) | |
734 | metaproc_returned_DONE = True | |
735 | else: | |
736 | io.debug(_( | |
737 | "metadata processor {metaproc} for node {node} " | |
738 | "has indicated that it must be run again" | |
739 | ).format( | |
740 | metaproc=metadata_processor_name, | |
741 | node=node.name, | |
742 | )) | |
743 | ||
744 | blame_defaults = False | |
745 | if DEFAULTS in options: | |
746 | processed_dict = merge_dict( | |
747 | processed_dict, | |
748 | self._node_metadata_partial[node.name], | |
749 | ) | |
750 | blame_defaults = True | |
751 | elif OVERWRITE in options: | |
752 | processed_dict = merge_dict( | |
753 | self._node_metadata_partial[node.name], | |
754 | processed_dict, | |
755 | ) | |
756 | ||
757 | if blame: | |
758 | blame_changed_paths( | |
759 | self._node_metadata_partial[node.name], | |
760 | processed_dict, | |
761 | node_blame, | |
762 | "metadata_processor:{}".format(metadata_processor_name), | |
763 | defaults=blame_defaults, | |
764 | ) | |
765 | ||
766 | self._node_metadata_partial[node.name] = processed_dict | |
767 | ### TODO remove this block in 4.0 END | |
768 | ||
769 | if not metaproc_returned_DONE and not reactor_returned_changed_metadata: | |
770 | if self._node_metadata_static_complete != set(self._node_metadata_partial.keys()): | |
674 | any_reactor_returned_changed_metadata = True | |
675 | ||
676 | if not any_reactor_returned_changed_metadata: | |
677 | if nodes_with_completed_static_metadata != self._nodes_we_need_metadata_for: | |
771 | 678 | # During metadata reactor execution, partial metadata may |
772 | 679 | # have been requested for nodes we did not previously |
773 | # consider. Since partial metadata may default to | |
774 | # just an empty dict, we still need to make sure to | |
775 | # generate static metadata for these new nodes, as | |
776 | # that may trigger additional runs of metadata | |
777 | # reactors. | |
680 | # consider. We still need to make sure to generate static | |
681 | # metadata for these new nodes, as that may trigger | |
682 | # additional results from metadata reactors. | |
778 | 683 | continue |
779 | 684 | else: |
685 | # Now that we're done, move the static metadata layers to the | |
686 | # end of the stack so they overrule reactors. | |
687 | for node_name, metastack in self._metastacks.items(): | |
688 | for identifier in list(metastack._layers.keys()): | |
689 | if ( | |
690 | identifier.startswith("group:") or | |
691 | identifier.startswith("node:") | |
692 | ): | |
693 | metastack._layers[identifier] = metastack._layers.pop(identifier) | |
780 | 694 | break |
781 | 695 | |
782 | 696 | if keyerrors: |
843 | 757 | self.bundle_names.append(dir_entry) |
844 | 758 | |
845 | 759 | # populate groups |
760 | toml_groups = dict(self.nodes_or_groups_from_dir("groups")) | |
846 | 761 | self.group_dict = {} |
847 | for group in self.nodes_or_groups_from_file(self.groups_file, 'groups'): | |
762 | for group in self.nodes_or_groups_from_file(self.groups_file, 'groups', toml_groups): | |
848 | 763 | self.add_group(Group(*group)) |
849 | 764 | |
850 | 765 | # populate items |
853 | 768 | self.item_classes.append(item_class) |
854 | 769 | |
855 | 770 | # populate nodes |
771 | toml_nodes = dict(self.nodes_or_groups_from_dir("nodes")) | |
856 | 772 | self.node_dict = {} |
857 | for node in self.nodes_or_groups_from_file(self.nodes_file, 'nodes'): | |
773 | for node in self.nodes_or_groups_from_file(self.nodes_file, 'nodes', toml_nodes): | |
858 | 774 | self.add_node(Node(*node)) |
859 | 775 | |
860 | 776 | @cached_property |
0 | # -*- coding: utf-8 -*- | |
1 | from __future__ import unicode_literals | |
2 | ||
3 | 0 | from base64 import b64encode, urlsafe_b64decode |
4 | try: | |
5 | from configparser import SafeConfigParser | |
6 | except ImportError: # Python 2 | |
7 | from ConfigParser import SafeConfigParser | |
1 | from configparser import ConfigParser | |
8 | 2 | import hashlib |
9 | 3 | import hmac |
10 | 4 | from os import environ |
63 | 57 | yield character |
64 | 58 | |
65 | 59 | |
66 | class SecretProxy(object): | |
60 | class SecretProxy: | |
67 | 61 | @staticmethod |
68 | 62 | def random_key(): |
69 | 63 | """ |
74 | 68 | def __init__(self, repo): |
75 | 69 | self.repo = repo |
76 | 70 | self.keys = self._load_keys() |
77 | self._call_log = {} | |
78 | 71 | |
79 | 72 | def _decrypt(self, cryptotext=None, key=None): |
80 | 73 | """ |
242 | 235 | return random(h.digest()) |
243 | 236 | |
244 | 237 | def _load_keys(self): |
245 | config = SafeConfigParser() | |
238 | config = ConfigParser() | |
246 | 239 | secrets_file = join(self.repo.path, FILENAME_SECRETS) |
247 | 240 | try: |
248 | 241 | config.read(secrets_file) |
256 | 249 | |
257 | 250 | def decrypt(self, cryptotext, key=None): |
258 | 251 | return Fault( |
252 | 'bw secrets decrypt', | |
259 | 253 | self._decrypt, |
260 | 254 | cryptotext=cryptotext, |
261 | 255 | key=key, |
263 | 257 | |
264 | 258 | def decrypt_file(self, source_path, key=None): |
265 | 259 | return Fault( |
260 | 'bw secrets decrypt_file', | |
266 | 261 | self._decrypt_file, |
267 | 262 | source_path=source_path, |
268 | 263 | key=key, |
270 | 265 | |
271 | 266 | def decrypt_file_as_base64(self, source_path, key=None): |
272 | 267 | return Fault( |
268 | 'bw secrets decrypt_file_as_base64', | |
273 | 269 | self._decrypt_file_as_base64, |
274 | 270 | source_path=source_path, |
275 | 271 | key=key, |
321 | 317 | def human_password_for( |
322 | 318 | self, identifier, digits=2, key='generate', per_word=3, words=4, |
323 | 319 | ): |
324 | self._call_log.setdefault(identifier, 0) | |
325 | self._call_log[identifier] += 1 | |
326 | return Fault( | |
320 | return Fault( | |
321 | 'bw secrets human_password_for', | |
327 | 322 | self._generate_human_password, |
328 | 323 | identifier=identifier, |
329 | 324 | digits=digits, |
333 | 328 | ) |
334 | 329 | |
335 | 330 | def password_for(self, identifier, key='generate', length=32, symbols=False): |
336 | self._call_log.setdefault(identifier, 0) | |
337 | self._call_log[identifier] += 1 | |
338 | return Fault( | |
331 | return Fault( | |
332 | 'bw secrets password_for', | |
339 | 333 | self._generate_password, |
340 | 334 | identifier=identifier, |
341 | 335 | key=key, |
345 | 339 | |
346 | 340 | def random_bytes_as_base64_for(self, identifier, key='generate', length=32): |
347 | 341 | return Fault( |
342 | 'bw secrets random_bytes_as_base64', | |
348 | 343 | self._generate_random_bytes_as_base64, |
349 | 344 | identifier=identifier, |
350 | 345 | key=key, |
0 | # -*- coding: utf-8 -*- | |
1 | from __future__ import unicode_literals | |
2 | ||
3 | 0 | from base64 import b64encode |
4 | 1 | from codecs import getwriter |
5 | 2 | from contextlib import contextmanager |
7 | 4 | from inspect import isgenerator |
8 | 5 | from os import chmod, close, makedirs, remove |
9 | 6 | from os.path import dirname, exists |
7 | from random import shuffle | |
10 | 8 | import stat |
11 | 9 | from sys import stderr, stdout |
12 | 10 | from tempfile import mkstemp |
15 | 13 | |
16 | 14 | from ..exceptions import DontCache, FaultUnavailable |
17 | 15 | |
18 | __GETATTR_CODE_CACHE = {} | |
19 | __GETATTR_RESULT_CACHE = {} | |
20 | __GETATTR_NODEFAULT = "very_unlikely_default_value" | |
21 | ||
22 | ||
16 | ||
17 | class NO_DEFAULT: pass | |
23 | 18 | MODE644 = stat.S_IRUSR | stat.S_IWUSR | stat.S_IRGRP | stat.S_IROTH |
24 | ||
25 | try: | |
26 | STDERR_WRITER = getwriter('utf-8')(stderr.buffer) | |
27 | STDOUT_WRITER = getwriter('utf-8')(stdout.buffer) | |
28 | except AttributeError: # Python 2 | |
29 | STDERR_WRITER = getwriter('utf-8')(stderr) | |
30 | STDOUT_WRITER = getwriter('utf-8')(stdout) | |
19 | STDERR_WRITER = getwriter('utf-8')(stderr.buffer) | |
20 | STDOUT_WRITER = getwriter('utf-8')(stdout.buffer) | |
31 | 21 | |
32 | 22 | |
33 | 23 | def cached_property(prop): |
53 | 43 | |
54 | 44 | |
55 | 45 | def download(url, path): |
56 | if not exists(dirname(path)): | |
57 | makedirs(dirname(path)) | |
58 | if exists(path): | |
59 | chmod(path, MODE644) | |
60 | with open(path, 'wb') as f: | |
61 | r = get(url, stream=True) | |
62 | r.raise_for_status() | |
63 | for block in r.iter_content(1024): | |
64 | if not block: | |
65 | break | |
66 | else: | |
67 | f.write(block) | |
68 | ||
69 | ||
70 | class Fault(object): | |
46 | with error_context(url=url, path=path): | |
47 | if not exists(dirname(path)): | |
48 | makedirs(dirname(path)) | |
49 | if exists(path): | |
50 | chmod(path, MODE644) | |
51 | with open(path, 'wb') as f: | |
52 | r = get(url, stream=True) | |
53 | r.raise_for_status() | |
54 | for block in r.iter_content(1024): | |
55 | if not block: | |
56 | break | |
57 | else: | |
58 | f.write(block) | |
59 | ||
60 | ||
61 | class ErrorContext(Exception): | |
62 | pass | |
63 | ||
64 | ||
65 | @contextmanager | |
66 | def error_context(**kwargs): | |
67 | """ | |
68 | This can be used to provide context for critical exceptions. Since | |
69 | we're processing lots of different dicts, a "KeyError: foo" will | |
70 | often not be helpful, because it's not clear which dict is missing the | |
71 | key. | |
72 | ||
73 | ||
74 | >>> with error_context(arbitrary_kwarg="helpful hint"): | |
75 | ... {}["foo"] | |
76 | ... | |
77 | Traceback (most recent call last): | |
78 | [...] | |
79 | KeyError: 'foo' | |
80 | ||
81 | The above exception was the direct cause of the following exception: | |
82 | ||
83 | Traceback (most recent call last): | |
84 | [...] | |
85 | bundlewrap.utils.ErrorContext: ACTUAL EXCEPTION ABOVE | |
86 | {'arbitrary_kwarg': 'helpful hint'} | |
87 | ||
88 | ||
89 | Careful though: Only use this in places where you don't expect | |
90 | exceptions to occur, since they will indiscriminately be reraised as | |
91 | ErrorContext. | |
92 | """ | |
93 | try: | |
94 | yield | |
95 | except Exception as exc: | |
96 | raise ErrorContext("ACTUAL EXCEPTION ABOVE\n" + repr(kwargs)) from exc | |
97 | ||
98 | ||
99 | class Fault: | |
71 | 100 | """ |
72 | 101 | A proxy object for lazy access to things that may not really be |
73 | 102 | available at the time of use. |
75 | 104 | This lets us gracefully skip items that require information that's
76 | 105 | currently not available. |
77 | 106 | """ |
78 | def __init__(self, callback, **kwargs): | |
107 | def __init__(self, fault_identifier, callback, **kwargs): | |
108 | if isinstance(fault_identifier, list): | |
109 | self.id_list = fault_identifier | |
110 | else: | |
111 | self.id_list = [fault_identifier] | |
112 | ||
113 | for key, value in sorted(kwargs.items()): | |
114 | self.id_list.append(hash(key)) | |
115 | self.id_list.append(hash(value)) | |
116 | ||
79 | 117 | self._available = None |
80 | 118 | self._exc = None |
81 | 119 | self._value = None |
97 | 135 | if isinstance(other, Fault): |
98 | 136 | def callback(): |
99 | 137 | return self.value + other.value |
100 | return Fault(callback) | |
138 | return Fault(self.id_list + other.id_list, callback) | |
101 | 139 | else: |
102 | 140 | def callback(): |
103 | 141 | return self.value + other |
104 | return Fault(callback) | |
142 | return Fault(self.id_list + ['raw {}'.format(repr(other))], callback) | |
143 | ||
144 | def __eq__(self, other): | |
145 | if not isinstance(other, Fault): | |
146 | return False | |
147 | else: | |
148 | return self.id_list == other.id_list | |
149 | ||
150 | def __hash__(self): | |
151 | return hash(tuple(self.id_list)) | |
105 | 152 | |
106 | 153 | def __len__(self): |
107 | 154 | return len(self.value) |
115 | 162 | def b64encode(self): |
116 | 163 | def callback(): |
117 | 164 | return b64encode(self.value.encode('UTF-8')).decode('UTF-8') |
118 | return Fault(callback) | |
165 | return Fault(self.id_list + ['b64encode'], callback) | |
119 | 166 | |
120 | 167 | def format_into(self, format_string): |
121 | 168 | def callback(): |
122 | 169 | return format_string.format(self.value) |
123 | return Fault(callback) | |
170 | return Fault(self.id_list + ['format_into ' + format_string], callback) | |
124 | 171 | |
125 | 172 | @property |
126 | 173 | def is_available(self): |
139 | 186 | def method(self, *args, **kwargs): |
140 | 187 | def callback(): |
141 | 188 | return getattr(self.value, method_name)(*args, **kwargs) |
142 | return Fault(callback) | |
189 | return Fault(self.id_list + [method_name], callback) | |
143 | 190 | return method |
144 | 191 | |
145 | 192 | |
157 | 204 | |
158 | 205 | |
159 | 206 | def get_file_contents(path): |
160 | with open(path, 'rb') as f: | |
161 | content = f.read() | |
207 | with error_context(path=path): | |
208 | with open(path, 'rb') as f: | |
209 | content = f.read() | |
162 | 210 | return content |
163 | 211 | |
164 | 212 | |
178 | 226 | """ |
179 | 227 | for obj in obj_list: |
180 | 228 | yield obj.name |
229 | ||
230 | ||
231 | def randomize_order(obj): | |
232 | if isinstance(obj, dict): | |
233 | result = list(obj.items()) | |
234 | else: | |
235 | result = list(obj) | |
236 | shuffle(result) | |
237 | return result | |
181 | 238 | |
182 | 239 | |
183 | 240 | def sha1(data): |
189 | 246 | return hasher.hexdigest() |
190 | 247 | |
191 | 248 | |
192 | class SkipList(object): | |
249 | class SkipList: | |
193 | 250 | """ |
194 | 251 | Used to maintain a list of nodes that have already been visited. |
195 | 252 | """ |
0 | # -*- coding: utf-8 -*- | |
1 | from __future__ import unicode_literals | |
2 | 0 | from sys import exit |
3 | 1 | |
4 | 2 | from ..exceptions import NoSuchGroup, NoSuchItem, NoSuchNode |
39 | 37 | exit(1) |
40 | 38 | |
41 | 39 | |
42 | def get_node(repo, node_name, adhoc_nodes=False): | |
40 | def get_node(repo, node_name): | |
43 | 41 | try: |
44 | 42 | return repo.get_node(node_name) |
45 | 43 | except NoSuchNode: |
46 | if adhoc_nodes: | |
47 | return repo.create_node(node_name) | |
48 | else: | |
49 | io.stderr(_("{x} No such node: {node}").format( | |
50 | node=node_name, | |
51 | x=red("!!!"), | |
52 | )) | |
53 | exit(1) | |
44 | io.stderr(_("{x} No such node: {node}").format( | |
45 | node=node_name, | |
46 | x=red("!!!"), | |
47 | )) | |
48 | exit(1) | |
54 | 49 | |
55 | 50 | |
56 | HELP_get_target_nodes = _("""expression to select target nodes, i.e.: | |
57 | "node1,node2,group3,bundle:foo,!bundle:bar,!group:group4,lambda:node.metadata['magic']<3" | |
58 | to select 'node1', 'node2', all nodes in 'group3', all nodes with the | |
59 | bundle 'foo', all nodes without bundle 'bar', all nodes not in 'group4' | |
60 | and all nodes whose 'magic' metadata is less than three (any exceptions | |
61 | in lambda expressions are ignored) | |
51 | HELP_get_target_nodes = _("""expression to select target nodes: | |
52 | ||
53 | my_node # to select a single node | |
54 | my_group # all nodes in this group | |
55 | bundle:my_bundle # all nodes with this bundle | |
56 | !bundle:my_bundle # all nodes without this bundle | |
57 | !group:my_group # all nodes not in this group | |
58 | "lambda:node.metadata_get('foo/magic', 47) < 3" | |
59 | # all nodes whose metadata["foo"]["magic"] is less than three | |
62 | 60 | """) |
63 | 61 | |
64 | 62 | |
65 | def get_target_nodes(repo, target_string, adhoc_nodes=False): | |
66 | targets = [] | |
67 | for name in target_string.split(","): | |
63 | def get_target_nodes(repo, target_strings): | |
64 | targets = set() | |
65 | for name in target_strings: | |
68 | 66 | name = name.strip() |
69 | 67 | if name.startswith("bundle:"): |
70 | 68 | bundle_name = name.split(":", 1)[1] |
71 | 69 | for node in repo.nodes: |
72 | 70 | if bundle_name in names(node.bundles): |
73 | targets.append(node) | |
71 | targets.add(node) | |
74 | 72 | elif name.startswith("!bundle:"): |
75 | 73 | bundle_name = name.split(":", 1)[1] |
76 | 74 | for node in repo.nodes: |
77 | 75 | if bundle_name not in names(node.bundles): |
78 | targets.append(node) | |
76 | targets.add(node) | |
79 | 77 | elif name.startswith("!group:"): |
80 | 78 | group_name = name.split(":", 1)[1] |
81 | 79 | for node in repo.nodes: |
82 | 80 | if group_name not in names(node.groups): |
83 | targets.append(node) | |
81 | targets.add(node) | |
84 | 82 | elif name.startswith("lambda:"): |
85 | 83 | expression = eval("lambda node: " + name.split(":", 1)[1]) |
86 | 84 | for node in repo.nodes: |
87 | try: | |
88 | if expression(node): | |
89 | targets.append(node) | |
90 | except: | |
91 | pass | |
85 | if expression(node): | |
86 | targets.add(node) | |
92 | 87 | else: |
93 | 88 | try: |
94 | targets.append(repo.get_node(name)) | |
89 | targets.add(repo.get_node(name)) | |
95 | 90 | except NoSuchNode: |
96 | 91 | try: |
97 | targets += list(repo.get_group(name).nodes) | |
92 | group = repo.get_group(name) | |
98 | 93 | except NoSuchGroup: |
99 | if adhoc_nodes: | |
100 | targets.append(repo.create_node(name)) | |
101 | else: | |
102 | io.stderr(_("{x} No such node or group: {name}").format( | |
103 | x=red("!!!"), | |
104 | name=name, | |
105 | )) | |
106 | exit(1) | |
107 | return sorted(set(targets)) | |
94 | io.stderr(_("{x} No such node or group: {name}").format( | |
95 | x=red("!!!"), | |
96 | name=name, | |
97 | )) | |
98 | exit(1) | |
99 | else: | |
100 | targets.update(group.nodes) | |
101 | return targets |
0 | # -*- coding: utf-8 -*- | |
1 | from __future__ import unicode_literals | |
2 | ||
3 | 0 | from difflib import unified_diff |
4 | 1 | from hashlib import sha1 |
5 | 2 | from json import dumps, JSONEncoder |
6 | 3 | |
4 | from tomlkit import document as toml_document | |
5 | ||
7 | 6 | from . import Fault |
8 | 7 | from .text import bold, green, red |
9 | 8 | from .text import force_text, mark_for_translation as _ |
10 | 9 | |
11 | 10 | |
12 | try: | |
13 | text_type = unicode | |
14 | byte_type = str | |
15 | except NameError: | |
16 | text_type = str | |
17 | byte_type = bytes | |
18 | ||
19 | try: | |
20 | from types import MappingProxyType | |
21 | except ImportError: | |
22 | # XXX Not available in Python 2, but that's EOL anyway and we're | |
23 | # going to drop support for it very soon. The following at least | |
24 | # creates a new object, so updates to it will not be persistent. | |
25 | MappingProxyType = dict | |
26 | ||
27 | 11 | DIFF_MAX_INLINE_LENGTH = 36 |
28 | 12 | DIFF_MAX_LINE_LENGTH = 1024 |
29 | 13 | |
30 | 14 | |
31 | class _Atomic(object): | |
15 | class _Atomic: | |
32 | 16 | """ |
33 | 17 | This and the following related classes are used to mark objects as |
34 | 18 | non-mergeable for the purposes of merge_dict(). |
35 | 19 | """ |
36 | 20 | pass |
37 | 21 | |
38 | ||
39 | class _AtomicDict(dict, _Atomic): | |
40 | pass | |
41 | ||
42 | ||
43 | class _AtomicList(list, _Atomic): | |
44 | pass | |
45 | ||
46 | ||
47 | class _AtomicSet(set, _Atomic): | |
48 | pass | |
49 | ||
50 | ||
51 | class _AtomicTuple(tuple, _Atomic): | |
52 | pass | |
22 | class _AtomicDict(dict, _Atomic): pass | |
23 | class _AtomicList(list, _Atomic): pass | |
24 | class _AtomicSet(set, _Atomic): pass | |
25 | class _AtomicTuple(tuple, _Atomic): pass | |
53 | 26 | |
54 | 27 | |
55 | 28 | ATOMIC_TYPES = { |
58 | 31 | set: _AtomicSet, |
59 | 32 | tuple: _AtomicTuple, |
60 | 33 | } |
34 | ||
35 | ||
36 | def dict_to_toml(dict_obj): | |
37 | toml_doc = toml_document() | |
38 | for key, value in dict_obj.items(): | |
39 | if isinstance(value, tuple): | |
40 | toml_doc[key] = list(value) | |
41 | elif isinstance(value, set): | |
42 | toml_doc[key] = sorted(value) | |
43 | elif isinstance(value, dict): | |
44 | toml_doc[key] = dict_to_toml(value) | |
45 | else: | |
46 | toml_doc[key] = value | |
47 | return toml_doc | |
61 | 48 | |
62 | 49 | |
63 | 50 | def diff_keys(sdict1, sdict2): |
158 | 145 | |
159 | 146 | TYPE_DIFFS = { |
160 | 147 | bool: diff_value_bool, |
161 | byte_type: diff_value_text, | |
148 | bytes: diff_value_text, | |
162 | 149 | float: diff_value_int, |
163 | 150 | int: diff_value_int, |
164 | 151 | list: diff_value_list, |
165 | 152 | set: diff_value_list, |
166 | text_type: diff_value_text, | |
153 | str: diff_value_text, | |
167 | 154 | tuple: diff_value_list, |
168 | 155 | } |
169 | 156 | |
186 | 173 | return sorted(obj) |
187 | 174 | else: |
188 | 175 | return JSONEncoder.default(self, obj) |
189 | ||
190 | ||
191 | def freeze_object(obj): | |
192 | """ | |
193 | Returns a read-only version of the given object (if possible). | |
194 | """ | |
195 | if isinstance(obj, dict): | |
196 | keys = set(obj.keys()) | |
197 | for k in keys: | |
198 | obj[k] = freeze_object(obj[k]) | |
199 | return MappingProxyType(obj) | |
200 | elif isinstance(obj, (list, tuple)): | |
201 | result = [] | |
202 | for i in obj: | |
203 | result.append(freeze_object(i)) | |
204 | return tuple(result) | |
205 | elif isinstance(obj, set): | |
206 | result = set() | |
207 | for i in obj: | |
208 | result.add(freeze_object(i)) | |
209 | return frozenset(obj) | |
210 | else: | |
211 | return obj | |
212 | 176 | |
213 | 177 | |
214 | 178 | def hash_statedict(sdict): |
355 | 319 | ) |
356 | 320 | |
357 | 321 | |
322 | class COLLECTION_OF_STRINGS: pass | |
323 | class TUPLE_OF_INTS: pass | |
324 | ||
325 | ||
326 | def validate_dict(candidate, schema, required_keys=None): | |
327 | if not isinstance(candidate, dict): | |
328 | raise ValueError(_("not a dict: {}").format(repr(candidate))) | |
329 | for key, value in candidate.items(): | |
330 | if key not in schema: | |
331 | raise ValueError(_("illegal key: {}").format(key)) | |
332 | allowed_types = schema[key] | |
333 | if allowed_types == COLLECTION_OF_STRINGS: | |
334 | if not isinstance(value, (list, set, tuple)): | |
335 | raise ValueError(_("key '{k}' is {i}, but should be one of: {t}").format( | |
336 | k=key, | |
337 | i=type(value), | |
338 | t=(list, set, tuple), | |
339 | )) | |
340 | for inner_value in value: | |
341 | if not isinstance(inner_value, str): | |
342 | raise ValueError(_("non-string member in '{k}': {v}").format( | |
343 | k=key, | |
344 | v=repr(inner_value), | |
345 | )) | |
346 | elif allowed_types == TUPLE_OF_INTS: | |
347 | if not isinstance(value, tuple): | |
348 | raise ValueError(_("key '{k}' is {i}, but should be a tuple").format( | |
349 | k=key, | |
350 | i=type(value), | |
351 | )) | |
352 | for inner_value in value: | |
353 | if not isinstance(inner_value, int): | |
354 | raise ValueError(_("non-int member in '{k}': {v}").format( | |
355 | k=key, | |
356 | v=repr(inner_value), | |
357 | )) | |
358 | elif not isinstance(value, allowed_types): | |
359 | raise ValueError(_("key '{k}' is {i}, but should be one of: {t}").format( | |
360 | k=key, | |
361 | i=type(value), | |
362 | t=allowed_types, | |
363 | )) | |
364 | for key in required_keys or []: | |
365 | if key not in candidate: | |
366 | raise ValueError(_("missing required key: {}").format(key)) | |
367 | ||
368 | ||
358 | 369 | def validate_statedict(sdict): |
359 | 370 | """ |
360 | 371 | Raises ValueError if the given statedict is invalid. |
362 | 373 | if sdict is None: |
363 | 374 | return |
364 | 375 | for key, value in sdict.items(): |
365 | if not isinstance(force_text(key), text_type): | |
376 | if not isinstance(force_text(key), str): | |
366 | 377 | raise ValueError(_("non-text statedict key: {}").format(key)) |
367 | 378 | |
368 | 379 | if type(value) not in TYPE_DIFFS and value is not None: |
382 | 393 | )) |
383 | 394 | |
384 | 395 | |
396 | def delete_key_at_path(d, path): | |
397 | if len(path) == 1: | |
398 | del d[path[0]] | |
399 | else: | |
400 | delete_key_at_path(d[path[0]], path[1:]) | |
401 | ||
402 | ||
403 | def replace_key_at_path(d, path, new_key): | |
404 | if len(path) == 1: | |
405 | value = d[path[0]] | |
406 | del d[path[0]] | |
407 | d[new_key] = value | |
408 | else: | |
409 | replace_key_at_path(d[path[0]], path[1:], new_key) | |
410 | ||
411 | ||
412 | def set_key_at_path(d, path, value): | |
413 | if len(path) == 1: | |
414 | d[path[0]] = value | |
415 | else: | |
416 | if path[0] not in d: # setdefault doesn't work with tomlkit | |
417 | d[path[0]] = {} | |
418 | set_key_at_path(d[path[0]], path[1:], value) | |
419 | ||
420 | ||
385 | 421 | def value_at_key_path(dict_obj, path): |
386 | 422 | """ |
387 | 423 | Given the list of keys in `path`, recursively traverse `dict_obj` |
0 | # -*- coding: utf-8 -*- | |
1 | from __future__ import unicode_literals | |
2 | ||
3 | 0 | from collections import OrderedDict |
4 | 1 | from sys import version_info |
5 | 2 | |
6 | from ..metadata import validate_metadata, value_at_key_path | |
7 | from .dicts import freeze_object, map_dict_keys, merge_dict | |
8 | ||
9 | ||
10 | _NO_DEFAULT = "<NO METASTACK DEFAULT PROVIDED>" | |
3 | from ..metadata import deepcopy_metadata, validate_metadata, value_at_key_path | |
4 | from . import NO_DEFAULT | |
5 | from .dicts import map_dict_keys, merge_dict | |
11 | 6 | |
12 | 7 | |
13 | 8 | class Metastack: |
25 | 20 | else: |
26 | 21 | self._layers = {} |
27 | 22 | |
28 | def get(self, path, default=_NO_DEFAULT): | |
23 | def get(self, path, default=NO_DEFAULT): | |
29 | 24 | """ |
30 | 25 | Get the value at the given path, merging all layers together. |
26 | ||
31 | 27 | Path may either be a string like
28 | ||
32 | 29 | 'foo/bar' |
30 | ||
33 | 31 | accessing the 'bar' key in the dict at the 'foo' key |
34 | 32 | or a tuple like |
33 | ||
35 | 34 | ('fo/o', 'bar') |
35 | ||
36 | 36 | accessing the 'bar' key in the dict at the 'fo/o' key. |
37 | 37 | """ |
38 | 38 | if not isinstance(path, (tuple, list)): |
55 | 55 | result = merge_dict(result, {'data': value}) |
56 | 56 | |
57 | 57 | if undef: |
58 | if default != _NO_DEFAULT: | |
58 | if default != NO_DEFAULT: | |
59 | 59 | return default |
60 | 60 | else: |
61 | 61 | raise KeyError('/'.join(path)) |
62 | 62 | else: |
63 | return freeze_object(result['data']) | |
63 | return deepcopy_metadata(result['data']) | |
64 | 64 | |
65 | 65 | def _as_dict(self): |
66 | 66 | final_dict = {} |
0 | import re | |
1 | ||
2 | 0 | from . import names |
3 | 1 | from .text import mark_for_translation as _, red |
4 | 2 | |
34 | 32 | items, |
35 | 33 | cluster=True, |
36 | 34 | concurrency=True, |
37 | static=True, | |
38 | 35 | regular=True, |
39 | 36 | reverse=True, |
40 | 37 | auto=True, |
113 | 110 | |
114 | 111 | |
115 | 112 | def plot_group(groups, nodes, show_nodes): |
113 | groups = sorted(groups) | |
114 | nodes = sorted(nodes) | |
115 | ||
116 | 116 | yield "digraph bundlewrap" |
117 | 117 | yield "{" |
118 | 118 | |
132 | 132 | yield "\"{}\" [fontcolor=\"#303030\",shape=box,style=rounded];".format(node.name) |
133 | 133 | |
134 | 134 | for group in groups: |
135 | for subgroup in group.immediate_subgroup_names: | |
135 | for subgroup in sorted(group._attributes.get('subgroups', set())): | |
136 | 136 | yield "\"{}\" -> \"{}\" [color=\"#6BB753\",penwidth=2]".format(group.name, subgroup) |
137 | for subgroup in group._subgroup_names_from_patterns: | |
137 | for subgroup in sorted(group._subgroup_names_from_patterns): | |
138 | 138 | yield "\"{}\" -> \"{}\" [color=\"#6BB753\",penwidth=2]".format(group.name, subgroup) |
139 | 139 | |
140 | 140 | if show_nodes: |
141 | 141 | for group in groups: |
142 | for node in group._nodes_from_members: | |
143 | yield "\"{}\" -> \"{}\" [color=\"#D18C57\",penwidth=2]".format( | |
144 | group.name, node.name) | |
145 | ||
146 | for node in group._nodes_from_patterns: | |
147 | yield "\"{}\" -> \"{}\" [color=\"#714D99\",penwidth=2]".format( | |
148 | group.name, node.name) | |
149 | ||
150 | 142 | for node in nodes: |
151 | if group in node._groups_dynamic: | |
152 | yield "\"{}\" -> \"{}\" [color=\"#FF0000\",penwidth=2]".format( | |
143 | if group in set(node._attributes.get('groups', set())): | |
144 | yield "\"{}\" -> \"{}\" [color=\"#D18C57\",penwidth=2]".format( | |
145 | node.name, group.name) | |
146 | elif node in group._nodes_from_members: | |
147 | yield "\"{}\" -> \"{}\" [color=\"#D18C57\",penwidth=2]".format( | |
153 | 148 | group.name, node.name) |
154 | ||
149 | else: | |
150 | for pattern in sorted(group._member_patterns): | |
151 | if pattern.search(node.name) is not None: | |
152 | yield "\"{}\" -> \"{}\" [color=\"#714D99\",penwidth=2]".format( | |
153 | group.name, node.name) | |
154 | break | |
155 | 155 | yield "}" |
156 | 156 | |
157 | 157 | |
168 | 168 | "fontname=Helvetica]") |
169 | 169 | yield "edge [arrowhead=vee]" |
170 | 170 | |
171 | for group in node.groups: | |
171 | for group in sorted(node.groups): | |
172 | 172 | yield "\"{}\" [fontcolor=white,style=filled];".format(group.name) |
173 | 173 | |
174 | 174 | yield "\"{}\" [fontcolor=\"#303030\",shape=box,style=rounded];".format(node.name) |
175 | 175 | |
176 | for group in node.groups: | |
177 | for subgroup in group.immediate_subgroup_names: | |
176 | for group in sorted(node.groups): | |
177 | for subgroup in sorted(group._attributes.get('subgroups', set())): | |
178 | 178 | if subgroup in names(node.groups): |
179 | yield "\"{}\" -> \"{}\" [color=\"#6BB753\",penwidth=2]".format(group.name, subgroup) | |
180 | for pattern in group.immediate_subgroup_patterns: | |
181 | compiled_pattern = re.compile(pattern) | |
182 | for group2 in node.groups: | |
183 | if compiled_pattern.search(group2.name) is not None and group2 != group: | |
184 | yield "\"{}\" -> \"{}\" [color=\"#6BB753\",penwidth=2]".format(group.name, group2.name) | |
185 | ||
186 | for group in node.groups: | |
187 | if node in group._nodes_from_members: | |
179 | yield "\"{}\" -> \"{}\" [color=\"#6BB753\",penwidth=2]".format( | |
180 | group.name, subgroup) | |
181 | for pattern in sorted(group._immediate_subgroup_patterns): | |
182 | for group2 in sorted(node.groups): | |
183 | if pattern.search(group2.name) is not None and group2 != group: | |
184 | yield "\"{}\" -> \"{}\" [color=\"#6BB753\",penwidth=2]".format( | |
185 | group.name, group2.name) | |
186 | ||
187 | if group in node._attributes.get('groups', set()): | |
188 | yield "\"{}\" -> \"{}\" [color=\"#D18C57\",penwidth=2]".format( | |
189 | node.name, group.name) | |
190 | elif node in group._nodes_from_members: | |
188 | 191 | yield "\"{}\" -> \"{}\" [color=\"#D18C57\",penwidth=2]".format( |
189 | 192 | group.name, node.name) |
190 | elif node in group._nodes_from_patterns: | |
191 | yield "\"{}\" -> \"{}\" [color=\"#714D99\",penwidth=2]".format( | |
192 | group.name, node.name) | |
193 | elif group in node._groups_dynamic: | |
194 | yield "\"{}\" -> \"{}\" [color=\"#FF0000\",penwidth=2]".format( | |
195 | group.name, node.name) | |
196 | ||
193 | else: | |
194 | for pattern in sorted(group._member_patterns): | |
195 | if pattern.search(node.name) is not None: | |
196 | yield "\"{}\" -> \"{}\" [color=\"#714D99\",penwidth=2]".format( | |
197 | group.name, node.name) | |
197 | 198 | yield "}" |
198 | 199 | |
199 | 200 |
0 | # -*- coding: utf-8 -*- | |
1 | from __future__ import unicode_literals | |
2 | ||
3 | from pipes import quote | |
0 | from shlex import quote | |
4 | 1 | |
5 | 2 | from . import cached_property |
6 | 3 | from .text import force_text, mark_for_translation as _ |
38 | 35 | return file_stat |
39 | 36 | |
40 | 37 | |
41 | class PathInfo(object): | |
38 | class PathInfo: | |
42 | 39 | """ |
43 | 40 | Serves as a proxy to get_path_type. |
44 | 41 | """ |
58 | 55 | @property |
59 | 56 | def group(self): |
60 | 57 | return self.stat['group'] |
61 | ||
62 | @property | |
63 | def is_binary_file(self): | |
64 | return self.is_file and not self.is_text_file | |
65 | 58 | |
66 | 59 | @property |
67 | 60 | def is_directory(self): |
113 | 106 | # contains backslash-escaped characters – we must lstrip() that |
114 | 107 | return force_text(result.stdout).strip().lstrip("\\").split()[0] |
115 | 108 | |
116 | @cached_property | |
117 | def sha256(self): | |
118 | if self.node.os == 'macos': | |
119 | result = self.node.run("shasum -a 256 -- {}".format(quote(self.path))) | |
120 | elif self.node.os in self.node.OS_FAMILY_BSD: | |
121 | result = self.node.run("sha256 -q -- {}".format(quote(self.path))) | |
122 | else: | |
123 | result = self.node.run("sha256sum -- {}".format(quote(self.path))) | |
124 | return force_text(result.stdout).strip().split()[0] | |
125 | ||
126 | 109 | @property |
127 | 110 | def size(self): |
128 | 111 | return self.stat['size'] |
0 | # -*- coding: utf-8 -*- | |
1 | from __future__ import unicode_literals | |
2 | ||
3 | from pipes import quote | |
0 | from shlex import quote | |
4 | 1 | from subprocess import CalledProcessError, check_output, STDOUT |
5 | 2 | |
6 | 3 | from .text import mark_for_translation as _ |
0 | # -*- coding: utf-8 -*- | |
1 | from __future__ import unicode_literals | |
2 | ||
3 | 0 | from os import environ |
4 | 1 | |
5 | 2 | from .text import ansi_clean |
78 | 75 | return result |
79 | 76 | |
80 | 77 | |
81 | def _border_center(column_widths): | |
78 | def _border_center(column_widths): # FIXME unused? | |
82 | 79 | result = FRAME_CENTER_LEFT |
83 | 80 | result += FRAME_CENTER_COLUMN_SEPARATOR.join( |
84 | 81 | [FRAME_COLUMN_FILLER * width for width in column_widths] |
0 | # -*- coding: utf-8 -*- | |
1 | from __future__ import unicode_literals | |
2 | ||
3 | 0 | import platform |
4 | 1 | from subprocess import Popen, PIPE |
5 | 2 | |
34 | 31 | |
35 | 32 | tmpdir.mkdir("data") |
36 | 33 | tmpdir.mkdir("hooks") |
34 | tmpdir.mkdir("libs") | |
37 | 35 | |
38 | 36 | groupspy = tmpdir.join("groups.py") |
39 | groupspy.write("# -*- coding: utf-8 -*-\ngroups = {}\n".format(repr(groups))) | |
37 | groupspy.write("groups = {}\n".format(repr(groups))) | |
40 | 38 | |
41 | 39 | nodespy = tmpdir.join("nodes.py") |
42 | nodespy.write("# -*- coding: utf-8 -*-\nnodes = {}\n".format(repr(nodes))) | |
40 | nodespy.write("nodes = {}\n".format(repr(nodes))) | |
43 | 41 | |
44 | 42 | secrets = tmpdir.join(FILENAME_SECRETS) |
45 | 43 | secrets.write("[generate]\nkey = {}\n\n[encrypt]\nkey = {}\n".format( |
0 | # -*- coding: utf-8 -*- | |
1 | from __future__ import unicode_literals | |
2 | ||
3 | 0 | from datetime import datetime, timedelta |
4 | 1 | from io import BytesIO |
5 | 2 | from os import environ |
44 | 41 | |
45 | 42 | |
46 | 43 | @ansi_wrapper |
47 | def inverse(text): | |
48 | return "\033[0m\033[7m{}\033[0m".format(text) | |
49 | ||
50 | ||
51 | @ansi_wrapper | |
52 | 44 | def italic(text): |
53 | 45 | return "\033[3m{}\033[0m".format(text) |
54 | 46 | |
121 | 113 | def force_text(data): |
122 | 114 | """ |
123 | 115 | Try to return a text aka unicode object from the given data. |
124 | Also has Python 2/3 compatibility baked in. Oh the humanity. | |
125 | 116 | """ |
126 | 117 | if isinstance(data, bytes): |
127 | 118 | return data.decode('utf-8', 'replace') |
186 | 177 | return output |
187 | 178 | |
188 | 179 | |
189 | class LineBuffer(object): | |
180 | class LineBuffer: | |
190 | 181 | def __init__(self, target): |
191 | 182 | self.buffer = b"" |
192 | 183 | self.record = BytesIO() |
257 | 248 | else: |
258 | 249 | raise ValueError(_("{} is not a valid duration string").format(repr(duration))) |
259 | 250 | return timedelta(days=days, seconds=seconds) |
251 | ||
252 | ||
253 | def toml_clean(s): | |
254 | """ | |
255 | Removes duplicate sections from TOML, e.g.: | |
256 | ||
257 | [foo] <--- this line will be removed since it's redundant | |
258 | [foo.bar] | |
259 | baz = 1 | |
260 | """ | |
261 | lines = list(s.splitlines()) | |
262 | result = [] | |
263 | previous = "" | |
264 | for line in lines.copy(): | |
265 | if line.startswith("[") and line.endswith("]"): | |
266 | if line[1:].startswith(previous + "."): | |
267 | result.pop() | |
268 | previous = line[1:-1] | |
269 | else: | |
270 | previous = "" | |
271 | result.append(line) | |
272 | return "\n".join(result) + "\n" |
0 | # -*- coding: utf-8 -*- | |
1 | from __future__ import unicode_literals | |
2 | ||
3 | 0 | from contextlib import contextmanager |
4 | 1 | from datetime import datetime |
5 | from errno import EPIPE | |
6 | 2 | import fcntl |
7 | 3 | from functools import wraps |
8 | 4 | from os import _exit, environ, getpid, kill |
36 | 32 | TTY = STDOUT_WRITER.isatty() |
37 | 33 | |
38 | 34 | |
39 | if sys.version_info >= (3, 0): | |
40 | broken_pipe_exception = BrokenPipeError | |
41 | else: | |
42 | broken_pipe_exception = IOError | |
43 | ||
44 | ||
45 | 35 | def add_debug_indicator(f): |
46 | 36 | @wraps(f) |
47 | 37 | def wrapped(self, msg, **kwargs): |
125 | 115 | View the given list of Unicode lines in a pager (e.g. `less`). |
126 | 116 | """ |
127 | 117 | lines = list(lines) |
128 | line_width = max([len(ansi_clean(line)) for line in lines]) | |
129 | if TTY and line_width > term_width(): | |
130 | pager = Popen([environ.get("PAGER", "/usr/bin/less")], stdin=PIPE) | |
118 | if TTY: | |
119 | write_to_stream(STDOUT_WRITER, SHOW_CURSOR) | |
120 | env = environ.copy() | |
121 | env["LESS"] = env.get("LESS", "") + " -FR" | |
122 | pager = Popen( | |
123 | [environ.get("PAGER", "/usr/bin/less")], | |
124 | env=env, | |
125 | stdin=PIPE, | |
126 | ) | |
131 | 127 | pager.stdin.write("\n".join(lines).encode('utf-8')) |
132 | 128 | pager.stdin.close() |
133 | 129 | pager.communicate() |
130 | write_to_stream(STDOUT_WRITER, HIDE_CURSOR) | |
134 | 131 | else: |
135 | 132 | for line in lines: |
136 | 133 | io.stdout(line) |
143 | 140 | else: |
144 | 141 | stream.write(ansi_clean(msg)) |
145 | 142 | stream.flush() |
146 | except broken_pipe_exception as e: | |
147 | if broken_pipe_exception == IOError: | |
148 | if e.errno != EPIPE: | |
149 | raise | |
150 | ||
151 | ||
152 | class DrainableStdin(object): | |
143 | except BrokenPipeError: | |
144 | pass | |
145 | ||
146 | ||
147 | class DrainableStdin: | |
153 | 148 | def get_input(self): |
154 | 149 | while True: |
155 | 150 | if QUIT_EVENT.is_set(): |
162 | 157 | termios.tcflush(sys.stdin, termios.TCIFLUSH) |
163 | 158 | |
164 | 159 | |
165 | class IOManager(object): | |
160 | class IOManager: | |
166 | 161 | """ |
167 | 162 | Threadsafe singleton class that handles all IO. |
168 | 163 | """ |
0 | bundlewrap (4.0.0-1) unstable; urgency=medium | |
1 | ||
2 | bundlewrap 4.0.0 makes several backwards-incompatible changes, and depending | |
3 | on the features you use, you might have to update your repository. | |
4 | ||
5 | For full details, see the upstream changelog, which is | |
6 | available at: | |
7 | ||
8 | /usr/share/doc/bundlewrap/changelog.gz | |
9 | ||
10 | A full guide on how to migrate is available at: | |
11 | ||
12 | https://docs.bundlewrap.org/guide/migrate_34/ | |
13 | ||
14 | -- Jonathan Carter <jcc@debian.org> Sun, 19 Jul 2020 19:22:41 +0200 |
0 | bundlewrap (4.0.0-1) unstable; urgency=medium | |
1 | ||
2 | * New upstream release | |
3 | * Add python3-tomlkit to build-dependencies (needed for tests) | |
4 | ||
5 | -- Jonathan Carter <jcc@debian.org> Tue, 30 Jun 2020 20:38:26 +0200 | |
6 | ||
0 | 7 | bundlewrap (3.10.0-1) unstable; urgency=medium |
1 | 8 | |
2 | 9 | * New upstream release |
119 | 119 | Step 3: Implement methods |
120 | 120 | ------------------------- |
121 | 121 | |
122 | You should probably start with `sdict()`. Use `self.node.run("command")` to run shell commands on the current node and check the `stdout` property of the returned object. | |
122 | You should probably start with `sdict()`. Use `self.run("command")` to run shell commands on the current node and check the `stdout` property of the returned object. | |
123 | 123 | |
124 | The only other method you have to implement is `fix`. It doesn't have to return anything and just uses `self.node.run()` to fix the item. To do this efficiently, it may use the provided parameters indicating which keys differ between the should-be sdict and the actual one. Both sdicts are also provided in case you need to know their values. | |
124 | The only other method you have to implement is `fix`. It doesn't have to return anything and just uses `self.run()` to fix the item. To do this efficiently, it may use the provided parameters indicating which keys differ between the should-be sdict and the actual one. Both sdicts are also provided in case you need to know their values. | |
125 | 125 | |
126 | 126 | `block_concurrent()` must return a list of item types (e.g. `['pkg_apt']`) that cannot be applied in parallel with this type of item. May include this very item type itself. For most items this is not an issue (e.g. creating multiple files at the same time), but some types of items have to be applied sequentially (e.g. package managers usually employ locks to ensure only one package is installed at a time). |
127 | 127 |
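To make the methods above more concrete, here is a minimal, hypothetical item type. Every name in it (`FooMarker`, `foo_markers`, the `content` attribute) is invented purely for illustration, and the sketch glosses over details such as reporting nonexistent files; treat it as a rough outline of the interface described on this page, not a reference implementation.

    # items/foo_marker.py -- illustrative sketch only
    from shlex import quote

    from bundlewrap.items import Item


    class FooMarker(Item):
        """
        Manages a single-line marker file on the node.
        """
        BUNDLE_ATTRIBUTE_NAME = "foo_markers"
        ITEM_TYPE_NAME = "foo_marker"
        ITEM_ATTRIBUTES = {
            'content': "",
        }

        def sdict(self):
            # probe the node for the actual state of this item
            result = self.run("cat {}".format(quote(self.name)))
            return {'content': result.stdout.decode('utf-8').strip()}

        def fix(self, status):
            # rewrite the file; for items with more than one attribute,
            # the status parameter indicates which keys actually differ
            self.run("echo {} > {}".format(
                quote(self.attributes['content']),
                quote(self.name),
            ))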
0 | # Writing your own plugins | |
1 | ||
2 | [Plugins](../repo/plugins.md) can provide almost any file in a BundleWrap repository: bundles, custom items, hooks, libs, etc. | |
3 | ||
4 | Notable exceptions are `nodes.py` and `groups.py`. If your plugin wants to extend those, use a [lib](../repo/libs.md) instead and ask users to add the result of a function call in your lib to their nodes or groups dicts. | |
5 | ||
6 | <div class="alert alert-warning">If your plugin depends on other libraries, make sure that it catches ImportErrors in a way that makes it obvious for the user what's missing. Keep in mind that people will often just <code>git pull</code> their repo and not install your plugin themselves.</div> | |
7 | ||
8 | <br> | |
9 | ||
10 | ## Starting a new plugin | |
11 | ||
12 | ### Step 1: Clone the plugins repo | |
13 | ||
14 | Create a clone of the [official plugins repo](https://github.com/bundlewrap/plugins) on GitHub. | |
15 | ||
16 | ### Step 2: Create a branch | |
17 | ||
18 | You should work on a branch specific to your plugin. | |
19 | ||
20 | ### Step 3: Copy your plugin files | |
21 | ||
22 | Now take the files that make up your plugin and move them into a subfolder of the plugins repo. The subfolder must be named after your plugin. | |
23 | ||
24 | ### Step 4: Create required files | |
25 | ||
26 | In your plugin subfolder, create a file called `manifest.json` from this template: | |
27 | ||
28 | { | |
29 | "desc": "Concise description (keep it somewhere around 80 characters)", | |
30 | "help": "Optional verbose help text to be displayed after installing. May\ninclude\nnewlines.", | |
31 | "provides": [ | |
32 | "bundles/example/items.py", | |
33 | "hooks/example.py" | |
34 | ], | |
35 | "version": 1 | |
36 | } | |
37 | ||
38 | The `provides` section must contain a list of all files provided by your plugin. | |
39 | ||
40 | You also have to create an `AUTHORS` file containing your name and email address. | |
41 | ||
42 | Last but not least, we require a `LICENSE` file with an OSI-approved Free Software license. | |
43 | ||
44 | ### Step 5: Update the plugin index | |
45 | ||
46 | Run the `update_index.py` script at the root of the plugins repo. | |
47 | ||
48 | ### Step 6: Run tests | |
49 | ||
50 | Run the `test.py` script at the root of the plugins repo. It will tell you if there is anything wrong with your plugin. | |
51 | ||
52 | ### Step 7: Commit | |
53 | ||
54 | Commit all changes to your branch. | |
55 | ||
56 | ### Step 8: Create pull request | |
57 | ||
58 | Create a pull request on GitHub to request inclusion of your new plugin in the official repo. Once your branch is merged, your plugin will become available to be installed by `bw repo plugin install yourplugin` and appear on [plugins.bundlewrap.org](http://plugins.bundlewrap.org). | |
59 | ||
60 | <br> | |
61 | ||
62 | ## Updating an existing plugin | |
63 | ||
64 | To release a new version of your plugin: | |
65 | ||
66 | * Increase the version number in `manifest.json` | |
67 | * Update the list of provided files in `manifest.json` | |
68 | * If you're updating someone else's plugin, you should get their consent and add your name to `AUTHORS` | |
69 | ||
70 | Then just follow the instructions above from step 5 onward. |
62 | 62 | * `dpkg` (only used with [pkg_apt](../items/pkg_apt.md) items) |
63 | 63 | * `echo` |
64 | 64 | * `file` |
65 | * `find` (only used with [directory purging](../items/directory.md#purge)) | |
65 | * `find` | |
66 | 66 | * `grep` |
67 | 67 | * `groupadd` |
68 | 68 | * `groupmod` |
77 | 77 | * `sha1sum` |
78 | 78 | * `stat` |
79 | 79 | * `systemctl` (only used with [svc_systemd](../items/svc_systemd.md) items) |
80 | * `tar` (only used with [git_deploy](../items/git_deploy.md) items) | |
80 | 81 | * `useradd` |
81 | 82 | * `usermod` |
82 | 83 |
65 | 65 | }, |
66 | 66 | } |
67 | 67 | |
68 | All item names (except namespaces themselves) must be prefixed with the name of a namespace and a forward slash `/`. Note that BundleWrap will include defaults for the `apiVersion`, `Kind`, and `metadata/name` keys, but you can override them if you must. | |
68 | All item names (except namespaces themselves) must be prefixed with the name of a namespace and a forward slash `/`. Note that BundleWrap will include defaults for the `Kind` and `metadata/name` keys, but you can override them if you must. | |
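For example (the namespace and item names are hypothetical):

    k8s_configmaps = {
        "my-app/my-config": {  # namespace "my-app", resource name "my-config"
            'manifest': {
                'data': {'foo': "bar"},
            },
        },
    }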
69 | 69 | |
70 | 70 | Alternatively, you can keep your resource definitions in manifest files: |
71 | 71 |
29 | 29 | ✓ node1 lock Y1KD removed</code></pre> |
30 | 30 | |
31 | 31 | Expired locks are automatically and silently purged whenever BundleWrap has the opportunity. Be sure to check out `bw lock add --help` for how to customize the expiration time, add a short comment explaining the reason for the lock, or lock only certain items. Using `bw apply` on a soft-locked node is not an error; affected items will simply be skipped.
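For example, a customized lock might look like this (all values are illustrative; the options are described in `bw lock add --help`):

<pre><code class="nohighlight">$ bw lock add -e 2h -c "kernel upgrade in progress" -i file:/etc/fstab -- node1</code></pre>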
32 | ||
33 | ## Locking non-UNIX nodes | |
34 | ||
35 | Most of the time, BundleWrap assumes that your target system is a UNIX-like operating system. It then stores locks as files in the node's local file system. | |
36 | ||
37 | BundleWrap supports managing non-UNIX nodes, too, such as Kubernetes. You can also write your own custom item types to manage hardware. In those situations, BundleWrap has no place to store lock files. | |
38 | ||
39 | You can solve this by designating another regular UNIX node as a "locking node": | |
40 | ||
41 | <pre><code class="nohighlight">nodes['my.k8s.cluster'] = { | |
42 | 'locking_node': 'my.openbsd.box', | |
43 | 'os': 'kubernetes', | |
44 | 'metadata': { | |
45 | ... | |
46 | }, | |
47 | }</code></pre> | |
48 | ||
49 | `my.openbsd.box` is the name of another regular node, which must be managed by BundleWrap. You can now use all the usual locking mechanisms when working with `my.k8s.cluster`, and its locks will be stored on `my.openbsd.box`. (They will, of course, not conflict with regular locks for `my.openbsd.box`.) | |
50 | ||
51 | A locking node can host locks for as many other nodes as you wish. |
0 | # Migrating from BundleWrap 3.x to 4.x | |
1 | ||
2 | As per [semver](http://semver.org), BundleWrap 4.0 breaks compatibility with repositories created for BundleWrap 3.x. This document provides a guide on how to upgrade your repositories to BundleWrap 4.x. Please read the entire document before proceeding. | |
3 | ||
4 | <br> | |
5 | ||
6 | ## metadata.py | |
7 | ||
8 | Metadata processors have been split into defaults and reactors. See [metadata.py](../repo/metadata.py.md) for details. | |
9 | ||
10 | Generally speaking, metadata processors that returned `DONE, DEFAULTS` can be turned into defaults. | |
11 | ||
12 | @metadata_processor | |
13 | def foo(metadata): | |
14 | return {"bar": 47} | |
15 | ||
16 | becomes | |
17 | ||
18 | defaults = { | |
19 | "bar": 47, | |
20 | } | |
21 | ||
22 | Metadata processors that return `OVERWRITE, RUN_ME_AGAIN` or otherwise depend on other metadata need to be turned into reactors: | |
23 | ||
24 | @metadata_processor | |
25 | def foo(metadata): | |
26 | metadata["bar"] = metadata["baz"] + 5 | |
27 | return metadata, OVERWRITE, RUN_ME_AGAIN | |
28 | ||
29 | becomes | |
30 | ||
31 | @metadata_reactor | |
32 | def foo(metadata): | |
33 | return { | |
34 | "bar": metadata.get("baz") + 5, | |
35 | } | |
36 | ||
37 | <br> | |
38 | ||
39 | ## members_add and members_remove | |
40 | ||
41 | These must be replaced by other mechanisms, such as the newly available `groups` attribute on individual nodes. Also note that you can now do `bw apply 'lambda:node.metadata["env"] == "prod"'`, so you may no longer need to create groups based on metadata. | |
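A minimal sketch of the node-level replacement (node and group names are hypothetical):

    nodes['node1'] = {
        'groups': {"group1"},  # replaces a members_add targeting this node
        ...
    }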
42 | ||
43 | <br> | |
44 | ||
45 | ## Plugins | |
46 | ||
47 | The plugin system has been removed since it saw barely any use. The most popular plugin, the `git_deploy` item, is now built into BundleWrap itself. | |
48 | ||
49 | rm plugins.json | |
50 | rm items/git_deploy.py | |
51 | ||
52 | <br> | |
53 | ||
54 | ## Command line argument parsing | |
55 | ||
56 | Previously, `bw` used a comma-separated syntax to specify targets for certain actions such as `bw apply`. We now use a space-separated style: | |
57 | ||
58 | bw apply node1,node2 | |
59 | ||
60 | becomes | |
61 | ||
62 | bw apply node1 node2 | |
63 | ||
64 | This may appear trivial, but it can confuse people who are not used to options that take multiple space-separated values on the command line. | |
65 | ||
66 | bw nodes -a all node1 | |
67 | ||
68 | becomes | |
69 | ||
70 | bw nodes -a all -- node1 | |
71 | ||
72 | The `--` is necessary so we can tell when the argument list for `-a` ends. Here is another example: | |
73 | ||
74 | bw nodes -a hostname,bundles node1,node2 | |
75 | ||
76 | becomes | |
77 | ||
78 | bw nodes -a hostname bundles -- node1 node2 | |
79 | ||
80 | While a little more verbose, this style lets us use proper shell quoting for argument tokens. | |
81 | ||
82 | <br> | |
83 | ||
84 | ## Minor changes | |
85 | ||
86 | For everything else, please consult the [changelog](https://github.com/bundlewrap/bundlewrap/blob/master/CHANGELOG.md#400). |
0 | # TOML nodes and groups | |
1 | ||
2 | The primary way to define nodes is in [nodes.py](../repo/nodes.py.md). However, BundleWrap also provides a built-in alternative that you can use to define each node in a [TOML](https://github.com/toml-lang/toml) file. Doing this has pros and cons, which is why you can choose which way is best for you. | |
3 | ||
4 | *Pros* | |
5 | ||
6 | * One file per node | |
7 | * Node files are machine-readable and -writeable | |
8 | * Easier on the eyes for nodes with simple metadata | |
9 | ||
10 | *Cons* | |
11 | ||
12 | * Does not support [Fault objects](../api/#bundlewraputilsfault) | |
13 | * Does not support [atomic()](../repo/groups.py.md#metadata) | |
14 | * Does not support `None` | |
15 | * Does not support sets or tuples | |
16 | * More difficult to read for long, deeply nested metadata | |
17 | ||
18 | <br> | |
19 | ||
20 | ## Using TOML nodes | |
21 | ||
22 | First, you have to make sure your `nodes.py` doesn't overwrite your TOML nodes. Check if your `nodes.py` overwrites the `nodes` dict: | |
23 | ||
24 | nodes = { # bad | |
25 | "my_node": {...}, | |
26 | } | |
27 | ||
28 | TOML nodes will be added to the `nodes.py` context automatically, so change your `nodes.py` to add to them (or just leave the file empty): | |
29 | ||
30 | nodes["my_node"] = { # good | |
31 | ... | |
32 | } | |
33 | ||
34 | Now you are all set to create your first TOML node. Create a file called `nodes/nodenamegoeshere.toml`: | |
35 | ||
36 | hostname = "tomlnode.example.com" | |
37 | bundles = [ | |
38 | "bundle1", | |
39 | "bundle2", | |
40 | ] | |
41 | ||
42 | [metadata] | |
43 | foo = "bar" | |
44 | ||
45 | [metadata.baz] | |
46 | frob = 47 | |
47 | ||
48 | And that's it. This node will now be added to your other nodes. You may use subdirectories of `nodes/`, but the node name will always just be the filename minus the ".toml" extension. | |
49 | ||
50 | <br> | |
51 | ||
52 | ## Converting existing nodes | |
53 | ||
54 | This is an easy one-line operation: | |
55 | ||
56 | bw debug -n nodenamegoeshere -c "node.toml_save()" | |
57 | ||
58 | Don't forget to remove the original node, though. | |
59 | ||
60 | <br> | |
61 | ||
62 | ## Editing TOML nodes from Python | |
63 | ||
64 | BundleWrap uses [tomlkit](https://github.com/sdispater/tomlkit) internally and exposes a `TOMLDocument` instance as `node.toml` for you to modify: | |
65 | ||
66 | $ bw debug -n nodenamegoeshere | |
67 | >>> node.file_path | |
68 | nodes/nodenamegoeshere.toml | |
69 | >>> node.toml['bundles'].append("bundle3") | |
70 | >>> node.toml_save() | |
71 | ||
72 | For your convenience, `.toml_set()` is also provided to easily set nested dict values: | |
73 | ||
74 | >>> node.toml_set("metadata/foo/bar/baz", 47) | |
75 | >>> node.toml_save() | |
76 | ||
77 | This should make it straightforward to change lots of nodes without resorting to `sed` or similar tools to edit the Python code in `nodes.py`. | |
78 | ||
79 | <br> | |
80 | ||
81 | ## TOML groups | |
82 | ||
83 | They work exactly the same way as nodes, but have their own `groups/` directory. `.toml`, `.toml_set()`, and `.toml_save()` are also available on `Group` objects.
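A minimal group file might look like this (e.g. `groups/mygroup.toml`; the attribute values are illustrative):

    member_patterns = [
        "^cluster1\\.",
    ]

    [metadata]
    foo = "bar"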
27 | 27 | <a href="/repo/libs">libs/</a> |
28 | 28 | <a href="/guide/secrets">.secrets.cfg</a> |
29 | 29 | <a href="/repo/groups.py">groups.py</a> |
30 | nodes/ | |
31 | <a href="/guide/toml">nodename.toml</a> | |
30 | 32 | <a href="/repo/nodes.py">nodes.py</a> |
31 | <a href="/repo/plugins">plugins.json</a> | |
32 | 33 | <a href="/repo/requirements.txt">requirements.txt</a> |
33 | 34 | </div> |
0 | # Deploying from git | |
1 | ||
2 | The `git_deploy` item lets you deploy the *contents* of a git repository to a node, without requiring the node to have access to that repository or exposing the `.git/` directory to the node. | |
3 | ||
4 | directories = { | |
5 | # git_deploy will not create this by itself | |
6 | "/var/tmp/example": {}, | |
7 | } | |
8 | ||
9 | git_deploy = { | |
10 | "/var/tmp/example": { | |
11 | 'repo': "example", | |
12 | 'rev': "master", | |
13 | 'use_xattrs': True, | |
14 | }, | |
15 | } | |
16 | ||
17 | `git_deploy` items will only upload a tarball with the data from the git repo; no part of the git history is leaked to the node. | |
18 | ||
19 | Requires git to be installed on the machine running BundleWrap. | |
20 | ||
21 | <br> | |
22 | ||
23 | # git_deploy_repos | |
24 | ||
25 | Put this in a file called `git_deploy_repos` in your repository root: | |
26 | ||
27 | example: /Users/jdoe/Projects/example | |
28 | ||
29 | This file should also be added to your `.gitignore` if you are sharing that repo with a team. Each team member must provide a mapping of the repo name used in the bundle ("example" in this case) to a local filesystem path with a git repository. It is each user's responsibility to make sure the clone in that location is up to date. | |
30 | ||
31 | <br> | |
32 | ||
33 | # Attribute reference | |
34 | ||
35 | See also: [The list of generic builtin item attributes](../repo/items.py.md#builtin-item-attributes) | |
36 | ||
37 | <hr> | |
38 | ||
39 | ## repo | |
40 | ||
41 | The short name of a repo as it appears in `git_deploy_repos`. | |
42 | ||
43 | Alternatively, it can point directly to a git URL: | |
44 | ||
45 | git_deploy = { | |
46 | "/var/tmp/example": { | |
47 | 'repo': "https://github.com/bundlewrap/bundlewrap.git", | |
48 | [...] | |
49 | }, | |
50 | } | |
51 | ||
52 | Note, however, that this has a severe performance penalty, as a new clone of that repo has to be made every time the status of the item is checked. | |
53 | ||
54 | <br> | |
55 | ||
56 | ## rev | |
57 | ||
58 | The `rev` attribute can contain anything `git rev-parse` can resolve into a commit hash (branch names, tags, first few characters of full commit hash). Note that you should probably use tags here. *Never* use HEAD (use a branch name like 'master' instead). | |
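For example, pinning the earlier bundle to a tag (the tag name is hypothetical):

    git_deploy = {
        "/var/tmp/example": {
            'repo': "example",
            'rev': "v1.2.3",  # a tag, resolved via git rev-parse
            'use_xattrs': True,
        },
    }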
59 | ||
60 | <br> | |
61 | ||
62 | ## use_xattrs | |
63 | ||
64 | BundleWrap needs to store the deployed commit hash on the node. The `use_xattrs` attribute controls how this is done. If set to `True`, the `attr` command on the node is used to store the hash as an extended file system attribute. Since `attr` might not be installed on the node, the default is to place a dotfile in the target directory instead (keep that in mind when deploying websites etc.). |
8 | 8 | Manage resources in Kubernetes clusters. |
9 | 9 | |
10 | 10 | k8s_namespaces = { |
11 | "my-app": {}, | |
11 | "my-app": { | |
12 | 'manifest': { | |
13 | 'apiVersion': "v1", | |
14 | }, | |
15 | }, | |
12 | 16 | "my-previous-app": {'delete': True}, |
13 | 17 | } |
14 | 18 | |
27 | 31 | ## Resource types |
28 | 32 | |
29 | 33 | <table> |
30 | <tr><th>Resource type</th><th>Bundle attribute</th><th>apiVersion</th></tr> | |
31 | <tr><td>Cluster Role</td><td>k8s_clusterroles</td><td>rbac.authorization.k8s.io/v1</td></tr> | |
32 | <tr><td>Cluster Role Binding</td><td>k8s_clusterrolebindings</td><td>rbac.authorization.k8s.io/v1</td></tr> | |
33 | <tr><td>Config Map</td><td>k8s_configmaps</td><td>v1</td></tr> | |
34 | <tr><td>Cron Job</td><td>k8s_cronjobs</td><td>batch/v1beta1</td></tr> | |
35 | <tr><td>Custom Resource Definition</td><td>k8s_crd</td><td>apiextensions.k8s.io/v1</td></tr> | |
36 | <tr><td>Daemon Set</td><td>k8s_daemonsets</td><td>apps/v1</td></tr> | |
37 | <tr><td>Deployment</td><td>k8s_deployments</td><td>apps/v1</td></tr> | |
38 | <tr><td>Ingress</td><td>k8s_ingresses</td><td>networking.k8s.io/v1beta1</td></tr> | |
39 | <tr><td>Namespace</td><td>k8s_namespaces</td><td>v1</td></tr> | |
40 | <tr><td>Network Policy</td><td>k8s_networkpolicies</td><td>networking.k8s.io/v1</td></tr> | |
41 | <tr><td>Persistent Volume Claim</td><td>k8s_pvc</td><td>v1</td></tr> | |
42 | <tr><td>Role</td><td>k8s_roles</td><td>rbac.authorization.k8s.io/v1</td></tr> | |
43 | <tr><td>Role Binding</td><td>k8s_rolebindings</td><td>rbac.authorization.k8s.io/v1</td></tr> | |
44 | <tr><td>Service</td><td>k8s_services</td><td>v1</td></tr> | |
45 | <tr><td>Service Account</td><td>k8s_serviceaccounts</td><td>v1</td></tr> | |
46 | <tr><td>Secret</td><td>k8s_secrets</td><td>v1</td></tr> | |
47 | <tr><td>StatefulSet</td><td>k8s_statefulsets</td><td>apps/v1</td></tr> | |
48 | <tr><td>(any)</td><td>k8s_raw</td><td>(any)</td></tr> | |
34 | <tr><th>Resource type</th><th>Bundle attribute</th></tr> | |
35 | <tr><td>Cluster Role</td><td>k8s_clusterroles</td></tr> | |
36 | <tr><td>Cluster Role Binding</td><td>k8s_clusterrolebindings</td></tr> | |
37 | <tr><td>Config Map</td><td>k8s_configmaps</td></tr> | |
38 | <tr><td>Cron Job</td><td>k8s_cronjobs</td></tr> | |
39 | <tr><td>Custom Resource Definition</td><td>k8s_crd</td></tr> | |
40 | <tr><td>Daemon Set</td><td>k8s_daemonsets</td></tr> | |
41 | <tr><td>Deployment</td><td>k8s_deployments</td></tr> | |
42 | <tr><td>Ingress</td><td>k8s_ingresses</td></tr> | |
43 | <tr><td>Namespace</td><td>k8s_namespaces</td></tr> | |
44 | <tr><td>Network Policy</td><td>k8s_networkpolicies</td></tr> | |
45 | <tr><td>Persistent Volume Claim</td><td>k8s_pvc</td></tr> | |
46 | <tr><td>Role</td><td>k8s_roles</td></tr> | |
47 | <tr><td>Role Binding</td><td>k8s_rolebindings</td></tr> | |
48 | <tr><td>Service</td><td>k8s_services</td></tr> | |
49 | <tr><td>Service Account</td><td>k8s_serviceaccounts</td></tr> | |
50 | <tr><td>Secret</td><td>k8s_secrets</td></tr> | |
51 | <tr><td>StatefulSet</td><td>k8s_statefulsets</td></tr> | |
52 | <tr><td>(any)</td><td>k8s_raw</td></tr> | |
49 | 53 | </table> |
50 | 54 | |
51 | 55 | You can define [Custom Resources](https://kubernetes.io/docs/concepts/api-extension/custom-resources/) like this: |
53 | 57 | k8s_crd = { |
54 | 58 | "custom-thing": { |
55 | 59 | 'manifest': { |
60 | 'apiVersion': "apiextensions.k8s.io/v1beta1", | |
56 | 61 | 'spec': { |
57 | 62 | 'names': { |
58 | 63 | 'kind': "CustomThing", |
13 | 13 | |
14 | 14 | ## Contributing code |
15 | 15 | |
16 | <div class="alert alert-info">Before working on new features, try reaching out to one of the core authors first. We are very concerned with keeping BundleWrap lean and not introducing bloat. If your idea is not a good fit for all or most BundleWrap users, it can still be included <a href="../dev_plugins">as a plugin</a>.</div> | |
16 | <div class="alert alert-info">Before working on new features, try reaching out to one of the core authors first. We are very concerned with keeping BundleWrap lean and not introducing bloat.</div> | |
17 | 17 | |
18 | 18 | Here are the steps: |
19 | 19 | |
22 | 22 | 3. Same goes for documentation. |
23 | 23 | 4. Set up a [virtualenv](http://virtualenv.readthedocs.org/en/latest/) and run `pip install -r requirements.txt`. |
24 | 24 | 5. Make sure you can connect to your localhost via `ssh` without using a password and that you are able to run `sudo`. |
25 | 6. Run `py.test`. | |
25 | 6. Run `py.test tests/`. | |
26 | 26 | 7. Review and sign the Copyright Assignment Agreement (CAA) by adding your name and email to the `AUTHORS` file. (This step can be skipped if your contribution is too small to be considered intellectual property, e.g. spelling fixes) |
27 | 27 | 8. Open a pull request on [GitHub](https://github.com/bundlewrap/bundlewrap). |
28 | 28 | 9. Feel great. Thank you. |
33 | 33 | |
34 | 34 | ### Is BundleWrap secure? |
35 | 35 | |
36 | BundleWrap is more concerned with safety than security. Due to its design, it is possible for your coworkers to introduce malicious code into a BundleWrap repository that could compromise your machine. You should only use trusted repositories and plugins. We also recommend following commit logs to your repos. | |
36 | BundleWrap is more concerned with safety than security. Due to its design, it is possible for your coworkers to introduce malicious code into a BundleWrap repository that could compromise your machine. You should only use trusted repositories and code. We also recommend following commit logs to your repos. | |
37 | 37 | |
38 | 38 | <br> |
39 | 39 |
25 | 25 | This section is a reference for all possible attributes you can define for a group: |
26 | 26 | |
27 | 27 | groups = { |
28 | 'group1': { | |
29 | # THIS PART IS EXPLAINED HERE | |
30 | 'bundles': ["bundle1", "bundle2"], | |
31 | 'members': ["node1"], | |
32 | 'members_add': lambda node: node.os == 'debian', | |
33 | 'members_remove': lambda node: node.os == 'ubuntu', | |
34 | 'member_patterns': [r"^cluster1\."], | |
35 | 'metadata': {'foo': "bar"}, | |
36 | 'os': 'linux', | |
37 | 'subgroups': ["group2", "group3"], | |
38 | 'subgroup_patterns': [r"^group.*pattern$"], | |
39 | }, | |
28 | 'group1': { | |
29 | # THIS PART IS EXPLAINED HERE | |
30 | 'bundles': ["bundle1", "bundle2"], | |
31 | 'members': ["node1"], | |
32 | 'member_patterns': [r"^cluster1\."], | |
33 | 'metadata': {'foo': "bar"}, | |
34 | 'os': 'linux', | |
35 | 'subgroups': ["group2", "group3"], | |
36 | 'subgroup_patterns': [r"^group.*pattern$"], | |
37 | }, | |
40 | 38 | } |
41 | 39 | |
42 | 40 | Note that many attributes from [nodes.py](nodes.py.md) (e.g. `bundles`) may also be set at group level, but aren't explicitly documented here again. |
54 | 52 | ## members |
55 | 53 | |
56 | 54 | A tuple or list of node names that belong to this group. |
57 | ||
58 | <br> | |
59 | ||
60 | ## members_add and members_remove | |
61 | ||
62 | For these attributes you can provide a function that takes a node object as its only argument. The function must return a boolean. The function will be called once for every node in the repo. If `True`, this node will be added (`members_add`) to or removed (`members_remove`) from this group. | |
63 | ||
64 | <div class="alert alert-warning">Inside your function you may query node attributes and groups, but you will not see groups or attributes added as a result of a different <code>members_add</code> / <code>members_remove</code> function. Only attributes and groups that have been set statically will be available. You can, however, remove a node with <code>members_remove</code> that you added with <code>members_add</code> (but not vice-versa).<br>You should also avoid using <code>node.metadata</code> here. Since metadata ultimately depends on group memberships, only metadata set in <code>nodes.py</code> will be returned here.</div> | |
65 | 55 | |
66 | 56 | <br> |
67 | 57 |
38 | 38 | <tr><td><a href="../../items/action">action</a></td><td><code>actions</code></td><td>Actions allow you to run commands on every <code>bw apply</code></td></tr> |
39 | 39 | <tr><td><a href="../../items/directory">directory</a></td><td><code>directories</code></td><td>Manages permissions and ownership for directories</td></tr> |
40 | 40 | <tr><td><a href="../../items/file">file</a></td><td><code>files</code></td><td>Manages contents, permissions, and ownership for files</td></tr> |
41 | <tr><td><a href="../../items/git_deploy">git_deploy</a></td><td><code>git_deploy</code></td><td>Deploys the contents of a git repository</td></tr> | |
41 | 42 | <tr><td><a href="../../items/group">group</a></td><td><code>groups</code></td><td>Manages groups by wrapping <code>groupadd</code>, <code>groupmod</code> and <code>groupdel</code></td></tr> |
42 | 43 | <tr><td><a href="../../items/k8s">k8s_*</a></td><td><code>k8s_*</code></td><td>Manages resources in Kubernetes clusters by wrapping <code>kubectl</code></td></tr> |
43 | 44 | <tr><td><a href="../../items/pkg_apt">pkg_apt</a></td><td><code>pkg_apt</code></td><td>Installs and removes packages with APT</td></tr> |
112 | 113 | |
113 | 114 | * if you need all items of a certain type to depend on something or |
114 | 115 | * if you need all items in a bundle to depend on something or |
115 | * if you need an item in a bundle you can't edit (e.g. because it's provided by a community-maintained [plugin](plugins.md)) to depend on something in your bundles | |
116 | * if you need an item in a bundle you can't edit to depend on something in your bundles | |
116 | 117 | |
117 | 118 | <br> |
118 | 119 | |
134 | 135 | |
135 | 136 | In this simplified example we save ourselves from duplicating the logic that gets the current MySQL version from metadata (which is probably overkill here, but you might encounter more complex situations). |
136 | 137 | |
138 | Tags also allow for optional dependencies, since items can depend on tags that don't exist. So, for example, if you need to do something after items from another bundle have completed, but that bundle might not always be present, you can depend on a tag given to the items of that other bundle. | |
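A sketch of such an optional dependency (bundle, item, and tag names are hypothetical):

    # bundle "app" (may not be assigned to every node)
    files = {
        "/etc/app.conf": {
            'content': "key=value",
            'tags': ["app-config"],
        },
    }

    # another bundle: runs after the tagged items when they exist,
    # without error when they don't
    actions = {
        'reload_app': {
            'command': "systemctl reload app",
            'needs': ["tag:app-config"],
        },
    }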
139 | ||
137 | 140 | <br> |
138 | 141 | |
139 | 142 | ## triggers and triggered |
23 | 23 | "bar": metadata.get("foo"), |
24 | 24 | } |
25 | 25 | |
26 | While this looks simple enough, there are some important caveats. First and foremost: Metadata reactors must assume to be called many times. This is to give you an opportunity to react to metadata provided by other reactors. All reactors will be run again and again until none of them return any changed metadata. Anything you return from a reactor will overwrite existing metadata. | |
26 | While this looks simple enough, there are some important caveats. First and foremost: metadata reactors must assume that they will be called many times. This gives you an opportunity to react to metadata provided by other reactors. All reactors will be run again and again until none of them return any changed metadata. Anything you return from a reactor will overwrite defaults, while metadata from `groups.py` and `nodes.py` will still overwrite metadata from reactors. Collection types like sets and dicts will be merged. | |
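A small sketch of this precedence (the key and values are illustrative):

    # bundles/test/metadata.py
    defaults = {"port": 80}         # lowest precedence

    @metadata_reactor
    def set_port(metadata):
        return {"port": 8080}       # overwrites the default ...

    # nodes.py
    nodes['node1'] = {
        'metadata': {"port": 443},  # ... but static node metadata wins
    }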
27 | 27 | |
28 | 28 | The parameter `metadata` is not a dictionary but an instance of `Metastack`. You cannot modify the contents of this object. It provides `.get("some/path", "default")` to query a key path (equivalent to `metadata["some"]["path"]` in a dict) and accepts an optional default value. It will raise a `KeyError` when called for a non-existent path without a default.
29 | 29 | |
30 | 30 | While node and group metadata and metadata defaults will always be available to reactors, you should not rely on that for the simple reason that you may one day move some metadata from those static sources into another reactor, which may be run later. Thus you may need to wait for some iterations before that data shows up in `metadata`. Note that BundleWrap will catch any `KeyError`s raised in metadata reactors and only report them if they don't go away after all other relevant reactors are done. |
31 | 31 | |
32 | 32 | To avoid deadlocks when accessing *other* nodes' metadata from within a metadata reactor, use `other_node.partial_metadata` instead of `other_node.metadata`. For the same reason, always use the `metadata` parameter to access the current node's metadata, never `node.metadata`. |
33 | ||
34 | <div class="alert alert-danger">Be careful when returning <a href="../../guide/api#bundlewraputilsfault">Fault</a> objects from reactors. <strong>All</strong> Fault objects (including those returned from <code>repo.vault.*</code>) will be considered <strong>equal</strong> to one another when BundleWrap inspects the returned metadata to check if anything changed compared to what was returned in an earlier iteration.</div> | |
35 | 33 | |
36 | 34 | |
37 | 35 | ### DoNotRunAgain |
50 | 50 | nodes['node-1'] = { |
51 | 51 | 'hostname': "node-1.example.com", |
52 | 52 | } |
53 | ||
54 | Alternatively, consider using [TOML nodes](../guide/toml.md). | |
53 | 55 | |
54 | 56 | <br> |
55 | 57 | |
136 | 138 | |
137 | 139 | <br> |
138 | 140 | |
139 | ### template_node | |
140 | ||
141 | Copy all attributes and merge all metadata from this node. This is useful for temporary clones of single specific nodes, where you don't want to create a group to deduplicate all the node-level configuration. | |
142 | ||
143 | Cannot be set at group level. | |
144 | ||
145 | <br> | |
146 | ||
147 | 141 | ## OS compatibility overrides |
148 | 142 | |
149 | 143 | ### cmd_wrapper_outer |
0 | # Plugins | |
1 | ||
2 | The plugin system in BundleWrap is an easy way of integrating third-party code into your repository. | |
3 | ||
4 | <div class="alert alert-warning">While plugins are subject to some superficial code review by BundleWrap developers before being accepted, we cannot make any guarantees as to the quality and trustworthiness of plugins. Always do your due diligence before running third-party code.</div> | |
5 | ||
6 | <br> | |
7 | ||
8 | ## Finding plugins | |
9 | ||
10 | It's as easy as `bw repo plugin search <term>`. Or you can browse [plugins.bundlewrap.org](http://plugins.bundlewrap.org). | |
11 | ||
12 | <br> | |
13 | ||
14 | ## Installing plugins | |
15 | ||
16 | You probably guessed it: `bw repo plugin install <plugin>` | |
17 | ||
18 | Installing the first plugin in your repo will create a file called `plugins.json`. You should commit this file (and any files installed by the plugin of course) to version control. | |
19 | ||
20 | <div class="alert alert-info">Avoid editing files provided by plugins at all costs. Local modifications will prevent future updates to the plugin.</div> | |
21 | ||
22 | <br> | |
23 | ||
24 | ## Updating plugins | |
25 | ||
26 | You can update all installed plugins with this command: `bw repo plugin update` | |
27 | ||
28 | <br> | |
29 | ||
30 | ## Removing a plugin | |
31 | ||
32 | `bw repo plugin remove <plugin>` | |
33 | ||
34 | <br> | |
35 | ||
36 | ## Writing your own | |
37 | ||
38 | See the [guide on publishing your own plugins](../guide/dev_plugin.md). |
18 | 18 | - Locking: guide/locks.md |
19 | 19 | - Kubernetes: guide/kubernetes.md |
20 | 20 | - Custom items: guide/dev_item.md |
21 | - Writing plugins: guide/dev_plugin.md | |
22 | 21 | - Python API: guide/api.md |
23 | 22 | - OS compatibility: guide/os_compatibility.md |
23 | - TOML nodes and groups: guide/toml.md | |
24 | 24 | - Migrating to 2.0: guide/migrate_12.md |
25 | 25 | - Migrating to 3.0: guide/migrate_23.md |
26 | - Migrating to 4.0: guide/migrate_34.md | |
26 | 27 | - Repository: |
27 | 28 | - Overview: repo/layout.md |
28 | 29 | - nodes.py: repo/nodes.py.md |
32 | 33 | - bundles/.../metadata.py: repo/metadata.py.md |
33 | 34 | - hooks/: repo/hooks.md |
34 | 35 | - libs/: repo/libs.md |
35 | - Plugins: repo/plugins.md | |
36 | 36 | - Items: |
37 | 37 | - action: items/action.md |
38 | 38 | - directory: items/directory.md |
39 | 39 | - file: items/file.md |
40 | - git_deploy: items/git_deploy.md | |
40 | 41 | - group: items/group.md |
41 | 42 | - k8s_*: items/k8s.md |
42 | 43 | - pkg_apt: items/pkg_apt.md |
5 | 5 | python_files=*.py |
6 | 6 | python_classes=Test |
7 | 7 | python_functions=test_* |
8 | ||
9 | [bdist_wheel] | |
10 | universal = 1 |
0 | from sys import version_info | |
1 | ||
2 | 0 | from setuptools import find_packages, setup |
3 | 1 | |
4 | 2 | |
5 | dependencies = [ | |
6 | "cryptography", | |
7 | "Jinja2", | |
8 | "Mako", | |
9 | "passlib", | |
10 | "pyyaml", | |
11 | "requests >= 1.0.0", | |
12 | "six", | |
13 | ] | |
14 | if version_info < (3, 2, 0): | |
15 | dependencies.append("futures") | |
16 | ||
17 | 3 | setup( |
18 | 4 | name="bundlewrap", |
19 | version="3.10.0", | |
5 | version="4.0.0", | |
20 | 6 | description="Config management with Python", |
21 | 7 | long_description=( |
22 | 8 | "By allowing for easy and low-overhead config management, BundleWrap fills the gap between complex deployments using Chef or Puppet and old school system administration over SSH.\n" |
41 | 27 | "Natural Language :: English", |
42 | 28 | "Operating System :: POSIX :: Linux", |
43 | 29 | "Programming Language :: Python", |
44 | "Programming Language :: Python :: 2.7", | |
45 | "Programming Language :: Python :: 3.4", | |
46 | "Programming Language :: Python :: 3.5", | |
47 | 30 | "Programming Language :: Python :: 3.6", |
48 | 31 | "Programming Language :: Python :: 3.7", |
49 | 32 | "Programming Language :: Python :: 3.8", |
50 | 33 | "Topic :: System :: Installation/Setup", |
51 | 34 | "Topic :: System :: Systems Administration", |
52 | 35 | ], |
53 | install_requires=dependencies, | |
54 | extras_require={ # used for wheels | |
55 | ':python_version=="2.7"': ["futures"], | |
56 | }, | |
36 | install_requires=[ | |
37 | "cryptography", | |
38 | "Jinja2", | |
39 | "Mako", | |
40 | "passlib", | |
41 | "pyyaml", | |
42 | "requests >= 1.0.0", | |
43 | "tomlkit", | |
44 | ], | |
57 | 45 | zip_safe=False, |
58 | 46 | ) |
0 | from os.path import exists, join | |
1 | ||
2 | from bundlewrap.utils.testing import host_os, make_repo, run | |
3 | ||
4 | ||
5 | def test_apply(tmpdir): | |
6 | make_repo( | |
7 | tmpdir, | |
8 | bundles={ | |
9 | "bundle1": { | |
10 | 'files': { | |
11 | join(str(tmpdir), "test"): { | |
12 | 'content': "test", | |
13 | }, | |
14 | }, | |
15 | }, | |
16 | }, | |
17 | groups={ | |
18 | "adhoc-localhost": { | |
19 | 'bundles': ["bundle1"], | |
20 | 'member_patterns': ["localhost"], | |
21 | 'os': host_os(), | |
22 | }, | |
23 | }, | |
24 | ) | |
25 | ||
26 | assert not exists(join(str(tmpdir), "test")) | |
27 | stdout, stderr, rcode = run("bw -A apply localhost", path=str(tmpdir)) | |
28 | assert rcode == 0 | |
29 | assert exists(join(str(tmpdir), "test")) | |
30 | ||
31 | ||
32 | def test_apply_fail(tmpdir): | |
33 | make_repo( | |
34 | tmpdir, | |
35 | bundles={ | |
36 | "bundle1": { | |
37 | 'files': { | |
38 | join(str(tmpdir), "test"): { | |
39 | 'content': "test", | |
40 | }, | |
41 | }, | |
42 | }, | |
43 | }, | |
44 | groups={ | |
45 | "adhoc-localhost": { | |
46 | 'bundles': ["bundle1"], | |
47 | 'member_patterns': ["localhost"], | |
48 | 'os': host_os(), | |
49 | }, | |
50 | }, | |
51 | ) | |
52 | ||
53 | assert not exists(join(str(tmpdir), "test")) | |
54 | stdout, stderr, rcode = run("bw apply localhost", path=str(tmpdir)) | |
55 | assert rcode == 1 | |
56 | assert not exists(join(str(tmpdir), "test")) |
0 | # -*- coding: utf-8 -*- | |
1 | from __future__ import unicode_literals | |
2 | ||
3 | 0 | from bundlewrap.utils.testing import host_os, make_repo, run |
4 | 1 | |
5 | 2 |
0 | # -*- coding: utf-8 -*- | |
1 | from __future__ import unicode_literals | |
2 | ||
3 | 0 | from os.path import exists, join |
4 | 1 | |
5 | 2 | from bundlewrap.utils.testing import host_os, make_repo, run |
36 | 33 | }, |
37 | 34 | ) |
38 | 35 | |
39 | run("bw apply -o bundle:test localhost", path=str(tmpdir)) | |
36 | run("bw apply -o bundle:test -- localhost", path=str(tmpdir)) | |
40 | 37 | assert exists(join(str(tmpdir), "foo")) |
41 | 38 | assert exists(join(str(tmpdir), "bar")) |
42 | 39 | assert not exists(join(str(tmpdir), "baz")) |
0 | # -*- coding: utf-8 -*- | |
1 | from __future__ import unicode_literals | |
2 | ||
3 | 0 | from os.path import exists, join |
4 | 1 | |
5 | 2 | from bundlewrap.utils.testing import host_os, make_repo, run |
24 | 21 | }, |
25 | 22 | }, |
26 | 23 | ) |
27 | result = run("bw apply --skip bundle:test localhost", path=str(tmpdir)) | |
24 | result = run("bw apply --skip bundle:test -- localhost", path=str(tmpdir)) | |
28 | 25 | assert result[2] == 0 |
29 | 26 | assert not exists(join(str(tmpdir), "foo")) |
30 | 27 | |
44 | 41 | nodes={ |
45 | 42 | "localhost": { |
46 | 43 | 'bundles': ["test"], |
44 | 'groups': {"foo"}, | |
47 | 45 | 'os': host_os(), |
48 | 46 | }, |
49 | 47 | }, |
50 | 48 | groups={ |
51 | "foo": {'members': ["localhost"]}, | |
49 | "foo": {}, | |
52 | 50 | }, |
53 | 51 | ) |
54 | result = run("bw apply --skip group:foo localhost", path=str(tmpdir)) | |
52 | result = run("bw apply --skip group:foo -- localhost", path=str(tmpdir)) | |
55 | 53 | assert result[2] == 0 |
56 | 54 | assert not exists(join(str(tmpdir), "foo")) |
57 | 55 | |
75 | 73 | }, |
76 | 74 | }, |
77 | 75 | ) |
78 | result = run("bw apply --skip file:{} localhost".format(join(str(tmpdir), "foo")), path=str(tmpdir)) | |
76 | result = run("bw apply --skip file:{} -- localhost".format(join(str(tmpdir), "foo")), path=str(tmpdir)) | |
79 | 77 | assert result[2] == 0 |
80 | 78 | assert not exists(join(str(tmpdir), "foo")) |
81 | 79 | |
99 | 97 | }, |
100 | 98 | }, |
101 | 99 | ) |
102 | result = run("bw apply --skip node:localhost localhost", path=str(tmpdir)) | |
100 | result = run("bw apply --skip node:localhost -- localhost", path=str(tmpdir)) | |
103 | 101 | assert result[2] == 0 |
104 | 102 | assert not exists(join(str(tmpdir), "foo")) |
105 | 103 | |
124 | 122 | }, |
125 | 123 | }, |
126 | 124 | ) |
127 | result = run("bw apply --skip tag:nope localhost", path=str(tmpdir)) | |
125 | result = run("bw apply --skip tag:nope -- localhost", path=str(tmpdir)) | |
128 | 126 | assert result[2] == 0 |
129 | 127 | assert not exists(join(str(tmpdir), "foo")) |
130 | 128 | |
148 | 146 | }, |
149 | 147 | }, |
150 | 148 | ) |
151 | result = run("bw apply --skip file: localhost", path=str(tmpdir)) | |
149 | result = run("bw apply --skip file: -- localhost", path=str(tmpdir)) | |
152 | 150 | assert result[2] == 0 |
153 | 151 | assert not exists(join(str(tmpdir), "foo")) |
154 | 152 | |
178 | 176 | }, |
179 | 177 | }, |
180 | 178 | ) |
181 | result = run("bw apply --skip tag:nope localhost", path=str(tmpdir)) | |
179 | result = run("bw apply --skip tag:nope -- localhost", path=str(tmpdir)) | |
182 | 180 | assert result[2] == 0 |
183 | 181 | assert not exists(join(str(tmpdir), "foo")) |
184 | 182 | assert not exists(join(str(tmpdir), "bar")) |
0 | # -*- coding: utf-8 -*- | |
1 | from __future__ import unicode_literals | |
2 | ||
3 | 0 | from os import mkdir |
4 | 1 | from os.path import exists, join |
5 | 2 |
0 | # -*- coding: utf-8 -*- | |
1 | from __future__ import unicode_literals | |
2 | ||
3 | 0 | from base64 import b64encode |
4 | 1 | from os.path import exists, join |
5 | 2 | |
215 | 212 | with open(join(str(tmpdir), "foo"), 'rb') as f: |
216 | 213 | content = f.read() |
217 | 214 | assert content == b"${node.name}" |
215 | ||
216 | ||
217 | def test_fault_content_unavailable_skipped(tmpdir): | |
218 | make_repo( | |
219 | tmpdir, | |
220 | bundles={ | |
221 | "test": {}, | |
222 | }, | |
223 | nodes={ | |
224 | "localhost": { | |
225 | 'bundles': ["test"], | |
226 | 'os': host_os(), | |
227 | }, | |
228 | }, | |
229 | ) | |
230 | with open(join(str(tmpdir), "bundles", "test", "items.py"), 'w') as f: | |
231 | f.write(""" | |
232 | files = { | |
233 | "/tmp/bw_test_faultunavailable": { | |
234 | 'content': repo.vault.password_for("fault", key="missing"), | |
235 | }, | |
236 | } | |
237 | """) | |
238 | stdout, stderr, rcode = run("bw apply localhost", path=str(tmpdir)) | |
239 | assert rcode == 0 | |
240 | assert b"file:/tmp/bw_test_faultunavailable skipped (Fault unavailable)" in stdout | |
241 | assert not exists("/tmp/bw_test_faultunavailable") |
0 | from os.path import exists, join | |
1 | ||
2 | from bundlewrap.utils.testing import host_os, make_repo, run | |
3 | ||
4 | ||
5 | def test_deploy_from_url(tmpdir): | |
6 | make_repo( | |
7 | tmpdir, | |
8 | bundles={ | |
9 | "test": { | |
10 | 'git_deploy': { | |
11 | join(str(tmpdir), "git_deployed_bw"): { | |
12 | 'repo': "https://github.com/bundlewrap/bundlewrap.git", | |
13 | 'rev': "master", | |
14 | }, | |
15 | }, | |
16 | 'directories': { | |
17 | join(str(tmpdir), "git_deployed_bw"): {}, | |
18 | }, | |
19 | }, | |
20 | }, | |
21 | nodes={ | |
22 | "localhost": { | |
23 | 'bundles': ["test"], | |
24 | 'os': host_os(), | |
25 | }, | |
26 | }, | |
27 | ) | |
28 | ||
29 | assert not exists(join(str(tmpdir), "git_deployed_bw", "LICENSE")) | |
30 | stdout, stderr, rcode = run("bw apply localhost", path=str(tmpdir)) | |
31 | assert rcode == 0 | |
32 | assert exists(join(str(tmpdir), "git_deployed_bw", "LICENSE")) | |
33 | assert not exists(join(str(tmpdir), "git_deployed_bw", ".git")) | |
34 | ||
35 | ||
36 | def test_cannot_deploy_into_purged(tmpdir): | |
37 | make_repo( | |
38 | tmpdir, | |
39 | bundles={ | |
40 | "test": { | |
41 | 'git_deploy': { | |
42 | join(str(tmpdir), "git_deployed_bw"): { | |
43 | 'repo': "https://github.com/bundlewrap/bundlewrap.git", | |
44 | 'rev': "master", | |
45 | }, | |
46 | }, | |
47 | 'directories': { | |
48 | join(str(tmpdir), "git_deployed_bw"): { | |
49 | 'purge': True, | |
50 | }, | |
51 | }, | |
52 | }, | |
53 | }, | |
54 | nodes={ | |
55 | "localhost": { | |
56 | 'bundles': ["test"], | |
57 | 'os': host_os(), | |
58 | }, | |
59 | }, | |
60 | ) | |
61 | ||
62 | stdout, stderr, rcode = run("bw apply localhost", path=str(tmpdir)) | |
63 | assert rcode == 1 | |
64 | assert b"cannot git_deploy into purged directory" in stderr |
0 | # -*- coding: utf-8 -*- | |
1 | from __future__ import unicode_literals | |
2 | ||
3 | 0 | from json import loads |
4 | 1 | from os import environ |
5 | 2 |
0 | # -*- coding: utf-8 -*- | |
1 | from __future__ import unicode_literals | |
2 | 0 | from os.path import exists, join |
3 | 1 | |
4 | 2 | from bundlewrap.utils.testing import host_os, make_repo, run |
0 | # -*- coding: utf-8 -*- | |
1 | from __future__ import unicode_literals | |
2 | ||
3 | 0 | from os.path import exists, join |
4 | 1 | |
5 | 2 | from bundlewrap.utils.testing import host_os, make_repo, run |
0 | # -*- coding: utf-8 -*- | |
1 | from __future__ import unicode_literals | |
2 | ||
3 | 0 | from os import mkdir, readlink, symlink |
4 | 1 | from os.path import join |
5 | 2 |
8 | 8 | "node2": {'metadata': {"key": "value2"}}, |
9 | 9 | }, |
10 | 10 | ) |
11 | stdout, stderr, rcode = run("bw diff -m node1,node2", path=str(tmpdir)) | |
11 | stdout, stderr, rcode = run("bw diff -m node1 node2", path=str(tmpdir)) | |
12 | 12 | assert b"value1" in stdout |
13 | 13 | assert b"value2" in stdout |
14 | 14 | assert stderr == b"" |
39 | 39 | }, |
40 | 40 | }, |
41 | 41 | ) |
42 | stdout, stderr, rcode = run("bw diff -i file:/tmp/test node1,node2", path=str(tmpdir)) | |
42 | stdout, stderr, rcode = run("bw diff -i file:/tmp/test -- node1 node2", path=str(tmpdir)) | |
43 | 43 | assert b"one" in stdout |
44 | 44 | assert b"two" in stdout |
45 | 45 | assert stderr == b"" |
77 | 77 | }, |
78 | 78 | }, |
79 | 79 | ) |
80 | stdout, stderr, rcode = run("bw diff node1,node2", path=str(tmpdir)) | |
80 | stdout, stderr, rcode = run("bw diff node1 node2", path=str(tmpdir)) | |
81 | 81 | assert b"/tmp/foo" in stdout |
82 | 82 | assert b"/tmp/bar" not in stdout |
83 | 83 | assert stderr == b"" |
0 | from json import loads | |
1 | from os.path import join | |
0 | from bundlewrap.utils.testing import make_repo, run | |
2 | 1 | |
3 | from bundlewrap.utils.testing import make_repo, run | |
2 | ||
3 | def test_group_members(tmpdir): | |
4 | make_repo( | |
5 | tmpdir, | |
6 | nodes={ | |
7 | "node1": {}, | |
8 | "node2": {}, | |
9 | "node3": {}, | |
10 | }, | |
11 | groups={ | |
12 | "group1": {}, | |
13 | "group2": { | |
14 | 'members': {"node2"}, | |
15 | }, | |
16 | "group3": { | |
17 | 'members': {"node2", "node3"}, | |
18 | }, | |
19 | }, | |
20 | ) | |
21 | stdout, stderr, rcode = run("BW_TABLE_STYLE=grep bw groups -i group1 group2 group3 -a nodes", path=str(tmpdir)) | |
22 | assert stdout == b"""group1\t | |
23 | group2\tnode2 | |
24 | group3\tnode2,node3 | |
25 | """ | |
26 | assert stderr == b"" | |
27 | assert rcode == 0 | |
4 | 28 | |
5 | 29 | |
6 | 30 | def test_group_members_at_node(tmpdir): |
13 | 37 | }, |
14 | 38 | groups={ |
15 | 39 | "group1": {}, |
16 | "group2": { | |
17 | 'members': ["node2"], | |
18 | }, | |
19 | "group3": { | |
20 | 'members': ["node3"], | |
21 | }, | |
40 | "group2": {}, | |
41 | "group3": {}, | |
22 | 42 | }, |
23 | 43 | ) |
24 | stdout, stderr, rcode = run("BW_TABLE_STYLE=grep bw groups -i group1,group2,group3 nodes", path=str(tmpdir)) | |
44 | stdout, stderr, rcode = run("BW_TABLE_STYLE=grep bw groups -i group1 group2 group3 -a nodes", path=str(tmpdir)) | |
25 | 45 | assert stdout == b"""group1\tnode1,node2 |
26 | group2\tnode1,node2 | |
27 | group3\tnode3 | |
46 | group2\tnode1 | |
47 | group3\t | |
28 | 48 | """ |
29 | 49 | assert stderr == b"" |
30 | 50 | assert rcode == 0 |
31 | ||
32 | ||
33 | def test_group_members_add(tmpdir): | |
34 | make_repo( | |
35 | tmpdir, | |
36 | nodes={ | |
37 | "node1": {'os': 'centos'}, | |
38 | "node2": {'os': 'debian'}, | |
39 | "node3": {'os': 'ubuntu'}, | |
40 | }, | |
41 | ) | |
42 | with open(join(str(tmpdir), "groups.py"), 'w') as f: | |
43 | f.write(""" | |
44 | groups = { | |
45 | "group1": { | |
46 | 'members_add': lambda node: node.os == 'centos', | |
47 | }, | |
48 | "group2": { | |
49 | 'members': ["node2"], | |
50 | 'members_add': lambda node: node.os != 'centos', | |
51 | }, | |
52 | "group3": { | |
53 | 'members_add': lambda node: not node.in_group("group2"), | |
54 | }, | |
55 | "group4": { | |
56 | 'members': ["node3"], | |
57 | }, | |
58 | } | |
59 | """) | |
60 | stdout, stderr, rcode = run("BW_TABLE_STYLE=grep bw groups -i group1,group2,group3,group4 nodes", path=str(tmpdir)) | |
61 | assert stdout == b"""group1\tnode1 | |
62 | group2\tnode2,node3 | |
63 | group3\tnode1,node3 | |
64 | group4\tnode3 | |
65 | """ | |
66 | assert stderr == b"" | |
67 | assert rcode == 0 | |
68 | ||
69 | ||
70 | def test_group_members_remove(tmpdir): | |
71 | make_repo( | |
72 | tmpdir, | |
73 | nodes={ | |
74 | "node1": {'os': 'centos'}, | |
75 | "node2": {'os': 'debian'}, | |
76 | "node3": {'os': 'ubuntu'}, | |
77 | "node4": {'os': 'ubuntu'}, | |
78 | }, | |
79 | ) | |
80 | with open(join(str(tmpdir), "groups.py"), 'w') as f: | |
81 | f.write(""" | |
82 | groups = { | |
83 | "group1": { | |
84 | 'members_add': lambda node: node.os == 'ubuntu', | |
85 | }, | |
86 | "group2": { | |
87 | 'members_add': lambda node: node.os == 'ubuntu', | |
88 | 'members_remove': lambda node: node.name == "node3", | |
89 | }, | |
90 | "group3": { | |
91 | 'members_add': lambda node: not node.in_group("group3"), | |
92 | }, | |
93 | "group4": { | |
94 | 'subgroups': ["group3"], | |
95 | 'members_remove': lambda node: node.os == 'debian', | |
96 | }, | |
97 | } | |
98 | """) | |
99 | stdout, stderr, rcode = run("BW_TABLE_STYLE=grep bw groups -i group1,group2,group3,group4 nodes", path=str(tmpdir)) | |
100 | assert stdout == b"""group1\tnode3,node4 | |
101 | group2\tnode4 | |
102 | group3\tnode1,node2,node3,node4 | |
103 | group4\tnode1,node3,node4 | |
104 | """ | |
105 | assert stderr == b"" | |
106 | assert rcode == 0 | |
107 | ||
108 | ||
109 | def test_group_members_partial_metadata(tmpdir): | |
110 | make_repo( | |
111 | tmpdir, | |
112 | nodes={ | |
113 | "node1": { | |
114 | 'metadata': {'foo': 1}, | |
115 | }, | |
116 | "node2": {}, | |
117 | }, | |
118 | ) | |
119 | with open(join(str(tmpdir), "groups.py"), 'w') as f: | |
120 | f.write(""" | |
121 | groups = { | |
122 | "group1": { | |
123 | 'members_add': lambda node: node.metadata.get('foo') == 1, | |
124 | }, | |
125 | "group2": { | |
126 | 'members': ["node2"], | |
127 | 'metadata': {'foo': 1}, | |
128 | }, | |
129 | } | |
130 | """) | |
131 | stdout, stderr, rcode = run("BW_TABLE_STYLE=grep bw groups -i group1,group2 nodes", path=str(tmpdir)) | |
132 | assert stdout == b"""group1\tnode1 | |
133 | group2\tnode2 | |
134 | """ | |
135 | assert stderr == b"" | |
136 | assert rcode == 0 | |
137 | ||
138 | ||
139 | def test_group_members_remove_based_on_metadata(tmpdir): | |
140 | make_repo( | |
141 | tmpdir, | |
142 | nodes={ | |
143 | "node1": { | |
144 | 'metadata': {'remove': False}, | |
145 | }, | |
146 | "node2": {}, | |
147 | }, | |
148 | ) | |
149 | with open(join(str(tmpdir), "groups.py"), 'w') as f: | |
150 | f.write(""" | |
151 | groups = { | |
152 | "group1": { | |
153 | 'members_add': lambda node: not node.metadata.get('remove', False), | |
154 | 'members_remove': lambda node: node.metadata.get('remove', False), | |
155 | }, | |
156 | "group2": { | |
157 | 'members': ["node2"], | |
158 | 'metadata': {'remove': True}, | |
159 | }, | |
160 | "group3": { | |
161 | 'subgroups': ["group1"], | |
162 | 'members_remove': lambda node: node.name.endswith("1") and node.metadata.get('redherring', True), | |
163 | }, | |
164 | } | |
165 | """) | |
166 | stdout, stderr, rcode = run("BW_TABLE_STYLE=grep bw groups -i group1,group2,group3 nodes", path=str(tmpdir)) | |
167 | assert stdout == b"""group1\tnode1,node2 | |
168 | group2\tnode2 | |
169 | group3\tnode2 | |
170 | """ | |
171 | assert stderr == b"" | |
172 | assert rcode == 0 | |
173 | ||
174 | # make sure there is no metadata deadlock | |
175 | stdout, stderr, rcode = run("bw metadata node1", path=str(tmpdir)) | |
176 | assert loads(stdout.decode('utf-8')) == {'remove': False} | |
177 | assert stderr == b"" | |
178 | assert rcode == 0 | |
179 | ||
180 | ||
181 | def test_group_members_removed_from_supergroup(tmpdir): | |
182 | make_repo( | |
183 | tmpdir, | |
184 | nodes={ | |
185 | 'node_in_group': { | |
186 | 'hostname': "localhost", | |
187 | }, | |
188 | 'node_NOT_in_group': { | |
189 | 'hostname': "localhost", | |
190 | 'metadata': { | |
191 | 'remove_from_group': True, | |
192 | }, | |
193 | }, | |
194 | }, | |
195 | ) | |
196 | with open(join(str(tmpdir), "groups.py"), 'w') as f: | |
197 | f.write(""" | |
198 | groups = { | |
199 | 'super_group': { | |
200 | 'subgroups': ['intermediate_group'], | |
201 | }, | |
202 | 'intermediate_group': { | |
203 | 'members_remove': lambda node: node.metadata.get('remove_from_group', False), | |
204 | 'subgroups': ['inner_group'], | |
205 | }, | |
206 | 'inner_group': { | |
207 | 'member_patterns': ( | |
208 | r".*", | |
209 | ), | |
210 | }, | |
211 | } | |
212 | """) | |
213 | stdout, stderr, rcode = run("BW_TABLE_STYLE=grep bw groups -i inner_group,intermediate_group,intermediate_group nodes", path=str(tmpdir)) | |
214 | assert stdout == b"""inner_group\tnode_NOT_in_group,node_in_group | |
215 | intermediate_group\tnode_in_group | |
216 | intermediate_group\tnode_in_group | |
217 | """ | |
218 | assert stderr == b"" | |
219 | assert rcode == 0 |
283 | 283 | make_repo( |
284 | 284 | tmpdir, |
285 | 285 | groups={ |
286 | "group1": {'members': ["node1", "node2"]}, | |
287 | "group2": {'members': ["node3"]}, | |
288 | }, | |
289 | nodes={ | |
290 | "node1": {}, | |
291 | "node2": {}, | |
292 | "node3": {}, | |
286 | "group1": {}, | |
287 | "group2": {}, | |
288 | }, | |
289 | nodes={ | |
290 | "node1": {'groups': {"group1"}}, | |
291 | "node2": {'groups': {"group1"}}, | |
292 | "node3": {'groups': {"group2"}}, | |
293 | 293 | }, |
294 | 294 | ) |
295 | 295 | |
302 | 302 | make_repo( |
303 | 303 | tmpdir, |
304 | 304 | groups={ |
305 | "group1": {'members': ["node1", "node2"]}, | |
306 | "group2": {'members': ["node3"]}, | |
307 | }, | |
308 | nodes={ | |
309 | "node1": {}, | |
310 | "node2": {}, | |
311 | "node3": {}, | |
305 | "group1": {}, | |
306 | "group2": {}, | |
307 | }, | |
308 | nodes={ | |
309 | "node1": {'groups': {"group1"}}, | |
310 | "node2": {'groups': {"group1"}}, | |
311 | "node3": {'groups': {"group2"}}, | |
312 | 312 | }, |
313 | 313 | ) |
314 | 314 | |
321 | 321 | make_repo( |
322 | 322 | tmpdir, |
323 | 323 | groups={ |
324 | "group1": {'members': ["node1", "node2"]}, | |
325 | "group2": {'members': ["node3"]}, | |
326 | }, | |
327 | nodes={ | |
328 | "node1": {}, | |
329 | "node2": {}, | |
330 | "node3": {}, | |
324 | "group1": {}, | |
325 | "group2": {}, | |
326 | }, | |
327 | nodes={ | |
328 | "node1": {'groups': {"group1"}}, | |
329 | "node2": {'groups': {"group1"}}, | |
330 | "node3": {'groups': {"group2"}}, | |
331 | 331 | }, |
332 | 332 | ) |
333 | 333 |
0 | # -*- coding: utf-8 -*- | |
1 | from __future__ import unicode_literals | |
2 | ||
3 | 0 | from bundlewrap.utils.testing import make_repo, run |
4 | 1 | |
5 | 2 |
0 | # -*- coding: utf-8 -*- | |
1 | from __future__ import unicode_literals | |
2 | 0 | from re import search |
3 | 1 | |
4 | 2 | from bundlewrap.utils.testing import host_os, make_repo, run |
27 | 25 | }, |
28 | 26 | }, |
29 | 27 | ) |
30 | run("rm -f /tmp/bw_test_lock_add") | |
31 | stdout, stderr, rcode = run("BW_IDENTITY=jdoe bw lock add -c höhöhö -e 1m -i file:/tmp/bw_test_lock_add localhost", path=str(tmpdir)) | |
28 | run("sudo rm -f /tmp/bw_test_lock_add") | |
29 | stdout, stderr, rcode = run("BW_IDENTITY=jdoe bw lock add -c höhöhö -e 1m -i file:/tmp/bw_test_lock_add -- localhost", path=str(tmpdir)) | |
32 | 30 | assert rcode == 0 |
33 | 31 | lock_id = get_lock_id(stdout.decode('utf-8')) |
34 | 32 | assert len(lock_id) == 4 |
35 | stdout, stderr, rcode = run("bw apply localhost", path=str(tmpdir)) | |
33 | stdout, stderr, rcode = run("bw -d apply localhost", path=str(tmpdir)) | |
36 | 34 | assert rcode == 0 |
37 | 35 | stdout, stderr, rcode = run("cat /tmp/bw_test_lock_add", path=str(tmpdir)) |
38 | 36 | assert rcode != 0 |
0 | # -*- coding: utf-8 -*- | |
1 | from __future__ import unicode_literals | |
2 | ||
3 | 0 | from json import loads |
4 | 1 | from os.path import join |
5 | 2 | |
45 | 42 | tmpdir, |
46 | 43 | nodes={ |
47 | 44 | "node1": { |
45 | 'groups': {"group1"}, | |
48 | 46 | 'metadata': { |
49 | 47 | "foo": { |
50 | 48 | "bar": "baz", |
54 | 52 | }, |
55 | 53 | groups={ |
56 | 54 | "group1": { |
57 | 'members': ["node1"], | |
58 | 55 | 'metadata': { |
59 | 56 | "ding": 5, |
60 | 57 | "foo": { |
77 | 74 | assert rcode == 0 |
78 | 75 | |
79 | 76 | |
80 | def test_template_node(tmpdir): | |
81 | make_repo( | |
82 | tmpdir, | |
83 | nodes={ | |
84 | "node1": { | |
85 | 'template_node': "node2", | |
86 | }, | |
87 | "node2": { | |
88 | 'metadata': { | |
89 | "foo": 2, | |
90 | }, | |
91 | }, | |
92 | }, | |
93 | groups={ | |
94 | "group1": { | |
95 | 'members': ["node1"], | |
96 | 'metadata': { | |
97 | "foo": 3, | |
98 | }, | |
99 | }, | |
100 | }, | |
101 | ) | |
102 | stdout, stderr, rcode = run("bw metadata node1", path=str(tmpdir)) | |
103 | assert loads(stdout.decode()) == {"foo": 2} | |
104 | assert stderr == b"" | |
105 | assert rcode == 0 | |
106 | ||
107 | ||
108 | def test_template_node_override(tmpdir): | |
109 | make_repo( | |
110 | tmpdir, | |
111 | nodes={ | |
112 | "node1": { | |
113 | 'metadata': { | |
114 | "foo": 1, | |
115 | }, | |
116 | 'template_node': "node2", | |
117 | }, | |
118 | "node2": { | |
119 | 'metadata': { | |
120 | "foo": 2, | |
121 | }, | |
122 | }, | |
123 | }, | |
124 | groups={ | |
125 | "group1": { | |
126 | 'members': ["node1"], | |
127 | 'metadata': { | |
128 | "foo": 3, | |
129 | }, | |
130 | }, | |
131 | }, | |
132 | ) | |
133 | stdout, stderr, rcode = run("bw metadata node1", path=str(tmpdir)) | |
134 | assert loads(stdout.decode()) == {"foo": 1} | |
135 | assert stderr == b"" | |
136 | assert rcode == 0 | |
137 | ||
138 | ||
139 | 77 | def test_metadatapy(tmpdir): |
140 | 78 | make_repo( |
141 | 79 | tmpdir, |
143 | 81 | nodes={ |
144 | 82 | "node1": { |
145 | 83 | 'bundles': ["test"], |
146 | 'metadata': {"foo": "bar"}, | |
147 | }, | |
148 | }, | |
149 | ) | |
150 | with open(join(str(tmpdir), "bundles", "test", "metadata.py"), 'w') as f: | |
151 | f.write( | |
152 | """@metadata_processor | |
153 | def foo(metadata): | |
154 | metadata["baz"] = node.name | |
155 | return metadata, DONE | |
156 | """) | |
157 | stdout, stderr, rcode = run("bw metadata node1", path=str(tmpdir)) | |
158 | assert loads(stdout.decode()) == { | |
159 | "baz": "node1", | |
160 | "foo": "bar", | |
161 | } | |
162 | assert stderr == b"" | |
163 | assert rcode == 0 | |
164 | ||
165 | ||
166 | def test_metadatapy_defaults(tmpdir): | |
167 | make_repo( | |
168 | tmpdir, | |
169 | bundles={"test": {}}, | |
170 | nodes={ | |
171 | "node1": { | |
172 | 'bundles': ["test"], | |
173 | 'metadata': {"foo": "bar"}, | |
174 | }, | |
175 | }, | |
176 | ) | |
177 | with open(join(str(tmpdir), "bundles", "test", "metadata.py"), 'w') as f: | |
178 | f.write( | |
179 | """@metadata_processor | |
84 | 'metadata': { | |
85 | "foo": { | |
86 | "bar": "shizzle", | |
87 | }, | |
88 | }, | |
89 | }, | |
90 | }, | |
91 | ) | |
92 | with open(join(str(tmpdir), "bundles", "test", "metadata.py"), 'w') as f: | |
93 | f.write( | |
94 | """@metadata_reactor | |
180 | 95 | def foo(metadata): |
181 | 96 | return { |
182 | "foo": "baz", | |
183 | "baz": "foo", | |
184 | }, DONE, DEFAULTS | |
185 | """) | |
186 | stdout, stderr, rcode = run("bw metadata node1", path=str(tmpdir)) | |
187 | assert loads(stdout.decode()) == { | |
188 | "baz": "foo", | |
97 | "baz": node.name, | |
98 | "frob": metadata.get("foo/bar", "shnozzle") + "ay", | |
99 | "gob": metadata.get("shlop", "mop"), | |
100 | } | |
101 | """) | |
102 | stdout, stderr, rcode = run("bw metadata node1", path=str(tmpdir)) | |
103 | assert loads(stdout.decode()) == { | |
104 | "baz": "node1", | |
105 | "foo": { | |
106 | "bar": "shizzle", | |
107 | }, | |
108 | "frob": "shizzleay", | |
109 | "gob": "mop", | |
110 | } | |
111 | assert stderr == b"" | |
112 | assert rcode == 0 | |
113 | ||
114 | ||
115 | def test_metadatapy_defaults(tmpdir): | |
116 | make_repo( | |
117 | tmpdir, | |
118 | bundles={"test": {}}, | |
119 | nodes={ | |
120 | "node1": { | |
121 | 'bundles': ["test"], | |
122 | 'metadata': {"foo": "bar"}, | |
123 | }, | |
124 | }, | |
125 | ) | |
126 | with open(join(str(tmpdir), "bundles", "test", "metadata.py"), 'w') as f: | |
127 | f.write( | |
128 | """defaults = { | |
129 | "baz": node.name, | |
130 | "foo": "baz", | |
131 | } | |
132 | """) | |
133 | stdout, stderr, rcode = run("bw metadata node1", path=str(tmpdir)) | |
134 | assert loads(stdout.decode()) == { | |
135 | "baz": "node1", | |
189 | 136 | "foo": "bar", |
190 | 137 | } |
191 | 138 | assert stderr == b"" |
211 | 158 | """) |
212 | 159 | with open(join(str(tmpdir), "bundles", "test", "metadata.py"), 'w') as f: |
213 | 160 | f.write( |
214 | """@metadata_processor | |
161 | """defaults = { | |
162 | "foo": { | |
163 | "bar": "frob", | |
164 | "baz": "gobble", | |
165 | }, | |
166 | } | |
167 | """) | |
168 | stdout, stderr, rcode = run("bw metadata node1", path=str(tmpdir)) | |
169 | assert loads(stdout.decode()) == { | |
170 | "foo": {"bar": "baz"}, | |
171 | } | |
172 | assert stderr == b"" | |
173 | assert rcode == 0 | |
174 | ||
175 | ||
176 | def test_metadatapy_update(tmpdir): | |
177 | make_repo( | |
178 | tmpdir, | |
179 | bundles={"test": {}}, | |
180 | nodes={ | |
181 | "node1": { | |
182 | 'bundles': ["test"], | |
183 | 'metadata': {"foo": "bar"}, | |
184 | }, | |
185 | }, | |
186 | ) | |
187 | with open(join(str(tmpdir), "bundles", "test", "metadata.py"), 'w') as f: | |
188 | f.write( | |
189 | """@metadata_reactor | |
215 | 190 | def foo(metadata): |
216 | 191 | return { |
217 | "foo": { | |
218 | "bar": "frob", | |
219 | "baz": "gobble", | |
220 | }, | |
221 | }, DONE, DEFAULTS | |
222 | """) | |
223 | stdout, stderr, rcode = run("bw metadata node1", path=str(tmpdir)) | |
224 | assert loads(stdout.decode()) == { | |
225 | "foo": {"bar": "baz"}, | |
226 | } | |
227 | assert stderr == b"" | |
228 | assert rcode == 0 | |
229 | ||
230 | ||
231 | def test_metadatapy_update(tmpdir): | |
232 | make_repo( | |
233 | tmpdir, | |
234 | bundles={"test": {}}, | |
235 | nodes={ | |
236 | "node1": { | |
237 | 'bundles': ["test"], | |
238 | 'metadata': {"foo": "bar"}, | |
239 | }, | |
240 | }, | |
241 | ) | |
242 | with open(join(str(tmpdir), "bundles", "test", "metadata.py"), 'w') as f: | |
243 | f.write( | |
244 | """@metadata_processor | |
245 | def foo(metadata): | |
246 | return { | |
247 | "foo": "baz", | |
248 | "baz": "foo", | |
249 | }, DONE, OVERWRITE | |
250 | """) | |
251 | stdout, stderr, rcode = run("bw metadata node1", path=str(tmpdir)) | |
252 | assert loads(stdout.decode()) == { | |
253 | 192 | "baz": "foo", |
254 | 193 | "foo": "baz", |
255 | 194 | } |
256 | assert stderr == b"" | |
257 | assert rcode == 0 | |
258 | ||
259 | ||
260 | def test_metadatapy_invalid_number_of_elements(tmpdir): | |
261 | make_repo( | |
262 | tmpdir, | |
263 | bundles={"test": {}}, | |
264 | nodes={ | |
265 | "node1": { | |
266 | 'bundles': ["test"], | |
267 | 'metadata': {"foo": "bar"}, | |
268 | }, | |
269 | }, | |
270 | ) | |
271 | with open(join(str(tmpdir), "bundles", "test", "metadata.py"), 'w') as f: | |
272 | f.write( | |
273 | """@metadata_processor | |
274 | def foo(metadata): | |
275 | return metadata | |
276 | """) | |
277 | stdout, stderr, rcode = run("bw metadata node1", path=str(tmpdir)) | |
278 | assert rcode != 0 | |
279 | ||
280 | ||
281 | def test_metadatapy_invalid_first_element_not_dict(tmpdir): | |
282 | make_repo( | |
283 | tmpdir, | |
284 | bundles={"test": {}}, | |
285 | nodes={ | |
286 | "node1": { | |
287 | 'bundles': ["test"], | |
288 | 'metadata': {"foo": "bar"}, | |
289 | }, | |
290 | }, | |
291 | ) | |
292 | with open(join(str(tmpdir), "bundles", "test", "metadata.py"), 'w') as f: | |
293 | f.write( | |
294 | """@metadata_processor | |
295 | def foo(metadata): | |
296 | return DONE, metadata | |
297 | """) | |
298 | stdout, stderr, rcode = run("bw metadata node1", path=str(tmpdir)) | |
299 | assert rcode != 0 | |
300 | ||
301 | ||
302 | def test_metadatapy_invalid_defaults_plus_original_dict(tmpdir): | |
303 | make_repo( | |
304 | tmpdir, | |
305 | bundles={"test": {}}, | |
306 | nodes={ | |
307 | "node1": { | |
308 | 'bundles': ["test"], | |
309 | 'metadata': {"foo": "bar"}, | |
310 | }, | |
311 | }, | |
312 | ) | |
313 | with open(join(str(tmpdir), "bundles", "test", "metadata.py"), 'w') as f: | |
314 | f.write( | |
315 | """@metadata_processor | |
316 | def foo(metadata): | |
317 | return metadata, DONE, DEFAULTS | |
318 | """) | |
319 | stdout, stderr, rcode = run("bw metadata node1", path=str(tmpdir)) | |
320 | assert rcode != 0 | |
321 | ||
322 | ||
323 | def test_metadatapy_invalid_overwrite_plus_original_dict(tmpdir): | |
324 | make_repo( | |
325 | tmpdir, | |
326 | bundles={"test": {}}, | |
327 | nodes={ | |
328 | "node1": { | |
329 | 'bundles': ["test"], | |
330 | 'metadata': {"foo": "bar"}, | |
331 | }, | |
332 | }, | |
333 | ) | |
334 | with open(join(str(tmpdir), "bundles", "test", "metadata.py"), 'w') as f: | |
335 | f.write( | |
336 | """@metadata_processor | |
337 | def foo(metadata): | |
338 | return metadata, DONE, OVERWRITE | |
339 | """) | |
340 | stdout, stderr, rcode = run("bw metadata node1", path=str(tmpdir)) | |
341 | assert rcode != 0 | |
342 | ||
343 | ||
344 | def test_metadatapy_invalid_option(tmpdir): | |
345 | make_repo( | |
346 | tmpdir, | |
347 | bundles={"test": {}}, | |
348 | nodes={ | |
349 | "node1": { | |
350 | 'bundles': ["test"], | |
351 | 'metadata': {"foo": "bar"}, | |
352 | }, | |
353 | }, | |
354 | ) | |
355 | with open(join(str(tmpdir), "bundles", "test", "metadata.py"), 'w') as f: | |
356 | f.write( | |
357 | """@metadata_processor | |
358 | def foo(metadata): | |
359 | return metadata, DONE, 1000 | |
360 | """) | |
361 | stdout, stderr, rcode = run("bw metadata node1", path=str(tmpdir)) | |
362 | assert rcode != 0 | |
363 | ||
364 | ||
365 | def test_metadatapy_invalid_done_and_again(tmpdir): | |
366 | make_repo( | |
367 | tmpdir, | |
368 | bundles={"test": {}}, | |
369 | nodes={ | |
370 | "node1": { | |
371 | 'bundles': ["test"], | |
372 | 'metadata': {"foo": "bar"}, | |
373 | }, | |
374 | }, | |
375 | ) | |
376 | with open(join(str(tmpdir), "bundles", "test", "metadata.py"), 'w') as f: | |
377 | f.write( | |
378 | """@metadata_processor | |
379 | def foo(metadata): | |
380 | return metadata, DONE, RUN_ME_AGAIN | |
381 | """) | |
382 | stdout, stderr, rcode = run("bw metadata node1", path=str(tmpdir)) | |
383 | assert rcode != 0 | |
384 | ||
385 | ||
386 | def test_metadatapy_invalid_no_done_or_again(tmpdir): | |
387 | make_repo( | |
388 | tmpdir, | |
389 | bundles={"test": {}}, | |
390 | nodes={ | |
391 | "node1": { | |
392 | 'bundles': ["test"], | |
393 | 'metadata': {"foo": "bar"}, | |
394 | }, | |
395 | }, | |
396 | ) | |
397 | with open(join(str(tmpdir), "bundles", "test", "metadata.py"), 'w') as f: | |
398 | f.write( | |
399 | """@metadata_processor | |
400 | def foo(metadata): | |
401 | return {}, DEFAULTS | |
402 | """) | |
403 | stdout, stderr, rcode = run("bw metadata node1", path=str(tmpdir)) | |
404 | assert rcode != 0 | |
405 | ||
406 | ||
407 | def test_metadatapy_invalid_defaults_and_overwrite(tmpdir): | |
408 | make_repo( | |
409 | tmpdir, | |
410 | bundles={"test": {}}, | |
411 | nodes={ | |
412 | "node1": { | |
413 | 'bundles': ["test"], | |
414 | 'metadata': {"foo": "bar"}, | |
415 | }, | |
416 | }, | |
417 | ) | |
418 | with open(join(str(tmpdir), "bundles", "test", "metadata.py"), 'w') as f: | |
419 | f.write( | |
420 | """@metadata_processor | |
421 | def foo(metadata): | |
422 | return {}, DEFAULTS, OVERWRITE, DONE | |
423 | """) | |
424 | stdout, stderr, rcode = run("bw metadata node1", path=str(tmpdir)) | |
425 | assert rcode != 0 | |
195 | """) | |
196 | stdout, stderr, rcode = run("bw metadata node1", path=str(tmpdir)) | |
197 | assert loads(stdout.decode()) == { | |
198 | "baz": "foo", | |
199 | "foo": "bar", | |
200 | } | |
201 | assert stderr == b"" | |
202 | assert rcode == 0 | |
426 | 203 | |
427 | 204 | |
428 | 205 | def test_table(tmpdir): |
450 | 227 | }, |
451 | 228 | }, |
452 | 229 | }, |
230 | groups={"all": {'member_patterns': {r".*"}}}, | |
231 | ) | |
232 | stdout, stderr, rcode = run("BW_TABLE_STYLE=grep bw metadata all -k foo_dict/bar foo_list foo_int foo_umlaut", path=str(tmpdir)) | |
233 | assert stdout.decode('utf-8') == """node\tfoo_dict/bar\tfoo_int\tfoo_list\tfoo_umlaut | |
234 | node1\tbaz\t47\tbar, 1\tföö | |
235 | node2\t<missing>\t-3\t\tfüü | |
236 | """ | |
237 | assert stderr == b"" | |
238 | assert rcode == 0 | |
239 | ||
240 | ||
241 | def test_metadatapy_merge_order(tmpdir): | |
242 | make_repo( | |
243 | tmpdir, | |
244 | bundles={"test": {}}, | |
245 | nodes={ | |
246 | "node1": { | |
247 | 'bundles': ["test"], | |
248 | 'groups': {"group1"}, | |
249 | 'metadata': { | |
250 | "four": "node", | |
251 | }, | |
252 | }, | |
253 | }, | |
453 | 254 | groups={ |
454 | "all": { | |
455 | 'members': ["node1", "node2"], | |
456 | }, | |
457 | }, | |
458 | ) | |
459 | stdout, stderr, rcode = run("BW_TABLE_STYLE=grep bw metadata --table all foo_dict bar, foo_list, foo_int, foo_umlaut", path=str(tmpdir)) | |
460 | assert stdout.decode('utf-8') == """node\tfoo_dict bar\tfoo_list\tfoo_int\tfoo_umlaut | |
461 | node1\tbaz\tbar, 1\t47\tföö | |
462 | node2\t<missing>\t\t-3\tfüü | |
463 | """ | |
464 | assert stderr == b"" | |
465 | assert rcode == 0 | |
466 | ||
467 | ||
468 | def test_table_no_key(tmpdir): | |
469 | make_repo( | |
470 | tmpdir, | |
471 | nodes={ | |
472 | "node1": {}, | |
473 | }, | |
474 | ) | |
475 | stdout, stderr, rcode = run("bw metadata --table node1", path=str(tmpdir)) | |
476 | assert rcode == 1 | |
477 | ||
478 | ||
479 | def test_metadatapy_proc_merge_order(tmpdir): | |
480 | make_repo( | |
481 | tmpdir, | |
482 | bundles={"test": {}}, | |
483 | nodes={ | |
484 | "node1": { | |
485 | 'bundles': ["test"], | |
486 | 'metadata': { | |
487 | "one": "node", | |
488 | "two": "node", | |
489 | "five": "node", | |
255 | "group1": { | |
256 | 'metadata': { | |
257 | "three": "group", | |
258 | "four": "group", | |
490 | 259 | }, |
491 | 260 | }, |
492 | 261 | }, |
494 | 263 | with open(join(str(tmpdir), "bundles", "test", "metadata.py"), 'w') as f: |
495 | 264 | f.write( |
496 | 265 | """defaults = { |
266 | "one": "defaults", | |
497 | 267 | "two": "defaults", |
498 | 268 | "three": "defaults", |
499 | 269 | "four": "defaults", |
502 | 272 | @metadata_reactor |
503 | 273 | def foo_reactor(metadata): |
504 | 274 | return { |
275 | "two": "reactor", | |
276 | "three": "reactor", | |
505 | 277 | "four": "reactor", |
506 | "five": "reactor", | |
507 | } | |
508 | """) | |
509 | stdout, stderr, rcode = run("bw metadata node1", path=str(tmpdir)) | |
510 | assert loads(stdout.decode()) == { | |
511 | "one": "node", | |
512 | "two": "node", | |
513 | "three": "defaults", | |
514 | "four": "reactor", | |
515 | "five": "reactor", | |
516 | } | |
517 | assert stderr == b"" | |
518 | assert rcode == 0 | |
519 | ||
520 | ||
521 | def test_metadatapy_do_not_run_me_again(tmpdir): | |
522 | make_repo( | |
523 | tmpdir, | |
524 | bundles={"test": {}}, | |
525 | nodes={ | |
526 | "node1": { | |
527 | 'bundles': ["test"], | |
528 | }, | |
529 | }, | |
530 | ) | |
531 | with open(join(str(tmpdir), "bundles", "test", "metadata.py"), 'w') as f: | |
532 | f.write( | |
533 | """called = False | |
534 | @metadata_reactor | |
278 | } | |
279 | """) | |
280 | stdout, stderr, rcode = run("bw metadata node1", path=str(tmpdir)) | |
281 | assert loads(stdout.decode()) == { | |
282 | "one": "defaults", | |
283 | "two": "reactor", | |
284 | "three": "group", | |
285 | "four": "node", | |
286 | } | |
287 | assert stderr == b"" | |
288 | assert rcode == 0 | |
289 | ||
290 | ||
291 | def test_metadatapy_static_reorder(tmpdir): | |
292 | make_repo( | |
293 | tmpdir, | |
294 | bundles={"test": {}}, | |
295 | nodes={ | |
296 | "node1": { | |
297 | 'bundles': ["test"], | |
298 | 'metadata': { | |
299 | "foo": "bar", | |
300 | "frob": "flup", | |
301 | }, | |
302 | }, | |
303 | }, | |
304 | ) | |
305 | with open(join(str(tmpdir), "bundles", "test", "metadata.py"), 'w') as f: | |
306 | f.write( | |
307 | """@metadata_reactor | |
535 | 308 | def foo_reactor(metadata): |
536 | global called | |
537 | if not called: | |
538 | called = True | |
539 | raise DoNotRunAgain | |
540 | else: | |
541 | raise AssertionError | |
542 | @metadata_reactor | |
543 | def bar_reactor(metadata): | |
544 | return {'called': called} | |
545 | """) | |
546 | stdout, stderr, rcode = run("bw metadata node1", path=str(tmpdir)) | |
547 | assert loads(stdout.decode()) == { | |
548 | "called": True, | |
309 | return { | |
310 | "foo": "overwritten", | |
311 | "baz": metadata.get("frob"), | |
312 | } | |
313 | """) | |
314 | stdout, stderr, rcode = run("bw metadata node1", path=str(tmpdir)) | |
315 | assert loads(stdout.decode()) == { | |
316 | "foo": "bar", | |
317 | "frob": "flup", | |
318 | "baz": "flup", | |
549 | 319 | } |
550 | 320 | assert stderr == b"" |
551 | 321 | assert rcode == 0 |
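`DoNotRunAgain`, as exercised above, lets a reactor permanently opt out of further passes instead of being re-invoked until metadata stabilizes; it contributes nothing and is never called again for that node. A hedged sketch (the helper names are hypothetical):

    @metadata_reactor
    def expensive(metadata):
        if not precondition_met(metadata):  # hypothetical check
            raise DoNotRunAgain             # skipped for all further passes
        return {"result": compute(metadata)}  # hypothetical computation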
620 | 390 | return {'foo_ran': True} |
621 | 391 | else: |
622 | 392 | return {'foo': metadata.get('bar'), 'foo_ran': True} |
393 | ||
394 | ||
623 | 395 | @metadata_reactor |
624 | 396 | def bar(metadata): |
625 | 397 | foo_ran = metadata.get('foo_ran', False) |
19 | 19 | def test_hostname(tmpdir): |
20 | 20 | make_repo( |
21 | 21 | tmpdir, |
22 | groups={"all": {'members': ["node1"]}}, | |
22 | groups={"all": {'member_patterns': {r".*"}}}, | |
23 | 23 | nodes={"node1": {'hostname': "node1.example.com"}}, |
24 | 24 | ) |
25 | stdout, stderr, rcode = run("BW_TABLE_STYLE=grep bw nodes all hostname | cut -f 2", path=str(tmpdir)) | |
25 | stdout, stderr, rcode = run("BW_TABLE_STYLE=grep bw nodes all -a hostname | cut -f 2", path=str(tmpdir)) | |
26 | 26 | assert stdout == b"node1.example.com\n" |
27 | 27 | assert stderr == b"" |
28 | 28 | assert rcode == 0 |
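Note the CLI change covered here: node attributes must now be requested explicitly with `-a`, as in `bw nodes all -a hostname`. With `BW_TABLE_STYLE=grep` the output is tab-separated, which is why the tests can pipe it through `cut`; a hedged sketch of consuming it directly from a test:

    stdout, stderr, rcode = run(
        "BW_TABLE_STYLE=grep bw nodes all -a hostname",
        path=str(tmpdir),
    )
    for line in stdout.decode().splitlines():
        node_name, hostname = line.split("\t")  # one node per line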
35 | 35 | "bundle1": {}, |
36 | 36 | "bundle2": {}, |
37 | 37 | }, |
38 | groups={"all": {'members': ["node1", "node2"]}}, | |
38 | groups={"all": {'member_patterns': {r".*"}}}, | |
39 | 39 | nodes={ |
40 | 40 | "node1": {'bundles': ["bundle1", "bundle2"]}, |
41 | 41 | "node2": {'bundles': ["bundle2"]}, |
42 | 42 | }, |
43 | 43 | ) |
44 | stdout, stderr, rcode = run("BW_TABLE_STYLE=grep bw nodes all bundles | grep node1 | cut -f 2", path=str(tmpdir)) | |
44 | stdout, stderr, rcode = run("BW_TABLE_STYLE=grep bw nodes all -a bundles | grep node1 | cut -f 2", path=str(tmpdir)) | |
45 | 45 | assert stdout.decode().strip().split("\n") == ["bundle1", "bundle2"] |
46 | 46 | assert stderr == b"" |
47 | 47 | assert rcode == 0 |
71 | 71 | }, |
72 | 72 | }, |
73 | 73 | ) |
74 | stdout, stderr, rcode = run("BW_TABLE_STYLE=grep bw nodes node1 bundles | cut -f 2", path=str(tmpdir)) | |
74 | stdout, stderr, rcode = run("BW_TABLE_STYLE=grep bw nodes node1 -a bundles | cut -f 2", path=str(tmpdir)) | |
75 | 75 | assert stdout.decode().strip().split("\n") == ["bundle1", "bundle2", "bundle3"] |
76 | 76 | assert stderr == b"" |
77 | 77 | assert rcode == 0 |
78 | ||
79 | ||
80 | def test_template_node(tmpdir): | |
81 | make_repo( | |
82 | tmpdir, | |
83 | nodes={ | |
84 | "node1": {'template_node': "node2"}, | |
85 | "node2": {'dummy': True}, | |
86 | }, | |
87 | ) | |
88 | stdout, stderr, rcode = run("BW_TABLE_STYLE=grep bw nodes node1 dummy | grep node1 | cut -f 2", path=str(tmpdir)) | |
89 | assert stdout.decode().strip() == "True" | |
90 | assert stderr == b"" | |
91 | assert rcode == 0 | |
92 | ||
93 | ||
94 | def test_template_node_cascade(tmpdir): | |
95 | make_repo( | |
96 | tmpdir, | |
97 | nodes={ | |
98 | "node1": {'template_node': "node2"}, | |
99 | "node2": {'template_node': "node1"}, | |
100 | }, | |
101 | ) | |
102 | stdout, stderr, rcode = run("BW_TABLE_STYLE=grep bw nodes node1 dummy", path=str(tmpdir)) | |
103 | assert rcode == 1 |
0 | from os.path import join | |
1 | ||
2 | 0 | from bundlewrap.utils.testing import make_repo, run |
3 | 1 | |
4 | 2 | |
6 | 4 | make_repo( |
7 | 5 | tmpdir, |
8 | 6 | nodes={ |
9 | "node-foo": {}, | |
7 | "node-foo": {'groups': {"group-foo"}}, | |
10 | 8 | "node-bar": {}, |
11 | 9 | "node-baz": {}, |
12 | "node-pop": {}, | |
10 | "node-pop": {'groups': {"group-baz"}}, | |
11 | }, | |
12 | groups={ | |
13 | "group-foo": { | |
14 | 'member_patterns': [r".*-bar"], | |
15 | }, | |
16 | "group-bar": { | |
17 | 'subgroups': ["group-foo"], | |
18 | }, | |
19 | "group-baz": {}, | |
20 | "group-frob": { | |
21 | 'members': {"node-pop"}, | |
22 | }, | |
23 | "group-pop": { | |
24 | 'subgroup_patterns': [r"ba"], | |
25 | }, | |
13 | 26 | }, |
14 | 27 | ) |
15 | with open(join(str(tmpdir), "groups.py"), 'w') as f: | |
16 | f.write(""" | |
17 | groups = { | |
18 | "group-foo": { | |
19 | 'members': ["node-foo"], | |
20 | 'member_patterns': [r".*-bar"], | |
21 | }, | |
22 | "group-bar": { | |
23 | 'subgroups': ["group-foo"], | |
24 | }, | |
25 | "group-baz": { | |
26 | 'members': ["node-pop"], | |
27 | 'members_add': lambda node: node.name == "node-pop", | |
28 | }, | |
29 | "group-pop": { | |
30 | 'subgroup_patterns': [r"ba"], | |
31 | }, | |
32 | } | |
33 | """) | |
34 | 28 | stdout, stderr, rcode = run("bw plot groups-for-node node-foo", path=str(tmpdir)) |
35 | 29 | assert stdout == b"""digraph bundlewrap |
36 | 30 | { |
43 | 37 | "node-foo" [fontcolor="#303030",shape=box,style=rounded]; |
44 | 38 | "group-bar" -> "group-foo" [color="#6BB753",penwidth=2] |
45 | 39 | "group-pop" -> "group-bar" [color="#6BB753",penwidth=2] |
46 | "group-foo" -> "node-foo" [color="#D18C57",penwidth=2] | |
47 | 40 | } |
48 | 41 | """ |
49 | 42 | assert stderr == b"" |
56 | 49 | node [color="#303030"; fillcolor="#303030"; fontname=Helvetica] |
57 | 50 | edge [arrowhead=vee] |
58 | 51 | "group-baz" [fontcolor=white,style=filled]; |
52 | "group-frob" [fontcolor=white,style=filled]; | |
59 | 53 | "group-pop" [fontcolor=white,style=filled]; |
60 | 54 | "node-pop" [fontcolor="#303030",shape=box,style=rounded]; |
55 | "group-frob" -> "node-pop" [color="#D18C57",penwidth=2] | |
61 | 56 | "group-pop" -> "group-baz" [color="#6BB753",penwidth=2] |
62 | "group-baz" -> "node-pop" [color="#D18C57",penwidth=2] | |
63 | 57 | } |
64 | 58 | """ |
65 | 59 | assert stderr == b"" |
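The rewritten repo above reflects that group membership is now declared either on the node (`'groups': {...}`) or on the group via `members`, `member_patterns`, or `subgroup_patterns`, while the callable `members_add`/`members_remove` mechanism is gone. A hedged sketch of an equivalent groups.py under the new scheme:

    groups = {
        "group-foo": {
            'member_patterns': [r".*-bar"],  # regex matched against node names
        },
        "group-bar": {
            'subgroups': ["group-foo"],
        },
        "group-pop": {
            'subgroup_patterns': [r"ba"],    # regex matched against group names
        },
    }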
0 | # -*- coding: utf-8 -*- | |
1 | from __future__ import unicode_literals | |
2 | ||
3 | 0 | from bundlewrap.utils.testing import make_repo, run |
4 | 1 | |
5 | 2 | |
26 | 23 | ) |
27 | 24 | |
28 | 25 | stdout, stderr, rcode = run("bw stats", path=str(tmpdir)) |
29 | assert stdout == """╭───────┬─────────────────────╮ | |
30 | │ count │ type │ | |
31 | ├───────┼─────────────────────┤ | |
32 | │ 1 │ nodes │ | |
33 | │ 0 │ groups │ | |
34 | │ 1 │ bundles │ | |
35 | │ 0 │ metadata defaults │ | |
36 | │ 0 │ metadata processors │ | |
37 | │ 0 │ metadata reactors │ | |
38 | │ 2 │ items │ | |
39 | ├───────┼─────────────────────┤ | |
40 | │ 2 │ file │ | |
41 | ╰───────┴─────────────────────╯ | |
26 | assert stdout == """╭───────┬───────────────────╮ | |
27 | │ count │ type │ | |
28 | ├───────┼───────────────────┤ | |
29 | │ 1 │ nodes │ | |
30 | │ 0 │ groups │ | |
31 | │ 1 │ bundles │ | |
32 | │ 0 │ metadata defaults │ | |
33 | │ 0 │ metadata reactors │ | |
34 | │ 2 │ items │ | |
35 | ├───────┼───────────────────┤ | |
36 | │ 2 │ file │ | |
37 | ╰───────┴───────────────────╯ | |
42 | 38 | """.encode('utf-8') |
114 | 114 | assert run("bw test -I", path=str(tmpdir))[2] == 1 |
115 | 115 | |
116 | 116 | |
117 | def test_unknown_tag(tmpdir): | |
118 | make_repo( | |
119 | tmpdir, | |
120 | nodes={ | |
121 | "node1": { | |
122 | 'bundles': ["bundle1"], | |
123 | }, | |
124 | }, | |
125 | bundles={ | |
126 | "bundle1": { | |
127 | "files": { | |
128 | "/foo": { | |
129 | 'content': "none", | |
130 | 'needs': { | |
131 | "tag:bar", | |
132 | }, | |
133 | }, | |
134 | }, | |
135 | }, | |
136 | }, | |
137 | ) | |
138 | assert run("bw test -I", path=str(tmpdir))[2] == 0 | |
139 | ||
140 | ||
117 | 141 | def test_circular_trigger_self(tmpdir): |
118 | 142 | make_repo( |
119 | 143 | tmpdir, |
199 | 223 | def test_group_metadata_collision(tmpdir): |
200 | 224 | make_repo( |
201 | 225 | tmpdir, |
202 | nodes={"node1": {}}, | |
226 | nodes={ | |
227 | "node1": { | |
228 | 'groups': { | |
229 | "group1", | |
230 | "group3", | |
231 | }, | |
232 | }, | |
233 | }, | |
203 | 234 | groups={ |
204 | 235 | "group1": { |
205 | 'members': ["node1"], | |
206 | 236 | 'metadata': { |
207 | 237 | 'foo': { |
208 | 238 | 'baz': 1, |
219 | 249 | }, |
220 | 250 | 'subgroups': ["group3"], |
221 | 251 | }, |
222 | "group3": { | |
223 | 'members': ["node1"], | |
224 | }, | |
252 | "group3": {}, | |
225 | 253 | }, |
226 | 254 | ) |
227 | 255 | assert run("bw test -M", path=str(tmpdir))[2] == 1 |
230 | 258 | def test_group_metadata_collision_subgroups(tmpdir): |
231 | 259 | make_repo( |
232 | 260 | tmpdir, |
233 | nodes={"node1": {}}, | |
261 | nodes={ | |
262 | "node1": { | |
263 | 'groups': { | |
264 | "group1", | |
265 | "group3", | |
266 | }, | |
267 | }, | |
268 | }, | |
234 | 269 | groups={ |
235 | 270 | "group1": { |
236 | 'members': ["node1"], | |
237 | 271 | 'metadata': { |
238 | 272 | 'foo': { |
239 | 273 | 'baz': 1, |
250 | 284 | }, |
251 | 285 | 'subgroups': ["group1", "group3"], |
252 | 286 | }, |
253 | "group3": { | |
254 | 'members': ["node1"], | |
255 | }, | |
287 | "group3": {}, | |
256 | 288 | }, |
257 | 289 | ) |
258 | 290 | assert run("bw test -M", path=str(tmpdir))[2] == 0 |
261 | 293 | def test_group_metadata_collision_list(tmpdir): |
262 | 294 | make_repo( |
263 | 295 | tmpdir, |
264 | nodes={"node1": {}}, | |
296 | nodes={ | |
297 | "node1": { | |
298 | 'groups': { | |
299 | "group1", | |
300 | "group2", | |
301 | }, | |
302 | }, | |
303 | }, | |
265 | 304 | groups={ |
266 | 305 | "group1": { |
267 | 'members': ["node1"], | |
268 | 306 | 'metadata': { |
269 | 307 | 'foo': [1], |
270 | 308 | }, |
271 | 309 | }, |
272 | 310 | "group2": { |
273 | 'members': ["node1"], | |
274 | 311 | 'metadata': { |
275 | 312 | 'foo': [2], |
276 | 313 | }, |
283 | 320 | def test_group_metadata_collision_dict(tmpdir): |
284 | 321 | make_repo( |
285 | 322 | tmpdir, |
286 | nodes={"node1": {}}, | |
323 | nodes={ | |
324 | "node1": { | |
325 | 'groups': { | |
326 | "group1", | |
327 | "group2", | |
328 | }, | |
329 | }, | |
330 | }, | |
287 | 331 | groups={ |
288 | 332 | "group1": { |
289 | 'members': ["node1"], | |
290 | 333 | 'metadata': { |
291 | 334 | 'foo': {'bar': 1}, |
292 | 335 | }, |
293 | 336 | }, |
294 | 337 | "group2": { |
295 | 'members': ["node1"], | |
296 | 338 | 'metadata': { |
297 | 339 | 'foo': 2, |
298 | 340 | }, |
305 | 347 | def test_group_metadata_collision_dict_ok(tmpdir): |
306 | 348 | make_repo( |
307 | 349 | tmpdir, |
308 | nodes={"node1": {}}, | |
350 | nodes={ | |
351 | "node1": { | |
352 | 'groups': { | |
353 | "group1", | |
354 | "group2", | |
355 | }, | |
356 | }, | |
357 | }, | |
309 | 358 | groups={ |
310 | 359 | "group1": { |
311 | 'members': ["node1"], | |
312 | 360 | 'metadata': { |
313 | 361 | 'foo': {'bar': 1}, |
314 | 362 | }, |
315 | 363 | }, |
316 | 364 | "group2": { |
317 | 'members': ["node1"], | |
318 | 365 | 'metadata': { |
319 | 366 | 'foo': {'baz': 2}, |
320 | 367 | }, |
327 | 374 | def test_group_metadata_collision_set(tmpdir): |
328 | 375 | make_repo( |
329 | 376 | tmpdir, |
330 | nodes={"node1": {}}, | |
377 | nodes={ | |
378 | "node1": { | |
379 | 'groups': { | |
380 | "group1", | |
381 | "group2", | |
382 | }, | |
383 | }, | |
384 | }, | |
331 | 385 | groups={ |
332 | 386 | "group1": { |
333 | 'members': ["node1"], | |
334 | 387 | 'metadata': { |
335 | 388 | 'foo': set([1]), |
336 | 389 | }, |
337 | 390 | }, |
338 | 391 | "group2": { |
339 | 'members': ["node1"], | |
340 | 392 | 'metadata': { |
341 | 393 | 'foo': 2, |
342 | 394 | }, |
349 | 401 | def test_group_metadata_collision_set_ok(tmpdir): |
350 | 402 | make_repo( |
351 | 403 | tmpdir, |
352 | nodes={"node1": {}}, | |
404 | nodes={ | |
405 | "node1": { | |
406 | 'groups': { | |
407 | "group1", | |
408 | "group2", | |
409 | }, | |
410 | }, | |
411 | }, | |
353 | 412 | groups={ |
354 | 413 | "group1": { |
355 | 'members': ["node1"], | |
356 | 414 | 'metadata': { |
357 | 415 | 'foo': set([1]), |
358 | 416 | }, |
359 | 417 | }, |
360 | 418 | "group2": { |
361 | 'members': ["node1"], | |
362 | 419 | 'metadata': { |
363 | 420 | 'foo': set([2]), |
364 | 421 | }, |
366 | 423 | }, |
367 | 424 | ) |
368 | 425 | assert run("bw test -M", path=str(tmpdir))[2] == 0 |
426 | ||
427 | ||
428 | def test_defaults_metadata_collision(tmpdir): | |
429 | make_repo( | |
430 | tmpdir, | |
431 | nodes={ | |
432 | "node1": { | |
433 | 'bundles': {"bundle1", "bundle2"}, | |
434 | }, | |
435 | }, | |
436 | bundles={ | |
437 | "bundle1": {}, | |
438 | "bundle2": {}, | |
439 | }, | |
440 | ) | |
441 | with open(join(str(tmpdir), "bundles", "bundle1", "metadata.py"), 'w') as f: | |
442 | f.write( | |
443 | """defaults = { | |
444 | "foo": "bar", | |
445 | } | |
446 | """) | |
447 | with open(join(str(tmpdir), "bundles", "bundle2", "metadata.py"), 'w') as f: | |
448 | f.write( | |
449 | """defaults = { | |
450 | "foo": "baz", | |
451 | } | |
452 | """) | |
453 | stdout, stderr, rcode = run("bw test -M", path=str(tmpdir)) | |
454 | assert rcode == 1 | |
455 | assert b"foo" in stderr | |
456 | ||
457 | ||
458 | def test_defaults_metadata_collision_nested(tmpdir): | |
459 | make_repo( | |
460 | tmpdir, | |
461 | nodes={ | |
462 | "node1": { | |
463 | 'bundles': {"bundle1", "bundle2"}, | |
464 | }, | |
465 | }, | |
466 | bundles={ | |
467 | "bundle1": {}, | |
468 | "bundle2": {}, | |
469 | }, | |
470 | ) | |
471 | with open(join(str(tmpdir), "bundles", "bundle1", "metadata.py"), 'w') as f: | |
472 | f.write( | |
473 | """defaults = { | |
474 | "foo": {"bar": "baz"}, | |
475 | } | |
476 | """) | |
477 | with open(join(str(tmpdir), "bundles", "bundle2", "metadata.py"), 'w') as f: | |
478 | f.write( | |
479 | """defaults = { | |
480 | "foo": {"bar": "frob"}, | |
481 | } | |
482 | """) | |
483 | stdout, stderr, rcode = run("bw test -M", path=str(tmpdir)) | |
484 | assert rcode == 1 | |
485 | assert b"foo/bar" in stderr | |
486 | ||
487 | ||
488 | def test_defaults_metadata_collision_ok(tmpdir): | |
489 | make_repo( | |
490 | tmpdir, | |
491 | nodes={ | |
492 | "node1": { | |
493 | 'bundles': {"bundle1", "bundle2"}, | |
494 | }, | |
495 | }, | |
496 | bundles={ | |
497 | "bundle1": {}, | |
498 | "bundle2": {}, | |
499 | }, | |
500 | ) | |
501 | with open(join(str(tmpdir), "bundles", "bundle1", "metadata.py"), 'w') as f: | |
502 | f.write( | |
503 | """defaults = { | |
504 | "foo": {"bar"}, | |
505 | } | |
506 | """) | |
507 | with open(join(str(tmpdir), "bundles", "bundle2", "metadata.py"), 'w') as f: | |
508 | f.write( | |
509 | """defaults = { | |
510 | "foo": {"baz"}, | |
511 | } | |
512 | """) | |
513 | assert run("bw test -M", path=str(tmpdir))[2] == 0 | |
514 | ||
515 | ||
516 | def test_reactor_metadata_collision(tmpdir): | |
517 | make_repo( | |
518 | tmpdir, | |
519 | nodes={ | |
520 | "node1": { | |
521 | 'bundles': {"bundle1", "bundle2"}, | |
522 | }, | |
523 | }, | |
524 | bundles={ | |
525 | "bundle1": {}, | |
526 | "bundle2": {}, | |
527 | }, | |
528 | ) | |
529 | with open(join(str(tmpdir), "bundles", "bundle1", "metadata.py"), 'w') as f: | |
530 | f.write( | |
531 | """@metadata_reactor | |
532 | def foo(metadata): | |
533 | return {"foo": 1} | |
534 | """) | |
535 | with open(join(str(tmpdir), "bundles", "bundle2", "metadata.py"), 'w') as f: | |
536 | f.write( | |
537 | """@metadata_reactor | |
538 | def foo(metadata): | |
539 | return {"foo": 2} | |
540 | """) | |
541 | stdout, stderr, rcode = run("bw test -M", path=str(tmpdir)) | |
542 | assert rcode == 1 | |
543 | assert b"foo" in stderr | |
544 | ||
545 | ||
546 | def test_reactor_metadata_collision_nested(tmpdir): | |
547 | make_repo( | |
548 | tmpdir, | |
549 | nodes={ | |
550 | "node1": { | |
551 | 'bundles': {"bundle1", "bundle2"}, | |
552 | }, | |
553 | }, | |
554 | bundles={ | |
555 | "bundle1": {}, | |
556 | "bundle2": {}, | |
557 | }, | |
558 | ) | |
559 | with open(join(str(tmpdir), "bundles", "bundle1", "metadata.py"), 'w') as f: | |
560 | f.write( | |
561 | """@metadata_reactor | |
562 | def foo(metadata): | |
563 | return {"foo": {"bar": "1"}} | |
564 | """) | |
565 | with open(join(str(tmpdir), "bundles", "bundle2", "metadata.py"), 'w') as f: | |
566 | f.write( | |
567 | """@metadata_reactor | |
568 | def foo(metadata): | |
569 | return {"foo": {"bar": "2"}} | |
570 | """) | |
571 | stdout, stderr, rcode = run("bw test -M", path=str(tmpdir)) | |
572 | assert rcode == 1 | |
573 | assert b"foo/bar" in stderr | |
574 | ||
575 | ||
576 | def test_reactor_metadata_collision_nested_mixed(tmpdir): | |
577 | make_repo( | |
578 | tmpdir, | |
579 | nodes={ | |
580 | "node1": { | |
581 | 'bundles': {"bundle1", "bundle2"}, | |
582 | }, | |
583 | }, | |
584 | bundles={ | |
585 | "bundle1": {}, | |
586 | "bundle2": {}, | |
587 | }, | |
588 | ) | |
589 | with open(join(str(tmpdir), "bundles", "bundle1", "metadata.py"), 'w') as f: | |
590 | f.write( | |
591 | """@metadata_reactor | |
592 | def foo(metadata): | |
593 | return {"foo": {"bar": {True}}} | |
594 | """) | |
595 | with open(join(str(tmpdir), "bundles", "bundle2", "metadata.py"), 'w') as f: | |
596 | f.write( | |
597 | """@metadata_reactor | |
598 | def foo(metadata): | |
599 | return {"foo": {"bar": [False]}} | |
600 | """) | |
601 | stdout, stderr, rcode = run("bw test -M", path=str(tmpdir)) | |
602 | assert rcode == 1 | |
603 | assert b"foo/bar" in stderr | |
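Taken together, these cases outline what `bw test -M` treats as a conflict between two bundles: competing scalars or incompatible types at the same key (or the same nested path) fail, while two sets merge cleanly. A rough approximation of the rule in code form (not BundleWrap's actual implementation):

    def conflicts(a, b):
        """Rough approximation of the conflict rule tested above."""
        if isinstance(a, set) and isinstance(b, set):
            return False  # sets are unioned, never a conflict
        if isinstance(a, dict) and isinstance(b, dict):
            return any(conflicts(a[k], b[k]) for k in a.keys() & b.keys())
        return a != b     # competing scalars/lists/mixed types collide

    assert conflicts({"foo": "bar"}, {"foo": "baz"})
    assert not conflicts({"foo": {"bar"}}, {"foo": {"baz"}})
    assert conflicts({"foo": {"bar": {True}}}, {"foo": {"bar": [False]}})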
369 | 604 | |
370 | 605 | |
371 | 606 | def test_fault_missing(tmpdir): |
391 | 626 | assert run("bw test -iI", path=str(tmpdir))[2] == 0 |
392 | 627 | |
393 | 628 | |
629 | def test_fault_missing_content(tmpdir): | |
630 | make_repo( | |
631 | tmpdir, | |
632 | nodes={ | |
633 | "node1": { | |
634 | 'bundles': ["bundle1"], | |
635 | }, | |
636 | }, | |
637 | bundles={ | |
638 | "bundle1": {} | |
639 | }, | |
640 | ) | |
641 | with open(join(str(tmpdir), "bundles", "bundle1", "items.py"), 'w') as f: | |
642 | f.write(""" | |
643 | files = { | |
644 | "/foo": { | |
645 | 'content': repo.vault.decrypt("bzzt", key="unavailable"), | |
646 | }, | |
647 | } | |
648 | """) | |
649 | assert run("bw test -I", path=str(tmpdir))[2] == 1 | |
650 | assert run("bw test -iI", path=str(tmpdir))[2] == 0 | |
651 | ||
652 | ||
394 | 653 | def test_metadata_determinism_ok(tmpdir): |
395 | 654 | make_repo( |
396 | 655 | tmpdir, |
404 | 663 | }, |
405 | 664 | ) |
406 | 665 | with open(join(str(tmpdir), "bundles", "bundle1", "metadata.py"), 'w') as f: |
407 | f.write("""@metadata_processor | |
666 | f.write("""@metadata_reactor | |
408 | 667 | def test(metadata): |
409 | metadata['test'] = 1 | |
410 | return metadata, DONE | |
668 | return {'test': 1} | |
411 | 669 | """) |
412 | 670 | assert run("bw test -m 3", path=str(tmpdir))[2] == 0 |
413 | 671 | |
426 | 684 | ) |
427 | 685 | with open(join(str(tmpdir), "bundles", "bundle1", "metadata.py"), 'w') as f: |
428 | 686 | f.write("""from random import randint |
429 | ||
430 | @metadata_processor | |
687 | n = randint(1, 99999) | |
688 | ||
689 | @metadata_reactor | |
431 | 690 | def test(metadata): |
432 | metadata.setdefault('test', randint(1, 99999)) | |
433 | return metadata, DONE | |
691 | return {'test': n} | |
434 | 692 | """) |
435 | 693 | assert run("bw test -m 3", path=str(tmpdir))[2] == 1 |
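This pair of tests shows what `bw test -m 3` checks: metadata is built repeatedly and any run-to-run difference fails the test, which catches reactors seeded from `randint` and similar sources. A hedged sketch of a reactor that stays deterministic by deriving its value from stable inputs only:

    @metadata_reactor
    def test(metadata):
        # stable per node across runs, unlike randint()
        return {'test': len(node.name)}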
436 | 694 | |
483 | 741 | make_repo( |
484 | 742 | tmpdir, |
485 | 743 | nodes={ |
486 | "node1": {}, | |
744 | "node1": {'groups': {"group2"}}, | |
487 | 745 | }, |
488 | 746 | groups={ |
489 | 747 | "group1": {'subgroups': ["missing-group"]}, |
490 | "group2": {'members': ["node1"]}, | |
748 | "group2": {}, | |
491 | 749 | }, |
492 | 750 | ) |
493 | 751 | assert run("bw test", path=str(tmpdir))[2] == 1 |
499 | 757 | make_repo( |
500 | 758 | tmpdir, |
501 | 759 | nodes={ |
502 | "node1": {}, | |
760 | "node1": {'groups': {"group2"}}, | |
503 | 761 | }, |
504 | 762 | groups={ |
505 | 763 | "group1": {}, |
506 | "group2": {'members': ["node1"]}, | |
764 | "group2": {}, | |
507 | 765 | }, |
508 | 766 | ) |
509 | 767 | assert run("bw test", path=str(tmpdir))[2] == 0 |
583 | 841 | }, |
584 | 842 | ) |
585 | 843 | assert run("bw test -I", path=str(tmpdir))[2] == 1 |
586 | ||
587 | ||
588 | def test_secret_identifier_only_once(tmpdir): | |
589 | make_repo( | |
590 | tmpdir, | |
591 | nodes={ | |
592 | "node1": { | |
593 | 'bundles': ["bundle1"], | |
594 | }, | |
595 | }, | |
596 | bundles={ | |
597 | "bundle1": { | |
598 | 'files': { | |
599 | "/test": { | |
600 | 'content': "${repo.vault.password_for('testing')}", | |
601 | 'content_type': 'mako', | |
602 | }, | |
603 | }, | |
604 | }, | |
605 | }, | |
606 | ) | |
607 | assert run("bw test -s ''", path=str(tmpdir))[2] == 1 | |
608 | assert run("bw test -s 'test'", path=str(tmpdir))[2] == 0 | |
609 | assert run("bw test -s 'test,foo'", path=str(tmpdir))[2] == 0 | |
610 | ||
611 | ||
612 | def test_secret_identifier_twice(tmpdir): | |
613 | make_repo( | |
614 | tmpdir, | |
615 | nodes={ | |
616 | "node1": { | |
617 | 'bundles': ["bundle1"], | |
618 | }, | |
619 | "node2": { | |
620 | 'bundles': ["bundle1"], | |
621 | }, | |
622 | }, | |
623 | bundles={ | |
624 | "bundle1": { | |
625 | 'files': { | |
626 | "/test": { | |
627 | 'content': "${repo.vault.password_for('testing')}", | |
628 | 'content_type': 'mako', | |
629 | }, | |
630 | }, | |
631 | }, | |
632 | }, | |
633 | ) | |
634 | assert run("bw test -s ''", path=str(tmpdir))[2] == 0 | |
635 | assert run("bw test -s 'test'", path=str(tmpdir))[2] == 0 | |
636 | assert run("bw test -s 'test,foo'", path=str(tmpdir))[2] == 0 | |
637 | 844 | |
638 | 845 | |
639 | 846 | def test_reverse_dummy_dep(tmpdir): |
0 | # -*- coding: utf-8 -*- | |
1 | from __future__ import unicode_literals | |
2 | ||
3 | 0 | from os.path import join |
4 | 1 | |
5 | 2 | from bundlewrap.utils.testing import host_os, make_repo, run |
0 | from os.path import join | |
1 | ||
2 | from bundlewrap.utils.testing import make_repo, run | |
3 | ||
4 | ||
5 | def test_libs(tmpdir): | |
6 | make_repo( | |
7 | tmpdir, | |
8 | ) | |
9 | with open(join(str(tmpdir), "libs", "libstest.py"), 'w') as f: | |
10 | f.write( | |
11 | """ivar = 47 | |
12 | ||
13 | def func(): | |
14 | return 48 | |
15 | """) | |
16 | stdout, stderr, rcode = run("bw debug -c 'print(repo.libs.libstest.ivar)'", path=str(tmpdir)) | |
17 | assert stdout == b"47\n" | |
18 | assert stderr == b"" | |
19 | assert rcode == 0 | |
20 | ||
21 | stdout, stderr, rcode = run("bw debug -c 'print(repo.libs.libstest.func())'", path=str(tmpdir)) | |
22 | assert stdout == b"48\n" | |
23 | assert stderr == b"" | |
24 | assert rcode == 0 | |
25 |
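This test pins down the `repo.libs` convenience: any module dropped into `libs/` becomes importable as an attribute of the repository object. A hedged usage sketch from inside repo code such as a metadata.py (the `libstest` names come from the file written above):

    port_base = repo.libs.libstest.ivar    # -> 47
    port = repo.libs.libstest.func() + 1   # -> 49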
0 | from base64 import b64decode | |
1 | from os.path import join | |
2 | ||
3 | from bundlewrap.utils.testing import make_repo, run | |
4 | ||
5 | ||
6 | def test_b64encode_fault(tmpdir): | |
7 | make_repo(tmpdir) | |
8 | ||
9 | stdout, stderr, rcode = run("bw debug -c 'print(repo.vault.password_for(\"testing\").b64encode())'", path=str(tmpdir)) | |
10 | assert stdout == b"ZmFDVFQ3NmthZ3REdVpFNXdub2lEMUN4aEdLbWJnaVg=\n" | |
11 | assert stderr == b"" | |
12 | assert rcode == 0 | |
13 | ||
14 | ||
15 | def test_encrypt(tmpdir): | |
16 | make_repo(tmpdir) | |
17 | ||
18 | stdout, stderr, rcode = run("bw debug -c 'print(repo.vault.encrypt(\"test\"))'", path=str(tmpdir)) | |
19 | assert stderr == b"" | |
20 | assert rcode == 0 | |
21 | ||
22 | stdout, stderr, rcode = run("bw debug -c 'print(repo.vault.decrypt(\"{}\"))'".format(stdout.decode('utf-8').strip()), path=str(tmpdir)) | |
23 | assert stdout == b"test\n" | |
24 | assert stderr == b"" | |
25 | assert rcode == 0 | |
26 | ||
27 | ||
28 | def test_encrypt_different_key_autodetect(tmpdir): | |
29 | make_repo(tmpdir) | |
30 | ||
31 | stdout, stderr, rcode = run("bw debug -c 'print(repo.vault.encrypt(\"test\", key=\"generate\"))'", path=str(tmpdir)) | |
32 | assert stderr == b"" | |
33 | assert rcode == 0 | |
34 | ||
35 | stdout, stderr, rcode = run("bw debug -c 'print(repo.vault.decrypt(\"{}\"))'".format(stdout.decode('utf-8').strip()), path=str(tmpdir)) | |
36 | assert stdout == b"test\n" | |
37 | assert stderr == b"" | |
38 | assert rcode == 0 | |
39 | ||
40 | ||
41 | def test_encrypt_file(tmpdir): | |
42 | make_repo(tmpdir) | |
43 | ||
44 | source_file = join(str(tmpdir), "data", "source") | |
45 | with open(source_file, 'w') as f: | |
46 | f.write("ohai") | |
47 | ||
48 | stdout, stderr, rcode = run( | |
49 | "bw debug -c 'repo.vault.encrypt_file(\"{}\", \"{}\")'".format( | |
50 | source_file, | |
51 | "encrypted", | |
52 | ), | |
53 | path=str(tmpdir), | |
54 | ) | |
55 | assert stderr == b"" | |
56 | assert rcode == 0 | |
57 | ||
58 | stdout, stderr, rcode = run( | |
59 | "bw debug -c 'print(repo.vault.decrypt_file(\"{}\"))'".format( | |
60 | "encrypted", | |
61 | ), | |
62 | path=str(tmpdir), | |
63 | ) | |
64 | assert stdout == b"ohai\n" | |
65 | assert stderr == b"" | |
66 | assert rcode == 0 | |
67 | ||
68 | ||
69 | def test_encrypt_file_different_key_autodetect(tmpdir): | |
70 | make_repo(tmpdir) | |
71 | ||
72 | source_file = join(str(tmpdir), "data", "source") | |
73 | with open(source_file, 'w') as f: | |
74 | f.write("ohai") | |
75 | ||
76 | stdout, stderr, rcode = run( | |
77 | "bw debug -c 'repo.vault.encrypt_file(\"{}\", \"{}\", \"{}\")'".format( | |
78 | source_file, | |
79 | "encrypted", | |
80 | "generate", | |
81 | ), | |
82 | path=str(tmpdir), | |
83 | ) | |
84 | assert stderr == b"" | |
85 | assert rcode == 0 | |
86 | ||
87 | stdout, stderr, rcode = run( | |
88 | "bw debug -c 'print(repo.vault.decrypt_file(\"{}\"))'".format( | |
89 | "encrypted", | |
90 | ), | |
91 | path=str(tmpdir), | |
92 | ) | |
93 | assert stdout == b"ohai\n" | |
94 | assert stderr == b"" | |
95 | assert rcode == 0 | |
96 | ||
97 | ||
98 | def test_encrypt_file_base64(tmpdir): | |
99 | make_repo(tmpdir) | |
100 | ||
101 | source_file = join(str(tmpdir), "data", "source") | |
102 | with open(source_file, 'wb') as f: | |
103 | f.write("öhai".encode('latin-1')) | |
104 | ||
105 | stdout, stderr, rcode = run( | |
106 | "bw debug -c 'repo.vault.encrypt_file(\"{}\", \"{}\")'".format( | |
107 | source_file, | |
108 | "encrypted", | |
109 | ), | |
110 | path=str(tmpdir), | |
111 | ) | |
112 | assert stderr == b"" | |
113 | assert rcode == 0 | |
114 | ||
115 | stdout, stderr, rcode = run( | |
116 | "bw debug -c 'print(repo.vault.decrypt_file_as_base64(\"{}\"))'".format( | |
117 | "encrypted", | |
118 | ), | |
119 | path=str(tmpdir), | |
120 | ) | |
121 | assert b64decode(stdout.decode('utf-8')) == "öhai".encode('latin-1') | |
122 | assert stderr == b"" | |
123 | assert rcode == 0 | |
124 | ||
125 | ||
126 | def test_format_password(tmpdir): | |
127 | make_repo(tmpdir) | |
128 | ||
129 | stdout, stderr, rcode = run("bw debug -c 'print(repo.vault.password_for(\"testing\").format_into(\"format: {}\"))'", path=str(tmpdir)) | |
130 | assert stdout == b"format: faCTT76kagtDuZE5wnoiD1CxhGKmbgiX\n" | |
131 | assert stderr == b"" | |
132 | assert rcode == 0 | |
133 | ||
134 | ||
135 | def test_human_password(tmpdir): | |
136 | make_repo(tmpdir) | |
137 | ||
138 | stdout, stderr, rcode = run("bw debug -c 'print(repo.vault.human_password_for(\"hello world\"))'", path=str(tmpdir)) | |
139 | assert stdout == b"Xaint-Heep-Pier-Tikl-76\n" | |
140 | assert stderr == b"" | |
141 | assert rcode == 0 | |
142 | ||
143 | ||
144 | def test_human_password_digits(tmpdir): | |
145 | make_repo(tmpdir) | |
146 | ||
147 | stdout, stderr, rcode = run("bw debug -c 'print(repo.vault.human_password_for(\"hello world\", digits=4))'", path=str(tmpdir)) | |
148 | assert stdout == b"Xaint-Heep-Pier-Tikl-7608\n" | |
149 | assert stderr == b"" | |
150 | assert rcode == 0 | |
151 | ||
152 | ||
153 | def test_human_password_per_word(tmpdir): | |
154 | make_repo(tmpdir) | |
155 | ||
156 | stdout, stderr, rcode = run("bw debug -c 'print(repo.vault.human_password_for(\"hello world\", per_word=1))'", path=str(tmpdir)) | |
157 | assert stdout == b"X-D-F-H-42\n" | |
158 | assert stderr == b"" | |
159 | assert rcode == 0 | |
160 | ||
161 | ||
162 | def test_human_password_words(tmpdir): | |
163 | make_repo(tmpdir) | |
164 | ||
165 | stdout, stderr, rcode = run("bw debug -c 'print(repo.vault.human_password_for(\"hello world\", words=2))'", path=str(tmpdir)) | |
166 | assert stdout == b"Xaint-Heep-13\n" | |
167 | assert stderr == b"" | |
168 | assert rcode == 0 | |
169 | ||
170 | ||
171 | def test_random_bytes_as_base64(tmpdir): | |
172 | make_repo(tmpdir) | |
173 | ||
174 | stdout, stderr, rcode = run("bw debug -c 'print(repo.vault.random_bytes_as_base64_for(\"foo\"))'", path=str(tmpdir)) | |
175 | assert stdout == b"rt+Dgv0yA10DS3ux94mmtEg+isChTJvgkfklzmWkvyg=\n" | |
176 | assert stderr == b"" | |
177 | assert rcode == 0 | |
178 | ||
179 | ||
180 | def test_random_bytes_as_base64_length(tmpdir): | |
181 | make_repo(tmpdir) | |
182 | ||
183 | stdout, stderr, rcode = run("bw debug -c 'print(repo.vault.random_bytes_as_base64_for(\"foo\", length=1))'", path=str(tmpdir)) | |
184 | assert stdout == b"rg==\n" | |
185 | assert stderr == b"" | |
186 | assert rcode == 0 | |
187 | ||
188 | ||
189 | def test_faults_equality_decrypt(tmpdir): | |
190 | make_repo(tmpdir) | |
191 | ||
192 | stdout, stderr, rcode = run("bw debug -c 'print(repo.vault.encrypt(\"foo\"))'", path=str(tmpdir)) | |
193 | assert stderr == b"" | |
194 | assert rcode == 0 | |
195 | enc_foo = stdout.decode('utf-8').strip() | |
196 | ||
197 | stdout, stderr, rcode = run( | |
198 | "bw debug -c 'print(repo.vault.encrypt(\"bar\"))'", path=str(tmpdir), | |
199 | ) | |
200 | assert stderr == b"" | |
201 | assert rcode == 0 | |
202 | enc_bar = stdout.decode('utf-8').strip() | |
203 | ||
204 | stdout, stderr, rcode = run( | |
205 | "bw debug -c 'print(repo.vault.decrypt(\"{}\") == repo.vault.decrypt(\"{}\"))'".format( | |
206 | enc_foo, enc_foo, | |
207 | ), | |
208 | path=str(tmpdir), | |
209 | ) | |
210 | assert stdout == b"True\n" | |
211 | assert stderr == b"" | |
212 | assert rcode == 0 | |
213 | ||
214 | stdout, stderr, rcode = run( | |
215 | "bw debug -c 'print(repo.vault.decrypt(\"{}\") == repo.vault.decrypt(\"{}\"))'".format( | |
216 | enc_foo, enc_bar, | |
217 | ), | |
218 | path=str(tmpdir), | |
219 | ) | |
220 | assert stdout == b"False\n" | |
221 | assert stderr == b"" | |
222 | assert rcode == 0 | |
223 | ||
224 | ||
225 | def test_faults_equality_decrypt_file(tmpdir): | |
226 | make_repo(tmpdir) | |
227 | ||
228 | source_file = join(str(tmpdir), "data", "source") | |
229 | with open(source_file, 'w') as f: | |
230 | f.write("foo") | |
231 | stdout, stderr, rcode = run( | |
232 | "bw debug -c 'repo.vault.encrypt_file(\"{}\", \"{}\")'".format( | |
233 | source_file, | |
234 | "enc_foo", | |
235 | ), | |
236 | path=str(tmpdir), | |
237 | ) | |
238 | assert stderr == b"" | |
239 | assert rcode == 0 | |
240 | ||
241 | source_file = join(str(tmpdir), "data", "source") | |
242 | with open(source_file, 'w') as f: | |
243 | f.write("bar") | |
244 | stdout, stderr, rcode = run( | |
245 | "bw debug -c 'repo.vault.encrypt_file(\"{}\", \"{}\")'".format( | |
246 | source_file, | |
247 | "enc_bar", | |
248 | ), | |
249 | path=str(tmpdir), | |
250 | ) | |
251 | assert stderr == b"" | |
252 | assert rcode == 0 | |
253 | ||
254 | stdout, stderr, rcode = run( | |
255 | "bw debug -c 'print(repo.vault.decrypt_file(\"{}\") == repo.vault.decrypt_file(\"{}\"))'".format( | |
256 | "enc_foo", "enc_foo", | |
257 | ), | |
258 | path=str(tmpdir), | |
259 | ) | |
260 | assert stdout == b"True\n" | |
261 | assert stderr == b"" | |
262 | assert rcode == 0 | |
263 | ||
264 | stdout, stderr, rcode = run( | |
265 | "bw debug -c 'print(repo.vault.decrypt_file(\"{}\") == repo.vault.decrypt_file(\"{}\"))'".format( | |
266 | "enc_foo", "enc_bar", | |
267 | ), | |
268 | path=str(tmpdir), | |
269 | ) | |
270 | assert stdout == b"False\n" | |
271 | assert stderr == b"" | |
272 | assert rcode == 0 | |
273 | ||
274 | ||
275 | def test_faults_equality_decrypt_file_as_base64(tmpdir): | |
276 | make_repo(tmpdir) | |
277 | ||
278 | source_file = join(str(tmpdir), "data", "source") | |
279 | with open(source_file, 'w') as f: | |
280 | f.write("foo") | |
281 | stdout, stderr, rcode = run( | |
282 | "bw debug -c 'repo.vault.encrypt_file(\"{}\", \"{}\")'".format( | |
283 | source_file, | |
284 | "enc_foo", | |
285 | ), | |
286 | path=str(tmpdir), | |
287 | ) | |
288 | assert stderr == b"" | |
289 | assert rcode == 0 | |
290 | ||
291 | source_file = join(str(tmpdir), "data", "source") | |
292 | with open(source_file, 'w') as f: | |
293 | f.write("bar") | |
294 | stdout, stderr, rcode = run( | |
295 | "bw debug -c 'repo.vault.encrypt_file(\"{}\", \"{}\")'".format( | |
296 | source_file, | |
297 | "enc_bar", | |
298 | ), | |
299 | path=str(tmpdir), | |
300 | ) | |
301 | assert stderr == b"" | |
302 | assert rcode == 0 | |
303 | ||
304 | stdout, stderr, rcode = run( | |
305 | "bw debug -c 'print(repo.vault.decrypt_file_as_base64(\"{}\") == repo.vault.decrypt_file_as_base64(\"{}\"))'".format( | |
306 | "enc_foo", "enc_foo", | |
307 | ), | |
308 | path=str(tmpdir), | |
309 | ) | |
310 | assert stdout == b"True\n" | |
311 | assert stderr == b"" | |
312 | assert rcode == 0 | |
313 | ||
314 | stdout, stderr, rcode = run( | |
315 | "bw debug -c 'print(repo.vault.decrypt_file_as_base64(\"{}\") == repo.vault.decrypt_file_as_base64(\"{}\"))'".format( | |
316 | "enc_foo", "enc_bar", | |
317 | ), | |
318 | path=str(tmpdir), | |
319 | ) | |
320 | assert stdout == b"False\n" | |
321 | assert stderr == b"" | |
322 | assert rcode == 0 | |
323 | ||
324 | ||
325 | def test_faults_equality_decrypt_file_mixed(tmpdir): | |
326 | make_repo(tmpdir) | |
327 | ||
328 | source_file = join(str(tmpdir), "data", "source") | |
329 | with open(source_file, 'w') as f: | |
330 | f.write("foo") | |
331 | stdout, stderr, rcode = run( | |
332 | "bw debug -c 'repo.vault.encrypt_file(\"{}\", \"{}\")'".format( | |
333 | source_file, | |
334 | "enc_foo", | |
335 | ), | |
336 | path=str(tmpdir), | |
337 | ) | |
338 | assert stderr == b"" | |
339 | assert rcode == 0 | |
340 | ||
341 | stdout, stderr, rcode = run( | |
342 | "bw debug -c 'print(repo.vault.decrypt_file_as_base64(\"{}\") == repo.vault.decrypt_file(\"{}\"))'".format( | |
343 | "enc_foo", "enc_foo", | |
344 | ), | |
345 | path=str(tmpdir), | |
346 | ) | |
347 | assert stdout == b"False\n" | |
348 | assert stderr == b"" | |
349 | assert rcode == 0 | |
350 | ||
351 | ||
352 | def test_faults_equality_human_password_for(tmpdir): | |
353 | make_repo(tmpdir) | |
354 | ||
355 | stdout, stderr, rcode = run( | |
356 | "bw debug -c 'print(repo.vault.human_password_for(\"a\") == repo.vault.human_password_for(\"a\"))'", | |
357 | path=str(tmpdir), | |
358 | ) | |
359 | assert stdout == b"True\n" | |
360 | assert stderr == b"" | |
361 | assert rcode == 0 | |
362 | ||
363 | stdout, stderr, rcode = run( | |
364 | "bw debug -c 'print(repo.vault.human_password_for(\"a\") == repo.vault.human_password_for(\"b\"))'", | |
365 | path=str(tmpdir), | |
366 | ) | |
367 | assert stdout == b"False\n" | |
368 | assert stderr == b"" | |
369 | assert rcode == 0 | |
370 | ||
371 | ||
372 | def test_faults_equality_password_for(tmpdir): | |
373 | make_repo(tmpdir) | |
374 | ||
375 | stdout, stderr, rcode = run( | |
376 | "bw debug -c 'print(repo.vault.password_for(\"a\") == repo.vault.password_for(\"a\"))'", | |
377 | path=str(tmpdir), | |
378 | ) | |
379 | assert stdout == b"True\n" | |
380 | assert stderr == b"" | |
381 | assert rcode == 0 | |
382 | ||
383 | stdout, stderr, rcode = run( | |
384 | "bw debug -c 'print(repo.vault.password_for(\"a\") == repo.vault.password_for(\"b\"))'", | |
385 | path=str(tmpdir), | |
386 | ) | |
387 | assert stdout == b"False\n" | |
388 | assert stderr == b"" | |
389 | assert rcode == 0 | |
390 | ||
391 | ||
392 | def test_faults_equality_password_for_mixed(tmpdir): | |
393 | make_repo(tmpdir) | |
394 | ||
395 | stdout, stderr, rcode = run( | |
396 | "bw debug -c 'print(repo.vault.password_for(\"a\") == repo.vault.human_password_for(\"a\"))'", | |
397 | path=str(tmpdir), | |
398 | ) | |
399 | assert stdout == b"False\n" | |
400 | assert stderr == b"" | |
401 | assert rcode == 0 | |
402 | ||
403 | ||
404 | def test_faults_equality_random_bytes_as_base64(tmpdir): | |
405 | make_repo(tmpdir) | |
406 | ||
407 | stdout, stderr, rcode = run( | |
408 | "bw debug -c 'print(repo.vault.random_bytes_as_base64_for(\"a\") == repo.vault.random_bytes_as_base64_for(\"a\"))'", | |
409 | path=str(tmpdir), | |
410 | ) | |
411 | assert stdout == b"True\n" | |
412 | assert stderr == b"" | |
413 | assert rcode == 0 | |
414 | ||
415 | stdout, stderr, rcode = run( | |
416 | "bw debug -c 'print(repo.vault.random_bytes_as_base64_for(\"a\") == repo.vault.random_bytes_as_base64_for(\"b\"))'", | |
417 | path=str(tmpdir), | |
418 | ) | |
419 | assert stdout == b"False\n" | |
420 | assert stderr == b"" | |
421 | assert rcode == 0 |
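The equality tests above establish that vault Faults compare by what they are derived from rather than by resolving the secret: the same identifier and method compare equal, while different identifiers or different methods do not. A hedged sketch:

    a = repo.vault.password_for("a")
    b = repo.vault.password_for("a")
    assert a == b                                   # same id, same method
    assert a != repo.vault.password_for("b")        # different id
    assert a != repo.vault.human_password_for("a")  # different method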
0 | # -*- coding: utf-8 -*- | |
1 | from __future__ import unicode_literals | |
2 | ||
3 | from base64 import b64decode | |
4 | from os.path import join | |
5 | ||
6 | from bundlewrap.utils.testing import make_repo, run | |
7 | ||
8 | ||
9 | def test_b64encode_fault(tmpdir): | |
10 | make_repo(tmpdir) | |
11 | ||
12 | stdout, stderr, rcode = run("bw debug -c 'print(repo.vault.password_for(\"testing\").b64encode())'", path=str(tmpdir)) | |
13 | assert stdout == b"ZmFDVFQ3NmthZ3REdVpFNXdub2lEMUN4aEdLbWJnaVg=\n" | |
14 | assert stderr == b"" | |
15 | assert rcode == 0 | |
16 | ||
17 | ||
18 | def test_encrypt(tmpdir): | |
19 | make_repo(tmpdir) | |
20 | ||
21 | stdout, stderr, rcode = run("bw debug -c 'print(repo.vault.encrypt(\"test\"))'", path=str(tmpdir)) | |
22 | assert stderr == b"" | |
23 | assert rcode == 0 | |
24 | ||
25 | stdout, stderr, rcode = run("bw debug -c 'print(repo.vault.decrypt(\"{}\"))'".format(stdout.decode('utf-8').strip()), path=str(tmpdir)) | |
26 | assert stdout == b"test\n" | |
27 | assert stderr == b"" | |
28 | assert rcode == 0 | |
29 | ||
30 | ||
31 | def test_encrypt_different_key_autodetect(tmpdir): | |
32 | make_repo(tmpdir) | |
33 | ||
34 | stdout, stderr, rcode = run("bw debug -c 'print(repo.vault.encrypt(\"test\", key=\"generate\"))'", path=str(tmpdir)) | |
35 | assert stderr == b"" | |
36 | assert rcode == 0 | |
37 | ||
38 | stdout, stderr, rcode = run("bw debug -c 'print(repo.vault.decrypt(\"{}\"))'".format(stdout.decode('utf-8').strip()), path=str(tmpdir)) | |
39 | assert stdout == b"test\n" | |
40 | assert stderr == b"" | |
41 | assert rcode == 0 | |
42 | ||
43 | ||
44 | def test_encrypt_file(tmpdir): | |
45 | make_repo(tmpdir) | |
46 | ||
47 | source_file = join(str(tmpdir), "data", "source") | |
48 | with open(source_file, 'w') as f: | |
49 | f.write("ohai") | |
50 | ||
51 | stdout, stderr, rcode = run( | |
52 | "bw debug -c 'repo.vault.encrypt_file(\"{}\", \"{}\")'".format( | |
53 | source_file, | |
54 | "encrypted", | |
55 | ), | |
56 | path=str(tmpdir), | |
57 | ) | |
58 | assert stderr == b"" | |
59 | assert rcode == 0 | |
60 | ||
61 | stdout, stderr, rcode = run( | |
62 | "bw debug -c 'print(repo.vault.decrypt_file(\"{}\"))'".format( | |
63 | "encrypted", | |
64 | ), | |
65 | path=str(tmpdir), | |
66 | ) | |
67 | assert stdout == b"ohai\n" | |
68 | assert stderr == b"" | |
69 | assert rcode == 0 | |
70 | ||
71 | ||
72 | def test_encrypt_file_different_key_autodetect(tmpdir): | |
73 | make_repo(tmpdir) | |
74 | ||
75 | source_file = join(str(tmpdir), "data", "source") | |
76 | with open(source_file, 'w') as f: | |
77 | f.write("ohai") | |
78 | ||
79 | stdout, stderr, rcode = run( | |
80 | "bw debug -c 'repo.vault.encrypt_file(\"{}\", \"{}\", \"{}\")'".format( | |
81 | source_file, | |
82 | "encrypted", | |
83 | "generate", | |
84 | ), | |
85 | path=str(tmpdir), | |
86 | ) | |
87 | assert stderr == b"" | |
88 | assert rcode == 0 | |
89 | ||
90 | stdout, stderr, rcode = run( | |
91 | "bw debug -c 'print(repo.vault.decrypt_file(\"{}\"))'".format( | |
92 | "encrypted", | |
93 | ), | |
94 | path=str(tmpdir), | |
95 | ) | |
96 | assert stdout == b"ohai\n" | |
97 | assert stderr == b"" | |
98 | assert rcode == 0 | |
99 | ||
100 | ||
101 | def test_encrypt_file_base64(tmpdir): | |
102 | make_repo(tmpdir) | |
103 | ||
104 | source_file = join(str(tmpdir), "data", "source") | |
105 | with open(source_file, 'wb') as f: | |
106 | f.write("öhai".encode('latin-1')) | |
107 | ||
108 | stdout, stderr, rcode = run( | |
109 | "bw debug -c 'repo.vault.encrypt_file(\"{}\", \"{}\")'".format( | |
110 | source_file, | |
111 | "encrypted", | |
112 | ), | |
113 | path=str(tmpdir), | |
114 | ) | |
115 | assert stderr == b"" | |
116 | assert rcode == 0 | |
117 | ||
118 | stdout, stderr, rcode = run( | |
119 | "bw debug -c 'print(repo.vault.decrypt_file_as_base64(\"{}\"))'".format( | |
120 | "encrypted", | |
121 | ), | |
122 | path=str(tmpdir), | |
123 | ) | |
124 | assert b64decode(stdout.decode('utf-8')) == "öhai".encode('latin-1') | |
125 | assert stderr == b"" | |
126 | assert rcode == 0 | |
127 | ||
128 | ||
129 | def test_format_password(tmpdir): | |
130 | make_repo(tmpdir) | |
131 | ||
132 | stdout, stderr, rcode = run("bw debug -c 'print(repo.vault.password_for(\"testing\").format_into(\"format: {}\"))'", path=str(tmpdir)) | |
133 | assert stdout == b"format: faCTT76kagtDuZE5wnoiD1CxhGKmbgiX\n" | |
134 | assert stderr == b"" | |
135 | assert rcode == 0 | |
136 | ||
137 | ||
138 | def test_human_password(tmpdir): | |
139 | make_repo(tmpdir) | |
140 | ||
141 | stdout, stderr, rcode = run("bw debug -c 'print(repo.vault.human_password_for(\"hello world\"))'", path=str(tmpdir)) | |
142 | assert stdout == b"Xaint-Heep-Pier-Tikl-76\n" | |
143 | assert stderr == b"" | |
144 | assert rcode == 0 | |
145 | ||
146 | ||
147 | def test_human_password_digits(tmpdir): | |
148 | make_repo(tmpdir) | |
149 | ||
150 | stdout, stderr, rcode = run("bw debug -c 'print(repo.vault.human_password_for(\"hello world\", digits=4))'", path=str(tmpdir)) | |
151 | assert stdout == b"Xaint-Heep-Pier-Tikl-7608\n" | |
152 | assert stderr == b"" | |
153 | assert rcode == 0 | |
154 | ||
155 | ||
156 | def test_human_password_per_word(tmpdir): | |
157 | make_repo(tmpdir) | |
158 | ||
159 | stdout, stderr, rcode = run("bw debug -c 'print(repo.vault.human_password_for(\"hello world\", per_word=1))'", path=str(tmpdir)) | |
160 | assert stdout == b"X-D-F-H-42\n" | |
161 | assert stderr == b"" | |
162 | assert rcode == 0 | |
163 | ||
164 | ||
165 | def test_human_password_words(tmpdir): | |
166 | make_repo(tmpdir) | |
167 | ||
168 | stdout, stderr, rcode = run("bw debug -c 'print(repo.vault.human_password_for(\"hello world\", words=2))'", path=str(tmpdir)) | |
169 | assert stdout == b"Xaint-Heep-13\n" | |
170 | assert stderr == b"" | |
171 | assert rcode == 0 | |
172 | ||
173 | ||
174 | def test_random_bytes_as_base64(tmpdir): | |
175 | make_repo(tmpdir) | |
176 | ||
177 | stdout, stderr, rcode = run("bw debug -c 'print(repo.vault.random_bytes_as_base64_for(\"foo\"))'", path=str(tmpdir)) | |
178 | assert stdout == b"rt+Dgv0yA10DS3ux94mmtEg+isChTJvgkfklzmWkvyg=\n" | |
179 | assert stderr == b"" | |
180 | assert rcode == 0 | |
181 | ||
182 | ||
183 | def test_random_bytes_as_base64_length(tmpdir): | |
184 | make_repo(tmpdir) | |
185 | ||
186 | stdout, stderr, rcode = run("bw debug -c 'print(repo.vault.random_bytes_as_base64_for(\"foo\", length=1))'", path=str(tmpdir)) | |
187 | assert stdout == b"rg==\n" | |
188 | assert stderr == b"" | |
189 | assert rcode == 0 |
0 | from os.path import join | |
1 | ||
2 | from bundlewrap.repo import Repository | |
3 | from bundlewrap.utils import get_file_contents | |
4 | from bundlewrap.utils.testing import make_repo | |
5 | ||
6 | ||
7 | def test_toml_conversion(tmpdir): | |
8 | make_repo( | |
9 | tmpdir, | |
10 | nodes={ | |
11 | 'node1': { | |
12 | 'os': 'ubuntu', | |
13 | 'metadata': { | |
14 | "foo": { | |
15 | "bar": "baz", | |
16 | }, | |
17 | }, | |
18 | }, | |
19 | }, | |
20 | ) | |
21 | repo = Repository(str(tmpdir)) | |
22 | node = repo.get_node("node1") | |
23 | node.toml_save() | |
24 | ||
25 | assert get_file_contents(join(str(tmpdir), "nodes", "node1.toml")) == \ | |
26 | b"""os = "ubuntu" | |
27 | ||
28 | [metadata.foo] | |
29 | bar = "baz" | |
30 | """ |
0 | from bundlewrap.utils import Fault | |
1 | ||
2 | ||
3 | def test_basic_resolve(): | |
4 | def callback(): | |
5 | return 4 # Chosen by fair dice roll. Guaranteed to be random. | |
6 | ||
7 | f = Fault('id', callback) | |
8 | assert f.value == 4 | |
9 | ||
10 | ||
11 | def test_add_fault(): | |
12 | def callback_a(): | |
13 | return 'foo' | |
14 | def callback_b(): | |
15 | return 'bar' | |
16 | ||
17 | a = Fault('id foo', callback_a) | |
18 | b = Fault('id bar', callback_b) | |
19 | c = a + b | |
20 | assert c.value == 'foobar' | |
21 | ||
22 | ||
23 | def test_add_fault_nonstring(): | |
24 | def callback_a(): | |
25 | return 4 | |
26 | def callback_b(): | |
27 | return 8 | |
28 | ||
29 | a = Fault('id foo', callback_a) | |
30 | b = Fault('id bar', callback_b) | |
31 | c = a + b | |
32 | assert c.value == 12 | |
33 | ||
34 | ||
35 | def test_add_plain_nonstring(): | |
36 | def callback(): | |
37 | return 4 | |
38 | ||
39 | a = Fault('id foo', callback) | |
40 | b = a + 8 | |
41 | assert b.value == 12 | |
42 | ||
43 | ||
44 | def test_add_plain(): | |
45 | def callback_a(): | |
46 | return 'foo' | |
47 | ||
48 | a = Fault('id foo', callback_a) | |
49 | c = a + 'bar' | |
50 | assert c.value == 'foobar' | |
51 | ||
52 | ||
53 | def test_order(): | |
54 | def callback_a(): | |
55 | return 'foo' | |
56 | def callback_b(): | |
57 | return 'bar' | |
58 | def callback_c(): | |
59 | return '0first' | |
60 | ||
61 | a = Fault('id foo', callback_a) | |
62 | b = Fault('id bar', callback_b) | |
63 | c = Fault('id 0first', callback_c) | |
64 | ||
65 | lst = sorted([a, b, c]) | |
66 | ||
67 | assert lst[0].value == '0first' | |
68 | assert lst[1].value == 'bar' | |
69 | assert lst[2].value == 'foo' | |
70 | ||
71 | ||
72 | def test_b64encode(): | |
73 | def callback(): | |
74 | return 'foo' | |
75 | ||
76 | a = Fault('id foo', callback).b64encode() | |
77 | assert a.value == 'Zm9v' | |
78 | ||
79 | ||
80 | def test_format_into(): | |
81 | def callback(): | |
82 | return 'foo' | |
83 | ||
84 | a = Fault('id foo', callback).format_into('This is my secret: "{}"') | |
85 | assert a.value == 'This is my secret: "foo"' | |
86 | ||
87 | ||
88 | # XXX Other methods are still missing. This mainly verifies that | |
89 | # _make_method_callback() works at all. | |
90 | def test_generic_method_lower(): | |
91 | def callback(): | |
92 | return 'FOO' | |
93 | ||
94 | a = Fault('id FOO', callback) | |
95 | assert a.lower().value == 'foo' | |
96 | ||
97 | ||
98 | def test_equal_no_operators(): | |
99 | def callback_a(): | |
100 | return 'foo' | |
101 | def callback_b(): | |
102 | return 'foo, but here you see the problem' | |
103 | ||
104 | a = Fault('id foo', callback_a) | |
105 | b = Fault('id foo', callback_b) | |
106 | assert id(a) != id(b) | |
107 | assert a == b | |
108 | ||
109 | ||
110 | def test_not_equal_no_operators(): | |
111 | def callback_a(): | |
112 | return 'this interface is not fool proof' | |
113 | def callback_b(): | |
114 | return 'this interface is not fool proof' | |
115 | ||
116 | a = Fault('id foo', callback_a) | |
117 | b = Fault('id bar', callback_b) | |
118 | assert id(a) != id(b) | |
119 | assert a != b | |
120 | ||
121 | ||
122 | def test_equal_lower(): | |
123 | def callback_a(): | |
124 | return 'foo' | |
125 | def callback_b(): | |
126 | return 'foo' | |
127 | ||
128 | a = Fault('id foo', callback_a).lower() | |
129 | b = Fault('id foo', callback_b).lower() | |
130 | assert id(a) != id(b) | |
131 | assert a == b | |
132 | ||
133 | ||
134 | def test_not_equal_lower(): | |
135 | def callback_a(): | |
136 | return 'foo' | |
137 | def callback_b(): | |
138 | return 'foo' | |
139 | ||
140 | a = Fault('id foo', callback_a).lower() | |
141 | b = Fault('id bar', callback_b).lower() | |
142 | assert id(a) != id(b) | |
143 | assert a != b | |
144 | ||
145 | ||
146 | def test_equal_b64encode(): | |
147 | def callback_a(): | |
148 | return 'foo' | |
149 | def callback_b(): | |
150 | return 'foo' | |
151 | ||
152 | a = Fault('id foo', callback_a).b64encode() | |
153 | b = Fault('id foo', callback_b).b64encode() | |
154 | assert id(a) != id(b) | |
155 | assert a == b | |
156 | ||
157 | ||
158 | def test_not_equal_b64encode(): | |
159 | def callback_a(): | |
160 | return 'foo' | |
161 | def callback_b(): | |
162 | return 'foo' | |
163 | ||
164 | a = Fault('id foo', callback_a).b64encode() | |
165 | b = Fault('id bar', callback_b).b64encode() | |
166 | assert id(a) != id(b) | |
167 | assert a != b | |
168 | ||
169 | ||
170 | def test_equal_format_into(): | |
171 | def callback_a(): | |
172 | return 'foo' | |
173 | def callback_b(): | |
174 | return 'foo' | |
175 | ||
176 | a = Fault('id foo', callback_a).format_into('bar {}') | |
177 | b = Fault('id foo', callback_b).format_into('bar {}') | |
178 | assert id(a) != id(b) | |
179 | assert a == b | |
180 | ||
181 | ||
182 | def test_not_equal_format_into(): | |
183 | def callback_a(): | |
184 | return 'foo' | |
185 | def callback_b(): | |
186 | return 'foo' | |
187 | ||
188 | a = Fault('id foo', callback_a).format_into('bar {}') | |
189 | b = Fault('id foo', callback_b).format_into('baz {}') | |
190 | assert id(a) != id(b) | |
191 | assert a != b | |
192 | ||
193 | ||
194 | def test_nested_equal(): | |
195 | def callback_a(): | |
196 | return 'foo' | |
197 | def callback_b(): | |
198 | return 'foo' | |
199 | ||
200 | a = Fault('id foo', callback_a).lower().b64encode() | |
201 | b = Fault('id foo', callback_b).lower().b64encode() | |
202 | assert id(a) != id(b) | |
203 | assert a == b | |
204 | ||
205 | ||
206 | def test_nested_not_equal_because_of_id(): | |
207 | def callback_a(): | |
208 | return 'foo' | |
209 | def callback_b(): | |
210 | return 'foo' | |
211 | ||
212 | a = Fault('id foo', callback_a).lower().b64encode() | |
213 | b = Fault('id bar', callback_b).lower().b64encode() | |
214 | assert id(a) != id(b) | |
215 | assert a != b | |
216 | ||
217 | ||
218 | def test_nested_not_equal_because_of_operators(): | |
219 | def callback_a(): | |
220 | return 'foo' | |
221 | def callback_b(): | |
222 | return 'foo' | |
223 | ||
224 | a = Fault('id foo', callback_a).lower().b64encode() | |
225 | b = Fault('id foo', callback_b).lower() | |
226 | assert id(a) != id(b) | |
227 | assert a != b | |
228 | ||
229 | ||
230 | def test_can_be_used_in_set(): | |
231 | def callback_a(): | |
232 | return 'foo' | |
233 | def callback_b(): | |
234 | return 'bar' | |
235 | ||
236 | a = Fault('id foo', callback_a) | |
237 | b = Fault('id bar', callback_b) | |
238 | s = {a, a, b} | |
239 | assert len(s) == 2 | |
240 | assert 'foo' in [i.value for i in s] | |
241 | assert 'bar' in [i.value for i in s] | |
242 | ||
243 | ||
244 | def test_kwargs_add_to_idlist(): | |
245 | def callback(): | |
246 | return 'foo' | |
247 | ||
248 | a = Fault('id foo', callback, foo='bar', baz='bam', frob='glob') | |
249 | b = Fault('id foo', callback, different='kwargs') | |
250 | assert a != b | |
251 | ||
252 | ||
253 | def test_eq_and_hash_do_not_resolve_fault(): | |
254 | def callback(): | |
255 | raise Exception('Fault resolved, this should not happen') | |
256 | ||
257 | a = Fault('id foo', callback) | |
258 | b = Fault('id foo', callback) | |
259 | assert a == b | |
260 | ||
261 | s = {a, b}  # building the set calls __hash__, which must not resolve the Fault | |
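Taken together, these tests pin down the Fault contract: equality and hashing are computed from the declared id plus the chain of applied operators (and any kwargs), and the callback is never invoked for either. A minimal sketch of that contract, using a hypothetical LazyValue class rather than BundleWrap's actual Fault implementation:

    class LazyValue:
        # Illustrative stand-in for Fault; not the real implementation.
        def __init__(self, ident, callback, ops=()):
            self.ident = ident        # declared identity, e.g. 'id foo'
            self.ops = ops            # chain of applied operators
            self.callback = callback  # only invoked when .value is read

        @property
        def value(self):
            result = self.callback()
            for op in self.ops:
                result = getattr(result, op)()
            return result

        def lower(self):
            return LazyValue(self.ident, self.callback, self.ops + ('lower',))

        def __eq__(self, other):
            return (self.ident, self.ops) == (other.ident, other.ops)

        def __hash__(self):
            return hash((self.ident, self.ops))

    a = LazyValue('id foo', lambda: 'FOO').lower()
    b = LazyValue('id foo', lambda: 'FOO, but a different value').lower()
    assert a == b               # same id, same operator chain
    assert a.value == 'foo'     # resolving still applies the chain
    assert len({a, b}) == 1     # hashing never ran the callbacks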
0 | # -*- coding: utf-8 -*- | |
1 | from __future__ import unicode_literals | |
2 | ||
3 | from bundlewrap.utils import Fault | |
4 | 0 | from bundlewrap.utils.dicts import merge_dict |
5 | from bundlewrap.metadata import atomic, blame_changed_paths, changes_metadata | |
1 | from bundlewrap.metadata import atomic | |
6 | 2 | |
7 | 3 | |
8 | 4 | def test_atomic_no_merge_base(): |
17 | 13 | {1: [5]}, |
18 | 14 | {1: atomic([6, 7])}, |
19 | 15 | ) == {1: [6, 7]} |
20 | ||
21 | ||
22 | def test_blame_and_merge(): | |
23 | dict1 = { | |
24 | 'key1': 11, | |
25 | 'key2': { | |
26 | 'key21': 121, | |
27 | 'key22': 122, | |
28 | }, | |
29 | 'key3': { | |
30 | 'key31': { | |
31 | 'key311': [1311], | |
32 | }, | |
33 | }, | |
34 | } | |
35 | dict2 = { | |
36 | 'key2': { | |
37 | 'key21': 221, | |
38 | }, | |
39 | 'key3': { | |
40 | 'key31': { | |
41 | 'key311': [2311], | |
42 | 'key312': 2312, | |
43 | }, | |
44 | }, | |
45 | 'key4': 24, | |
46 | } | |
47 | from pprint import pprint | |
48 | blame = {} | |
49 | merged = merge_dict( | |
50 | {}, | |
51 | dict1, | |
52 | ) | |
53 | blame_changed_paths( | |
54 | {}, | |
55 | merged, | |
56 | blame, | |
57 | "dict1", | |
58 | ) | |
59 | pprint(blame) | |
60 | merged2 = merge_dict( | |
61 | merged, | |
62 | dict2, | |
63 | ) | |
64 | blame_changed_paths( | |
65 | merged, | |
66 | merged2, | |
67 | blame, | |
68 | "dict2", | |
69 | ) | |
70 | pprint(blame) | |
71 | ||
72 | should = { | |
73 | ('key1',): ("dict1",), | |
74 | ('key2',): ("dict1", "dict2"), | |
75 | ('key2', 'key21'): ("dict2",), | |
76 | ('key2', 'key22'): ("dict1",), | |
77 | ('key3',): ("dict1", "dict2"), | |
78 | ('key3', 'key31',): ("dict1", "dict2"), | |
79 | ('key3', 'key31', 'key311'): ("dict1", "dict2"), | |
80 | ('key3', 'key31', 'key312'): ("dict2",), | |
81 | ('key4',): ("dict2",), | |
82 | } | |
83 | pprint(should) | |
84 | assert blame == should | |
85 | ||
86 | assert merged2 == { | |
87 | 'key1': 11, | |
88 | 'key2': { | |
89 | 'key21': 221, | |
90 | 'key22': 122, | |
91 | }, | |
92 | 'key3': { | |
93 | 'key31': { | |
94 | 'key311': [1311, 2311], | |
95 | 'key312': 2312, | |
96 | }, | |
97 | }, | |
98 | 'key4': 24, | |
99 | } | |
100 | ||
101 | ||
102 | def test_changes_same(): | |
103 | assert not changes_metadata( | |
104 | { | |
105 | 'foo': 1, | |
106 | 'bar': 2, | |
107 | 'baz': [3], | |
108 | }, | |
109 | { | |
110 | 'baz': [3], | |
111 | }, | |
112 | ) | |
113 | ||
114 | ||
115 | def test_changes_list(): | |
116 | assert changes_metadata( | |
117 | { | |
118 | 'foo': 1, | |
119 | 'bar': 2, | |
120 | 'baz': [3], | |
121 | }, | |
122 | { | |
123 | 'baz': [4], | |
124 | }, | |
125 | ) | |
126 | ||
127 | ||
128 | def test_changes_nested_same(): | |
129 | assert not changes_metadata( | |
130 | { | |
131 | 'foo': 1, | |
132 | 'bar': 2, | |
133 | 'baz': { | |
134 | 'frob': 4, | |
135 | }, | |
136 | }, | |
137 | { | |
138 | 'baz': { | |
139 | 'frob': 4, | |
140 | }, | |
141 | }, | |
142 | ) | |
143 | ||
144 | ||
145 | def test_changes_nested(): | |
146 | assert changes_metadata( | |
147 | { | |
148 | 'foo': 1, | |
149 | 'bar': 2, | |
150 | 'baz': { | |
151 | 'frob': 4, | |
152 | }, | |
153 | }, | |
154 | { | |
155 | 'baz': { | |
156 | 'frob': 5, | |
157 | }, | |
158 | }, | |
159 | ) | |
160 | ||
161 | ||
162 | def test_changes_fault(): | |
163 | def callback1(): | |
164 | return 1 | |
165 | ||
166 | def callback2(): | |
167 | return 2 | |
168 | ||
169 | assert not changes_metadata( | |
170 | { | |
171 | 'foo': Fault(callback1), | |
172 | }, | |
173 | { | |
174 | 'foo': Fault(callback2), | |
175 | }, | |
176 | ) |
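The context lines at the top of this file's diff (test_atomic_no_merge_base) document the key merge rule: merge_dict() combines lists from both layers by default, but a value wrapped in atomic() replaces the lower layer's value outright. A conceptual sketch of that rule; merge() and _Atomic below are simplified stand-ins, not bundlewrap.utils.dicts.merge_dict itself:

    class _Atomic(list):
        pass

    def atomic(value):
        return _Atomic(value)

    def merge(base, update):
        merged = dict(base)
        for key, value in update.items():
            if isinstance(value, _Atomic):
                merged[key] = list(value)        # replace, don't merge
            elif isinstance(value, list) and isinstance(base.get(key), list):
                merged[key] = base[key] + value  # default: concatenate
            else:
                merged[key] = value
        return merged

    assert merge({1: [5]}, {1: [6, 7]}) == {1: [5, 6, 7]}
    assert merge({1: [5]}, {1: atomic([6, 7])}) == {1: [6, 7]}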
0 | # -*- coding: utf-8 -*- | |
1 | from __future__ import unicode_literals | |
2 | ||
3 | ||
4 | 0 | from bundlewrap.metadata import atomic |
5 | 1 | from bundlewrap.utils.metastack import Metastack |
6 | 2 | from pytest import raises |
178 | 174 | assert stack.get('something', None) == 456 |
179 | 175 | |
180 | 176 | |
181 | def test_should_be_frozen(): | |
177 | def test_deepcopy(): | |
182 | 178 | stack = Metastack() |
183 | 179 | stack._set_layer('base', {'foo': {'bar': {1, 2, 3}}}) |
184 | 180 | foo = stack.get('foo', None) |
185 | ||
186 | with raises(AttributeError): | |
187 | foo['bar'].add(4) | |
188 | ||
189 | with raises(TypeError): | |
190 | del foo['bar'] | |
181 | foo['bar'].add(4) | |
182 | assert stack.get('foo/bar') == {1, 2, 3} | |
183 | del foo['bar'] | |
184 | assert stack.get('foo/bar') == {1, 2, 3} | |
191 | 185 | |
192 | 186 | |
193 | 187 | def test_atomic_in_base(): |
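The rename from test_should_be_frozen to test_deepcopy marks a behaviour change: instead of returning frozen objects that raise on mutation, Metastack.get() now hands out copies, so callers may mutate the result without corrupting the stack. A tiny sketch of that defensive-copy pattern, with CopyingStack as a simplified, hypothetical stand-in:

    from copy import deepcopy

    class CopyingStack:
        def __init__(self):
            self._layers = {}

        def _set_layer(self, name, data):
            self._layers[name] = data

        def get(self, key, default=None):
            for data in self._layers.values():
                if key in data:
                    return deepcopy(data[key])  # caller gets a copy
            return default

    stack = CopyingStack()
    stack._set_layer('base', {'foo': {'bar': {1, 2, 3}}})
    foo = stack.get('foo')
    foo['bar'].add(4)  # mutates only the copy
    assert stack._layers['base']['foo']['bar'] == {1, 2, 3}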
0 | # -*- coding: utf-8 -*- | |
1 | from __future__ import unicode_literals | |
2 | ||
3 | 0 | from bundlewrap.items.pkg_openbsd import parse_pkg_name |
4 | 1 | from pytest import raises |
5 | 2 |
0 | 0 | from bundlewrap.metadata import atomic |
1 | from bundlewrap.utils.dicts import freeze_object, map_dict_keys, reduce_dict | |
1 | from bundlewrap.utils.dicts import ( | |
2 | map_dict_keys, | |
3 | reduce_dict, | |
4 | validate_dict, | |
5 | COLLECTION_OF_STRINGS, | |
6 | TUPLE_OF_INTS, | |
7 | ) | |
8 | ||
2 | 9 | from pytest import raises |
3 | ||
4 | from sys import version_info | |
5 | 10 | |
6 | 11 | |
7 | 12 | def test_dictmap(): |
24 | 29 | ("key2", "key5", "key6"), |
25 | 30 | ("key2", "key7"), |
26 | 31 | ]) |
27 | ||
28 | ||
29 | def test_freeze_object(): | |
30 | orig = { | |
31 | 'bool': True, | |
32 | 'int': 3, | |
33 | 'none': None, | |
34 | 'simple_list': [1, 2], | |
35 | 'simple_set': {3, 4}, | |
36 | 'recursive_dict': { | |
37 | 'something': { | |
38 | 'else': 3, | |
39 | }, | |
40 | 'str': 'str', | |
41 | }, | |
42 | 'list_of_dicts': [ | |
43 | { | |
44 | 'name': 'yaml', | |
45 | 'attribute': 123, | |
46 | 'see': 'how lists of dicts are a bad idea anyway', | |
47 | }, | |
48 | { | |
49 | 'name': 'yaml', | |
50 | 'attribute': 42, | |
51 | 'everything': ['got', 'the', 'same', 'name'], | |
52 | }, | |
53 | ], | |
54 | } | |
55 | ||
56 | frozen = freeze_object(orig) | |
57 | ||
58 | assert frozen['bool'] == True | |
59 | assert frozen['int'] == 3 | |
60 | assert frozen['none'] == None | |
61 | assert frozen['simple_list'][0] == 1 | |
62 | assert frozen['simple_list'][1] == 2 | |
63 | assert len(frozen['simple_list']) == 2 | |
64 | assert 4 in frozen['simple_set'] | |
65 | assert len(frozen['simple_set']) == 2 | |
66 | assert frozen['list_of_dicts'][0]['attribute'] == 123 | |
67 | assert frozen['recursive_dict']['something']['else'] == 3 | |
68 | ||
69 | # XXX Remove this if in bw 4.0 and always do the check | |
70 | if version_info[0] >= 3: | |
71 | with raises(TypeError): | |
72 | frozen['bool'] = False | |
73 | ||
74 | with raises(TypeError): | |
75 | frozen['int'] = 10 | |
76 | ||
77 | with raises(TypeError): | |
78 | frozen['none'] = None | |
79 | ||
80 | with raises(TypeError): | |
81 | frozen['list_of_dicts'][0]['attribute'] = 456 | |
82 | ||
83 | with raises(TypeError): | |
84 | frozen['recursive_dict']['something']['else'] = 4 | |
85 | ||
86 | with raises(TypeError): | |
87 | del frozen['int'] | |
88 | ||
89 | with raises(AttributeError): | |
90 | frozen['simple_list'].append(5) | |
91 | ||
92 | with raises(AttributeError): | |
93 | frozen['simple_set'].add(5) | |
94 | 32 | |
95 | 33 | |
96 | 34 | def test_reduce_dict_two_lists(): |
135 | 73 | }], |
136 | 74 | 'd': 3, |
137 | 75 | } |
76 | ||
77 | ||
78 | def test_validate_ok(): | |
79 | validate_dict( | |
80 | { | |
81 | 'a': 5, | |
82 | 'b': "bee", | |
83 | 'c': None, | |
84 | 'd': ("t", "u", "p", "l", "e"), | |
85 | 'e': ["l", "i", "s", "t"], | |
86 | 'f': {"s", "e", "t"}, | |
87 | 'g': (1, "2"), | |
88 | 'h': [1, "2"], | |
89 | 'i': {1, "2"}, | |
90 | 'j': True, | |
91 | 'k': False, | |
92 | 'l': (1, 2, 3), | |
93 | }, | |
94 | { | |
95 | 'a': int, | |
96 | 'b': str, | |
97 | 'c': type(None), | |
98 | 'd': COLLECTION_OF_STRINGS, | |
99 | 'e': COLLECTION_OF_STRINGS, | |
100 | 'f': COLLECTION_OF_STRINGS, | |
101 | 'g': tuple, | |
102 | 'h': list, | |
103 | 'i': set, | |
104 | 'j': bool, | |
105 | 'k': (int, bool), | |
106 | 'l': TUPLE_OF_INTS, | |
107 | }, | |
108 | ) | |
109 | ||
110 | ||
111 | def test_validate_single_type_error(): | |
112 | with raises(ValueError): | |
113 | validate_dict( | |
114 | { | |
115 | 'a': 5, | |
116 | }, | |
117 | { | |
118 | 'a': str, | |
119 | }, | |
120 | ) | |
121 | ||
122 | ||
123 | def test_validate_multi_type_error(): | |
124 | with raises(ValueError): | |
125 | validate_dict( | |
126 | { | |
127 | 'a': 5, | |
128 | }, | |
129 | { | |
130 | 'a': (str, list), | |
131 | }, | |
132 | ) | |
133 | ||
134 | ||
135 | def test_validate_inner_type_error(): | |
136 | with raises(ValueError): | |
137 | validate_dict( | |
138 | { | |
139 | 'd': ("t", "u", "p", "l", "e", 47), | |
140 | }, | |
141 | { | |
142 | 'd': COLLECTION_OF_STRINGS, | |
143 | }, | |
144 | ) | |
145 | ||
146 | ||
147 | def test_validate_inner_type_error2(): | |
148 | with raises(ValueError): | |
149 | validate_dict( | |
150 | { | |
151 | 'l': (1, 2, "3"), | |
152 | }, | |
153 | { | |
154 | 'l': TUPLE_OF_INTS, | |
155 | }, | |
156 | ) | |
157 | ||
158 | ||
159 | def test_validate_missing_key(): | |
160 | with raises(ValueError): | |
161 | validate_dict( | |
162 | { | |
163 | 'a': 5, | |
164 | }, | |
165 | { | |
166 | 'a': int, | |
167 | 'b': str, | |
168 | }, | |
169 | required_keys=['a', 'b'], | |
170 | ) | |
171 | ||
172 | ||
173 | def test_validate_required_key(): | |
174 | validate_dict( | |
175 | { | |
176 | 'a': 5, | |
177 | 'b': "bee", | |
178 | }, | |
179 | { | |
180 | 'a': int, | |
181 | 'b': str, | |
182 | }, | |
183 | required_keys=['a', 'b'], | |
184 | ) |
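Read as a group, these tests specify validate_dict()'s contract: each schema entry maps a key to a type or a tuple of admissible types, the sentinels COLLECTION_OF_STRINGS and TUPLE_OF_INTS additionally check element types, every name in required_keys must be present, and any violation raises ValueError. A compact sketch of that contract follows; it is a simplified stand-in, not bundlewrap.utils.dicts.validate_dict itself:

    COLLECTION_OF_STRINGS = "collection of strings"
    TUPLE_OF_INTS = "tuple of ints"

    def validate_dict(candidate, schema, required_keys=()):
        for key in required_keys:
            if key not in candidate:
                raise ValueError(f"missing required key: {key!r}")
        for key, value in candidate.items():
            spec = schema.get(key)
            if spec == COLLECTION_OF_STRINGS:
                if (not isinstance(value, (tuple, list, set))
                        or not all(isinstance(i, str) for i in value)):
                    raise ValueError(f"{key!r}: not a collection of strings")
            elif spec == TUPLE_OF_INTS:
                if (not isinstance(value, tuple)
                        or not all(isinstance(i, int) for i in value)):
                    raise ValueError(f"{key!r}: not a tuple of ints")
            elif spec is not None and not isinstance(value, spec):
                raise ValueError(f"{key!r}: expected {spec}, got {type(value)}")

    validate_dict({'a': 5, 'b': "bee"}, {'a': int, 'b': str},
                  required_keys=['a', 'b'])  # passes silently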