New upstream release
Jonathan Carter
0 | # 3.10.0 | |
1 | ||
2 | 2020-05-17 | |
3 | ||
4 | * added metadata defaults and reactors | |
5 | * added `bw diff` | |
6 | * `items/` is now searched recursively | |
7 | ||
8 | ||
0 | 9 | # 3.9.0 |
1 | 10 | |
2 | 11 | 2020-05-04 |
0 | 0 | # -*- coding: utf-8 -*- |
1 | 1 | from __future__ import unicode_literals |
2 | 2 | |
3 | VERSION = (3, 9, 0) | |
3 | VERSION = (3, 10, 0) | |
4 | 4 | VERSION_STRING = ".".join([str(v) for v in VERSION]) |
3 | 3 | from os.path import exists, join |
4 | 4 | |
5 | 5 | from .exceptions import BundleError, NoSuchBundle, RepositoryError |
6 | from .metadata import DEFAULTS, DONE, RUN_ME_AGAIN, OVERWRITE | |
7 | from .utils import cached_property, get_all_attrs_from_file | |
6 | from .metadata import DEFAULTS, DONE, RUN_ME_AGAIN, OVERWRITE, DoNotRunAgain | |
7 | from .utils import cached_property | |
8 | 8 | from .utils.text import bold, mark_for_translation as _ |
9 | 9 | from .utils.text import validate_name |
10 | 10 | from .utils.ui import io |
14 | 14 | FILENAME_METADATA = "metadata.py" |
15 | 15 | |
16 | 16 | |
17 | def metadata_processor(func): | |
17 | def metadata_processor_classic(func): | |
18 | 18 | """ |
19 | 19 | Decorator that tags metadata processors. |
20 | 20 | """ |
21 | func.__is_a_metadata_processor = True | |
21 | func._is_metadata_processor = True | |
22 | func._is_classic_metadata_processor = True | |
23 | return func | |
24 | ||
25 | ||
26 | def metadata_reactor(func): | |
27 | """ | |
28 | Decorator that tags metadata reactors. | |
29 | """ | |
30 | func._is_metadata_processor = True | |
31 | func._is_metadata_reactor = True | |
22 | 32 | return func |
23 | 33 | |
24 | 34 | |
51 | 61 | if not exists(self.bundle_file): |
52 | 62 | return {} |
53 | 63 | else: |
54 | return get_all_attrs_from_file( | |
64 | return self.repo.get_all_attrs_from_file( | |
55 | 65 | self.bundle_file, |
56 | 66 | base_env={ |
57 | 67 | 'node': self.node, |
87 | 97 | ) |
88 | 98 | |
89 | 99 | @cached_property |
90 | def metadata_processors(self): | |
100 | def _metadata_processors(self): | |
91 | 101 | with io.job(_("{node} {bundle} collecting metadata processors").format( |
92 | 102 | node=bold(self.node.name), |
93 | 103 | bundle=bold(self.name), |
94 | 104 | )): |
95 | 105 | if not exists(self.metadata_file): |
96 | return [] | |
97 | result = [] | |
106 | return {}, set(), set() | |
107 | defaults = {} | |
108 | reactors = set() | |
109 | classic_processors = set() | |
98 | 110 | internal_names = set() |
99 | for name, attr in get_all_attrs_from_file( | |
111 | for name, attr in self.repo.get_all_attrs_from_file( | |
100 | 112 | self.metadata_file, |
101 | 113 | base_env={ |
102 | 114 | 'DEFAULTS': DEFAULTS, |
103 | 115 | 'DONE': DONE, |
116 | 'OVERWRITE': OVERWRITE, | |
104 | 117 | 'RUN_ME_AGAIN': RUN_ME_AGAIN, |
105 | 'OVERWRITE': OVERWRITE, | |
106 | 'metadata_processor': metadata_processor, | |
118 | 'DoNotRunAgain': DoNotRunAgain, | |
119 | 'metadata_processor': metadata_processor_classic, | |
120 | 'metadata_reactor': metadata_reactor, | |
107 | 121 | 'node': self.node, |
108 | 122 | 'repo': self.repo, |
109 | 123 | }, |
110 | 124 | ).items(): |
111 | if getattr(attr, '__is_a_metadata_processor', False): | |
125 | if name == "defaults": | |
126 | defaults = attr | |
127 | elif getattr(attr, '_is_metadata_processor', False): | |
112 | 128 | internal_name = getattr(attr, '__name__', name) |
113 | 129 | if internal_name in internal_names: |
114 | 130 | raise BundleError(_( |
125 | 141 | name=name, |
126 | 142 | )) |
127 | 143 | internal_names.add(internal_name) |
128 | result.append(attr) | |
129 | return result | |
144 | if getattr(attr, '_is_metadata_reactor', False): | |
145 | reactors.add(attr) | |
146 | elif getattr(attr, '_is_classic_metadata_processor', False): | |
147 | classic_processors.add(attr) | |
148 | else: | |
149 | # this should never happen | |
150 | raise AssertionError | |
151 | return defaults, reactors, classic_processors |
3 | 3 | from cProfile import Profile |
4 | 4 | from functools import wraps |
5 | 5 | from os import environ |
6 | from os.path import abspath, dirname | |
6 | from os.path import abspath | |
7 | 7 | from pipes import quote |
8 | 8 | from sys import argv, exit, stderr, stdout |
9 | 9 | from traceback import format_exc, print_exc |
122 | 122 | # 'bw repo create' is a special case that only takes a path |
123 | 123 | repo = path |
124 | 124 | else: |
125 | while True: | |
126 | try: | |
127 | repo = Repository(path) | |
128 | break | |
129 | except NoSuchRepository: | |
130 | if path == dirname(path): | |
131 | io.stderr(_( | |
132 | "{x} {path} " | |
133 | "is not a BundleWrap repository." | |
134 | ).format(path=quote(abspath(pargs.repo_path)), x=red("!!!"))) | |
135 | io.deactivate() | |
136 | exit(1) | |
137 | else: | |
138 | path = dirname(path) | |
139 | except MissingRepoDependency as exc: | |
140 | io.stderr(str(exc)) | |
141 | io.deactivate() | |
142 | exit(1) | |
143 | except Exception: | |
144 | io.stderr(format_exc()) | |
145 | io.deactivate() | |
146 | exit(1) | |
125 | try: | |
126 | repo = Repository(path) | |
127 | except NoSuchRepository: | |
128 | io.stderr(_( | |
129 | "{x} {path} " | |
130 | "is not a BundleWrap repository." | |
131 | ).format(path=quote(abspath(pargs.repo_path)), x=red("!!!"))) | |
132 | io.deactivate() | |
133 | exit(1) | |
134 | except MissingRepoDependency as exc: | |
135 | io.stderr(str(exc)) | |
136 | io.deactivate() | |
137 | exit(1) | |
138 | except Exception: | |
139 | io.stderr(format_exc()) | |
140 | io.deactivate() | |
141 | exit(1) | |
147 | 142 | |
148 | 143 | # convert all string args into text |
149 | 144 | text_pargs = {key: force_text(value) for key, value in vars(pargs).items()} |
0 | from difflib import unified_diff | |
1 | ||
2 | from ..items.files import DIFF_MAX_FILE_SIZE | |
3 | from ..metadata import metadata_to_json | |
4 | from ..repo import Repository | |
5 | from ..utils.cmdline import get_target_nodes | |
6 | from ..utils.dicts import diff_keys | |
7 | from ..utils.scm import get_git_branch, get_git_rev, set_git_rev | |
8 | from ..utils.text import force_text, mark_for_translation as _, red, blue, yellow | |
9 | from ..utils.ui import io, QUIT_EVENT | |
10 | ||
11 | from subprocess import check_call | |
12 | ||
13 | ||
14 | def diff_metadata(node_a, node_b): | |
15 | node_a_metadata = metadata_to_json(node_a.metadata).splitlines() | |
16 | node_b_metadata = metadata_to_json(node_b.metadata).splitlines() | |
17 | io.stdout("\n".join(unified_diff( | |
18 | node_a_metadata, | |
19 | node_b_metadata, | |
20 | fromfile=node_a.name, | |
21 | tofile=node_b.name, | |
22 | lineterm='', | |
23 | ))) | |
24 | ||
25 | ||
26 | def diff_item(node_a, node_b, item): | |
27 | item_a = node_a.get_item(item) | |
28 | item_a_dict = item_a.cdict() | |
29 | item_b = node_b.get_item(item) | |
30 | item_b_dict = item_b.cdict() | |
31 | ||
32 | if ( | |
33 | item.startswith("file:") | |
34 | and item_a.attributes['content_type'] not in ('base64', 'binary') | |
35 | and item_b.attributes['content_type'] not in ('base64', 'binary') | |
36 | and len(item_a.content) < DIFF_MAX_FILE_SIZE | |
37 | and len(item_b.content) < DIFF_MAX_FILE_SIZE | |
38 | ): | |
39 | del item_a_dict['content_hash'] | |
40 | del item_b_dict['content_hash'] | |
41 | item_a_dict['content'] = item_a.content | |
42 | item_b_dict['content'] = item_b.content | |
43 | ||
44 | relevant_keys = diff_keys(item_a_dict, item_b_dict) | |
45 | io.stdout(item_a.ask(item_b_dict, item_a_dict, relevant_keys)) | |
46 | ||
47 | ||
48 | def diff_node(node_a, node_b): | |
49 | node_a_hashes = sorted( | |
50 | ["{}\t{}".format(i, h) for i, h in node_a.cdict.items()] | |
51 | ) | |
52 | node_b_hashes = sorted( | |
53 | ["{}\t{}".format(i, h) for i, h in node_b.cdict.items()] | |
54 | ) | |
55 | io.stdout("\n".join( | |
56 | filter( | |
57 | lambda line: line.startswith("+") or line.startswith("-"), | |
58 | unified_diff( | |
59 | node_a_hashes, | |
60 | node_b_hashes, | |
61 | fromfile=node_a.name, | |
62 | tofile=node_b.name, | |
63 | lineterm='', | |
64 | n=0, | |
65 | ), | |
66 | ), | |
67 | )) | |
68 | ||
69 | ||
70 | def command_closure(command): | |
71 | def run_it(): | |
72 | io.stderr(_( | |
73 | "{x} Running: {command}" | |
74 | ).format( | |
75 | command=command, | |
76 | x=yellow("i"), | |
77 | )) | |
78 | check_call(command, shell=True) | |
79 | ||
80 | return run_it | |
81 | ||
82 | ||
83 | def git_checkout_closure(rev, detach=False): | |
84 | def run_it(): | |
85 | io.stderr(_( | |
86 | "{x} Switching to git rev: {rev}" | |
87 | ).format( | |
88 | rev=rev, | |
89 | x=yellow("i"), | |
90 | )) | |
91 | set_git_rev(rev, detach=detach) | |
92 | ||
93 | return run_it | |
94 | ||
95 | ||
96 | def hooked_diff_metadata_single_node(repo, node, intermissions, epilogues): | |
97 | node_before_metadata = metadata_to_json(node.metadata).splitlines() | |
98 | ||
99 | for intermission in intermissions: | |
100 | intermission() | |
101 | ||
102 | after_repo = Repository(repo.path) | |
103 | node_after = after_repo.get_node(node.name) | |
104 | node_after_metadata = metadata_to_json(node_after.metadata).splitlines() | |
105 | io.stdout("\n".join(unified_diff( | |
106 | node_before_metadata, | |
107 | node_after_metadata, | |
108 | fromfile=_("before"), | |
109 | tofile=_("after"), | |
110 | lineterm='', | |
111 | ))) | |
112 | ||
113 | for epilogue in epilogues: | |
114 | epilogue() | |
115 | ||
116 | ||
117 | def hooked_diff_metadata_multiple_nodes(repo, nodes, intermissions, epilogues): | |
118 | nodes_metadata_before = {} | |
119 | for node in nodes: | |
120 | if QUIT_EVENT.is_set(): | |
121 | exit(1) | |
122 | nodes_metadata_before[node.name] = node.metadata_hash() | |
123 | ||
124 | for intermission in intermissions: | |
125 | intermission() | |
126 | ||
127 | after_repo = Repository(repo.path) | |
128 | nodes_metadata_after = {} | |
129 | for node_name in nodes_metadata_before: | |
130 | if QUIT_EVENT.is_set(): | |
131 | exit(1) | |
132 | nodes_metadata_after[node_name] = \ | |
133 | after_repo.get_node(node_name).metadata_hash() | |
134 | ||
135 | node_hashes_before = sorted( | |
136 | ["{}\t{}".format(i, h) for i, h in nodes_metadata_before.items()] | |
137 | ) | |
138 | node_hashes_after = sorted( | |
139 | ["{}\t{}".format(i, h) for i, h in nodes_metadata_after.items()] | |
140 | ) | |
141 | io.stdout("\n".join( | |
142 | filter( | |
143 | lambda line: line.startswith("+") or line.startswith("-"), | |
144 | unified_diff( | |
145 | node_hashes_before, | |
146 | node_hashes_after, | |
147 | fromfile=_("before"), | |
148 | tofile=_("after"), | |
149 | lineterm='', | |
150 | n=0, | |
151 | ), | |
152 | ), | |
153 | )) | |
154 | ||
155 | for epilogue in epilogues: | |
156 | epilogue() | |
157 | ||
158 | ||
159 | def hooked_diff_single_item(repo, node, item, intermissions, epilogues): | |
160 | item_before = node.get_item(item) | |
161 | item_before_dict = item_before.cdict() | |
162 | item_before_diffable = False | |
163 | item_before_content = None | |
164 | ||
165 | if ( | |
166 | item.startswith("file:") | |
167 | and item_before.attributes['content_type'] not in ('base64', 'binary') | |
168 | and len(item_before.content) < DIFF_MAX_FILE_SIZE | |
169 | ): | |
170 | item_before_diffable = True | |
171 | item_before_content = item_before.content | |
172 | ||
173 | for intermission in intermissions: | |
174 | intermission() | |
175 | ||
176 | repo_after = Repository(repo.path) | |
177 | node_after = repo_after.get_node(node.name) | |
178 | item_after = node_after.get_item(item) | |
179 | item_after_dict = item_after.cdict() | |
180 | ||
181 | if ( | |
182 | item.startswith("file:") | |
183 | and item_before_diffable | |
184 | and item_after.attributes['content_type'] not in ('base64', 'binary') | |
185 | and len(item_after.content) < DIFF_MAX_FILE_SIZE | |
186 | ): | |
187 | del item_before_dict['content_hash'] | |
188 | del item_after_dict['content_hash'] | |
189 | item_before_dict['content'] = item_before_content | |
190 | item_after_dict['content'] = item_after.content | |
191 | ||
192 | relevant_keys = diff_keys(item_before_dict, item_after_dict) | |
193 | io.stdout(item_before.ask(item_after_dict, item_before_dict, relevant_keys)) | |
194 | ||
195 | for epilogue in epilogues: | |
196 | epilogue() | |
197 | ||
198 | ||
199 | def hooked_diff_config_multiple_nodes(repo, nodes, intermissions, epilogues): | |
200 | nodes_config_before = {} | |
201 | for node in nodes: | |
202 | if QUIT_EVENT.is_set(): | |
203 | exit(1) | |
204 | nodes_config_before[node.name] = node.hash() | |
205 | ||
206 | for intermission in intermissions: | |
207 | intermission() | |
208 | ||
209 | after_repo = Repository(repo.path) | |
210 | nodes_config_after = {} | |
211 | for node_name in nodes_config_before: | |
212 | if QUIT_EVENT.is_set(): | |
213 | exit(1) | |
214 | nodes_config_after[node_name] = \ | |
215 | after_repo.get_node(node_name).hash() | |
216 | ||
217 | node_hashes_before = sorted( | |
218 | ["{}\t{}".format(i, h) for i, h in nodes_config_before.items()] | |
219 | ) | |
220 | node_hashes_after = sorted( | |
221 | ["{}\t{}".format(i, h) for i, h in nodes_config_after.items()] | |
222 | ) | |
223 | io.stdout("\n".join( | |
224 | filter( | |
225 | lambda line: line.startswith("+") or line.startswith("-"), | |
226 | unified_diff( | |
227 | node_hashes_before, | |
228 | node_hashes_after, | |
229 | fromfile=_("before"), | |
230 | tofile=_("after"), | |
231 | lineterm='', | |
232 | n=0, | |
233 | ), | |
234 | ), | |
235 | )) | |
236 | ||
237 | for epilogue in epilogues: | |
238 | epilogue() | |
239 | ||
240 | ||
241 | def bw_diff(repo, args): | |
242 | if args['metadata'] and args['item']: | |
243 | io.stderr(_( | |
244 | "{x} Cannot compare metadata and items at the same time" | |
245 | ).format(x=red("!!!"))) | |
246 | exit(1) | |
247 | ||
248 | target_nodes = get_target_nodes(repo, args['target'], adhoc_nodes=args['adhoc_nodes']) | |
249 | ||
250 | if args['branch'] or args['cmd_change'] or args['cmd_reset'] or args['prompt']: | |
251 | intermissions = [] | |
252 | epilogues = [] | |
253 | if args['branch']: | |
254 | original_rev = force_text(get_git_branch() or get_git_rev()) | |
255 | intermissions.append(git_checkout_closure(force_text(args['branch']), detach=True)) | |
256 | if args['cmd_change']: | |
257 | intermissions.append(command_closure(args['cmd_change'])) | |
258 | if args['cmd_reset']: | |
259 | epilogues.append(command_closure(args['cmd_reset'])) | |
260 | if args['branch']: | |
261 | epilogues.append(git_checkout_closure(original_rev, detach=False)) | |
262 | ||
263 | if args['metadata']: | |
264 | if len(target_nodes) == 1: | |
265 | def intermission(): | |
266 | io.stdout(_("{x} Took a snapshot of that node's metadata.").format(x=blue("i"))) | |
267 | io.stdout(_("{x} You may now make changes to your repo.").format(x=blue("i"))) | |
268 | if not io.ask(_("{x} Ready to proceed? (n to cancel)").format(x=blue("?")), True): | |
269 | exit(1) | |
270 | if args['prompt']: | |
271 | intermissions.append(intermission) | |
272 | hooked_diff_metadata_single_node(repo, target_nodes[0], intermissions, epilogues) | |
273 | else: | |
274 | def intermission(): | |
275 | io.stdout(_("{x} Took a snapshot of those nodes' metadata.").format(x=blue("i"))) | |
276 | io.stdout(_("{x} You may now make changes to your repo.").format(x=blue("i"))) | |
277 | if not io.ask(_("{x} Ready to proceed? (n to cancel)").format(x=blue("?")), True): | |
278 | exit(1) | |
279 | if args['prompt']: | |
280 | intermissions.append(intermission) | |
281 | hooked_diff_metadata_multiple_nodes(repo, target_nodes, intermissions, epilogues) | |
282 | elif args['item']: | |
283 | if len(target_nodes) != 1: | |
284 | io.stderr(_( | |
285 | "{x} Select exactly one node to compare item" | |
286 | ).format(x=red("!!!"))) | |
287 | exit(1) | |
288 | ||
289 | def intermission(): | |
290 | io.stdout(_("{x} Took a snapshot of that item.").format(x=blue("i"))) | |
291 | io.stdout(_("{x} You may now make changes to your repo.").format(x=blue("i"))) | |
292 | if not io.ask(_("{x} Ready to proceed? (n to cancel)").format(x=blue("?")), True): | |
293 | exit(1) | |
294 | if args['prompt']: | |
295 | intermissions.append(intermission) | |
296 | hooked_diff_single_item(repo, target_nodes[0], args['item'], intermissions, epilogues) | |
297 | else: | |
298 | def intermission(): | |
299 | io.stdout(_("{x} Took a snapshot of those nodes.").format(x=blue("i"))) | |
300 | io.stdout(_("{x} You may now make changes to your repo.").format(x=blue("i"))) | |
301 | if not io.ask(_("{x} Ready to proceed? (n to cancel)").format(x=blue("?")), True): | |
302 | exit(1) | |
303 | if args['prompt']: | |
304 | intermissions.append(intermission) | |
305 | hooked_diff_config_multiple_nodes(repo, target_nodes, intermissions, epilogues) | |
306 | else: | |
307 | if len(target_nodes) != 2: | |
308 | io.stderr(_( | |
309 | "{x} Exactly two nodes must be selected" | |
310 | ).format(x=red("!!!"))) | |
311 | exit(1) | |
312 | node_a, node_b = target_nodes | |
313 | ||
314 | if args['metadata']: | |
315 | diff_metadata(node_a, node_b) | |
316 | elif args['item']: | |
317 | diff_item(node_a, node_b, args['item']) | |
318 | else: | |
319 | diff_node(node_a, node_b) |
1 | 1 | from __future__ import unicode_literals |
2 | 2 | |
3 | 3 | from decimal import Decimal |
4 | from json import dumps | |
5 | 4 | |
6 | from ..metadata import MetadataJSONEncoder | |
5 | from ..metadata import metadata_to_json | |
7 | 6 | from ..utils import Fault |
8 | 7 | from ..utils.cmdline import get_node, get_target_nodes |
9 | 8 | from ..utils.dicts import value_at_key_path |
50 | 49 | break |
51 | 50 | page_lines(render_table(table)) |
52 | 51 | else: |
53 | for line in dumps( | |
52 | for line in metadata_to_json( | |
54 | 53 | value_at_key_path(node.metadata, args['keys']), |
55 | cls=MetadataJSONEncoder, | |
56 | indent=4, | |
57 | sort_keys=True, | |
58 | 54 | ).splitlines(): |
59 | 55 | io.stdout(force_text(line)) |
8 | 8 | from ..utils.text import mark_for_translation as _ |
9 | 9 | from .apply import bw_apply |
10 | 10 | from .debug import bw_debug |
11 | from .diff import bw_diff | |
11 | 12 | from .groups import bw_groups |
12 | 13 | from .hash import bw_hash |
13 | 14 | from .items import bw_items |
205 | 206 | help=_("name of node to inspect"), |
206 | 207 | ) |
207 | 208 | |
209 | # bw diff | |
210 | help_diff = _("Show differences between nodes") | |
211 | parser_diff = subparsers.add_parser("diff", description=help_diff, help=help_diff) | |
212 | parser_diff.set_defaults(func=bw_diff) | |
213 | parser_diff.add_argument( | |
214 | "-b", | |
215 | "--branch", | |
216 | default=None, | |
217 | dest='branch', | |
218 | metavar=_("REV"), | |
219 | required=False, | |
220 | type=str, | |
221 | help=_("compare with this git rev instead (requires clean working dir)"), | |
222 | ) | |
223 | parser_diff.add_argument( | |
224 | "-c", | |
225 | "--cmd-change", | |
226 | default=None, | |
227 | dest='cmd_change', | |
228 | metavar=_("CMD_CHANGE"), | |
229 | required=False, | |
230 | type=str, | |
231 | help=_("command to execute between taking metadata snapshots (e.g., change Git branch)"), | |
232 | ) | |
233 | parser_diff.add_argument( | |
234 | "-r", | |
235 | "--cmd-reset", | |
236 | default=None, | |
237 | dest='cmd_reset', | |
238 | metavar=_("CMD_RESET"), | |
239 | required=False, | |
240 | type=str, | |
241 | help=_("command to execute when finished (e.g., switch back to original Git branch)"), | |
242 | ) | |
243 | parser_diff.add_argument( | |
244 | "-p", | |
245 | "--prompt", | |
246 | action='store_true', | |
247 | default=False, | |
248 | dest='prompt', | |
249 | help=_("interactively ask for user to make changes"), | |
250 | ) | |
251 | parser_diff.add_argument( | |
252 | "-i", | |
253 | "--item", | |
254 | default=None, | |
255 | dest='item', | |
256 | metavar=_("ITEM"), | |
257 | required=False, | |
258 | type=str, | |
259 | help=_("compare this specific item between nodes"), | |
260 | ) | |
261 | parser_diff.add_argument( | |
262 | "-m", | |
263 | "--metadata", | |
264 | action='store_true', | |
265 | default=False, | |
266 | dest='metadata', | |
267 | help=_("compare metadata instead of configuration"), | |
268 | ) | |
269 | parser_diff.add_argument( | |
270 | 'target', | |
271 | metavar=_("TARGETS"), | |
272 | type=str, | |
273 | help=HELP_get_target_nodes, | |
274 | ) | |
275 | ||
208 | 276 | # bw groups |
209 | 277 | help_groups = _("Lists groups in this repository") |
210 | 278 | parser_groups = subparsers.add_parser("groups", description=help_groups, help=help_groups) |
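For illustration, the options defined above combine into invocations like the following (node and item names are hypothetical; targets are given as a comma-separated list as with other `bw` subcommands):

```sh
# compare full configuration hashes of exactly two nodes
bw diff node1,node2

# compare metadata of exactly two nodes
bw diff -m node1,node2

# compare a single item on one node against another git rev
bw diff -i file:/etc/motd -b mybranch node1

# snapshot metadata, pause for manual repo changes, then diff
bw diff -m -p node1
```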
9 | 9 | |
10 | 10 | def bw_stats(repo, args): |
11 | 11 | items = {} |
12 | metaprocs = set() | |
12 | metadata_defaults = set() | |
13 | metadata_processors = set() | |
14 | metadata_reactors = set() | |
13 | 15 | for node in repo.nodes: |
14 | for metadata_processor_name, metadata_processor in node.metadata_processors: | |
15 | metaprocs.add(metadata_processor_name) | |
16 | for metadata_default_name, metadata_default in node.metadata_defaults: | |
17 | metadata_defaults.add(metadata_default_name) | |
18 | # TODO remove this in 4.0 | |
19 | for metadata_processor_name, metadata_processor in node._metadata_processors[2]: | |
20 | metadata_processors.add(metadata_processor_name) | |
21 | for metadata_reactor_name, metadata_reactor in node.metadata_reactors: | |
22 | metadata_reactors.add(metadata_reactor_name) | |
16 | 23 | for item in node.items: |
17 | 24 | items.setdefault(item.ITEM_TYPE_NAME, 0) |
18 | 25 | items[item.ITEM_TYPE_NAME] += 1 |
26 | 33 | [str(len(repo.nodes)), _("nodes")], |
27 | 34 | [str(len(repo.groups)), _("groups")], |
28 | 35 | [str(len(repo.bundle_names)), _("bundles")], |
29 | [str(len(metaprocs)), _("metadata processors")], | |
36 | [str(len(metadata_defaults)), _("metadata defaults")], | |
37 | [str(len(metadata_processors)), _("metadata processors")], | |
38 | [str(len(metadata_reactors)), _("metadata reactors")], | |
30 | 39 | [str(sum([len(list(node.items)) for node in repo.nodes])), _("items")], |
31 | 40 | ROW_SEPARATOR, |
32 | 41 | ] |
6 | 6 | import re |
7 | 7 | |
8 | 8 | from bundlewrap.exceptions import BundleError |
9 | from bundlewrap.metadata import MetadataJSONEncoder | |
9 | from bundlewrap.metadata import metadata_to_json | |
10 | 10 | from bundlewrap.operations import run_local |
11 | 11 | from bundlewrap.items import BUILTIN_ITEM_ATTRIBUTES, Item |
12 | 12 | from bundlewrap.items.files import content_processor_jinja2, content_processor_mako |
148 | 148 | |
149 | 149 | @property |
150 | 150 | def manifest(self): |
151 | return json.dumps( | |
152 | self._manifest_dict, | |
153 | cls=MetadataJSONEncoder, | |
154 | indent=4, | |
155 | sort_keys=True, | |
156 | ) | |
151 | return metadata_to_json(self._manifest_dict) | |
157 | 152 | |
158 | 153 | @property |
159 | 154 | def namespace(self): |
31 | 31 | RUN_ME_AGAIN = 2 |
32 | 32 | DEFAULTS = 3 |
33 | 33 | OVERWRITE = 4 |
34 | ||
35 | ||
36 | class DoNotRunAgain(Exception): | |
37 | """ | |
38 | Raised from metadata reactors to indicate they can be disregarded. | |
39 | """ | |
40 | pass | |
41 | ||
42 | ||
43 | def validate_metadata(metadata, _top_level=True): | |
44 | if _top_level and not isinstance(metadata, dict): | |
45 | raise TypeError(_("metadata must be a dict")) | |
46 | if isinstance(metadata, dict): | |
47 | for key, value in metadata.items(): | |
48 | if not isinstance(key, text_type): | |
49 | raise TypeError(_("metadata keys must be str, not: {}").format(repr(key))) | |
50 | validate_metadata(value, _top_level=False) | |
51 | elif isinstance(metadata, (tuple, list, set)): | |
52 | for value in metadata: | |
53 | validate_metadata(value, _top_level=False) | |
54 | elif not isinstance(metadata, METADATA_TYPES): | |
55 | raise TypeError(_("illegal metadata value type: {}").format(repr(metadata))) | |
34 | 56 | |
35 | 57 | |
36 | 58 | def atomic(obj): |
76 | 98 | else: |
77 | 99 | blame_dict[path] = (blame_name,) |
78 | 100 | return blame_dict |
101 | ||
102 | ||
103 | def changes_metadata(existing_metadata, new_metadata): | |
104 | """ | |
105 | Returns True if new_metadata contains any keys or values not present | |
106 | in or different from existing_metadata. | |
107 | """ | |
108 | for key, new_value in new_metadata.items(): | |
109 | if key not in existing_metadata: | |
110 | return True | |
111 | if isinstance(new_value, dict): | |
112 | if not isinstance(existing_metadata[key], dict): | |
113 | return True | |
114 | if changes_metadata(existing_metadata[key], new_value): | |
115 | return True | |
116 | if isinstance(existing_metadata[key], Fault) and isinstance(new_value, Fault): | |
117 | # Always consider Faults as equal. It would arguably be more correct to | |
118 | # always assume them to be different, but that would mean that we could | |
119 | # never do change detection between two dicts of metadata. So we have no | |
120 | # choice but to warn users in docs that Faults will always be considered | |
121 | # equal to one another. | |
122 | continue | |
123 | if new_value != existing_metadata[key]: | |
124 | return True | |
125 | return False | |
79 | 126 | |
80 | 127 | |
81 | 128 | def check_metadata_keys(node): |
357 | 404 | raise ValueError(_("illegal metadata value type: {}").format(repr(obj))) |
358 | 405 | |
359 | 406 | |
407 | def metadata_to_json(metadata): | |
408 | return dumps( | |
409 | metadata, | |
410 | cls=MetadataJSONEncoder, | |
411 | indent=4, | |
412 | sort_keys=True, | |
413 | ) | |
414 | ||
415 | ||
360 | 416 | def hash_metadata(sdict): |
361 | 417 | """ |
362 | 418 | Returns a canonical SHA1 hash to describe this dict. |
363 | 419 | """ |
364 | return sha1(dumps( | |
365 | sdict, | |
366 | cls=MetadataJSONEncoder, | |
367 | indent=None, | |
368 | sort_keys=True, | |
369 | ).encode('utf-8')).hexdigest() | |
420 | return sha1(metadata_to_json(sdict).encode('utf-8')).hexdigest() |
29 | 29 | from .metadata import hash_metadata |
30 | 30 | from .utils import cached_property, names |
31 | 31 | from .utils.dicts import hash_statedict |
32 | from .utils.metastack import Metastack | |
32 | 33 | from .utils.text import ( |
33 | 34 | blue, |
34 | 35 | bold, |
703 | 704 | return hash_metadata(self.metadata) |
704 | 705 | |
705 | 706 | @property |
706 | def metadata_processors(self): | |
707 | def metadata_defaults(self): | |
708 | return self._metadata_processors[0] | |
709 | ||
710 | @property | |
711 | def _metadata_processors(self): | |
712 | def tuple_with_name(kind, bundle, metadata_processor): | |
713 | return ( | |
714 | "{}:{}.{}".format( | |
715 | kind, | |
716 | bundle.name, | |
717 | metadata_processor.__name__, | |
718 | ), | |
719 | metadata_processor, | |
720 | ) | |
721 | ||
722 | defaults = [] | |
723 | reactors = set() | |
724 | classic_metaprocs = set() | |
725 | ||
707 | 726 | for bundle in self.bundles: |
708 | for metadata_processor in bundle.metadata_processors: | |
709 | yield ( | |
710 | "{}.{}".format( | |
711 | bundle.name, | |
712 | metadata_processor.__name__, | |
713 | ), | |
714 | metadata_processor, | |
715 | ) | |
727 | if bundle._metadata_processors[0]: | |
728 | defaults.append(( | |
729 | "metadata_defaults:{}".format(bundle.name), | |
730 | bundle._metadata_processors[0], | |
731 | )) | |
732 | for reactor in bundle._metadata_processors[1]: | |
733 | reactors.add(tuple_with_name("metadata_reactor", bundle, reactor)) | |
734 | for classic_metaproc in bundle._metadata_processors[2]: | |
735 | classic_metaprocs.add(tuple_with_name("metadata_processor", bundle, classic_metaproc)) | |
736 | ||
737 | return defaults, reactors, classic_metaprocs | |
738 | ||
739 | @property | |
740 | def metadata_reactors(self): | |
741 | return self._metadata_processors[1] | |
716 | 742 | |
717 | 743 | @property |
718 | 744 | def partial_metadata(self): |
725 | 751 | because they will be fed all metadata updates until no more |
726 | 752 | changes are made by any metadata processor. |
727 | 753 | """ |
728 | return self.repo._metadata_for_node(self.name, partial=True) | |
754 | ||
755 | partial = self.repo._metadata_for_node(self.name, partial=True) | |
756 | ||
757 | # TODO remove this mechanism in bw 4.0, always return Metastacks | |
758 | if self.repo._in_new_metareactor: | |
759 | return Metastack(partial) | |
760 | else: | |
761 | return partial | |
729 | 762 | |
730 | 763 | def run(self, command, data_stdin=None, may_fail=False, log_output=False): |
731 | 764 | assert self.os in self.OS_FAMILY_UNIX |
2 | 2 | |
3 | 3 | from imp import load_source |
4 | 4 | from inspect import isabstract |
5 | from os import listdir, mkdir | |
6 | from os.path import isdir, isfile, join | |
5 | from os import environ, listdir, mkdir, walk | |
6 | from os.path import abspath, dirname, isdir, isfile, join | |
7 | 7 | from threading import Lock |
8 | 8 | |
9 | 9 | from pkg_resources import DistributionNotFound, require, VersionConflict |
10 | 10 | |
11 | from . import items, utils, VERSION_STRING | |
11 | from . import items, VERSION_STRING | |
12 | 12 | from .bundle import FILENAME_BUNDLE |
13 | 13 | from .exceptions import ( |
14 | 14 | NoSuchGroup, |
20 | 20 | from .group import Group |
21 | 21 | from .metadata import ( |
22 | 22 | blame_changed_paths, |
23 | changes_metadata, | |
23 | 24 | check_metadata_processor_result, |
24 | 25 | deepcopy_metadata, |
25 | 26 | DEFAULTS, |
26 | 27 | DONE, |
27 | 28 | OVERWRITE, |
29 | DoNotRunAgain, | |
28 | 30 | ) |
29 | 31 | from .node import _flatten_group_hierarchy, Node |
30 | 32 | from .secrets import FILENAME_SECRETS, generate_initial_secrets_cfg, SecretProxy |
31 | from .utils import cached_property, names | |
33 | from .utils import cached_property, get_file_contents, names | |
32 | 34 | from .utils.scm import get_git_branch, get_git_clean, get_rev |
33 | 35 | from .utils.dicts import hash_statedict, merge_dict |
36 | from .utils.metastack import Metastack | |
34 | 37 | from .utils.text import bold, mark_for_translation as _, red, validate_name |
35 | 38 | from .utils.ui import io, QUIT_EVENT |
36 | 39 | |
42 | 45 | FILENAME_GROUPS = "groups.py" |
43 | 46 | FILENAME_NODES = "nodes.py" |
44 | 47 | FILENAME_REQUIREMENTS = "requirements.txt" |
48 | MAX_METADATA_ITERATIONS = int(environ.get("BW_MAX_METADATA_ITERATIONS", "100")) | |
45 | 49 | |
46 | 50 | HOOK_EVENTS = ( |
47 | 51 | 'action_run_end', |
97 | 101 | } |
98 | 102 | |
99 | 103 | |
100 | def groups_from_file(filepath, libs, repo_path, vault): | |
101 | """ | |
102 | Returns all groups as defined in the given groups.py. | |
103 | """ | |
104 | try: | |
105 | flat_group_dict = utils.getattr_from_file( | |
106 | filepath, | |
107 | 'groups', | |
108 | base_env={ | |
109 | 'libs': libs, | |
110 | 'repo_path': repo_path, | |
111 | 'vault': vault, | |
112 | }, | |
113 | ) | |
114 | except KeyError: | |
115 | raise RepositoryError(_( | |
116 | "{} must define a 'groups' variable" | |
117 | ).format(filepath)) | |
118 | for groupname, infodict in flat_group_dict.items(): | |
119 | yield Group(groupname, infodict) | |
120 | ||
121 | ||
122 | 104 | class HooksProxy(object): |
123 | def __init__(self, path): | |
105 | def __init__(self, repo, path): | |
106 | self.repo = repo | |
124 | 107 | self.__hook_cache = {} |
125 | 108 | self.__module_cache = {} |
126 | 109 | self.__path = path |
170 | 153 | continue |
171 | 154 | self.__module_cache[filename] = {} |
172 | 155 | self.__registered_hooks[filename] = [] |
173 | for name, obj in utils.get_all_attrs_from_file(filepath).items(): | |
156 | for name, obj in self.repo.get_all_attrs_from_file(filepath).items(): | |
174 | 157 | if name not in HOOK_EVENTS: |
175 | 158 | continue |
176 | 159 | self.__module_cache[filename][name] = obj |
177 | 160 | self.__registered_hooks[filename].append(name) |
178 | ||
179 | ||
180 | def items_from_path(path): | |
181 | """ | |
182 | Looks for Item subclasses in the given path. | |
183 | ||
184 | An alternative method would involve metaclasses (as Django | |
185 | does it), but then it gets very hard to have two separate repos | |
186 | in the same process, because both of them would register config | |
187 | item classes globally. | |
188 | """ | |
189 | if not isdir(path): | |
190 | return | |
191 | for filename in listdir(path): | |
192 | filepath = join(path, filename) | |
193 | if not filename.endswith(".py") or \ | |
194 | not isfile(filepath) or \ | |
195 | filename.startswith("_"): | |
196 | continue | |
197 | for name, obj in \ | |
198 | utils.get_all_attrs_from_file(filepath).items(): | |
199 | if obj == items.Item or name.startswith("_"): | |
200 | continue | |
201 | try: | |
202 | if issubclass(obj, items.Item) and not isabstract(obj): | |
203 | yield obj | |
204 | except TypeError: | |
205 | pass | |
206 | 161 | |
207 | 162 | |
208 | 163 | class LibsProxy(object): |
225 | 180 | return self.__module_cache[attrname] |
226 | 181 | |
227 | 182 | |
228 | def nodes_from_file(filepath, libs, repo_path, vault): | |
229 | """ | |
230 | Returns a list of nodes as defined in the given nodes.py. | |
231 | """ | |
232 | try: | |
233 | flat_node_dict = utils.getattr_from_file( | |
234 | filepath, | |
235 | 'nodes', | |
236 | base_env={ | |
237 | 'libs': libs, | |
238 | 'repo_path': repo_path, | |
239 | 'vault': vault, | |
240 | }, | |
241 | ) | |
242 | except KeyError: | |
243 | raise RepositoryError( | |
244 | _("{} must define a 'nodes' variable").format(filepath) | |
245 | ) | |
246 | for nodename, infodict in flat_node_dict.items(): | |
247 | yield Node(nodename, infodict) | |
248 | ||
249 | ||
250 | 183 | class Repository(object): |
251 | 184 | def __init__(self, repo_path=None): |
252 | self.path = "/dev/null" if repo_path is None else repo_path | |
185 | if repo_path is None: | |
186 | self.path = "/dev/null" | |
187 | else: | |
188 | self.path = self._discover_root_path(abspath(repo_path)) | |
253 | 189 | |
254 | 190 | self._set_path(self.path) |
255 | 191 | |
256 | 192 | self.bundle_names = [] |
257 | 193 | self.group_dict = {} |
258 | 194 | self.node_dict = {} |
195 | self._get_all_attr_code_cache = {} | |
196 | self._get_all_attr_result_cache = {} | |
259 | 197 | self._node_metadata_blame = {} |
260 | 198 | self._node_metadata_complete = {} |
261 | 199 | self._node_metadata_partial = {} |
263 | 201 | self._node_metadata_lock = Lock() |
264 | 202 | |
265 | 203 | if repo_path is not None: |
266 | self.populate_from_path(repo_path) | |
204 | self.populate_from_path(self.path) | |
267 | 205 | else: |
268 | self.item_classes = list(items_from_path(items.__path__[0])) | |
206 | self.item_classes = list(self.items_from_dir(items.__path__[0])) | |
269 | 207 | |
270 | 208 | def __eq__(self, other): |
271 | 209 | if self.path == "/dev/null": |
293 | 231 | """ |
294 | 232 | Adds the given group object to this repo. |
295 | 233 | """ |
296 | if group.name in utils.names(self.nodes): | |
234 | if group.name in names(self.nodes): | |
297 | 235 | raise RepositoryError(_("you cannot have a node and a group " |
298 | 236 | "both named '{}'").format(group.name)) |
299 | if group.name in utils.names(self.groups): | |
237 | if group.name in names(self.groups): | |
300 | 238 | raise RepositoryError(_("you cannot have two groups " |
301 | 239 | "both named '{}'").format(group.name)) |
302 | 240 | group.repo = self |
306 | 244 | """ |
307 | 245 | Adds the given node object to this repo. |
308 | 246 | """ |
309 | if node.name in utils.names(self.groups): | |
247 | if node.name in names(self.groups): | |
310 | 248 | raise RepositoryError(_("you cannot have a node and a group " |
311 | 249 | "both named '{}'").format(node.name)) |
312 | if node.name in utils.names(self.nodes): | |
250 | if node.name in names(self.nodes): | |
313 | 251 | raise RepositoryError(_("you cannot have two nodes " |
314 | 252 | "both named '{}'").format(node.name)) |
315 | 253 | |
371 | 309 | node = Node(node_name) |
372 | 310 | self.add_node(node) |
373 | 311 | return node |
312 | ||
313 | def get_all_attrs_from_file(self, path, base_env=None): | |
314 | """ | |
315 | Reads all 'attributes' (as if the file were a module) from a source file. | 
316 | """ | |
317 | if base_env is None: | |
318 | base_env = {} | |
319 | ||
320 | if not base_env and path in self._get_all_attr_result_cache: | |
321 | # do not allow caching when passing in a base env because that | |
322 | # breaks repeated calls with different base envs for the same | |
323 | # file | |
324 | return self._get_all_attr_result_cache[path] | |
325 | ||
326 | if path not in self._get_all_attr_code_cache: | |
327 | source = get_file_contents(path) | |
328 | self._get_all_attr_code_cache[path] = \ | |
329 | compile(source, path, mode='exec') | |
330 | ||
331 | code = self._get_all_attr_code_cache[path] | |
332 | env = base_env.copy() | |
333 | try: | |
334 | exec(code, env) | |
335 | except: | |
336 | io.stderr("Exception while executing {}".format(path)) | |
337 | raise | |
338 | ||
339 | if not base_env: | |
340 | self._get_all_attr_result_cache[path] = env | |
341 | ||
342 | return env | |
343 | ||
344 | def nodes_or_groups_from_file(self, path, attribute): | |
345 | try: | |
346 | flat_dict = self.get_all_attrs_from_file( | |
347 | path, | |
348 | base_env={ | |
349 | 'libs': self.libs, | |
350 | 'repo_path': self.path, | |
351 | 'vault': self.vault, | |
352 | }, | |
353 | )[attribute] | |
354 | except KeyError: | |
355 | raise RepositoryError(_( | |
356 | "{} must define a '{}' variable" | |
357 | ).format(path, attribute)) | |
358 | for name, infodict in flat_dict.items(): | |
359 | yield (name, infodict) | |
360 | ||
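A `nodes.py` that satisfies this loader is a plain Python file defining a `nodes` dict; `libs`, `repo_path` and `vault` are injected as globals. A minimal sketch (all names hypothetical):

```python
# nodes.py
nodes = {
    "node1": {
        "hostname": "node1.example.com",
        "bundles": ["mybundle"],
    },
}
```

`groups.py` works the same way through its `groups` variable.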
361 | def items_from_dir(self, path): | |
362 | """ | |
363 | Looks for Item subclasses in the given path. | |
364 | ||
365 | An alternative method would involve metaclasses (as Django | |
366 | does it), but then it gets very hard to have two separate repos | |
367 | in the same process, because both of them would register config | |
368 | item classes globally. | |
369 | """ | |
370 | if not isdir(path): | |
371 | return | |
372 | for root_dir, _dirs, files in walk(path): | |
373 | for filename in files: | |
374 | filepath = join(root_dir, filename) | |
375 | if not filename.endswith(".py") or \ | |
376 | not isfile(filepath) or \ | |
377 | filename.startswith("_"): | |
378 | continue | |
379 | for name, obj in self.get_all_attrs_from_file(filepath).items(): | |
380 | if obj == items.Item or name.startswith("_"): | |
381 | continue | |
382 | try: | |
383 | if issubclass(obj, items.Item) and not isabstract(obj): | |
384 | yield obj | |
385 | except TypeError: | |
386 | pass | |
387 | ||
388 | def _discover_root_path(self, path): | |
389 | while True: | |
390 | if self.is_repo(path): | |
391 | return path | |
392 | ||
393 | previous_component = dirname(path) | |
394 | if path == previous_component: | |
395 | raise NoSuchRepository | |
396 | ||
397 | path = previous_component | |
374 | 398 | |
375 | 399 | def get_group(self, group_name): |
376 | 400 | try: |
477 | 501 | Builds complete metadata for all nodes that appear in |
478 | 502 | self._node_metadata_partial.keys(). |
479 | 503 | """ |
504 | # TODO remove this mechanism in bw 4.0 | |
505 | self._in_new_metareactor = False | |
506 | ||
480 | 507 | # these processors have indicated that they do not need to be run again |
481 | 508 | blacklisted_metaprocs = set() |
509 | ||
510 | keyerrors = {} | |
511 | ||
512 | iterations = 0 | |
513 | reactors_that_returned_something_in_last_iteration = set() | |
482 | 514 | while not QUIT_EVENT.is_set(): |
515 | iterations += 1 | |
516 | if iterations > MAX_METADATA_ITERATIONS: | |
517 | proclist = "" | |
518 | for node, metaproc in sorted(reactors_that_returned_something_in_last_iteration): | |
519 | proclist += node + " " + metaproc + "\n" | |
520 | raise ValueError(_( | |
521 | "Infinite loop detected between these metadata reactors:\n" | |
522 | ) + proclist) | |
523 | ||
483 | 524 | # First, get the static metadata out of the way |
484 | 525 | for node_name in list(self._node_metadata_partial): |
485 | 526 | if QUIT_EVENT.is_set(): |
489 | 530 | # check if static metadata for this node is already done |
490 | 531 | if node_name in self._node_metadata_static_complete: |
491 | 532 | continue |
492 | else: | |
493 | self._node_metadata_static_complete.add(node_name) | |
494 | 533 | |
495 | 534 | with io.job(_("{node} building group metadata").format(node=bold(node.name))): |
496 | 535 | group_order = _flatten_group_hierarchy(node.groups) |
531 | 570 | ) |
532 | 571 | self._node_metadata_partial[node.name] = new_metadata |
533 | 572 | |
573 | # At this point, static metadata from groups and nodes has been merged. | |
574 | # Next, we look at defaults from metadata.py. | |
575 | ||
576 | for node_name in list(self._node_metadata_partial): | |
577 | # check if static metadata for this node is already done | |
578 | if node_name in self._node_metadata_static_complete: | |
579 | continue | |
580 | node = self.get_node(node_name) | 
581 | node_blame = self._node_metadata_blame[node_name] | |
582 | with io.job(_("{node} running metadata defaults").format(node=bold(node.name))): | |
583 | for defaults_name, defaults in node.metadata_defaults: | |
584 | if blame: | |
585 | blame_changed_paths( | |
586 | self._node_metadata_partial[node.name], | |
587 | defaults, | |
588 | node_blame, | |
589 | defaults_name, | |
590 | defaults=True, | |
591 | ) | |
592 | self._node_metadata_partial[node.name] = merge_dict( | |
593 | defaults, | |
594 | self._node_metadata_partial[node.name], | |
595 | ) | |
596 | ||
597 | # This will ensure node/group metadata and defaults are | |
598 | # skipped over in future iterations. | |
599 | self._node_metadata_static_complete.add(node_name) | |
600 | ||
601 | # TODO remove this in 4.0 | |
534 | 602 | # Now for the interesting part: We run all metadata processors |
535 | 603 | # until none of them return DONE anymore (indicating that they're |
536 | 604 | # just waiting for another metaproc to maybe insert new data, |
537 | 605 | # which isn't happening if none return DONE) |
538 | 606 | metaproc_returned_DONE = False |
607 | ||
608 | # Now for the interesting part: We run all metadata reactors | |
609 | # until none of them return changed metadata anymore. | |
610 | reactor_returned_changed_metadata = False | |
611 | reactors_that_returned_something_in_last_iteration = set() | |
612 | ||
539 | 613 | for node_name in list(self._node_metadata_partial): |
540 | 614 | if QUIT_EVENT.is_set(): |
541 | 615 | break |
542 | 616 | node = self.get_node(node_name) |
543 | 617 | node_blame = self._node_metadata_blame[node_name] |
618 | ||
619 | with io.job(_("{node} running metadata reactors").format(node=bold(node.name))): | |
620 | # TODO remove this mechanism in bw 4.0 | |
621 | self._in_new_metareactor = True | |
622 | ||
623 | for metadata_reactor_name, metadata_reactor in node.metadata_reactors: | |
624 | if (node_name, metadata_reactor_name) in blacklisted_metaprocs: | |
625 | continue | |
626 | io.debug(_( | |
627 | "running metadata reactor {metaproc} for node {node}" | |
628 | ).format( | |
629 | metaproc=metadata_reactor_name, | |
630 | node=node.name, | |
631 | )) | |
632 | if blame: | |
633 | # We need to deepcopy here because otherwise we have no chance of | |
634 | # figuring out what changed... | |
635 | input_metadata = deepcopy_metadata( | |
636 | self._node_metadata_partial[node.name] | |
637 | ) | |
638 | else: | |
639 | # ...but we can't always do it for performance reasons. | |
640 | input_metadata = self._node_metadata_partial[node.name] | |
641 | try: | |
642 | stack = Metastack() | |
643 | stack._set_layer("flattened", input_metadata) | |
644 | new_metadata = metadata_reactor(stack) | |
645 | except KeyError as exc: | |
646 | keyerrors[(node_name, metadata_reactor_name)] = exc | |
647 | except DoNotRunAgain: | |
648 | blacklisted_metaprocs.add((node_name, metadata_reactor_name)) | |
649 | except Exception as exc: | |
650 | io.stderr(_( | |
651 | "{x} Exception while executing metadata reactor " | |
652 | "{metaproc} for node {node}:" | |
653 | ).format( | |
654 | x=red("!!!"), | |
655 | metaproc=metadata_reactor_name, | |
656 | node=node.name, | |
657 | )) | |
658 | raise exc | |
659 | else: | |
660 | # reactor terminated normally, clear any previously stored exception | |
661 | try: | |
662 | del keyerrors[(node_name, metadata_reactor_name)] | |
663 | except KeyError: | |
664 | pass | |
665 | reactors_that_returned_something_in_last_iteration.add( | |
666 | (node_name, metadata_reactor_name), | |
667 | ) | |
668 | if not reactor_returned_changed_metadata: | |
669 | reactor_returned_changed_metadata = changes_metadata( | |
670 | self._node_metadata_partial[node.name], | |
671 | new_metadata, | |
672 | ) | |
673 | ||
674 | if blame: | |
675 | blame_changed_paths( | |
676 | self._node_metadata_partial[node.name], | |
677 | new_metadata, | |
678 | node_blame, | |
679 | "metadata_reactor:{}".format(metadata_reactor_name), | |
680 | ) | |
681 | self._node_metadata_partial[node.name] = merge_dict( | |
682 | self._node_metadata_partial[node.name], | |
683 | new_metadata, | |
684 | ) | |
685 | ||
686 | # TODO remove this mechanism in bw 4.0 | |
687 | self._in_new_metareactor = False | |
688 | ||
689 | ### TODO remove this block in 4.0 BEGIN | |
544 | 690 | with io.job(_("{node} running metadata processors").format(node=bold(node.name))): |
545 | for metadata_processor_name, metadata_processor in node.metadata_processors: | |
691 | for metadata_processor_name, metadata_processor in node._metadata_processors[2]: | |
546 | 692 | if (node_name, metadata_processor_name) in blacklisted_metaprocs: |
547 | 693 | continue |
548 | 694 | io.debug(_( |
618 | 764 | ) |
619 | 765 | |
620 | 766 | self._node_metadata_partial[node.name] = processed_dict |
621 | ||
622 | if not metaproc_returned_DONE: | |
767 | ### TODO remove this block in 4.0 END | |
768 | ||
769 | if not metaproc_returned_DONE and not reactor_returned_changed_metadata: | |
623 | 770 | if self._node_metadata_static_complete != set(self._node_metadata_partial.keys()): |
624 | # During metadata processor execution, partial metadata may | |
771 | # During metadata reactor execution, partial metadata may | |
625 | 772 | # have been requested for nodes we did not previously |
626 | # consider. Since partial metadata may defaults to | |
773 | # consider. Since partial metadata may default to | |
627 | 774 | # just an empty dict, we still need to make sure to |
628 | 775 | # generate static metadata for these new nodes, as |
629 | 776 | # that may trigger additional runs of metadata |
630 | # processors. | |
777 | # reactors. | |
631 | 778 | continue |
632 | 779 | else: |
633 | 780 | break |
781 | ||
782 | if keyerrors: | |
783 | reactors = "" | |
784 | for source, exc in keyerrors.items(): | |
785 | node_name, reactor = source | |
786 | reactors += "{} {} {}\n".format(node_name, reactor, exc) | |
787 | raise ValueError(_( | |
788 | "These metadata reactors raised a KeyError " | |
789 | "even after all other reactors were done:\n" | |
790 | ) + reactors) | |
634 | 791 | |
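To illustrate the convergence loop above, consider two hypothetical reactors where one depends on the other's output. On the first pass, `set_url` raises a KeyError, which is stored in `keyerrors`; once `set_port`'s result has been merged into the partial metadata, a later iteration succeeds and the stored error is cleared. Only a KeyError that persists after all reactors have stopped producing changes triggers the error summary above:

```python
@metadata_reactor
def set_port(metadata):
    return {"myapp": {"port": 8080}}

@metadata_reactor
def set_url(metadata):
    # raises KeyError until set_port's output has been merged
    port = metadata.get("myapp/port")
    return {"myapp": {"url": "http://node1.example.com:{}".format(port)}}
```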
635 | 792 | def metadata_hash(self): |
636 | 793 | repo_dict = {} |
687 | 844 | |
688 | 845 | # populate groups |
689 | 846 | self.group_dict = {} |
690 | for group in groups_from_file(self.groups_file, self.libs, self.path, self.vault): | |
691 | self.add_group(group) | |
847 | for group in self.nodes_or_groups_from_file(self.groups_file, 'groups'): | |
848 | self.add_group(Group(*group)) | |
692 | 849 | |
693 | 850 | # populate items |
694 | self.item_classes = list(items_from_path(items.__path__[0])) | |
695 | for item_class in items_from_path(self.items_dir): | |
851 | self.item_classes = list(self.items_from_dir(items.__path__[0])) | |
852 | for item_class in self.items_from_dir(self.items_dir): | |
696 | 853 | self.item_classes.append(item_class) |
697 | 854 | |
698 | 855 | # populate nodes |
699 | 856 | self.node_dict = {} |
700 | for node in nodes_from_file(self.nodes_file, self.libs, self.path, self.vault): | |
701 | self.add_node(node) | |
702 | ||
703 | @utils.cached_property | |
857 | for node in self.nodes_or_groups_from_file(self.nodes_file, 'nodes'): | |
858 | self.add_node(Node(*node)) | |
859 | ||
860 | @cached_property | |
704 | 861 | def revision(self): |
705 | 862 | return get_rev() |
706 | 863 | |
714 | 871 | self.libs_dir = join(self.path, DIRNAME_LIBS) |
715 | 872 | self.nodes_file = join(self.path, FILENAME_NODES) |
716 | 873 | |
717 | self.hooks = HooksProxy(self.hooks_dir) | |
874 | self.hooks = HooksProxy(self, self.hooks_dir) | |
718 | 875 | self.libs = LibsProxy(self.libs_dir) |
162 | 162 | return content |
163 | 163 | |
164 | 164 | |
165 | def get_all_attrs_from_file(path, base_env=None): | |
166 | """ | |
167 | Reads all 'attributes' (if it were a module) from a source file. | |
168 | """ | |
169 | if base_env is None: | |
170 | base_env = {} | |
171 | ||
172 | if not base_env and path in __GETATTR_RESULT_CACHE: | |
173 | # do not allow caching when passing in a base env because that | |
174 | # breaks repeated calls with different base envs for the same | |
175 | # file | |
176 | return __GETATTR_RESULT_CACHE[path] | |
177 | ||
178 | if path not in __GETATTR_CODE_CACHE: | |
179 | source = get_file_contents(path) | |
180 | __GETATTR_CODE_CACHE[path] = compile(source, path, mode='exec') | |
181 | ||
182 | code = __GETATTR_CODE_CACHE[path] | |
183 | env = base_env.copy() | |
184 | try: | |
185 | exec(code, env) | |
186 | except: | |
187 | from .ui import io | |
188 | io.stderr("Exception while executing {}".format(path)) | |
189 | raise | |
190 | ||
191 | if not base_env: | |
192 | __GETATTR_RESULT_CACHE[path] = env | |
193 | ||
194 | return env | |
195 | ||
196 | ||
197 | def getattr_from_file(path, attrname, base_env=None, default=__GETATTR_NODEFAULT): | |
198 | """ | |
199 | Reads a specific 'attribute' (if it were a module) from a source | |
200 | file. | |
201 | """ | |
202 | env = get_all_attrs_from_file(path, base_env=base_env) | |
203 | if default == __GETATTR_NODEFAULT: | |
204 | return env[attrname] | |
205 | else: | |
206 | return env.get(attrname, default) | |
207 | ||
208 | ||
209 | 165 | def hash_local_file(path): |
210 | 166 | """ |
211 | 167 | Returns the sha1 hash of a file on the local machine. 
15 | 15 | except NameError: |
16 | 16 | text_type = str |
17 | 17 | byte_type = bytes |
18 | ||
19 | try: | |
20 | from types import MappingProxyType | |
21 | except ImportError: | |
22 | # XXX Not available in Python 2, but that's EOL anyway and we're | |
23 | # going to drop support for it very soon. The following at least | |
24 | # creates a new object, so updates to it will not be persistent. | |
25 | MappingProxyType = dict | |
18 | 26 | |
19 | 27 | DIFF_MAX_INLINE_LENGTH = 36 |
20 | 28 | DIFF_MAX_LINE_LENGTH = 1024 |
130 | 138 | green(value2), |
131 | 139 | ) |
132 | 140 | output = bold(title) + "\n" |
133 | for line in unified_diff( | |
141 | for line in tuple(unified_diff( | |
134 | 142 | value1.splitlines(True), |
135 | 143 | value2.splitlines(True), |
136 | fromfile=_("<node>"), | |
137 | tofile=_("<bundlewrap>"), | |
138 | ): | |
144 | ))[2:]: | |
139 | 145 | suffix = "" |
140 | 146 | if len(line) > DIFF_MAX_LINE_LENGTH: |
141 | 147 | suffix += _(" (line truncated after {} characters)").format(DIFF_MAX_LINE_LENGTH) |
180 | 186 | return sorted(obj) |
181 | 187 | else: |
182 | 188 | return JSONEncoder.default(self, obj) |
189 | ||
190 | ||
191 | def freeze_object(obj): | |
192 | """ | |
193 | Returns a read-only version of the given object (if possible). | |
194 | """ | |
195 | if isinstance(obj, dict): | |
196 | keys = set(obj.keys()) | |
197 | for k in keys: | |
198 | obj[k] = freeze_object(obj[k]) | |
199 | return MappingProxyType(obj) | |
200 | elif isinstance(obj, (list, tuple)): | |
201 | result = [] | |
202 | for i in obj: | |
203 | result.append(freeze_object(i)) | |
204 | return tuple(result) | |
205 | elif isinstance(obj, set): | |
206 | result = set() | |
207 | for i in obj: | |
208 | result.add(freeze_object(i)) | |
209 | return frozenset(result) | 
210 | else: | |
211 | return obj | |
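A quick sketch of what freezing does (values are hypothetical): lists become tuples and dicts become read-only mapping proxies, so metadata reactors cannot accidentally mutate shared state:

```python
cfg = freeze_object({"ports": [80, 443], "ssl": {"enabled": True}})
cfg["ports"]                   # -> (80, 443)
cfg["ssl"]["enabled"] = False  # TypeError on Python 3 (read-only proxy)
```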
183 | 212 | |
184 | 213 | |
185 | 214 | def hash_statedict(sdict): |
0 | # -*- coding: utf-8 -*- | |
1 | from __future__ import unicode_literals | |
2 | ||
3 | from collections import OrderedDict | |
4 | from sys import version_info | |
5 | ||
6 | from ..metadata import validate_metadata, value_at_key_path | |
7 | from .dicts import freeze_object, map_dict_keys, merge_dict | |
8 | ||
9 | ||
10 | _NO_DEFAULT = "<NO METASTACK DEFAULT PROVIDED>" | |
11 | ||
12 | ||
13 | class Metastack: | |
14 | """ | |
15 | Holds a number of metadata layers. When laid on top of one another, | |
16 | these layers form complete metadata for a node. Each layer comes | |
17 | from one particular source of metadata: a bundle default, a group, | |
18 | the node itself, or a metadata reactor. Metadata reactors are unique | |
19 | in their ability to revise their own layer each time they are run. | |
20 | """ | |
21 | def __init__(self, base=None): | 
22 | # We rely heavily on insertion order in this dict. | 
23 | self._layers = OrderedDict() if version_info < (3, 7) else {} | 
24 | if base is not None: | 
25 | # used by Node.partial_metadata to wrap flattened metadata | 
26 | self._set_layer("flattened", base) | 
27 | ||
28 | def get(self, path, default=_NO_DEFAULT): | |
29 | """ | |
30 | Get the value at the given path, merging all layers together. | |
31 | Path may either be string like | |
32 | 'foo/bar' | |
33 | accessing the 'bar' key in the dict at the 'foo' key | |
34 | or a tuple like | |
35 | ('fo/o', 'bar') | |
36 | accessing the 'bar' key in the dict at the 'fo/o' key. | |
37 | """ | |
38 | if not isinstance(path, (tuple, list)): | |
39 | path = path.split('/') | |
40 | ||
41 | result = None | |
42 | undef = True | |
43 | ||
44 | for layer in self._layers.values(): | |
45 | try: | |
46 | value = value_at_key_path(layer, path) | |
47 | except KeyError: | |
48 | pass | |
49 | else: | |
50 | if undef: | |
51 | # First time we see anything. | |
52 | result = {'data': value} | |
53 | undef = False | |
54 | else: | |
55 | result = merge_dict(result, {'data': value}) | |
56 | ||
57 | if undef: | |
58 | if default != _NO_DEFAULT: | |
59 | return default | |
60 | else: | |
61 | raise KeyError('/'.join(path)) | |
62 | else: | |
63 | return freeze_object(result['data']) | |
64 | ||
65 | def _as_dict(self): | |
66 | final_dict = {} | |
67 | ||
68 | for layer in self._layers.values(): | |
69 | final_dict = merge_dict(final_dict, layer) | |
70 | ||
71 | return final_dict | |
72 | ||
73 | def _as_blame(self): | |
74 | keymap = map_dict_keys(self._as_dict()) | |
75 | blame = {} | |
76 | for path in keymap: | |
77 | for identifier, layer in self._layers.items(): | |
78 | try: | |
79 | value_at_key_path(layer, path) | |
80 | except KeyError: | |
81 | pass | |
82 | else: | |
83 | blame.setdefault(path, []).append(identifier) | |
84 | return blame | |
85 | ||
86 | def _set_layer(self, identifier, new_layer): | |
87 | # Marked with an underscore because only the internal metadata | |
88 | # reactor routing is supposed to call this method. | |
89 | validate_metadata(new_layer) | |
90 | changed = self._layers.get(identifier, {}) != new_layer | |
91 | self._layers[identifier] = new_layer | |
92 | return changed |
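A sketch of how layers interact (identifiers and values are hypothetical): later layers win for conflicting scalar values, while nested dicts are merged across all layers:

```python
stack = Metastack()
stack._set_layer("defaults", {"myapp": {"port": 80, "workers": 2}})
stack._set_layer("node", {"myapp": {"port": 8080}})
stack.get("myapp/port")           # -> 8080 (later layer wins)
stack.get("myapp/workers")        # -> 2 (only present in defaults)
stack.get("myapp/missing", None)  # -> None (explicit default)
```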
0 | 0 | # -*- coding: utf-8 -*- |
1 | 1 | from __future__ import unicode_literals |
2 | 2 | |
3 | from pipes import quote | |
3 | 4 | from subprocess import CalledProcessError, check_output, STDOUT |
5 | ||
6 | from .text import mark_for_translation as _ | |
4 | 7 | |
5 | 8 | |
6 | 9 | def get_git_branch(): |
64 | 67 | if rev is not None: |
65 | 68 | return rev |
66 | 69 | return None |
70 | ||
71 | ||
72 | def set_git_rev(rev, detach=False): | |
73 | if not get_git_clean(): | |
74 | raise RuntimeError(_("git working dir not clean, won't change rev")) | |
75 | if detach: | |
76 | command = "git checkout --detach {}".format(quote(rev)) | |
77 | else: | |
78 | command = "git checkout {}".format(quote(rev)) | |
79 | check_output( | |
80 | command, | |
81 | shell=True, | |
82 | stderr=STDOUT, | |
83 | ) |
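This is what powers `bw diff -b`: a sketch of the two checkout modes (rev names are hypothetical):

```python
set_git_rev("v3.9.0", detach=True)  # runs: git checkout --detach v3.9.0
set_git_rev("master")               # runs: git checkout master
```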
0 | bundlewrap (3.10.0-1) unstable; urgency=medium | |
1 | ||
2 | * New upstream release | |
3 | * Add python3-jinja2 and python3-mako to build-depends (for tests) | |
4 | ||
5 | -- Jonathan Carter <jcc@debian.org> Mon, 18 May 2020 11:39:02 +0200 | |
6 | ||
0 | 7 | bundlewrap (3.9.0-1) unstable; urgency=medium |
1 | 8 | |
2 | 9 | * New upstream release |
3 | 3 | Maintainer: Jonathan Carter <jcc@debian.org> |
4 | 4 | Uploaders: Python Applications Packaging Team <python-apps-team@lists.alioth.debian.org> |
5 | 5 | Build-Depends: debhelper-compat (= 13), |
6 | dh-python, | |
7 | python3-minimal, | |
8 | python3, | |
9 | python3-setuptools, | |
10 | python3-requests, | |
11 | python3-cryptography | |
6 | dh-python, | |
7 | python3, | |
8 | python3-cryptography, | |
9 | python3-jinja2, | |
10 | python3-mako, | |
11 | python3-minimal, | |
12 | python3-requests, | |
13 | python3-setuptools | |
12 | 14 | Standards-Version: 4.5.0 |
13 | 15 | Rules-Requires-Root: no |
14 | 16 | Homepage: http://bundlewrap.org/ |
17 | 19 | |
18 | 20 | Package: bundlewrap |
19 | 21 | Architecture: all |
20 | Depends: ${python3:Depends}, | |
21 | ${misc:Depends} | |
22 | Depends: ${misc:Depends}, ${python3:Depends} | |
22 | 23 | Description: Decentralized configuration management system with Python |
23 | 24 | By allowing for easy and low-overhead config management, BundleWrap fills |
24 | 25 | the gap between complex deployments using Chef or Puppet and old school |
30 | 30 | On Debian systems, the complete text of the GNU General Public |
31 | 31 | License, version 3, can be found in the file |
32 | 32 | `/usr/share/common-licenses/GPL-3'. |
33 |
28 | 28 | |
29 | 29 | ### bundlewrap.repo.Repository(path) |
30 | 30 | |
31 | The starting point of any interaction with BundleWrap. An object of this class represents the repository at the given path. | |
31 | The starting point of any interaction with BundleWrap. An object of this class represents the repository at the given path. `path` can be a subpath of your repository (e.g., `bundles/nginx/`) and will internally be resolved to the root path of said repository. | |
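A minimal sketch (repository path and node name are hypothetical):

    from bundlewrap.repo import Repository

    # any path inside the repository works and is resolved to its root
    repo = Repository("/home/user/myrepo/bundles/nginx")
    node = repo.get_node("node1")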
32 | 32 | |
33 | 33 | <br> |
34 | 34 |
45 | 45 | |
46 | 46 | <br> |
47 | 47 | |
48 | ## `BW_MAX_METADATA_ITERATIONS` | |
49 | ||
50 | Sets the limit of how often metadata reactors will be run before BundleWrap calls it a loop and terminates with an exception. Defaults to `100`. | |
51 | ||
52 | <br> | |
53 | ||
48 | 54 | ## `BW_REPO_PATH` |
49 | 55 | |
50 | 56 | Set this to a path pointing to your BundleWrap repository. If unset, the current working directory is used. Can be overridden with `bw --repository PATH`. Keep in mind that `bw` will also look for a repository in all parent directories until it finds one. |
32 | 32 | <tr><td>Cluster Role Binding</td><td>k8s_clusterrolebindings</td><td>rbac.authorization.k8s.io/v1</td></tr> |
33 | 33 | <tr><td>Config Map</td><td>k8s_configmaps</td><td>v1</td></tr> |
34 | 34 | <tr><td>Cron Job</td><td>k8s_cronjobs</td><td>batch/v1beta1</td></tr> |
35 | <tr><td>Custom Resource Definition</td><td>k8s_crd</td><td>apiextensions.k8s.io/v1beta1</td></tr> | |
36 | <tr><td>Daemon Set</td><td>k8s_daemonsets</td><td>v1</td></tr> | |
37 | <tr><td>Deployment</td><td>k8s_deployments</td><td>extensions/v1beta1</td></tr> | |
38 | <tr><td>Ingress</td><td>k8s_ingresses</td><td>extensions/v1beta1</td></tr> | |
35 | <tr><td>Custom Resource Definition</td><td>k8s_crd</td><td>apiextensions.k8s.io/v1</td></tr> | |
36 | <tr><td>Daemon Set</td><td>k8s_daemonsets</td><td>apps/v1</td></tr> | |
37 | <tr><td>Deployment</td><td>k8s_deployments</td><td>apps/v1</td></tr> | |
38 | <tr><td>Ingress</td><td>k8s_ingresses</td><td>networking.k8s.io/v1beta1</td></tr> | |
39 | 39 | <tr><td>Namespace</td><td>k8s_namespaces</td><td>v1</td></tr> |
40 | 40 | <tr><td>Network Policy</td><td>k8s_networkpolicies</td><td>networking.k8s.io/v1</td></tr> |
41 | 41 | <tr><td>Persistent Volume Claim</td><td>k8s_pvc</td><td>v1</td></tr> |
0 | 0 | # metadata.py |
1 | 1 | |
2 | Alongside `items.py` you may create another file called `metadata.py`. It can be used to do advanced processing of the metadata you configured for your nodes and groups. Specifically, it allows each bundle to modify metadata before `items.py` is evaluated. | |
2 | Alongside `items.py` you may create another file called `metadata.py`. It can be used to define defaults and do advanced processing of the metadata you configured for your nodes and groups. Specifically, it allows each bundle to modify metadata before `items.py` is evaluated. | |
3 | 3 | |
4 | This is accomplished through metadata processors. Metadata processors are functions that take the metadata dictionary generated so far as their single argument. You must then return a dictionary with any modifications you need to make plus at least one of several options: | |
5 | 4 | |
6 | @metadata_processor | |
7 | def my_metadata_processor(metadata): | |
8 | metadata["foo"] = node.name | |
9 | return metadata, DONE | |
5 | ## Defaults | |
10 | 6 | |
11 | You must always return the modified metadata dictionary as the first element. After that, there are a few options you can return. Every metadata processor from every bundle is called *repeatedly* with the latest metadata dictionary until it indicates that it is done by returning the `DONE` option or until *all* remaining metadata processors return `RUN_ME_AGAIN`. You must always return one of `DONE` or `RUN_ME_AGAIN`. Use the latter if your metadata processor depends on metadata that is generated by another metadata processor (which may be called after yours). Here is another example: | |
7 | Let's look at defaults first: | |
12 | 8 | |
13 | @metadata_processor | |
14 | def first_metadata_processor(metadata): | |
15 | metadata["foo"] = node.name | |
16 | return metadata, DONE | |
9 | defaults = { | |
10 | "foo": 5, | |
11 | } | |
17 | 12 | |
18 | @metadata_processor | |
19 | def second_metadata_processor(metadata): | |
20 | if "foo" in metadata: | |
21 | metadata["bar"] = metadata["foo"] | |
22 | return metadata, DONE | |
13 | This ensures that the `"foo"` key in metadata is always set, while the default value of 5 can still be overridden by node or group metadata or by metadata reactors. | |
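For example, a node defined like this in `nodes.py` (names are hypothetical) would end up with `metadata["foo"] == 10`, overriding the bundle default of 5:

    nodes = {
        "node1": {
            'bundles': ["mybundle"],  # the bundle containing the defaults above
            'metadata': {"foo": 10},  # node metadata beats bundle defaults
        },
    }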
14 | ||
15 | ||
16 | ## Reactors | |
17 | ||
18 | Let's look at reactors next. Metadata reactors are functions that take the metadata generated so far as their single argument. You must then return a new dictionary with any metadata you wish to add: | |
19 | ||
20 | @metadata_reactor | |
21 | def bar(metadata): | |
22 | return { | |
23 | "bar": metadata.get("foo"), | |
24 | } | |
25 | ||
26 | While this looks simple enough, there are some important caveats. First and foremost: metadata reactors must expect to be called many times. This gives you an opportunity to react to metadata provided by other reactors. All reactors will be run again and again until none of them return any changed metadata. Anything you return from a reactor will overwrite existing metadata. | |
27 | ||
28 | The parameter `metadata` is not a dictionary but an instance of `Metastack`. You cannot modify the contents of this object. It provides `.get("some/path", "default")` to query a key path (equivalent to `metadata["some"]["path"]` in a dict) and accepts an optional default value. It will raise a `KeyError` when called for a non-existent path without a default. | |
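For example (the key names are hypothetical):

    @metadata_reactor
    def ssh_port_doubled(metadata):
        # falls back to 22 instead of raising KeyError in case no other
        # source has provided 'ssh/port' yet
        return {
            "ssh_port_doubled": metadata.get("ssh/port", 22) * 2,
        }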
29 | ||
30 | While node and group metadata and metadata defaults will always be available to reactors, you should not rely on that for the simple reason that you may one day move some metadata from those static sources into another reactor, which may be run later. Thus you may need to wait for some iterations before that data shows up in `metadata`. Note that BundleWrap will catch any `KeyError`s raised in metadata reactors and only report them if they don't go away after all other relevant reactors are done. | |
31 | ||
32 | To avoid deadlocks when accessing *other* nodes' metadata from within a metadata reactor, use `other_node.partial_metadata` instead of `other_node.metadata`. For the same reason, always use the `metadata` parameter to access the current node's metadata, never `node.metadata`. | |
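A sketch of what that might look like (key names are hypothetical; since `partial_metadata` may still be incomplete, provide defaults where appropriate):

    @metadata_reactor
    def collect_ips(metadata):
        return {
            "known_ips": {
                other.name: other.partial_metadata.get("ip", "unknown")
                for other in repo.nodes
            },
        }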
33 | ||
34 | <div class="alert alert-danger">Be careful when returning <a href="../../guide/api#bundlewraputilsfault">Fault</a> objects from reactors. <strong>All</strong> Fault objects (including those returned from <code>repo.vault.*</code>) will be considered <strong>equal</strong> to one another when BundleWrap inspects the returned metadata to check if anything changed compared to what was returned in an earlier iteration.</div> | |
35 | ||
36 | ||
37 | ### DoNotRunAgain | |
38 | ||
39 | On the other hand, if your reactor only needs to provide new metadata in *some* cases, you can tell BundleWrap not to run it again, saving some time: | |
40 | ||
41 | @metadata_reactor | |
42 | def foo(metadata): | |
43 | if node.has_bundle("bar"): | |
44 | return {"bar": metadata.get("foo") + 1} | |
23 | 45 | else: |
24 | return metadata, RUN_ME_AGAIN | |
46 | raise DoNotRunAgain | |
25 | 47 | |
26 | In this example, `"bar"` can only be set once `"foo"` is available and thus the `second_metadata_processor` has to wait and request to `RUN_ME_AGAIN` until `first_metadata_processor` ran. This is necessary because the running order of metadata processors is undefined. | |
27 | 48 | |
28 | <div class="alert alert-danger">To avoid deadlocks when accessing <strong>other</strong> nodes' metadata from within a metadata processor, use <code>other_node.partial_metadata</code> instead of <code>other_node.metadata</code>. For the same reason, always use the <code>metadata</code> parameter to access the current node's metadata, never <code>node.metadata</code>.</div> | |
29 | ||
30 | <br> | |
31 | ||
32 | ## Available options | |
33 | ||
34 | <table> | |
35 | <tr><th>Option</th><th>Description</th></tr> | |
36 | <tr><td><code>DONE</code></td><td>Indicates that this metadata processor has done all it can and need not be called again. Return this whenever possible.</td></tr> | |
37 | <tr><td><code>RUN_ME_AGAIN</code></td><td>Indicates that this metadata processor is still waiting for metadata from another metadata processor to become available.</td></tr> | |
38 | <tr><td><code>DEFAULTS</code></td><td>The returned metadata dictionary will only be used to provide default values. The actual metadata generated so far will be recursively merged into the returned dict. When using this flag, you must not return the original metadata dictionary but construct a new one as in the example below.</td></tr> | |
39 | <tr><td><code>OVERWRITE</code></td><td>The returned metadata dictionary will be recursively merged into the actual metadata generated so far (inverse of <code>DEFAULTS</code>). When using this flag, you must not return the original metadata dictionary but construct a new one as in the `DEFAULTS` example below.</td></tr> | |
40 | </table> | |
41 | ||
42 | Here is an example of how to use `DEFAULTS`: | |
43 | ||
44 | @metadata_processor | |
45 | def my_metadata_processor(metadata): | |
46 | return { | |
47 | "foo": { | |
48 | "bar": 47, | |
49 | }, | |
50 | }, DONE, DEFAULTS | |
51 | ||
52 | This means `node.metadata["foo"]["bar"]` will be 47 by default, but can also be overridden in static metadata at the node/group level. | |
53 | ||
54 | <br> | |
55 | ||
56 | <div class="alert alert-info">For your convenience, you can access <code>repo</code>, <code>node</code>, <code>metadata_processor</code> and all the options in <code>metadata.py</code> without importing them.</div> | |
49 | <div class="alert alert-info">For your convenience, you can access <code>repo</code>, <code>node</code>, <code>metadata_reactor</code>, and <code>DoNotRunAgain</code> in <code>metadata.py</code> without importing them.</div>
16 | 16 | |
17 | 17 | setup( |
18 | 18 | name="bundlewrap", |
19 | version="3.9.0", | |
19 | version="3.10.0", | |
20 | 20 | description="Config management with Python", |
21 | 21 | long_description=( |
22 | 22 | "By allowing for easy and low-overhead config management, BundleWrap fills the gap between complex deployments using Chef or Puppet and old school system administration over SSH.\n" |
0 | from bundlewrap.utils.testing import make_repo, run | |
1 | ||
2 | ||
3 | def test_metadata(tmpdir): | |
4 | make_repo( | |
5 | tmpdir, | |
6 | nodes={ | |
7 | "node1": {'metadata': {"key": "value1"}}, | |
8 | "node2": {'metadata': {"key": "value2"}}, | |
9 | }, | |
10 | ) | |
11 | stdout, stderr, rcode = run("bw diff -m node1,node2", path=str(tmpdir)) | |
12 | assert b"value1" in stdout | |
13 | assert b"value2" in stdout | |
14 | assert stderr == b"" | |
15 | assert rcode == 0 | |
16 | ||
17 | ||
18 | def test_file_items(tmpdir): | |
19 | make_repo( | |
20 | tmpdir, | |
21 | nodes={ | |
22 | "node1": {'bundles': ["bundle1"]}, | |
23 | "node2": {'bundles': ["bundle2"]}, | |
24 | }, | |
25 | bundles={ | |
26 | "bundle1": { | |
27 | "files": { | |
28 | "/tmp/test": { | |
29 | 'content': "one", | |
30 | }, | |
31 | }, | |
32 | }, | |
33 | "bundle2": { | |
34 | "files": { | |
35 | "/tmp/test": { | |
36 | 'content': "two", | |
37 | }, | |
38 | }, | |
39 | }, | |
40 | }, | |
41 | ) | |
42 | stdout, stderr, rcode = run("bw diff -i file:/tmp/test node1,node2", path=str(tmpdir)) | |
43 | assert b"one" in stdout | |
44 | assert b"two" in stdout | |
45 | assert stderr == b"" | |
46 | assert rcode == 0 | |
47 | ||
48 | ||
49 | def test_whole_node(tmpdir): | |
50 | make_repo( | |
51 | tmpdir, | |
52 | nodes={ | |
53 | "node1": {'bundles': ["bundle1", "bundle3"]}, | |
54 | "node2": {'bundles': ["bundle2", "bundle3"]}, | |
55 | }, | |
56 | bundles={ | |
57 | "bundle1": { | |
58 | "files": { | |
59 | "/tmp/foo": { | |
60 | 'content': "one", | |
61 | }, | |
62 | }, | |
63 | }, | |
64 | "bundle2": { | |
65 | "files": { | |
66 | "/tmp/foo": { | |
67 | 'content': "two", | |
68 | }, | |
69 | }, | |
70 | }, | |
71 | "bundle3": { | |
72 | "files": { | |
73 | "/tmp/bar": { | |
74 | 'content': "common", | |
75 | }, | |
76 | }, | |
77 | }, | |
78 | }, | |
79 | ) | |
80 | stdout, stderr, rcode = run("bw diff node1,node2", path=str(tmpdir)) | |
81 | assert b"/tmp/foo" in stdout | |
82 | assert b"/tmp/bar" not in stdout | |
83 | assert stderr == b"" | |
84 | assert rcode == 0 |
142 | 142 | }, |
143 | 143 | } |
144 | 144 | """) |
145 | print(run("bw debug -c 'print(repo.vault.password_for(\"testing\"))'", path=str(tmpdir))) | |
146 | 145 | stdout1, stderr, rcode = run("bw hash -m node1", path=str(tmpdir)) |
147 | assert stdout1 == b"d0c998fd17a68322a03345954bb0a75301d3a127\n" | |
146 | assert stdout1 == b"b60c0959c9c1ff38940d7b6d4121b2162be34fc9\n" | |
148 | 147 | assert stderr == b"" |
149 | 148 | assert rcode == 0 |
150 | 149 | stdout2, stderr, rcode = run("bw hash -m node2", path=str(tmpdir)) |
192 | 191 | |
193 | 192 | stdout1, stderr, rcode = run("bw hash -m node1", path=str(tmpdir)) |
194 | 193 | assert rcode == 0 |
195 | assert stdout1 == b"bc403a093ca3399cd3efa7a64ec420e0afef5e70\n" | |
194 | assert stdout1 == b"d96dc8da8948d0da7924954a657ac960ce7194e9\n" | |
196 | 195 | |
197 | 196 | stdout2, stderr, rcode = run("bw hash -m node2", path=str(tmpdir)) |
198 | 197 | assert rcode == 0 |
213 | 212 | |
214 | 213 | stdout, stderr, rcode = run("bw hash -m", path=str(tmpdir)) |
215 | 214 | assert rcode == 0 |
216 | assert stdout == b"c0cc160ab1b6e71155cd4f65139bc7f66304d7f3\n" | |
215 | assert stdout == b"8c4a30eaa521c966c678d6e51070f6b3a34b7322\n" | |
217 | 216 | |
218 | 217 | |
219 | 218 | def test_metadata_repo_dict(tmpdir): |
230 | 229 | |
231 | 230 | stdout, stderr, rcode = run("bw hash -md", path=str(tmpdir)) |
232 | 231 | assert rcode == 0 |
233 | assert stdout == b"node1\t013b3a8199695eb45c603ea4e0a910148d80e7ed\n" | |
232 | assert stdout == b"node1\t223fb72805ecab20f92b463af65896303f997f1c\n" | |
234 | 233 | |
235 | 234 | |
236 | 235 | def test_groups_repo(tmpdir): |
474 | 474 | ) |
475 | 475 | stdout, stderr, rcode = run("bw metadata --table node1", path=str(tmpdir)) |
476 | 476 | assert rcode == 1 |
477 | ||
478 | ||
479 | def test_metadatapy_proc_merge_order(tmpdir): | |
480 | make_repo( | |
481 | tmpdir, | |
482 | bundles={"test": {}}, | |
483 | nodes={ | |
484 | "node1": { | |
485 | 'bundles': ["test"], | |
486 | 'metadata': { | |
487 | "one": "node", | |
488 | "two": "node", | |
489 | "five": "node", | |
490 | }, | |
491 | }, | |
492 | }, | |
493 | ) | |
494 | with open(join(str(tmpdir), "bundles", "test", "metadata.py"), 'w') as f: | |
495 | f.write( | |
496 | """defaults = { | |
497 | "two": "defaults", | |
498 | "three": "defaults", | |
499 | "four": "defaults", | |
500 | } | |
501 | ||
502 | @metadata_reactor | |
503 | def foo_reactor(metadata): | |
504 | return { | |
505 | "four": "reactor", | |
506 | "five": "reactor", | |
507 | } | |
508 | """) | |
509 | stdout, stderr, rcode = run("bw metadata node1", path=str(tmpdir)) | |
510 | assert loads(stdout.decode()) == { | |
511 | "one": "node", | |
512 | "two": "node", | |
513 | "three": "defaults", | |
514 | "four": "reactor", | |
515 | "five": "reactor", | |
516 | } | |
517 | assert stderr == b"" | |
518 | assert rcode == 0 | |
519 | ||
520 | ||
521 | def test_metadatapy_do_not_run_me_again(tmpdir): | |
522 | make_repo( | |
523 | tmpdir, | |
524 | bundles={"test": {}}, | |
525 | nodes={ | |
526 | "node1": { | |
527 | 'bundles': ["test"], | |
528 | }, | |
529 | }, | |
530 | ) | |
531 | with open(join(str(tmpdir), "bundles", "test", "metadata.py"), 'w') as f: | |
532 | f.write( | |
533 | """called = False | |
534 | @metadata_reactor | |
535 | def foo_reactor(metadata): | |
536 | global called | |
537 | if not called: | |
538 | called = True | |
539 | raise DoNotRunAgain | |
540 | else: | |
541 | raise AssertionError | |
542 | @metadata_reactor | |
543 | def bar_reactor(metadata): | |
544 | return {'called': called} | |
545 | """) | |
546 | stdout, stderr, rcode = run("bw metadata node1", path=str(tmpdir)) | |
547 | assert loads(stdout.decode()) == { | |
548 | "called": True, | |
549 | } | |
550 | assert stderr == b"" | |
551 | assert rcode == 0 | |
552 | ||
553 | ||
554 | def test_metadatapy_reactor_keyerror_from_metastack(tmpdir): | |
555 | make_repo( | |
556 | tmpdir, | |
557 | bundles={"test": {}}, | |
558 | nodes={ | |
559 | "node1": { | |
560 | 'bundles': ["test"], | |
561 | }, | |
562 | }, | |
563 | ) | |
564 | with open(join(str(tmpdir), "bundles", "test", "metadata.py"), 'w') as f: | |
565 | f.write( | |
566 | """ | |
567 | @metadata_reactor | |
568 | def foo_reactor(metadata): | |
569 | return {'foo': metadata.get('bar')} | |
570 | """) | |
571 | stdout, stderr, rcode = run("bw metadata node1", path=str(tmpdir)) | |
572 | assert rcode == 1 | |
573 | assert b"node1" in stderr | |
574 | assert b"foo_reactor" in stderr | |
575 | assert b"'bar'" in stderr | |
576 | ||
577 | ||
578 | def test_metadatapy_reactor_keyerror_from_dict(tmpdir): | |
579 | make_repo( | |
580 | tmpdir, | |
581 | bundles={"test": {}}, | |
582 | nodes={ | |
583 | "node1": { | |
584 | 'bundles': ["test"], | |
585 | }, | |
586 | }, | |
587 | ) | |
588 | with open(join(str(tmpdir), "bundles", "test", "metadata.py"), 'w') as f: | |
589 | f.write( | |
590 | """ | |
591 | @metadata_reactor | |
592 | def foo_reactor(metadata): | |
593 | x = {}['baz'] | |
594 | return {'x': x} | |
595 | """) | |
596 | stdout, stderr, rcode = run("bw metadata node1", path=str(tmpdir)) | |
597 | assert rcode == 1 | |
598 | assert b"node1" in stderr | |
599 | assert b"foo_reactor" in stderr | |
600 | assert b"'baz'" in stderr | |
601 | ||
602 | ||
603 | def test_metadatapy_reactor_keyerror_fixed(tmpdir): | |
604 | make_repo( | |
605 | tmpdir, | |
606 | bundles={"test": {}}, | |
607 | nodes={ | |
608 | "node1": { | |
609 | 'bundles': ["test"], | |
610 | }, | |
611 | }, | |
612 | ) | |
613 | with open(join(str(tmpdir), "bundles", "test", "metadata.py"), 'w') as f: | |
614 | f.write( | |
615 | """ | |
616 | @metadata_reactor | |
617 | def foo(metadata): | |
618 | bar_ran = metadata.get('bar_ran', False) | |
619 | if not bar_ran: | |
620 | return {'foo_ran': True} | |
621 | else: | |
622 | return {'foo': metadata.get('bar'), 'foo_ran': True} | |
623 | @metadata_reactor | |
624 | def bar(metadata): | |
625 | foo_ran = metadata.get('foo_ran', False) | |
626 | if not foo_ran: | |
627 | return {'bar_ran': False} | |
628 | else: | |
629 | return {'bar': 47, 'bar_ran': True} | |
630 | """) | |
631 | stdout, stderr, rcode = run("bw metadata node1", path=str(tmpdir)) | |
632 | assert loads(stdout.decode()) == { | |
633 | "bar": 47, | |
634 | "bar_ran": True, | |
635 | "foo": 47, | |
636 | "foo_ran": True, | |
637 | } | |
638 | assert stderr == b"" | |
639 | assert rcode == 0 | |
640 | ||
641 | ||
642 | def test_metadatapy_infinite_loop(tmpdir): | |
643 | make_repo( | |
644 | tmpdir, | |
645 | bundles={"test": {}}, | |
646 | nodes={ | |
647 | "node1": { | |
648 | 'bundles': ["test"], | |
649 | }, | |
650 | }, | |
651 | ) | |
652 | with open(join(str(tmpdir), "bundles", "test", "metadata.py"), 'w') as f: | |
653 | f.write( | |
654 | """ | |
655 | @metadata_reactor | |
656 | def plusone(metadata): | |
657 | return {'foo': metadata.get('foo', 0) + 1 } | |
658 | ||
659 | @metadata_reactor | |
660 | def plustwo(metadata): | |
661 | return {'foo': metadata.get('foo', 0) + 2 } | |
662 | """) | |
663 | stdout, stderr, rcode = run("bw metadata node1", path=str(tmpdir)) | |
664 | assert rcode == 1 |
47 | 47 | assert rcode == 0 |
48 | 48 | |
49 | 49 | |
50 | def test_bundles_via_group(tmpdir): | |
51 | make_repo( | |
52 | tmpdir, | |
53 | bundles={ | |
54 | "bundle1": {}, | |
55 | "bundle2": {}, | |
56 | "bundle3": {}, | |
57 | }, | |
58 | groups={ | |
59 | "group1": { | |
60 | 'bundles': {"bundle2"}, | |
61 | 'subgroups': {"group2"}, | |
62 | }, | |
63 | "group2": { | |
64 | 'bundles': {"bundle3"}, | |
65 | } | |
66 | }, | |
67 | nodes={ | |
68 | "node1": { | |
69 | 'bundles': {"bundle1"}, | |
70 | 'groups': {"group2"}, | |
71 | }, | |
72 | }, | |
73 | ) | |
74 | stdout, stderr, rcode = run("BW_TABLE_STYLE=grep bw nodes node1 bundles | cut -f 2", path=str(tmpdir)) | |
75 | assert stdout.decode().strip().split("\n") == ["bundle1", "bundle2", "bundle3"] | |
76 | assert stderr == b"" | |
77 | assert rcode == 0 | |
78 | ||
79 | ||
50 | 80 | def test_template_node(tmpdir): |
51 | 81 | make_repo( |
52 | 82 | tmpdir, |
32 | 32 | │ 1 │ nodes │ |
33 | 33 | │ 0 │ groups │ |
34 | 34 | │ 1 │ bundles │ |
35 | │ 0 │ metadata defaults │ | |
35 | 36 | │ 0 │ metadata processors │ |
37 | │ 0 │ metadata reactors │ | |
36 | 38 | │ 2 │ items │ |
37 | 39 | ├───────┼─────────────────────┤ |
38 | 40 | │ 2 │ file │ |
0 | 0 | # -*- coding: utf-8 -*- |
1 | 1 | from __future__ import unicode_literals |
2 | 2 | |
3 | from bundlewrap.utils import Fault | |
3 | 4 | from bundlewrap.utils.dicts import merge_dict |
4 | from bundlewrap.metadata import atomic, blame_changed_paths | |
5 | from bundlewrap.metadata import atomic, blame_changed_paths, changes_metadata | |
5 | 6 | |
6 | 7 | |
7 | 8 | def test_atomic_no_merge_base(): |
96 | 97 | }, |
97 | 98 | 'key4': 24, |
98 | 99 | } |
100 | ||
101 | ||
102 | def test_changes_same(): | |
103 | assert not changes_metadata( | |
104 | { | |
105 | 'foo': 1, | |
106 | 'bar': 2, | |
107 | 'baz': [3], | |
108 | }, | |
109 | { | |
110 | 'baz': [3], | |
111 | }, | |
112 | ) | |
113 | ||
114 | ||
115 | def test_changes_list(): | |
116 | assert changes_metadata( | |
117 | { | |
118 | 'foo': 1, | |
119 | 'bar': 2, | |
120 | 'baz': [3], | |
121 | }, | |
122 | { | |
123 | 'baz': [4], | |
124 | }, | |
125 | ) | |
126 | ||
127 | ||
128 | def test_changes_nested_same(): | |
129 | assert not changes_metadata( | |
130 | { | |
131 | 'foo': 1, | |
132 | 'bar': 2, | |
133 | 'baz': { | |
134 | 'frob': 4, | |
135 | }, | |
136 | }, | |
137 | { | |
138 | 'baz': { | |
139 | 'frob': 4, | |
140 | }, | |
141 | }, | |
142 | ) | |
143 | ||
144 | ||
145 | def test_changes_nested(): | |
146 | assert changes_metadata( | |
147 | { | |
148 | 'foo': 1, | |
149 | 'bar': 2, | |
150 | 'baz': { | |
151 | 'frob': 4, | |
152 | }, | |
153 | }, | |
154 | { | |
155 | 'baz': { | |
156 | 'frob': 5, | |
157 | }, | |
158 | }, | |
159 | ) | |
160 | ||
161 | ||
162 | def test_changes_fault(): | |
163 | def callback1(): | |
164 | return 1 | |
165 | ||
166 | def callback2(): | |
167 | return 2 | |
168 | ||
169 | assert not changes_metadata( | |
170 | { | |
171 | 'foo': Fault(callback1), | |
172 | }, | |
173 | { | |
174 | 'foo': Fault(callback2), | |
175 | }, | |
176 | ) |
0 | # -*- coding: utf-8 -*- | |
1 | from __future__ import unicode_literals | |
2 | ||
3 | ||
4 | from bundlewrap.metadata import atomic | |
5 | from bundlewrap.utils.metastack import Metastack | |
6 | from pytest import raises | |
7 | ||
8 | ||
9 | def test_has_no_top(): | |
10 | stack = Metastack() | |
11 | with raises(KeyError): | |
12 | stack.get('something') | |
13 | ||
14 | ||
15 | def test_has_no_subpath(): | |
16 | stack = Metastack() | |
17 | stack._set_layer('base', {'something': {'in': {}}}) | |
18 | with raises(KeyError): | |
19 | stack.get('something/in/a/path') | |
20 | ||
21 | ||
22 | def test_get_top(): | |
23 | stack = Metastack() | |
24 | stack._set_layer('base', {'something': 123}) | |
25 | assert stack.get('something') == 123 | |
26 | ||
27 | ||
28 | def test_get_subpath(): | |
29 | stack = Metastack() | |
30 | stack._set_layer('base', {'something': {'in': {'a': 'subpath'}}}) | |
31 | assert stack.get('something/in/a', None) == 'subpath' | |
32 | ||
33 | ||
34 | def test_get_default_with_empty(): | |
35 | stack = Metastack() | |
36 | assert stack.get('something', 123) == 123 | |
37 | ||
38 | ||
39 | def test_get_default_with_base(): | |
40 | stack = Metastack() | |
41 | stack._set_layer('', {'foo': 'bar'}) | |
42 | assert stack.get('something', 123) == 123 | |
43 | ||
44 | ||
45 | def test_get_default_with_overlay(): | |
46 | stack = Metastack() | |
47 | stack._set_layer('base', {'foo': 'bar'}) | |
48 | stack._set_layer('overlay', {'baz': 'boing'}) | |
49 | assert stack.get('something', 123) == 123 | |
50 | ||
51 | ||
52 | def test_overlay_value(): | |
53 | stack = Metastack() | |
54 | stack._set_layer('base', {'something': {'a_list': [1, 2], 'a_value': 5}}) | |
55 | stack._set_layer('overlay', {'something': {'a_value': 10}}) | |
56 | assert stack.get('something/a_value', None) == 10 | |
57 | ||
58 | ||
59 | def test_merge_lists(): | |
60 | stack = Metastack() | |
61 | stack._set_layer('base', {'something': {'a_list': [1, 2], 'a_value': 5}}) | |
62 | stack._set_layer('overlay', {'something': {'a_list': [3]}}) | |
63 | assert sorted(stack.get('something/a_list', None)) == sorted([1, 2, 3]) | |
64 | ||
65 | ||
66 | def test_merge_sets(): | |
67 | stack = Metastack() | |
68 | stack._set_layer('base', {'something': {'a_set': {1, 2}, 'a_value': 5}}) | |
69 | stack._set_layer('overlay', {'something': {'a_set': {3}}}) | |
70 | assert stack.get('something/a_set', None) == {1, 2, 3} | |
71 | ||
72 | ||
73 | def test_overlay_value_multi_layers(): | |
74 | stack = Metastack() | |
75 | stack._set_layer('base', {'something': {'a_list': [1, 2], 'a_value': 5}}) | |
76 | stack._set_layer('overlay', {'something': {'a_value': 10}}) | |
77 | stack._set_layer('unrelated', {'something': {'another_value': 10}}) | |
78 | assert stack.get('something/a_value', None) == 10 | |
79 | ||
80 | ||
81 | def test_merge_lists_multi_layers(): | |
82 | stack = Metastack() | |
83 | stack._set_layer('base', {'something': {'a_list': [1, 2], 'a_value': 5}}) | |
84 | stack._set_layer('overlay', {'something': {'a_list': [3]}}) | |
85 | stack._set_layer('unrelated', {'something': {'another_value': 10}}) | |
86 | ||
87 | # Objects in Metastacks are frozen. This converts lists to tuples. | |
88 | # Unlike set and frozenset, list and tuple don't naturally support | |
89 | # "is equal". | |
90 | # | |
91 | # This is acceptable, because in metadata reactors people are expected | |
92 | # to check whether something is in a list or to access some item | |
93 | # of a list. All of that works. Operations like .append() do not work, | |
94 | # and they are not supposed to. | |
95 | assert len(stack.get('something/a_list', None)) == 3 | |
96 | assert stack.get('something/a_list', None)[0] == 1 | |
97 | assert stack.get('something/a_list', None)[1] == 2 | |
98 | assert stack.get('something/a_list', None)[2] == 3 | |
99 | ||
100 | ||
101 | def test_merge_sets_multi_layers(): | |
102 | stack = Metastack() | |
103 | stack._set_layer('base', {'something': {'a_set': {1, 2}, 'a_value': 5}}) | |
104 | stack._set_layer('overlay', {'something': {'a_set': {3}}}) | |
105 | stack._set_layer('unrelated', {'something': {'another_value': 10}}) | |
106 | assert stack.get('something/a_set', None) == {1, 2, 3} | |
107 | ||
108 | ||
109 | def test_merge_lists_with_empty_layer(): | |
110 | stack = Metastack() | |
111 | stack._set_layer('base', {'something': {'a_list': [1, 2], 'a_value': 5}}) | |
112 | stack._set_layer('overlay1', {'something': {'a_list': []}}) | |
113 | stack._set_layer('overlay2', {'something': {'a_list': [3]}}) | |
114 | assert sorted(stack.get('something/a_list', None)) == sorted([1, 2, 3]) | |
115 | ||
116 | ||
117 | def test_merge_sets_with_empty_layer(): | |
118 | stack = Metastack() | |
119 | stack._set_layer('base', {'something': {'a_set': {1, 2}, 'a_value': 5}}) | |
120 | stack._set_layer('overlay1', {'something': {'a_set': set()}}) | |
121 | stack._set_layer('overlay2', {'something': {'a_set': {3}}}) | |
122 | assert stack.get('something/a_set', None) == {1, 2, 3} | |
123 | ||
124 | ||
125 | def test_merge_lists_with_multiple_used_layers(): | |
126 | stack = Metastack() | |
127 | stack._set_layer('base', {'something': {'a_list': [1, 2], 'a_value': 5}}) | |
128 | stack._set_layer('overlay1', {'something': {'a_list': [3]}}) | |
129 | stack._set_layer('overlay2', {'something': {'a_list': [4]}}) | |
130 | stack._set_layer('overlay3', {'something': {'a_list': [6, 5]}}) | |
131 | assert sorted(stack.get('something/a_list', None)) == sorted([1, 2, 3, 4, 5, 6]) | |
132 | ||
133 | ||
134 | def test_merge_sets_with_multiple_used_layers(): | |
135 | stack = Metastack() | |
136 | stack._set_layer('base', {'something': {'a_set': {1, 2}, 'a_value': 5}}) | |
137 | stack._set_layer('overlay1', {'something': {'a_set': {3}}}) | |
138 | stack._set_layer('overlay2', {'something': {'a_set': {4}}}) | |
139 | stack._set_layer('overlay3', {'something': {'a_set': {6, 5}}}) | |
140 | assert stack.get('something/a_set', None) == {1, 2, 3, 4, 5, 6} | |
141 | ||
142 | ||
143 | def test_merge_dicts(): | |
144 | stack = Metastack() | |
145 | stack._set_layer('overlay1', {'something': {'a_value': 3}}) | |
146 | stack._set_layer('overlay2', {'something': {'another_value': 5}}) | |
147 | stack._set_layer('overlay3', {'something': {'this': {'and': 'that'}}}) | |
148 | stack._set_layer('overlay4', {'something': {'a_set': {1, 2}}}) | |
149 | stack._set_layer('overlay5', {'something': {'a_set': {3, 4}}}) | |
150 | assert stack.get('something', None) == { | |
151 | 'a_set': {1, 2, 3, 4}, | |
152 | 'a_value': 3, | |
153 | 'another_value': 5, | |
154 | 'this': { | |
155 | 'and': 'that', | |
156 | }, | |
157 | } | |
158 | ||
159 | ||
160 | def test_requesting_empty_path(): | |
161 | stack = Metastack() | |
162 | stack._set_layer('base', {'foo': {'bar': 'baz'}}) | |
163 | assert stack.get('', 'default') == 'default' | |
164 | ||
165 | ||
166 | def test_update_layer_for_new_value(): | |
167 | stack = Metastack() | |
168 | stack._set_layer('base', {'foo': 'bar'}) | |
169 | ||
170 | stack._set_layer('overlay', {'something': 123}) | |
171 | assert stack.get('foo', None) == 'bar' | |
172 | assert stack.get('boing', 'default') == 'default' | |
173 | assert stack.get('something', None) == 123 | |
174 | ||
175 | stack._set_layer('overlay', {'something': 456}) | |
176 | assert stack.get('foo', None) == 'bar' | |
177 | assert stack.get('boing', 'default') == 'default' | |
178 | assert stack.get('something', None) == 456 | |
179 | ||
180 | ||
181 | def test_should_be_frozen(): | |
182 | stack = Metastack() | |
183 | stack._set_layer('base', {'foo': {'bar': {1, 2, 3}}}) | |
184 | foo = stack.get('foo', None) | |
185 | ||
186 | with raises(AttributeError): | |
187 | foo['bar'].add(4) | |
188 | ||
189 | with raises(TypeError): | |
190 | del foo['bar'] | |
191 | ||
192 | ||
193 | def test_atomic_in_base(): | |
194 | stack = Metastack() | |
195 | stack._set_layer('base', {'list': atomic([1, 2, 3])}) | |
196 | stack._set_layer('overlay', {'list': [4]}) | |
197 | assert list(stack.get('list', None)) == [4] | |
198 | ||
199 | ||
200 | def test_atomic_in_layer(): | |
201 | stack = Metastack() | |
202 | stack._set_layer('base', {'list': [1, 2, 3]}) | |
203 | stack._set_layer('overlay', {'list': atomic([4])}) | |
204 | assert list(stack.get('list', None)) == [4] | |
205 | ||
206 | ||
207 | def test_set_layer_return_code(): | |
208 | stack = Metastack() | |
209 | ret = stack._set_layer('overlay', {'foo': 'bar'}) | |
210 | assert ret is True | |
211 | ret = stack._set_layer('overlay', {'foo': 'bar'}) | |
212 | assert ret is False | |
213 | ret = stack._set_layer('overlay', {'foo': 'baz'}) | |
214 | assert ret is True | |
215 | ret = stack._set_layer('overlay', {'foo': 'baz', 'bar': 1}) | |
216 | assert ret is True | |
217 | ||
218 | ||
219 | def test_as_dict(): | |
220 | stack = Metastack() | |
221 | stack._set_layer('base', { | |
222 | 'bool': True, | |
223 | 'bytes': b'howdy', | |
224 | 'dict': {'1': 2}, | |
225 | 'int': 1, | |
226 | 'list': [1], | |
227 | 'none': None, | |
228 | 'set': {1}, | |
229 | 'str': 'howdy', | |
230 | 'tuple': (1, 2), | |
231 | }) | |
232 | stack._set_layer('overlay1', {'int': 1000}) | |
233 | stack._set_layer('overlay2', {'list': [2]}) | |
234 | stack._set_layer('overlay3', {'new_element': True}) | |
235 | assert stack._as_dict() == { | |
236 | 'bool': True, | |
237 | 'bytes': b'howdy', | |
238 | 'dict': {'1': 2}, | |
239 | 'int': 1000, | |
240 | 'list': [1, 2], | |
241 | 'new_element': True, | |
242 | 'none': None, | |
243 | 'set': {1}, | |
244 | 'str': 'howdy', | |
245 | 'tuple': (1, 2), | |
246 | } | |
247 | ||
248 | ||
249 | def test_as_blame(): | |
250 | stack = Metastack() | |
251 | stack._set_layer('base', {'something': {'a_list': [1, 2], 'a_value': 5}}) | |
252 | stack._set_layer('overlay', {'something': {'a_list': [3]}}) | |
253 | stack._set_layer('unrelated', {'something': {'another_value': 10}}) | |
254 | assert stack._as_blame() == { | |
255 | ('something',): ['base', 'overlay', 'unrelated'], | |
256 | ('something', 'a_list'): ['base', 'overlay'], | |
257 | ('something', 'a_value'): ['base'], | |
258 | ('something', 'another_value'): ['unrelated'], | |
259 | } |
0 | 0 | from bundlewrap.metadata import atomic |
1 | from bundlewrap.utils.dicts import map_dict_keys, reduce_dict | |
1 | from bundlewrap.utils.dicts import freeze_object, map_dict_keys, reduce_dict | |
2 | from pytest import raises | |
3 | ||
4 | from sys import version_info | |
2 | 5 | |
3 | 6 | |
4 | 7 | def test_dictmap(): |
21 | 24 | ("key2", "key5", "key6"), |
22 | 25 | ("key2", "key7"), |
23 | 26 | ]) |
27 | ||
28 | ||
29 | def test_freeze_object(): | |
30 | orig = { | |
31 | 'bool': True, | |
32 | 'int': 3, | |
33 | 'none': None, | |
34 | 'simple_list': [1, 2], | |
35 | 'simple_set': {3, 4}, | |
36 | 'recursive_dict': { | |
37 | 'something': { | |
38 | 'else': 3, | |
39 | }, | |
40 | 'str': 'str', | |
41 | }, | |
42 | 'list_of_dicts': [ | |
43 | { | |
44 | 'name': 'yaml', | |
45 | 'attribute': 123, | |
46 | 'see': 'how lists of dicts are a bad idea anyway', | |
47 | }, | |
48 | { | |
49 | 'name': 'yaml', | |
50 | 'attribute': 42, | |
51 | 'everything': ['got', 'the', 'same', 'name'], | |
52 | }, | |
53 | ], | |
54 | } | |
55 | ||
56 | frozen = freeze_object(orig) | |
57 | ||
58 | assert frozen['bool'] is True | |
59 | assert frozen['int'] == 3 | |
60 | assert frozen['none'] is None | |
61 | assert frozen['simple_list'][0] == 1 | |
62 | assert frozen['simple_list'][1] == 2 | |
63 | assert len(frozen['simple_list']) == 2 | |
64 | assert 4 in frozen['simple_set'] | |
65 | assert len(frozen['simple_set']) == 2 | |
66 | assert frozen['list_of_dicts'][0]['attribute'] == 123 | |
67 | assert frozen['recursive_dict']['something']['else'] == 3 | |
68 | ||
69 | # XXX Remove this `if` in bw 4.0 and always do the check | |
70 | if version_info[0] >= 3: | |
71 | with raises(TypeError): | |
72 | frozen['bool'] = False | |
73 | ||
74 | with raises(TypeError): | |
75 | frozen['int'] = 10 | |
76 | ||
77 | with raises(TypeError): | |
78 | frozen['none'] = None | |
79 | ||
80 | with raises(TypeError): | |
81 | frozen['list_of_dicts'][0]['attribute'] = 456 | |
82 | ||
83 | with raises(TypeError): | |
84 | frozen['recursive_dict']['something']['else'] = 4 | |
85 | ||
86 | with raises(TypeError): | |
87 | del frozen['int'] | |
88 | ||
89 | with raises(AttributeError): | |
90 | frozen['simple_list'].append(5) | |
91 | ||
92 | with raises(AttributeError): | |
93 | frozen['simple_set'].add(5) | |
24 | 94 | |
25 | 95 | |
26 | 96 | def test_reduce_dict_two_lists(): |