bundlewrap / 158653c
New upstream release (Jonathan Carter, 3 years ago)
27 changed file(s) with 833 addition(s) and 502 deletion(s).
0 # 4.1.0
1
2 2020-07-27
3
4 * added `bw test --quiet`
5 * `apply_start` hook can now raise GracefulApplyException
6 * performance improvements in metadata generation
7 * improved reporting of persistent metadata KeyErrors
8 * clashing metadata keys are now allowed for equal values
9 * git_deploy: fixed attempted shallow clones over HTTP
10 * k8s: improved handling of absent `apiVersion`
11 * fixed `cascade_skip` not affecting recursively skipped items
12 * fixed `bw metadata -b -k`
13 * fixed metadata reactors seeing their own previous results
14 * fixed SCM information being returned as bytes
15
16
017 # 4.0.0
118
219 2020-06-22
0 VERSION = (4, 0, 0)
0 VERSION = (4, 1, 0)
11 VERSION_STRING = ".".join([str(v) for v in VERSION])
2929
3030 io.progress_set_total(count_items(pending_nodes))
3131
32 repo.hooks.apply_start(
33 repo,
34 args['targets'],
35 target_nodes,
36 interactive=args['interactive'],
37 )
32 try:
33 repo.hooks.apply_start(
34 repo,
35 args['targets'],
36 target_nodes,
37 interactive=args['interactive'],
38 )
39 except GracefulApplyException as exc:
40 io.stderr(_("{x} apply aborted by hook ({reason})").format(
41 reason=str(exc) or _("no reason given"),
42 x=red("!!!"),
43 ))
44 exit(1)
3845
3946 start_time = datetime.now()
4047 results = []
137137 table = [[bold(_("path")), bold(_("source"))], ROW_SEPARATOR]
138138 for path, blamed in sorted(node.metadata_blame.items()):
139139 joined_path = "/".join(path)
140 for key_path in key_paths:
141 if joined_path.startswith(key_path):
142 table.append([joined_path, ", ".join(blamed)])
143 break
140 if key_paths:
141 for key_path in key_paths:
142 if _list_starts_with(path, key_path):
143 table.append([joined_path, ", ".join(blamed)])
144 break
145 else:
146 table.append([joined_path, ", ".join(blamed)])
144147 page_lines(render_table(table))
145148 else:
146149 metadata = deepcopy_metadata(node.metadata)
530530 )
531531
532532 # bw metadata
533 help_metadata = ("View a JSON representation of a node's metadata (defaults blue, reactors green, groups yellow, node red) or a table of selected metadata keys from multiple nodes")
533 help_metadata = ("View a JSON representation of a node's metadata (defaults blue, reactors green, groups yellow, node red, uncolored if mixed-source) or a table of selected metadata keys from multiple nodes")
534534 parser_metadata = subparsers.add_parser(
535535 "metadata",
536536 description=help_metadata,
905905 help=_("check for bundles not assigned to any node"),
906906 )
907907 parser_test.add_argument(
908 "-q",
909 "--quiet",
910 action='store_true',
911 dest='quiet',
912 help=_("don't show successful tests"),
913 )
914 parser_test.add_argument(
908915 "-S",
909916 "--subgroup-loops",
910917 action='store_true',
1111 from ..utils.ui import io, QUIT_EVENT
1212
1313
14 def test_items(nodes, ignore_missing_faults):
14 def test_items(nodes, ignore_missing_faults, quiet):
1515 io.progress_set_total(count_items(nodes))
1616 for node in nodes:
1717 if QUIT_EVENT.is_set():
5959 if item.id.count(":") < 2:
6060 # don't count canned actions
6161 io.progress_advance()
62 io.stdout("{x} {node} {bundle} {item}".format(
63 bundle=bold(item.bundle.name),
64 item=item.id,
65 node=bold(node.name),
66 x=green("✓"),
67 ))
62 if not quiet:
63 io.stdout("{x} {node} {bundle} {item}".format(
64 bundle=bold(item.bundle.name),
65 item=item.id,
66 node=bold(node.name),
67 x=green("✓"),
68 ))
6869 if item_queue.items_with_deps and not QUIT_EVENT.is_set():
6970 exception = ItemDependencyLoop(item_queue.items_with_deps)
7071 for line in explain_item_dependency_loop(exception, node.name):
7374 io.progress_set_total(0)
7475
7576
76 def test_subgroup_loops(repo):
77 def test_subgroup_loops(repo, quiet):
7778 checked_groups = []
7879 for group in repo.groups:
7980 if QUIT_EVENT.is_set():
8283 continue
8384 with io.job(_("{group} checking for subgroup loops").format(group=bold(group.name))):
8485 checked_groups.extend(group.subgroups) # the subgroups property has the check built in
85 io.stdout(_("{x} {group} has no subgroup loops").format(
86 x=green("✓"),
87 group=bold(group.name),
88 ))
89
90
91 def test_metadata_conflicts(node):
86 if not quiet:
87 io.stdout(_("{x} {group} has no subgroup loops").format(
88 x=green("✓"),
89 group=bold(group.name),
90 ))
91
92
93 def test_metadata_conflicts(node, quiet):
9294 with io.job(_("{node} checking for metadata conflicts").format(node=bold(node.name))):
9395 check_for_metadata_conflicts(node)
94 io.stdout(_("{x} {node} has no metadata conflicts").format(
95 x=green("✓"),
96 node=bold(node.name),
97 ))
96 if not quiet:
97 io.stdout(_("{x} {node} has no metadata conflicts").format(
98 x=green("✓"),
99 node=bold(node.name),
100 ))
98101
99102
100103 def test_orphaned_bundles(repo):
131134 exit(1)
132135
133136
134 def test_determinism_config(repo, nodes, iterations):
137 def test_determinism_config(repo, nodes, iterations, quiet):
135138 """
136139 Generate configuration a couple of times for every node and see if
137140 anything changes between iterations
166169 exit(1)
167170 io.progress_advance()
168171 io.progress_set_total(0)
169 io.stdout(_("{x} Configuration remained the same after being generated {n} times").format(
170 n=iterations,
171 x=green("✓"),
172 ))
173
174
175 def test_determinism_metadata(repo, nodes, iterations):
172 if not quiet:
173 io.stdout(_("{x} Configuration remained the same after being generated {n} times").format(
174 n=iterations,
175 x=green("✓"),
176 ))
177
178
179 def test_determinism_metadata(repo, nodes, iterations, quiet):
176180 """
177181 Generate metadata a couple of times for every node and see if
178182 anything changes between iterations
207211 exit(1)
208212 io.progress_advance()
209213 io.progress_set_total(0)
210 io.stdout(_("{x} Metadata remained the same after being generated {n} times").format(
211 n=iterations,
212 x=green("✓"),
213 ))
214 if not quiet:
215 io.stdout(_("{x} Metadata remained the same after being generated {n} times").format(
216 n=iterations,
217 x=green("✓"),
218 ))
214219
215220
216221 def bw_test(repo, args):
243248 args['subgroup_loops'] = True
244249
245250 if args['subgroup_loops'] and not QUIT_EVENT.is_set():
246 test_subgroup_loops(repo)
251 test_subgroup_loops(repo, args['quiet'])
247252
248253 if args['empty_groups'] and not QUIT_EVENT.is_set():
249254 test_empty_groups(repo)
256261 for node in nodes:
257262 if QUIT_EVENT.is_set():
258263 break
259 test_metadata_conflicts(node)
264 test_metadata_conflicts(node, args['quiet'])
260265 io.progress_advance()
261266 io.progress_set_total(0)
262267
263268 if args['items']:
264 test_items(nodes, args['ignore_missing_faults'])
269 test_items(nodes, args['ignore_missing_faults'], args['quiet'])
265270
266271 if args['determinism_metadata'] > 1 and not QUIT_EVENT.is_set():
267 test_determinism_metadata(repo, nodes, args['determinism_metadata'])
272 test_determinism_metadata(repo, nodes, args['determinism_metadata'], args['quiet'])
268273
269274 if args['determinism_config'] > 1 and not QUIT_EVENT.is_set():
270 test_determinism_config(repo, nodes, args['determinism_config'])
275 test_determinism_config(repo, nodes, args['determinism_config'], args['quiet'])
271276
272277 if args['hooks_node'] and not QUIT_EVENT.is_set():
273278 io.progress_set_total(len(nodes))
66
77 class DummyItem:
88 bundle = None
9 cascade_skip = True
910 triggered = False
1011
1112 def __init__(self, *args, **kwargs):
667668
668669 all_recursively_removed_items = []
669670 for removed_item in removed_items:
670 items, recursively_removed_items = \
671 remove_item_dependents(items, removed_item, skipped=skipped)
672 all_recursively_removed_items += recursively_removed_items
671 if removed_item.cascade_skip:
672 items, recursively_removed_items = \
673 remove_item_dependents(items, removed_item, skipped=skipped)
674 all_recursively_removed_items += recursively_removed_items
675 else:
676 items = remove_dep_from_items(items, removed_item.id)
673677
674678 return (items, removed_items + all_recursively_removed_items)
675679
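The `cascade_skip` fix above makes recursive skips honor each item's `cascade_skip` attribute. A minimal sketch of opting out for a single item (bundle and package names are made up; this goes in a bundle's `items.py`):

```python
pkg_apt = {
    "somepkg": {
        # if this item is skipped, don't skip its dependents;
        # they merely lose this one dependency
        "cascade_skip": False,
    },
}
```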
106106 pass
107107
108108
109 class MetadataPersistentKeyError(RepositoryError):
110 """
111 Raised when metadata reactors keep raising KeyErrors indefinitely.
112 """
113 pass
114
115
109116 class MissingRepoDependency(RepositoryError):
110117 """
111118 Raised when a dependency from requirements.txt is missing.
3535 Returns the path to the directory.
3636 """
3737 tmpdir = mkdtemp()
38 if is_ref(rev):
38 if is_ref(rev) and not remote_url.startswith('http'):
3939 git_cmdline = ["clone", "--bare", "--depth", "1", "--no-single-branch", remote_url, "."]
4040 else:
4141 git_cmdline = ["clone", "--bare", remote_url, "."]
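A note on the condition above: shallow clones (`--depth`) require git's smart transport, which not every HTTP(S) remote provides, so URLs starting with `http` now always take the full-clone path.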
132132 user_manifest,
133133 )
134134
135 if merged_manifest['apiVersion'] is None:
135 if merged_manifest.get('apiVersion') is None:
136136 raise BundleError(_(
137137 "{item} from bundle '{bundle}' needs an apiVersion in its manifest"
138138 ).format(item=self.id, bundle=self.bundle.name))
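With `.get()` above, a manifest that lacks the `apiVersion` key entirely now raises the intended `BundleError` instead of a bare `KeyError`. A quick sketch:

```python
merged_manifest = {"kind": "Deployment"}   # hypothetical manifest without apiVersion
merged_manifest.get("apiVersion") is None  # True, so the BundleError is raised
```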
103103 TYPE_SET = 2
104104 TYPE_OTHER = 3
105105
106 def paths_with_types(d):
106 def paths_with_values_and_types(d):
107107 for path in map_dict_keys(d):
108108 value = value_at_key_path(d, path)
109109 if isinstance(value, dict):
110 yield path, TYPE_DICT
110 yield path, value, TYPE_DICT
111111 elif isinstance(value, set):
112 yield path, TYPE_SET
112 yield path, value, TYPE_SET
113113 else:
114 yield path, TYPE_OTHER
114 yield path, value, TYPE_OTHER
115115
116116 for prefix in ("metadata_defaults:", "metadata_reactor:"):
117117 paths = {}
118 for identifier, layer in node._metadata_stack._layers.items():
119 if identifier.startswith(prefix):
120 for path, current_type in paths_with_types(layer):
121 try:
122 prev_type, prev_identifier = paths[path]
123 except KeyError:
124 paths[path] = current_type, identifier
125 else:
126 if (
127 prev_type == TYPE_DICT
128 and current_type == TYPE_DICT
129 ):
130 pass
131 elif (
132 prev_type == TYPE_SET
133 and current_type == TYPE_SET
134 ):
135 pass
118 for partition in node._metadata_stack._partitions:
119 for identifier, layer in partition.items():
120 if identifier.startswith(prefix):
121 for path, value, current_type in paths_with_values_and_types(layer):
122 try:
123 prev_type, prev_identifier, prev_value = paths[path]
124 except KeyError:
125 paths[path] = current_type, identifier, value
136126 else:
137 raise ValueError(_(
138 "{a} and {b} are clashing over this key path: {path}"
139 ).format(
140 a=identifier,
141 b=prev_identifier,
142 path="/".join(path),
143 ))
127 if (
128 prev_type == TYPE_DICT
129 and current_type == TYPE_DICT
130 ):
131 pass
132 elif (
133 prev_type == TYPE_SET
134 and current_type == TYPE_SET
135 ):
136 pass
137 elif value != prev_value:
138 raise ValueError(_(
139 "{node}: {a} and {b} are clashing over this key path: {path} "
140 "(\"{val_a}\" vs. \"{val_b}\")"
141 ).format(
142 a=identifier,
143 b=prev_identifier,
144 node=node.name,
145 path="/".join(path),
146 val_a=value,
147 val_b=prev_value,
148 ))
144149
145150
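To illustrate the relaxed check above (values and key names made up): two defaults or reactor layers may now write the same value to the same key path; only differing values are reported as a clash.

```python
layer_a = {"port": 80}
layer_b = {"port": 80}    # same value as layer_a: no longer reported as a clash
layer_c = {"port": 8080}  # differing value: still raises ValueError, showing both values
```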
146151 def check_for_metadata_conflicts_between_groups(node):
0 from collections import Counter
1 from os import environ
2 from traceback import TracebackException
3
4 from .exceptions import MetadataPersistentKeyError
5 from .metadata import DoNotRunAgain
6 from .node import _flatten_group_hierarchy
7 from .utils import randomize_order
8 from .utils.ui import io, QUIT_EVENT
9 from .utils.metastack import Metastack
10 from .utils.text import bold, mark_for_translation as _, red
11
12
13 MAX_METADATA_ITERATIONS = int(environ.get("BW_MAX_METADATA_ITERATIONS", "5000"))
14
15
16 class MetadataGenerator:
17 # are we currently executing a reactor?
18 __in_a_reactor = False
19
20 def __reset(self):
21 # reactors that raise DoNotRunAgain
22 self.__do_not_run_again = set()
23 # reactors that raised KeyErrors (and which ones)
24 self.__keyerrors = {}
25 # a Metastack for every node
26 self.__metastacks = {}
27 # mapping each node to all nodes that depend on it
28 self.__node_deps = {}
29 # A node is 'stable' when all its reactors return unchanged
30 # metadata, except for those reactors that look at other nodes.
31 # This dict maps nodes to True/False indicating stable status.
32 self.__node_stable = {}
33 # nodes we encountered as a dependency through partial_metadata,
34 # but haven't run yet
35 self.__nodes_that_never_ran = set()
36 # nodes whose dependencies changed and that have to rerun their
37 # reactors depending on those nodes
38 self.__triggered_nodes = set()
39 # nodes we already did initial processing on
40 self.__nodes_that_ran_at_least_once = set()
41 # how often we called reactors
42 self.__reactors_run = 0
43 # how often each reactor changed
44 self.__reactor_changes = {}
45 # tracks which reactors on a node have looked at other nodes
46 # through partial_metadata
47 self.__reactors_with_deps = {}
48
49 def _metadata_for_node(self, node_name, blame=False, stack=False):
50 """
51 Returns full or partial metadata for this node. This is the
52 primary entrypoint accessed from node.metadata.
53
54 Partial metadata may only be requested from inside a metadata
55 reactor.
56
57 If necessary, this method will build complete metadata for this
58 node and all related nodes ("related" meaning nodes that this
59 one depends on in one of its metadata reactors).
60 """
61 if self.__in_a_reactor:
62 if node_name in self._node_metadata_complete:
63 # We already completed metadata for this node, but partial must
64 # return a Metastack, so we build a single-layered one just for
65 # the interface.
66 metastack = Metastack()
67 metastack._set_layer(
68 0,
69 "flattened",
70 self._node_metadata_complete[node_name],
71 )
72 return metastack
73 else:
74 self.__partial_metadata_accessed_for.add(node_name)
75 return self.__metastacks.setdefault(node_name, Metastack())
76
77 if blame or stack:
78 # cannot return cached result here, force rebuild
79 try:
80 del self._node_metadata_complete[node_name]
81 except KeyError:
82 pass
83
84 try:
85 return self._node_metadata_complete[node_name]
86 except KeyError:
87 pass
88
89 # Different worker threads might request metadata at the same time.
90
91 with self._node_metadata_lock:
92 try:
93 # maybe our metadata got completed while waiting for the lock
94 return self._node_metadata_complete[node_name]
95 except KeyError:
96 pass
97
98 self.__build_node_metadata(node_name)
99
100 # now that we have completed all metadata for this
101 # node and all related nodes, copy that data over
102 # to the complete dict
103 for some_node_name in self.__nodes_that_ran_at_least_once:
104 self._node_metadata_complete[some_node_name] = \
105 self.__metastacks[some_node_name]._as_dict()
106
107 if blame:
108 blame_result = self.__metastacks[node_name]._as_blame()
109 elif stack:
110 stack_result = self.__metastacks[node_name]
111
112 # reset temporary vars (this isn't strictly necessary, but might
113 # free up some memory and avoid confusion)
114 self.__reset()
115
116 if blame:
117 return blame_result
118 elif stack:
119 return stack_result
120 else:
121 return self._node_metadata_complete[node_name]
122
123 def __build_node_metadata(self, initial_node_name):
124 self.__reset()
125 self.__nodes_that_never_ran.add(initial_node_name)
126
127 iterations = 0
128 while not QUIT_EVENT.is_set():
129 iterations += 1
130 if iterations > MAX_METADATA_ITERATIONS:
131 top_changers = Counter(self.__reactor_changes).most_common(25)
132 msg = _(
133 "MAX_METADATA_ITERATIONS({m}) exceeded, "
134 "likely an infinite loop between flip-flopping metadata reactors.\n"
135 "These are the reactors that changed most often:\n\n"
136 ).format(m=MAX_METADATA_ITERATIONS)
137 for reactor, count in top_changers:
138 msg += f" {count}\t{reactor[0]}\t{reactor[1]}\n"
139 raise RuntimeError(msg)
140
141 io.debug(f"metadata iteration #{iterations}")
142
143 jobmsg = _("{b} ({i} iterations, {n} nodes, {r} reactors, {e} runs)").format(
144 b=bold(_("running metadata reactors")),
145 i=iterations,
146 n=len(self.__nodes_that_never_ran) + len(self.__nodes_that_ran_at_least_once),
147 r=len(self.__reactor_changes),
148 e=self.__reactors_run,
149 )
150 with io.job(jobmsg):
151 try:
152 node_name = self.__nodes_that_never_ran.pop()
153 except KeyError:
154 pass
155 else:
156 self.__nodes_that_ran_at_least_once.add(node_name)
157 self.__initial_run_for_node(node_name)
158 continue
159
160 # at this point, we have run all relevant nodes at least once
161
162 # if we have any triggered nodes from below, run their reactors
163 # with deps to see if they become unstable
164
165 try:
166 node_name = self.__triggered_nodes.pop()
167 except KeyError:
168 pass
169 else:
170 io.debug(f"triggered metadata run for {node_name}")
171 self.__run_reactors(
172 self.get_node(node_name),
173 with_deps=True,
174 without_deps=False,
175 )
176 continue
177
178 # now (re)stabilize all nodes
179
180 encountered_unstable_node = False
181 for node, stable in self.__node_stable.items():
182 if stable:
183 continue
184 self.__run_reactors(node, with_deps=False, without_deps=True)
185 if self.__node_stable[node]:
186 io.debug(f"metadata stabilized for {node_name}")
187 else:
188 io.debug(f"metadata remains unstable for {node_name}")
189 encountered_unstable_node = True
190 if encountered_unstable_node:
191 # start over until everything is stable
192 continue
193
194 # at this point, all nodes should be stable except for their reactors with deps
195
196 encountered_unstable_node = False
197 for node in randomize_order(self.__node_stable.keys()):
198 self.__run_reactors(node, with_deps=True, without_deps=False)
199 if not self.__node_stable[node]:
200 encountered_unstable_node = True
201 if encountered_unstable_node:
202 # start over until everything is stable
203 continue
204
205 # if we get here, we're done!
206 break
207
208 if self.__keyerrors and not QUIT_EVENT.is_set():
209 msg = _(
210 "These metadata reactors raised a KeyError "
211 "even after all other reactors were done:"
212 )
213 for source, exc in sorted(self.__keyerrors.items()):
214 node_name, reactor = source
215 msg += f"\n\n {node_name} {reactor}\n\n"
216 for line in TracebackException.from_exception(exc).format():
217 msg += " " + line
218 raise MetadataPersistentKeyError(msg)
219
220 def __initial_run_for_node(self, node_name):
221 io.debug(f"initial metadata run for {node_name}")
222 node = self.get_node(node_name)
223 self.__metastacks[node_name] = Metastack()
224
225 # randomize order to increase chance of exposing clashing defaults
226 for defaults_name, defaults in randomize_order(node.metadata_defaults):
227 self.__metastacks[node_name]._set_layer(
228 2,
229 defaults_name,
230 defaults,
231 )
232 self.__metastacks[node_name]._cache_partition(2)
233
234 group_order = _flatten_group_hierarchy(node.groups)
235 for group_name in group_order:
236 self.__metastacks[node_name]._set_layer(
237 0,
238 "group:{}".format(group_name),
239 self.get_group(group_name)._attributes.get('metadata', {}),
240 )
241
242 self.__metastacks[node_name]._set_layer(
243 0,
244 "node:{}".format(node_name),
245 node._attributes.get('metadata', {}),
246 )
247 self.__metastacks[node_name]._cache_partition(0)
248
249 self.__reactors_with_deps[node_name] = set()
250 # run all reactors once to get started
251 self.__run_reactors(node, with_deps=True, without_deps=True)
252
253 def __run_reactors(self, node, with_deps=True, without_deps=True):
254 any_reactor_changed = False
255
256 for depsonly in (True, False):
257 if depsonly and not with_deps:
258 # skip reactors with deps
259 continue
260 if not depsonly and not without_deps:
261 # skip reactors without deps
262 continue
263 # TODO ideally, we should run the least-run reactors first
264 for reactor_name, reactor in randomize_order(node.metadata_reactors):
265 if (
266 (depsonly and reactor_name not in self.__reactors_with_deps[node.name]) or
267 (not depsonly and reactor_name in self.__reactors_with_deps[node.name])
268 ):
269 # this if makes sure we run reactors with deps first
270 continue
271 reactor_changed, deps = self.__run_reactor(node.name, reactor_name, reactor)
272 io.debug(f"{node.name}:{reactor_name} changed={reactor_changed} deps={deps}")
273 if reactor_changed:
274 any_reactor_changed = True
275 if deps:
276 # record that this reactor has dependencies
277 self.__reactors_with_deps[node.name].add(reactor_name)
278 # we could also remove this marker if we end up without
279 # deps again in future iterations, but that is too
280 # unlikely and the housekeeping cost too great
281 for required_node_name in deps:
282 if required_node_name not in self.__nodes_that_ran_at_least_once:
283 # we found a node that we didn't need until now
284 self.__nodes_that_never_ran.add(required_node_name)
285 # this is so we know the current node needs to be run
286 # again if the required node changes
287 self.__node_deps.setdefault(required_node_name, set())
288 self.__node_deps[required_node_name].add(node.name)
289
290 if any_reactor_changed:
291 # something changed on this node, mark all dependent nodes as unstable
292 for required_node_name in self.__node_deps.get(node.name, set()):
293 io.debug(f"{node.name} triggering metadata rerun on {required_node_name}")
294 self.__triggered_nodes.add(required_node_name)
295
296 if with_deps and any_reactor_changed:
297 self.__node_stable[node] = False
298 elif without_deps:
299 self.__node_stable[node] = not any_reactor_changed
300
301 def __run_reactor(self, node_name, reactor_name, reactor):
302 if (node_name, reactor_name) in self.__do_not_run_again:
303 return False, set()
304 self.__partial_metadata_accessed_for = set()
305 self.__reactors_run += 1
306 self.__reactor_changes.setdefault((node_name, reactor_name), 0)
307 # make sure the reactor doesn't react to its own output
308 old_metadata = self.__metastacks[node_name]._pop_layer(1, reactor_name)
309 self.__in_a_reactor = True
310 try:
311 new_metadata = reactor(self.__metastacks[node_name])
312 except KeyError as exc:
313 self.__keyerrors[(node_name, reactor_name)] = exc
314 return False, self.__partial_metadata_accessed_for
315 except DoNotRunAgain:
316 self.__do_not_run_again.add((node_name, reactor_name))
317 # clear any previously stored exception
318 try:
319 del self.__keyerrors[(node_name, reactor_name)]
320 except KeyError:
321 pass
322 return False, set()
323 except Exception as exc:
324 io.stderr(_(
325 "{x} Exception while executing metadata reactor "
326 "{metaproc} for node {node}:"
327 ).format(
328 x=red("!!!"),
329 metaproc=reactor_name,
330 node=node_name,
331 ))
332 raise exc
333 finally:
334 self.__in_a_reactor = False
335
336 # reactor terminated normally, clear any previously stored exception
337 try:
338 del self.__keyerrors[(node_name, reactor_name)]
339 except KeyError:
340 pass
341
342 try:
343 self.__metastacks[node_name]._set_layer(
344 1,
345 reactor_name,
346 new_metadata,
347 )
348 except TypeError as exc:
349 # TODO catch validation errors better
350 io.stderr(_(
351 "{x} Exception after executing metadata reactor "
352 "{metaproc} for node {node}:"
353 ).format(
354 x=red("!!!"),
355 metaproc=reactor_name,
356 node=node_name,
357 ))
358 raise exc
359
360 changed = old_metadata != new_metadata
361 if changed:
362 self.__reactor_changes[(node_name, reactor_name)] += 1
363
364 return changed, self.__partial_metadata_accessed_for
676676
677677 @property
678678 def metadata(self):
679 """
680 Returns full metadata for a node. MUST NOT be used from inside a
681 metadata processor. Use .partial_metadata instead.
682 """
683 return self.repo._metadata_for_node(self.name, partial=False)
679 return self.repo._metadata_for_node(self.name)
684680
685681 @property
686682 def metadata_blame(self):
687 return self.repo._metadata_for_node(self.name, partial=False, blame=True)
683 return self.repo._metadata_for_node(self.name, blame=True)
688684
689685 @property
690686 def _metadata_stack(self):
691 return self.repo._metadata_for_node(self.name, partial=False, stack=True)
687 return self.repo._metadata_for_node(self.name, stack=True)
692688
693689 def metadata_get(self, path, default=NO_DEFAULT):
694690 if not isinstance(path, (tuple, list)):
728724 @property
729725 def partial_metadata(self):
730726 """
731 Only to be used from inside metadata reactors. Can't use the
732 normal .metadata there because it might deadlock when nodes
733 have interdependent metadata.
734
735 It's OK for metadata reactors to work with partial metadata
736 because they will be fed all metadata updates until no more
737 changes are made by any metadata reactor.
727 Deprecated, remove in 5.0.0
738728 """
739 return self.repo._metadata_for_node(self.name, partial=True)
729 return self.metadata
740730
741731 def run(self, command, data_stdin=None, may_fail=False, log_output=False):
742732 assert self.os in self.OS_FAMILY_UNIX
00 from importlib.machinery import SourceFileLoader
11 from inspect import isabstract
2 from os import environ, listdir, mkdir, walk
2 from os import listdir, mkdir, walk
33 from os.path import abspath, dirname, isdir, isfile, join
44 from threading import Lock
55
1616 RepositoryError,
1717 )
1818 from .group import Group
19 from .metadata import DoNotRunAgain
20 from .node import _flatten_group_hierarchy, Node
19 from .metagen import MetadataGenerator
20 from .node import Node
2121 from .secrets import FILENAME_SECRETS, generate_initial_secrets_cfg, SecretProxy
2222 from .utils import (
2323 cached_property,
2424 error_context,
2525 get_file_contents,
2626 names,
27 randomize_order,
2827 )
2928 from .utils.scm import get_git_branch, get_git_clean, get_rev
3029 from .utils.dicts import hash_statedict
31 from .utils.metastack import Metastack
32 from .utils.text import bold, mark_for_translation as _, red, validate_name
33 from .utils.ui import io, QUIT_EVENT
30 from .utils.text import mark_for_translation as _, red, validate_name
31 from .utils.ui import io
3432
3533 DIRNAME_BUNDLES = "bundles"
3634 DIRNAME_DATA = "data"
4038 FILENAME_GROUPS = "groups.py"
4139 FILENAME_NODES = "nodes.py"
4240 FILENAME_REQUIREMENTS = "requirements.txt"
43 MAX_METADATA_ITERATIONS = int(environ.get("BW_MAX_METADATA_ITERATIONS", "100"))
4441
4542 HOOK_EVENTS = (
4643 'action_run_end',
178175 return self.__module_cache[attrname]
179176
180177
181 class Repository:
178 class Repository(MetadataGenerator):
182179 def __init__(self, repo_path=None):
183180 if repo_path is None:
184181 self.path = "/dev/null"
192189 self.node_dict = {}
193190 self._get_all_attr_code_cache = {}
194191 self._get_all_attr_result_cache = {}
192
193 # required by MetadataGenerator
195194 self._node_metadata_complete = {}
196195 self._node_metadata_lock = Lock()
197196
458457 Returns a list of nodes in the given group.
459458 """
460459 return self.nodes_in_all_groups([group_name])
461
462 def _metadata_for_node(self, node_name, partial=False, blame=False, stack=False):
463 """
464 Returns full or partial metadata for this node.
465
466 Partial metadata may only be requested from inside a metadata
467 reactor.
468
469 If necessary, this method will build complete metadata for this
470 node and all related nodes. Related meaning nodes that this node
471 depends on in one of its metadata processors.
472 """
473 if partial:
474 if node_name in self._node_metadata_complete:
475 # We already completed metadata for this node, but partial must
476 # return a Metastack, so we build a single-layered one just for
477 # the interface.
478 metastack = Metastack()
479 metastack._set_layer(
480 "flattened",
481 self._node_metadata_complete[node_name],
482 )
483 return metastack
484 else:
485 # Return the WIP Metastack or an empty one if we didn't start
486 # yet.
487 self._nodes_we_need_metadata_for.add(node_name)
488 return self._metastacks.setdefault(node_name, Metastack())
489
490 if blame or stack:
491 # cannot return cached result here, force rebuild
492 try:
493 del self._node_metadata_complete[node_name]
494 except KeyError:
495 pass
496
497 try:
498 return self._node_metadata_complete[node_name]
499 except KeyError:
500 pass
501
502 # Different worker threads might request metadata at the same time.
503 # This creates problems for the following variables:
504 #
505 # self._metastacks
506 # self._nodes_we_need_metadata_for
507 #
508 # Chaos would ensue if we allowed multiple instances of
509 # _build_node_metadata() running in parallel, messing with these
510 # vars. So we use a lock and reset the vars before and after.
511
512 with self._node_metadata_lock:
513 try:
514 # maybe our metadata got completed while waiting for the lock
515 return self._node_metadata_complete[node_name]
516 except KeyError:
517 pass
518
519 # set up temporary vars
520 self._metastacks = {}
521 self._nodes_we_need_metadata_for = {node_name}
522
523 self._build_node_metadata()
524
525 io.debug("completed metadata for {} nodes".format(
526 len(self._nodes_we_need_metadata_for),
527 ))
528 # now that we have completed all metadata for this
529 # node and all related nodes, copy that data over
530 # to the complete dict
531 for node_name in self._nodes_we_need_metadata_for:
532 self._node_metadata_complete[node_name] = \
533 self._metastacks[node_name]._as_dict()
534
535 if blame:
536 blame_result = self._metastacks[node_name]._as_blame()
537 elif stack:
538 stack_result = self._metastacks[node_name]
539
540 # reset temporary vars (this isn't strictly necessary, but might
541 # free up some memory and avoid confusion)
542 self._metastacks = {}
543 self._nodes_we_need_metadata_for = set()
544
545 if blame:
546 return blame_result
547 elif stack:
548 return stack_result
549 else:
550 return self._node_metadata_complete[node_name]
551
552 def _build_node_metadata(self):
553 """
554 Builds complete metadata for all nodes that appear in
555 self._nodes_we_need_metadata_for.
556 """
557 # Prevents us from reassembling static metadata needlessly and
558 # helps us detect nodes pulled into self._nodes_we_need_metadata_for
559 # by node.partial_metadata.
560 nodes_with_completed_static_metadata = set()
561 # these reactors have indicated that they do not need to be run again
562 do_not_run_again = set()
563 # these reactors have raised KeyErrors
564 keyerrors = {}
565 # loop detection
566 iterations = 0
567 reactors_that_changed_something_in_last_iteration = set()
568
569 while not QUIT_EVENT.is_set():
570 iterations += 1
571 if iterations > MAX_METADATA_ITERATIONS:
572 reactors = ""
573 for node, reactor in sorted(reactors_that_changed_something_in_last_iteration):
574 reactors += node + " " + reactor + "\n"
575 raise ValueError(_(
576 "Infinite loop detected between these metadata reactors:\n"
577 ) + reactors)
578
579 # First, get the static metadata out of the way
580 for node_name in list(self._nodes_we_need_metadata_for):
581 if QUIT_EVENT.is_set():
582 break
583 node = self.get_node(node_name)
584 # check if static metadata for this node is already done
585 if node_name in nodes_with_completed_static_metadata:
586 continue
587 self._metastacks[node_name] = Metastack()
588
589 with io.job(_("{node} adding metadata defaults").format(node=bold(node.name))):
590 # randomize order to increase chance of exposing clashing defaults
591 for defaults_name, defaults in randomize_order(node.metadata_defaults):
592 self._metastacks[node_name]._set_layer(
593 defaults_name,
594 defaults,
595 )
596
597 with io.job(_("{node} adding group metadata").format(node=bold(node.name))):
598 group_order = _flatten_group_hierarchy(node.groups)
599 for group_name in group_order:
600 self._metastacks[node_name]._set_layer(
601 "group:{}".format(group_name),
602 self.get_group(group_name)._attributes.get('metadata', {}),
603 )
604
605 with io.job(_("{node} adding node metadata").format(node=bold(node.name))):
606 self._metastacks[node_name]._set_layer(
607 "node:{}".format(node_name),
608 node._attributes.get('metadata', {}),
609 )
610
611 # This will ensure node/group metadata and defaults are
612 # skipped over in future iterations.
613 nodes_with_completed_static_metadata.add(node_name)
614
615 # Now for the interesting part: We run all metadata reactors
616 # until none of them return changed metadata anymore.
617 any_reactor_returned_changed_metadata = False
618 reactors_that_changed_something_in_last_iteration = set()
619
620 # randomize order to increase chance of exposing unintended
621 # non-deterministic effects of execution order
622 for node_name in randomize_order(self._nodes_we_need_metadata_for):
623 if QUIT_EVENT.is_set():
624 break
625 node = self.get_node(node_name)
626
627 with io.job(_("{node} running metadata reactors").format(node=bold(node.name))):
628 for reactor_name, reactor in randomize_order(node.metadata_reactors):
629 if (node_name, reactor_name) in do_not_run_again:
630 continue
631 try:
632 new_metadata = reactor(self._metastacks[node.name])
633 except KeyError as exc:
634 keyerrors[(node_name, reactor_name)] = exc
635 except DoNotRunAgain:
636 do_not_run_again.add((node_name, reactor_name))
637 except Exception as exc:
638 io.stderr(_(
639 "{x} Exception while executing metadata reactor "
640 "{metaproc} for node {node}:"
641 ).format(
642 x=red("!!!"),
643 metaproc=reactor_name,
644 node=node.name,
645 ))
646 raise exc
647 else:
648 # reactor terminated normally, clear any previously stored exception
649 try:
650 del keyerrors[(node_name, reactor_name)]
651 except KeyError:
652 pass
653
654 try:
655 this_changed = self._metastacks[node_name]._set_layer(
656 reactor_name,
657 new_metadata,
658 )
659 except TypeError as exc:
660 # TODO catch validation errors better
661 io.stderr(_(
662 "{x} Exception after executing metadata reactor "
663 "{metaproc} for node {node}:"
664 ).format(
665 x=red("!!!"),
666 metaproc=reactor_name,
667 node=node.name,
668 ))
669 raise exc
670 if this_changed:
671 reactors_that_changed_something_in_last_iteration.add(
672 (node_name, reactor_name),
673 )
674 any_reactor_returned_changed_metadata = True
675
676 if not any_reactor_returned_changed_metadata:
677 if nodes_with_completed_static_metadata != self._nodes_we_need_metadata_for:
678 # During metadata reactor execution, partial metadata may
679 # have been requested for nodes we did not previously
680 # consider. We still need to make sure to generate static
681 # metadata for these new nodes, as that may trigger
682 # additional results from metadata reactors.
683 continue
684 else:
685 # Now that we're done, re-sort static metadata to
686 # overrule reactors.
687 for node_name, metastack in self._metastacks.items():
688 for identifier in list(metastack._layers.keys()):
689 if (
690 identifier.startswith("group:") or
691 identifier.startswith("node:")
692 ):
693 metastack._layers[identifier] = metastack._layers.pop(identifier)
694 break
695
696 if keyerrors:
697 reactors = ""
698 for source, exc in keyerrors.items():
699 node_name, reactor = source
700 reactors += "{} {} {}\n".format(node_name, reactor, exc)
701 raise ValueError(_(
702 "These metadata reactors raised a KeyError "
703 "even after all other reactors were done:\n"
704 ) + reactors)
705460
706461 def metadata_hash(self):
707462 repo_dict = {}
431431 if not path:
432432 return dict_obj
433433 else:
434 return value_at_key_path(dict_obj[path[0]], path[1:])
434 nested_dict = dict_obj[path[0]]
435 remaining_path = path[1:]
436 if remaining_path and not isinstance(nested_dict, dict):
437 raise KeyError("/".join(path))
438 else:
439 return value_at_key_path(nested_dict, remaining_path)
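With this change, descending through a non-dict value raises `KeyError` instead of `TypeError`, which callers like `Metastack.get()` already treat as "path not present". A quick sketch:

```python
from bundlewrap.metadata import value_at_key_path

d = {"a": {"b": 1}}
value_at_key_path(d, ("a", "b"))       # -> 1
value_at_key_path(d, ("a", "b", "c"))  # now raises KeyError (previously TypeError)
```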
00 from collections import OrderedDict
11 from sys import version_info
22
3 from ..metadata import deepcopy_metadata, validate_metadata, value_at_key_path
3 from ..metadata import METADATA_TYPES, deepcopy_metadata, validate_metadata, value_at_key_path
44 from . import NO_DEFAULT
5 from .dicts import map_dict_keys, merge_dict
5 from .dicts import ATOMIC_TYPES, map_dict_keys, merge_dict
6
7
8 UNMERGEABLE = tuple(METADATA_TYPES) + tuple(ATOMIC_TYPES.values())
69
710
811 class Metastack:
1417 in their ability to revise their own layer each time they are run.
1518 """
1619 def __init__(self):
17 # We rely heavily on insertion order in this dict.
18 if version_info < (3, 7):
19 self._layers = OrderedDict()
20 else:
21 self._layers = {}
20 self._partitions = (
21 # We rely heavily on insertion order in these dicts.
22 {} if version_info >= (3, 7) else OrderedDict(), # node/groups
23 {} if version_info >= (3, 7) else OrderedDict(), # reactors
24 {} if version_info >= (3, 7) else OrderedDict(), # defaults
25 )
26 self._cached_partitions = {}
2227
2328 def get(self, path, default=NO_DEFAULT):
2429 """
4146 result = None
4247 undef = True
4348
44 for layer in self._layers.values():
45 try:
46 value = value_at_key_path(layer, path)
47 except KeyError:
48 pass
49 else:
50 if undef:
51 # First time we see anything.
52 result = {'data': value}
53 undef = False
49 for part_index, partition in enumerate(self._partitions):
50 # prefer cached partitions if available
51 partition = self._cached_partitions.get(part_index, partition)
52 for layer in reversed(list(partition.values())):
53 try:
54 value = value_at_key_path(layer, path)
55 except KeyError:
56 pass
5457 else:
55 result = merge_dict(result, {'data': value})
58 if undef:
59 # First time we see anything. If we can't merge
60 # it anyway, then return early.
61 if isinstance(value, UNMERGEABLE):
62 return value
63 result = {'data': value}
64 undef = False
65 else:
66 result = merge_dict({'data': value}, result)
5667
5768 if undef:
5869 if default != NO_DEFAULT:
6273 else:
6374 return deepcopy_metadata(result['data'])
6475
65 def _as_dict(self):
76 def _as_dict(self, partitions=None):
6677 final_dict = {}
6778
68 for layer in self._layers.values():
69 final_dict = merge_dict(final_dict, layer)
79 if partitions is None:
80 partitions = tuple(range(len(self._partitions)))
81 else:
82 partitions = sorted(partitions)
83
84 for part_index in partitions:
85 # prefer cached partitions if available
86 partition = self._cached_partitions.get(part_index, self._partitions[part_index])
87 for layer in reversed(list(partition.values())):
88 final_dict = merge_dict(layer, final_dict)
7089
7190 return final_dict
7291
7493 keymap = map_dict_keys(self._as_dict())
7594 blame = {}
7695 for path in keymap:
77 for identifier, layer in self._layers.items():
78 try:
79 value_at_key_path(layer, path)
80 except KeyError:
81 pass
82 else:
83 blame.setdefault(path, []).append(identifier)
96 for partition in self._partitions:
97 for identifier, layer in partition.items():
98 try:
99 value_at_key_path(layer, path)
100 except KeyError:
101 pass
102 else:
103 blame.setdefault(path, []).append(identifier)
84104 return blame
85105
86 def _set_layer(self, identifier, new_layer):
87 # Marked with an underscore because only the internal metadata
88 # reactor routing is supposed to call this method.
106 def _pop_layer(self, partition_index, identifier):
107 try:
108 return self._partitions[partition_index].pop(identifier)
109 except (KeyError, IndexError):
110 return {}
111
112 def _set_layer(self, partition_index, identifier, new_layer):
89113 validate_metadata(new_layer)
90 changed = self._layers.get(identifier, {}) != new_layer
91 self._layers[identifier] = new_layer
92 return changed
114 self._partitions[partition_index][identifier] = new_layer
115
116 def _cache_partition(self, partition_index):
117 self._cached_partitions[partition_index] = {
118 'merged layers': self._as_dict(partitions=[partition_index]),
119 }
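A minimal sketch of the reworked partitioned API above (layer identifiers are made up; partition 0 holds node/group layers, 1 reactors, 2 defaults, and lower-numbered partitions take precedence):

```python
from bundlewrap.utils.metastack import Metastack

stack = Metastack()
stack._set_layer(2, "metadata_defaults:demo", {"port": 80})
stack._set_layer(1, "metadata_reactor:demo", {"port": 8080})
stack._set_layer(0, "node:demo", {"port": 443})

assert stack.get("port") == 443  # node/group partition wins
assert stack._pop_layer(1, "metadata_reactor:demo") == {"port": 8080}
assert stack._pop_layer(1, "metadata_reactor:demo") == {}  # already removed
```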
99 "git rev-parse --abbrev-ref HEAD",
1010 shell=True,
1111 stderr=STDOUT,
12 ).strip()
12 ).decode().strip()
1313 except CalledProcessError:
1414 return None
1515
2020 "git status --porcelain",
2121 shell=True,
2222 stderr=STDOUT,
23 ).strip())
23 ).decode().strip())
2424 except CalledProcessError:
2525 return None
2626
3131 "bzr revno",
3232 shell=True,
3333 stderr=STDOUT,
34 ).strip()
34 ).decode().strip()
3535 except CalledProcessError:
3636 return None
3737
4242 "git rev-parse HEAD",
4343 shell=True,
4444 stderr=STDOUT,
45 ).strip()
45 ).decode().strip()
4646 except CalledProcessError:
4747 return None
4848
5353 "hg --debug id -i",
5454 shell=True,
5555 stderr=STDOUT,
56 ).strip().rstrip("+")
56 ).decode().strip().rstrip("+")
5757 except CalledProcessError:
5858 return None
5959
00 from contextlib import contextmanager
11 from datetime import datetime
2 import fcntl
32 from functools import wraps
43 from os import _exit, environ, getpid, kill
54 from os.path import join
65 from select import select
6 from shutil import get_terminal_size
77 from signal import signal, SIG_DFL, SIGINT, SIGQUIT, SIGTERM
8 import struct
98 from subprocess import PIPE, Popen
109 import sys
1110 import termios
101100 yield c
102101
103102
104 def term_width():
105 if not TTY:
106 return 0
107
108 fd = sys.stdout.fileno()
109 _, width = struct.unpack('hh', fcntl.ioctl(fd, termios.TIOCGWINSZ, 'aaaa'))
110 return width
111
112
113103 def page_lines(lines):
114104 """
115105 View the given list of Unicode lines in a pager (e.g. `less`).
124114 env=env,
125115 stdin=PIPE,
126116 )
127 pager.stdin.write("\n".join(lines).encode('utf-8'))
117 try:
118 pager.stdin.write("\n".join(lines).encode('utf-8'))
119 except BrokenPipeError:
120 pass
128121 pager.stdin.close()
129122 pager.communicate()
130123 write_to_stream(STDOUT_WRITER, HIDE_CURSOR)
422415 progress_text = "{:.1f}% ".format(progress * 100)
423416 line += bold(progress_text)
424417 visible_length += len(progress_text)
425 line += self.jobs[-1][:term_width() - 1 - visible_length]
418 line += self.jobs[-1][:get_terminal_size().columns - 1 - visible_length]
426419 write_to_stream(STDOUT_WRITER, line)
427420 self._status_line_present = True
428421
0 bundlewrap (4.1.0-1) unstable; urgency=medium
1
2 * New upstream release
3 * Add python3-tomlkit to build-depends (needed for tests)
4
5 -- Jonathan Carter <jcc@debian.org> Sat, 08 Aug 2020 12:24:31 +0200
6
07 bundlewrap (4.0.0-1) unstable; urgency=medium
18
29 * New upstream release
3 * Add python3-tomlkit to build-dependencies (needed for tests)
10 * Add python3-tomlkit to build-depends (needed for tests)
411
512 -- Jonathan Carter <jcc@debian.org> Tue, 30 Jun 2020 20:38:26 +0200
613
714 bundlewrap (3.10.0-1) unstable; urgency=medium
815
916 * New upstream release
10 * Add python3-jinja2 and python3-mako to build-depends (for tests)
17 * Add python3-jinja2 and python3-mako to build-depends (needed for tests)
1118
1219 -- Jonathan Carter <jcc@debian.org> Mon, 18 May 2020 11:39:02 +0200
1320
1010 python3-mako,
1111 python3-minimal,
1212 python3-requests,
13 python3-setuptools
13 python3-setuptools,
14 python3-tomlkit
1415 Standards-Version: 4.5.0
1516 Rules-Requires-Root: no
1617 Homepage: http://bundlewrap.org/
9292
9393 <div class="alert alert-warning">BundleWrap will consider group hierarchy when merging metadata. For example, it is possible to define a default nameserver for the "eu" group and then override it for the "eu.frankfurt" subgroup. The catch is that this only works for groups that are connected through a subgroup hierarchy. Independent groups will have their metadata merged in an undefined order. <code>bw test</code> will report conflicting metadata in independent groups as a metadata collision.</div>
9494
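A minimal sketch of that nameserver example (group names and addresses are made up):

    groups = {
        "eu": {
            "subgroups": ["eu.frankfurt"],
            "metadata": {"nameserver": "192.0.2.1"},
        },
        "eu.frankfurt": {
            "metadata": {"nameserver": "192.0.2.53"},  # overrides the "eu" value
        },
    }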
95 <div class="alert alert-info">Also see the <a href="../nodes.py#metadata">documentation for node.metadata</a> for more information.</div>
95 <div class="alert alert-info">Also see the <a href="../nodes.py#metadata">documentation for node.metadata</a> and <a href="../metadata.py#Priority">metadata.py</a> for more information.</div>
9696
9797 <br>
9898
6363
6464 `interactive` Indicates whether the apply is interactive or not.
6565
66 To abort the entire apply operation, raise this exception from within the hook:
67
68 ```
69 from bundlewrap.exceptions import GracefulApplyException
70 raise GracefulApplyException("reason goes here")
71 ```
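The message passed to the exception is shown as the abort reason, and `bw apply` exits with status 1.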
72
6673 ---
6774
6875 **`apply_end(repo, target, nodes, duration=None, **kwargs)`**
1515
1616 ## Reactors
1717
18 So let's look at reactors next. Metadata reactors are functions that take the metadata generated so far as their single argument. You must then return a new dictionary with any metadata you wish to have added:
18 So let's look at reactors next. Metadata reactors are functions that take the metadata generated for this node so far as their single argument. You must then return a new dictionary with any metadata you wish to add:
1919
2020 @metadata_reactor
2121 def bar(metadata):
2929
3030 While node and group metadata and metadata defaults will always be available to reactors, you should not rely on that for the simple reason that you may one day move some metadata from those static sources into another reactor, which may be run later. Thus you may need to wait for some iterations before that data shows up in `metadata`. Note that BundleWrap will catch any `KeyError`s raised in metadata reactors and only report them if they don't go away after all other relevant reactors are done.
3131
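For illustration, a hypothetical reactor that depends on a key provided by some other source:

    @metadata_reactor
    def derived(metadata):
        # raises KeyError until something else provides 'base'; BundleWrap
        # retries and only reports the error if it never goes away
        return {'derived': metadata.get('base') + 1}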
32 To avoid deadlocks when accessing *other* nodes' metadata from within a metadata reactor, use `other_node.partial_metadata` instead of `other_node.metadata`. For the same reason, always use the `metadata` parameter to access the current node's metadata, never `node.metadata`.
32 You can also access other nodes' metadata:
33
34 @metadata_reactor
35 def baz(metadata):
36 frob = set()
37 for n in repo.nodes:
38 frob.add(n.metadata.get('sizzle'))
39 return {'frob': frob}
3340
3441
3542 ### DoNotRunAgain
4552
4653
4754 <div class="alert alert-info">For your convenience, you can access <code>repo</code>, <code>node</code>, <code>metadata_reactors</code>, and <code>DoNotRunAgain</code> in <code>metadata.py</code> without importing them.</div>
55
56
57 ## Priority
58
59 For atomic ("primitive") data types like `int` or `bool`, sources take precedence in this order:
60
61 1. Nodes
62 2. Groups
63 3. Reactors
64 4. Defaults
65
66 Node metadata wins over group metadata, groups win over reactors, reactors win over defaults.
67
68 This also applies to type conflicts: for example, a boolean flag set in node metadata will win over a list returned by a metadata reactor. (You should probably avoid situations like this entirely.)
69
70 Set-like data types will be merged recursively.
71
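A hypothetical example of that precedence:

    # nodes.py
    nodes = {
        "node1": {"metadata": {"port": 80}},
    }

    # a reactor returning {'port': 8080} is overruled by the node value,
    # so node1's effective metadata has port == 80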
72 <div class="alert alert-info">Also see the <a href="../nodes.py#metadata">documentation for node.metadata</a> and <a href="../groups.py#metadata">group.metadata</a> for more information.</div>
118118 * `None`
119119 * `bundlewrap.utils.Fault`
120120
121 <div class="alert alert-info">Also see the <a href="../groups.py#metadata">documentation for group.metadata</a> for more information.</div>
121 <div class="alert alert-info">Also see the <a href="../groups.py#metadata">documentation for group.metadata</a> and <a href="../metadata.py#Priority">metadata.py</a> for more information.</div>
122122
123123 <br>
124124
22
33 setup(
44 name="bundlewrap",
5 version="4.0.0",
5 version="4.1.0",
66 description="Config management with Python",
77 long_description=(
88 "By allowing for easy and low-overhead config management, BundleWrap fills the gap between complex deployments using Chef or Puppet and old school system administration over SSH.\n"
384384 f.write(
385385 """
386386 @metadata_reactor
387 def foo(metadata):
388 bar_ran = metadata.get('bar_ran', False)
389 if not bar_ran:
390 return {'foo_ran': True}
391 else:
392 return {'foo': metadata.get('bar'), 'foo_ran': True}
393
394
395 @metadata_reactor
396 def bar(metadata):
397 foo_ran = metadata.get('foo_ran', False)
398 if not foo_ran:
399 return {'bar_ran': False}
400 else:
401 return {'bar': 47, 'bar_ran': True}
402 """)
403 stdout, stderr, rcode = run("bw metadata node1", path=str(tmpdir))
404 assert loads(stdout.decode()) == {
405 "bar": 47,
406 "bar_ran": True,
407 "foo": 47,
408 "foo_ran": True,
387 def one(metadata):
388 return {'one': True}
389
390 @metadata_reactor
391 def two(metadata):
392 return {'two': metadata.get('one')}
393
394 @metadata_reactor
395 def three(metadata):
396 return {'three': metadata.get('two')}
397
398 @metadata_reactor
399 def four(metadata):
400 return {'four': metadata.get('three')}
401 """)
402 stdout, stderr, rcode = run("bw metadata node1", path=str(tmpdir))
403 assert loads(stdout.decode()) == {
404 "one": True,
405 "two": True,
406 "three": True,
407 "four": True,
409408 }
410409 assert stderr == b""
411410 assert rcode == 0
426425 """
427426 @metadata_reactor
428427 def plusone(metadata):
429 return {'foo': metadata.get('foo', 0) + 1 }
428 return {'foo': metadata.get('bar', 0) + 1 }
430429
431430 @metadata_reactor
432431 def plustwo(metadata):
433 return {'foo': metadata.get('foo', 0) + 2 }
432 return {'bar': metadata.get('foo', 0) + 2 }
434433 """)
435434 stdout, stderr, rcode = run("bw metadata node1", path=str(tmpdir))
436435 assert rcode == 1
436
437
438 def test_metadatapy_no_self_react(tmpdir):
439 make_repo(
440 tmpdir,
441 bundles={"test": {}},
442 nodes={
443 "node1": {
444 'bundles': ["test"],
445 },
446 },
447 )
448 with open(join(str(tmpdir), "bundles", "test", "metadata.py"), 'w') as f:
449 f.write(
450 """
451 @metadata_reactor
452 def reactor1(metadata):
453 assert not metadata.get('broken', False)
454 return {'broken': True}
455
456 @metadata_reactor
457 def reactor2(metadata):
458 # just to make sure reactor1 runs again
459 return {'again': True}
460 """)
461 stdout, stderr, rcode = run("bw metadata node1", path=str(tmpdir))
462 assert loads(stdout.decode()) == {
463 "broken": True,
464 "again": True,
465 }
466
467
468 def test_own_node_metadata(tmpdir):
469 make_repo(
470 tmpdir,
471 bundles={"test": {}},
472 nodes={
473 "node1": {
474 'bundles': ["test"],
475 'metadata': {'number': 47},
476 },
477 },
478 )
479 with open(join(str(tmpdir), "bundles", "test", "metadata.py"), 'w') as f:
480 f.write(
481 """
482 @metadata_reactor
483 def reactor1(metadata):
484 return {'plusone': node.metadata.get('number') + 1}
485 """)
486 stdout, stderr, rcode = run("bw metadata node1", path=str(tmpdir))
487 assert loads(stdout.decode()) == {
488 "number": 47,
489 "plusone": 48,
490 }
491
492
493 def test_other_node_metadata(tmpdir):
494 make_repo(
495 tmpdir,
496 bundles={"test": {}},
497 nodes={
498 "node1": {
499 'bundles': ["test"],
500 'metadata': {'number': 47},
501 },
502 "node2": {
503 'bundles': ["test"],
504 'metadata': {'number': 42},
505 },
506 "node3": {
507 'bundles': ["test"],
508 'metadata': {'number': 23},
509 },
510 },
511 )
512 with open(join(str(tmpdir), "bundles", "test", "metadata.py"), 'w') as f:
513 f.write(
514 """
515 @metadata_reactor
516 def reactor1(metadata):
517 numbers = set()
518 for n in repo.nodes:
519 if n != node:
520 numbers.add(n.metadata.get('number'))
521 return {'other_numbers': numbers}
522 """)
523 stdout, stderr, rcode = run("bw metadata node1", path=str(tmpdir))
524 assert loads(stdout.decode()) == {
525 "number": 47,
526 "other_numbers": [23, 42],
527 }
528 stdout, stderr, rcode = run("bw metadata node2", path=str(tmpdir))
529 assert loads(stdout.decode()) == {
530 "number": 42,
531 "other_numbers": [23, 47],
532 }
533 stdout, stderr, rcode = run("bw metadata node3", path=str(tmpdir))
534 assert loads(stdout.decode()) == {
535 "number": 23,
536 "other_numbers": [42, 47],
537 }
1010
1111 def test_has_no_subpath():
1212 stack = Metastack()
13 stack._set_layer('base', {'something': {'in': {}}})
13 stack._set_layer(0, 'base', {'something': {'in': {}}})
1414 with raises(KeyError):
1515 stack.get('something/in/a/path')
1616
1717
1818 def test_get_top():
1919 stack = Metastack()
20 stack._set_layer('base', {'something': 123})
20 stack._set_layer(0, 'base', {'something': 123})
2121 assert stack.get('something') == 123
2222
2323
2424 def test_get_subpath():
2525 stack = Metastack()
26 stack._set_layer('base', {'something': {'in': {'a': 'subpath'}}})
26 stack._set_layer(0, 'base', {'something': {'in': {'a': 'subpath'}}})
2727 assert stack.get('something/in/a', None) == 'subpath'
2828
2929
3434
3535 def test_get_default_with_base():
3636 stack = Metastack()
37 stack._set_layer('', {'foo': 'bar'})
37 stack._set_layer(0, '', {'foo': 'bar'})
3838 assert stack.get('something', 123) == 123
3939
4040
4141 def test_get_default_with_overlay():
4242 stack = Metastack()
43 stack._set_layer('base', {'foo': 'bar'})
44 stack._set_layer('overlay', {'baz': 'boing'})
43 stack._set_layer(0, 'base', {'foo': 'bar'})
44 stack._set_layer(0, 'overlay', {'baz': 'boing'})
4545 assert stack.get('something', 123) == 123
4646
4747
4848 def test_overlay_value():
4949 stack = Metastack()
50 stack._set_layer('base', {'something': {'a_list': [1, 2], 'a_value': 5}})
51 stack._set_layer('overlay', {'something': {'a_value': 10}})
50 stack._set_layer(0, 'base', {'something': {'a_list': [1, 2], 'a_value': 5}})
51 stack._set_layer(0, 'overlay', {'something': {'a_value': 10}})
5252 assert stack.get('something/a_value', None) == 10
5353
5454
5555 def test_merge_lists():
5656 stack = Metastack()
57 stack._set_layer('base', {'something': {'a_list': [1, 2], 'a_value': 5}})
58 stack._set_layer('overlay', {'something': {'a_list': [3]}})
57 stack._set_layer(0, 'base', {'something': {'a_list': [1, 2], 'a_value': 5}})
58 stack._set_layer(0, 'overlay', {'something': {'a_list': [3]}})
5959 assert sorted(stack.get('something/a_list', None)) == sorted([1, 2, 3])
6060
6161
6262 def test_merge_sets():
6363 stack = Metastack()
64 stack._set_layer('base', {'something': {'a_set': {1, 2}, 'a_value': 5}})
65 stack._set_layer('overlay', {'something': {'a_set': {3}}})
64 stack._set_layer(0, 'base', {'something': {'a_set': {1, 2}, 'a_value': 5}})
65 stack._set_layer(0, 'overlay', {'something': {'a_set': {3}}})
6666 assert stack.get('something/a_set', None) == {1, 2, 3}
6767
6868
6969 def test_overlay_value_multi_layers():
7070 stack = Metastack()
71 stack._set_layer('base', {'something': {'a_list': [1, 2], 'a_value': 5}})
72 stack._set_layer('overlay', {'something': {'a_value': 10}})
73 stack._set_layer('unrelated', {'something': {'another_value': 10}})
71 stack._set_layer(0, 'base', {'something': {'a_list': [1, 2], 'a_value': 5}})
72 stack._set_layer(0, 'overlay', {'something': {'a_value': 10}})
73 stack._set_layer(0, 'unrelated', {'something': {'another_value': 10}})
7474 assert stack.get('something/a_value', None) == 10
7575
7676
7777 def test_merge_lists_multi_layers():
7878 stack = Metastack()
79 stack._set_layer('base', {'something': {'a_list': [1, 2], 'a_value': 5}})
80 stack._set_layer('overlay', {'something': {'a_list': [3]}})
81 stack._set_layer('unrelated', {'something': {'another_value': 10}})
79 stack._set_layer(0, 'base', {'something': {'a_list': [1, 2], 'a_value': 5}})
80 stack._set_layer(0, 'overlay', {'something': {'a_list': [3]}})
81 stack._set_layer(0, 'unrelated', {'something': {'another_value': 10}})
8282
8383 # Objects in Metastacks are frozen. This converts lists to tuples.
8484 # Unlike set and frozenset, list and tuple doesn't naturally support
9696
9797 def test_merge_sets_multi_layers():
9898 stack = Metastack()
99 stack._set_layer('base', {'something': {'a_set': {1, 2}, 'a_value': 5}})
100 stack._set_layer('overlay', {'something': {'a_set': {3}}})
101 stack._set_layer('unrelated', {'something': {'another_value': 10}})
99 stack._set_layer(0, 'base', {'something': {'a_set': {1, 2}, 'a_value': 5}})
100 stack._set_layer(0, 'overlay', {'something': {'a_set': {3}}})
101 stack._set_layer(0, 'unrelated', {'something': {'another_value': 10}})
102102 assert stack.get('something/a_set', None) == {1, 2, 3}
103103
104104
105105 def test_merge_lists_with_empty_layer():
106106 stack = Metastack()
107 stack._set_layer('base', {'something': {'a_list': [1, 2], 'a_value': 5}})
108 stack._set_layer('overlay1', {'something': {'a_list': []}})
109 stack._set_layer('overlay2', {'something': {'a_list': [3]}})
107 stack._set_layer(0, 'base', {'something': {'a_list': [1, 2], 'a_value': 5}})
108 stack._set_layer(0, 'overlay1', {'something': {'a_list': []}})
109 stack._set_layer(0, 'overlay2', {'something': {'a_list': [3]}})
110110 assert sorted(stack.get('something/a_list', None)) == sorted([1, 2, 3])
111111
112112
113113 def test_merge_sets_with_empty_layer():
114114 stack = Metastack()
115 stack._set_layer('base', {'something': {'a_set': {1, 2}, 'a_value': 5}})
116 stack._set_layer('overlay1', {'something': {'a_set': set()}})
117 stack._set_layer('overlay2', {'something': {'a_set': {3}}})
115 stack._set_layer(0, 'base', {'something': {'a_set': {1, 2}, 'a_value': 5}})
116 stack._set_layer(0, 'overlay1', {'something': {'a_set': set()}})
117 stack._set_layer(0, 'overlay2', {'something': {'a_set': {3}}})
118118 assert stack.get('something/a_set', None) == {1, 2, 3}
119119
120120
121121 def test_merge_lists_with_multiple_used_layers():
122122 stack = Metastack()
123 stack._set_layer('base', {'something': {'a_list': [1, 2], 'a_value': 5}})
124 stack._set_layer('overlay1', {'something': {'a_list': [3]}})
125 stack._set_layer('overlay2', {'something': {'a_list': [4]}})
126 stack._set_layer('overlay3', {'something': {'a_list': [6, 5]}})
123 stack._set_layer(0, 'base', {'something': {'a_list': [1, 2], 'a_value': 5}})
124 stack._set_layer(0, 'overlay1', {'something': {'a_list': [3]}})
125 stack._set_layer(0, 'overlay2', {'something': {'a_list': [4]}})
126 stack._set_layer(0, 'overlay3', {'something': {'a_list': [6, 5]}})
127127 assert sorted(stack.get('something/a_list', None)) == sorted([1, 2, 3, 4, 5, 6])
128128
129129
130130 def test_merge_sets_with_multiple_used_layers():
131131 stack = Metastack()
132 stack._set_layer('base', {'something': {'a_set': {1, 2}, 'a_value': 5}})
133 stack._set_layer('overlay1', {'something': {'a_set': {3}}})
134 stack._set_layer('overlay2', {'something': {'a_set': {4}}})
135 stack._set_layer('overlay3', {'something': {'a_set': {6, 5}}})
132 stack._set_layer(0, 'base', {'something': {'a_set': {1, 2}, 'a_value': 5}})
133 stack._set_layer(0, 'overlay1', {'something': {'a_set': {3}}})
134 stack._set_layer(0, 'overlay2', {'something': {'a_set': {4}}})
135 stack._set_layer(0, 'overlay3', {'something': {'a_set': {6, 5}}})
136136 assert stack.get('something/a_set', None) == {1, 2, 3, 4, 5, 6}
137137
138138
139139 def test_merge_dicts():
140140 stack = Metastack()
141 stack._set_layer('overlay1', {'something': {'a_value': 3}})
142 stack._set_layer('overlay2', {'something': {'another_value': 5}})
143 stack._set_layer('overlay3', {'something': {'this': {'and': 'that'}}})
144 stack._set_layer('overlay4', {'something': {'a_set': {1, 2}}})
145 stack._set_layer('overlay5', {'something': {'a_set': {3, 4}}})
141 stack._set_layer(0, 'overlay1', {'something': {'a_value': 3}})
142 stack._set_layer(0, 'overlay2', {'something': {'another_value': 5}})
143 stack._set_layer(0, 'overlay3', {'something': {'this': {'and': 'that'}}})
144 stack._set_layer(0, 'overlay4', {'something': {'a_set': {1, 2}}})
145 stack._set_layer(0, 'overlay5', {'something': {'a_set': {3, 4}}})
146146 assert stack.get('something', None) == {
147147 'a_set': {1, 2, 3, 4},
148148 'a_value': 3,
155155
156156 def test_requesting_empty_path():
157157 stack = Metastack()
158 stack._set_layer('base', {'foo': {'bar': 'baz'}})
158 stack._set_layer(0, 'base', {'foo': {'bar': 'baz'}})
159159 assert stack.get('', 'default') == 'default'
160160
161161
162162 def test_update_layer_for_new_value():
163163 stack = Metastack()
164 stack._set_layer('base', {'foo': 'bar'})
165
166 stack._set_layer('overlay', {'something': 123})
164 stack._set_layer(0, 'base', {'foo': 'bar'})
165
166 stack._set_layer(0, 'overlay', {'something': 123})
167167 assert stack.get('foo', None) == 'bar'
168168 assert stack.get('boing', 'default') == 'default'
169169 assert stack.get('something', None) == 123
170170
171 stack._set_layer('overlay', {'something': 456})
171 stack._set_layer(0, 'overlay', {'something': 456})
172172 assert stack.get('foo', None) == 'bar'
173173 assert stack.get('boing', 'default') == 'default'
174174 assert stack.get('something', None) == 456
176176
177177 def test_deepcopy():
178178 stack = Metastack()
179 stack._set_layer('base', {'foo': {'bar': {1, 2, 3}}})
179 stack._set_layer(0, 'base', {'foo': {'bar': {1, 2, 3}}})
180180 foo = stack.get('foo', None)
181181 foo['bar'].add(4)
182182 assert stack.get('foo/bar') == {1, 2, 3}
186186
187187 def test_atomic_in_base():
188188 stack = Metastack()
189 stack._set_layer('base', {'list': atomic([1, 2, 3])})
190 stack._set_layer('overlay', {'list': [4]})
189 stack._set_layer(0, 'base', {'list': atomic([1, 2, 3])})
190 stack._set_layer(0, 'overlay', {'list': [4]})
191191 assert list(stack.get('list', None)) == [4]
192192
193193
194194 def test_atomic_in_layer():
195195 stack = Metastack()
196 stack._set_layer('base', {'list': [1, 2, 3]})
197 stack._set_layer('overlay', {'list': atomic([4])})
196 stack._set_layer(0, 'base', {'list': [1, 2, 3]})
197 stack._set_layer(0, 'overlay', {'list': atomic([4])})
198198 assert list(stack.get('list', None)) == [4]
199199
200200
201 def test_set_layer_return_code():
202 stack = Metastack()
203 ret = stack._set_layer('overlay', {'foo': 'bar'})
204 assert ret is True
205 ret = stack._set_layer('overlay', {'foo': 'bar'})
206 assert ret is False
207 ret = stack._set_layer('overlay', {'foo': 'baz'})
208 assert ret is True
209 ret = stack._set_layer('overlay', {'foo': 'baz', 'bar': 1})
210 assert ret is True
201 def test_pop_layer():
202 stack = Metastack()
203 stack._set_layer(0, 'overlay', {'foo': 'bar'})
204 stack._set_layer(0, 'overlay', {'foo': 'baz'})
205 assert stack._pop_layer(0, 'overlay') == {'foo': 'baz'}
206 with raises(KeyError):
207 stack.get('foo')
208 assert stack._pop_layer(0, 'overlay') == {}
209 assert stack._pop_layer(0, 'unknown') == {}
210 assert stack._pop_layer(47, 'unknown') == {}
211211
212212
213213 def test_as_dict():
214214 stack = Metastack()
215 stack._set_layer('base', {
215 stack._set_layer(0, 'base', {
216216 'bool': True,
217217 'bytes': b'howdy',
218218 'dict': {'1': 2},
223223 'str': 'howdy',
224224 'tuple': (1, 2),
225225 })
226 stack._set_layer('overlay1', {'int': 1000})
227 stack._set_layer('overlay2', {'list': [2]})
228 stack._set_layer('overlay3', {'new_element': True})
226 stack._set_layer(0, 'overlay1', {'int': 1000})
227 stack._set_layer(0, 'overlay2', {'list': [2]})
228 stack._set_layer(0, 'overlay3', {'new_element': True})
229229 assert stack._as_dict() == {
230230 'bool': True,
231231 'bytes': b'howdy',
242242
243243 def test_as_blame():
244244 stack = Metastack()
245 stack._set_layer('base', {'something': {'a_list': [1, 2], 'a_value': 5}})
246 stack._set_layer('overlay', {'something': {'a_list': [3]}})
247 stack._set_layer('unrelated', {'something': {'another_value': 10}})
245 stack._set_layer(0, 'base', {'something': {'a_list': [1, 2], 'a_value': 5}})
246 stack._set_layer(0, 'overlay', {'something': {'a_list': [3]}})
247 stack._set_layer(0, 'unrelated', {'something': {'another_value': 10}})
248248 assert stack._as_blame() == {
249249 ('something',): ['base', 'overlay', 'unrelated'],
250250 ('something', 'a_list'): ['base', 'overlay'],