bundlewrap / e18aff1
New upstream release (Jonathan Carter, 3 years ago)
17 changed files with 202 additions and 146 deletions.
+# 4.1.1
+
+2020-08-12
+
+* improved reporting of invalid types in metadata
+* improved error output of `bw test -m`
+* fixed recognition of JSON files as text
+* fixed a rare case of nodes not having their metadata built to completion
+* fixed a column sorting issue in `bw nodes`
+
+
 # 4.1.0
 
 2020-07-27
-VERSION = (4, 1, 0)
+VERSION = (4, 1, 1)
 VERSION_STRING = ".".join([str(v) for v in VERSION])
     for path, blamed in blame:
         if key_paths:
             # remove all paths we did not ask to see
-            path_seen = False
             for filtered_path in key_paths:
                 if (
                     _list_starts_with(path, filtered_path) or
                     _list_starts_with(filtered_path, path)
                 ):
-                    path_seen = True
                     break
-            if not path_seen:
+            else:
                 delete_key_at_path(metadata, path)
                 continue
 
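The removed path_seen flag is replaced by Python's for/else: the else branch runs only when the loop finishes without hitting break. A minimal, self-contained sketch of the idiom (the function and its inputs are made up for illustration):

    def keep_only_requested_paths(paths, requested):
        kept = []
        for path in paths:
            for req in requested:
                if path.startswith(req):
                    break  # requested: keep it, which skips the else branch
            else:
                continue  # loop never broke: path was not requested, drop it
            kept.append(path)
        return kept

    print(keep_only_requested_paths(
        ["ssh/port", "dns/zone", "ssh/keys"],
        requested=["ssh"],
    ))  # ['ssh/port', 'ssh/keys']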
     inline,
 ):
     rows = [[entity_label], ROW_SEPARATOR]
-    selected_attrs = {attr.strip() for attr in selected_attrs}
+    selected_attrs = [attr.strip() for attr in selected_attrs]
 
-    if selected_attrs == {'all'}:
+    if selected_attrs == ['all']:
         selected_attrs = available_attrs
     elif 'all' in selected_attrs:
         io.stderr(_(

     for attr in selected_attrs:
         if attr in available_attrs_lists:
             if inline:
-                attr_values.append([",".join(names(getattr(entity, attr)))])
+                attr_values.append([",".join(sorted(names(getattr(entity, attr))))])
             else:
                 has_list_attrs = True
                 attr_values.append(sorted(names(getattr(entity, attr))))
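This hunk is the `bw nodes` column-sorting fix from the changelog: a set comprehension discards the order in which the user named the attributes, so output columns appeared in arbitrary order, while a list comprehension preserves it. For illustration:

    user_input = "hostname, groups, os"

    as_set = {attr.strip() for attr in user_input.split(",")}
    as_list = [attr.strip() for attr in user_input.split(",")]

    print(as_list)  # ['hostname', 'groups', 'os'], always in the given order
    print(as_set)   # iteration order is unspecified and varies between runs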
 from ..deps import DummyItem
 from ..exceptions import FaultUnavailable, ItemDependencyLoop
 from ..itemqueue import ItemTestQueue
-from ..metadata import check_for_metadata_conflicts
+from ..metadata import check_for_metadata_conflicts, metadata_to_json
 from ..repo import Repository
 from ..utils.cmdline import count_items, get_target_nodes
+from ..utils.dicts import diff_value_text
 from ..utils.plot import explain_item_dependency_loop
 from ..utils.text import bold, green, mark_for_translation as _, red, yellow
 from ..utils.ui import io, QUIT_EVENT

     anything changes between iterations
     """
     hashes = {}
+    metadata = {}
     io.progress_set_total(len(nodes) * iterations)
     for i in range(iterations):
         if QUIT_EVENT.is_set():

             n=iterations,
             node=bold(node.name),
         )):
+            metadata.setdefault(node.name, node.metadata)
             result = node.metadata_hash()
             hashes.setdefault(node.name, result)
             if hashes[node.name] != result:
                 io.stderr(_(
-                    "{x} Metadata for node {node} changed when generated repeatedly "
-                    "(use `bw hash -d {node}` to debug)"
-                ).format(node=node.name, x=red("✘")))
+                    "{x} Metadata for node {node} changed when generated repeatedly"
+                ).format(node=bold(node.name), x=red("✘")))
+                previous_json = metadata_to_json(metadata[node.name])
+                current_json = metadata_to_json(node.metadata)
+                io.stderr(diff_value_text("", previous_json, current_json))
                 exit(1)
             io.progress_advance()
     io.progress_set_total(0)
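This is the improved `bw test -m` output from the changelog: the first metadata dict per node is kept around, and on a mismatch a diff is printed instead of only pointing at `bw hash -d`. The underlying idea, sketched standalone (build_metadata is a placeholder for whatever produces the node's metadata dict):

    import json
    from difflib import unified_diff

    def check_determinism(build_metadata, iterations=3):
        baseline = json.dumps(build_metadata(), indent=4, sort_keys=True)
        for _ in range(iterations - 1):
            current = json.dumps(build_metadata(), indent=4, sort_keys=True)
            if current != baseline:
                # show exactly what flip-flopped between runs
                print("\n".join(unified_diff(
                    baseline.splitlines(),
                    current.splitlines(),
                    "previous",
                    "current",
                    lineterm="",
                )))
                return False
        return True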
+from contextlib import suppress
+
 from .exceptions import BundleError, ItemDependencyError, NoSuchItem
 from .items import Item
 from .items.actions import Action

             # items depending *only* on the processed item to be
             # eligible for the next iteration of this loop.
             for other_item in type_items:
-                try:
+                with suppress(ValueError):
                     other_item.__deps.remove(item.id)
-                except ValueError:
-                    pass
     return items
 
 

     dependencies of all items in the given list.
     """
     for item in items:
-        try:
+        with suppress(ValueError):
             item._deps.remove(dep)
-        except ValueError:
-            pass
     return items
 
 
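Most of the remaining hunks in this release make the same mechanical change: a try/except that deliberately ignores one exception type becomes contextlib.suppress, cutting four lines to two. The two forms are equivalent:

    from contextlib import suppress

    deps = ["pkg_apt:curl", "file:/etc/motd"]

    # before:
    try:
        deps.remove("svc_systemd:nginx")
    except ValueError:
        pass

    # after, same behavior: a ValueError raised inside the block is swallowed
    with suppress(ValueError):
        deps.remove("svc_systemd:nginx")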
 from base64 import b64decode
 from collections import defaultdict
-from contextlib import contextmanager
+from contextlib import contextmanager, suppress
 from datetime import datetime
 from os.path import basename, dirname, exists, join, normpath
 from shlex import quote

             cdict['content'] = self.content
             sdict['content'] = get_remote_file_contents(self.node, self.name)
         if 'type' in keys:
-            try:
+            with suppress(ValueError):
                 keys.remove('content_hash')
-            except ValueError:
-                pass
         return (cdict, sdict, keys)
 
     def patch_attributes(self, attributes):
 from abc import ABCMeta, abstractmethod
+from contextlib import suppress
 
 from bundlewrap.exceptions import BundleError
 from bundlewrap.items import Item

     )
 
     def fix(self, status):
-        try:
+        with suppress(KeyError):
             self._pkg_install_cache.get(self.node.name, set()).remove(self.id)
-        except KeyError:
-            pass
         if self.attributes['installed'] is False:
             self.pkg_remove()
         else:
     if isinstance(metadata, dict):
         for key, value in metadata.items():
             if not isinstance(key, str):
-                raise TypeError(_("metadata keys must be str, not: {}").format(repr(key)))
+                raise TypeError(_("metadata keys must be str: {value} is {type}").format(
+                    type=type(key),
+                    value=repr(key),
+                ))
             validate_metadata(value, _top_level=False)
     elif isinstance(metadata, (tuple, list, set)):
         for value in metadata:
             validate_metadata(value, _top_level=False)
     elif not isinstance(metadata, METADATA_TYPES):
-        raise TypeError(_("illegal metadata value type: {}").format(repr(metadata)))
+        raise TypeError(_("illegal metadata value type: {value} is {type}").format(
+            type=type(metadata),
+            value=repr(metadata),
+        ))
 
 
 def atomic(obj):
     if isinstance(obj, bytes):
         return force_text(obj)
     else:
-        raise ValueError(_("illegal metadata value type: {}").format(repr(obj)))
+        raise ValueError(_("illegal metadata value type: {value} is {type}").format(
+            type=type(obj),
+            value=repr(obj),
+        ))
 
 
 def metadata_to_json(metadata, sort_keys=True):
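These reworded exceptions are the "improved reporting of invalid types in metadata" entry from the changelog: including type() alongside repr() removes guesswork when the repr alone is ambiguous. A sketch mirroring the new message format (METADATA_TYPES here is a stand-in, not bundlewrap's actual tuple):

    METADATA_TYPES = (str, int, float, bool, type(None))  # stand-in

    def validate(value):
        if not isinstance(value, METADATA_TYPES):
            raise TypeError("illegal metadata value type: {value} is {type}".format(
                type=type(value),
                value=repr(value),
            ))

    validate("ok")      # passes
    validate(b"bytes")  # TypeError: illegal metadata value type: b'bytes' is <class 'bytes'>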
-from collections import Counter
+from collections import defaultdict, Counter
+from contextlib import suppress
 from os import environ
 from traceback import TracebackException
 

 from .utils.text import bold, mark_for_translation as _, red
 
 
-MAX_METADATA_ITERATIONS = int(environ.get("BW_MAX_METADATA_ITERATIONS", "5000"))
+MAX_METADATA_ITERATIONS = int(environ.get("BW_MAX_METADATA_ITERATIONS", "1000"))
+
+
+class _StartOver(Exception):
+    """
+    Raised when metadata processing needs to start from the top.
+    """
+    pass
 
 
 class MetadataGenerator:
         # reactors that raised KeyErrors (and which ones)
         self.__keyerrors = {}
         # a Metastack for every node
-        self.__metastacks = {}
+        self.__metastacks = defaultdict(Metastack)
         # mapping each node to all nodes that depend on it
-        self.__node_deps = {}
+        self.__node_deps = defaultdict(set)
+        # how often __run_reactors was called for a node
+        self.__node_iterations = defaultdict(int)
         # A node is 'stable' when all its reactors return unchanged
         # metadata, except for those reactors that look at other nodes.
         # This dict maps node names to True/False indicating stable status.

         # how often we called reactors
         self.__reactors_run = 0
         # how often each reactor changed
-        self.__reactor_changes = {}
+        self.__reactor_changes = defaultdict(int)
         # tracks which reactors on a node look at other nodes
         # through partial_metadata
-        self.__reactors_with_deps = {}
+        self.__reactors_with_deps = defaultdict(set)
 
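Swapping plain dicts for collections.defaultdict lets the rest of the class drop its setdefault calls (see the removals further down); the factory runs on first access to a missing key. One caveat worth knowing:

    from collections import defaultdict

    node_deps = defaultdict(set)

    node_deps["node1"].add("node2")  # set() created on demand, no setdefault needed
    node_deps["node1"].add("node3")
    print(node_deps["node1"])        # {'node2', 'node3'}

    # caveat: a plain read also inserts an empty entry
    _ = node_deps["unknown"]
    print("unknown" in node_deps)    # True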
     def _metadata_for_node(self, node_name, blame=False, stack=False):
         """

         """
         if self.__in_a_reactor:
             if node_name in self._node_metadata_complete:
+                io.debug(f"is already complete: {node_name}")
                 # We already completed metadata for this node, but partial must
                 # return a Metastack, so we build a single-layered one just for
                 # the interface.

                 return metastack
             else:
                 self.__partial_metadata_accessed_for.add(node_name)
-                return self.__metastacks.setdefault(node_name, Metastack())
+                return self.__metastacks[node_name]
 
         if blame or stack:
             # cannot return cached result here, force rebuild
-            try:
+            with suppress(KeyError):
                 del self._node_metadata_complete[node_name]
-            except KeyError:
-                pass
-
-        try:
+
+        with suppress(KeyError):
             return self._node_metadata_complete[node_name]
-        except KeyError:
-            pass
 
         # Different worker threads might request metadata at the same time.
 
         with self._node_metadata_lock:
-            try:
+            with suppress(KeyError):
                 # maybe our metadata got completed while waiting for the lock
                 return self._node_metadata_complete[node_name]
-            except KeyError:
-                pass
 
             self.__build_node_metadata(node_name)
 

         else:
             return self._node_metadata_complete[node_name]
 
+    def __run_new_nodes(self):
+        try:
+            node_name = self.__nodes_that_never_ran.pop()
+        except KeyError:
+            pass
+        else:
+            self.__nodes_that_ran_at_least_once.add(node_name)
+            self.__initial_run_for_node(node_name)
+            raise _StartOver
+
+    def __run_triggered_nodes(self):
+        try:
+            node_name = self.__triggered_nodes.pop()
+        except KeyError:
+            pass
+        else:
+            io.debug(f"triggered metadata run for {node_name}")
+            self.__run_reactors(
+                self.get_node(node_name),
+                with_deps=True,
+                without_deps=False,
+            )
+            raise _StartOver
+
+    def __run_unstable_nodes(self):
+        encountered_unstable_node = False
+        for node, stable in self.__node_stable.items():
+            if stable:
+                continue
+
+            io.debug(f"begin metadata stabilization test for {node.name}")
+            self.__run_reactors(node, with_deps=False, without_deps=True)
+            if self.__node_stable[node]:
+                io.debug(f"metadata stabilized for {node.name}")
+            else:
+                io.debug(f"metadata remains unstable for {node.name}")
+                encountered_unstable_node = True
+            if self.__nodes_that_never_ran:
+                # we have found a new dependency, process it immediately
+                # going wide early should be more efficient
+                raise _StartOver
+        if encountered_unstable_node:
+            # start over until everything is stable
+            io.debug("found an unstable node (without_deps=True)")
+            raise _StartOver
+
+    def __run_nodes_with_deps(self):
+        encountered_unstable_node = False
+        for node in randomize_order(self.__node_stable.keys()):
+            io.debug(f"begin final stabilization test for {node.name}")
+            self.__run_reactors(node, with_deps=True, without_deps=False)
+            if not self.__node_stable[node]:
+                io.debug(f"{node.name} still unstable")
+                encountered_unstable_node = True
+            if self.__nodes_that_never_ran:
+                # we have found a new dependency, process it immediately
+                # going wide early should be more efficient
+                raise _StartOver
+        if encountered_unstable_node:
+            # start over until everything is stable
+            io.debug("found an unstable node (with_deps=True)")
+            raise _StartOver
+
     def __build_node_metadata(self, initial_node_name):
         self.__reset()
         self.__nodes_that_never_ran.add(initial_node_name)
 
-        iterations = 0
         while not QUIT_EVENT.is_set():
-            iterations += 1
-            if iterations > MAX_METADATA_ITERATIONS:
-                top_changers = Counter(self.__reactor_changes).most_common(25)
-                msg = _(
-                    "MAX_METADATA_ITERATIONS({m}) exceeded, "
-                    "likely an infinite loop between flip-flopping metadata reactors.\n"
-                    "These are the reactors that changed most often:\n\n"
-                ).format(m=MAX_METADATA_ITERATIONS)
-                for reactor, count in top_changers:
-                    msg += f" {count}\t{reactor[0]}\t{reactor[1]}\n"
-                raise RuntimeError(msg)
-
-            io.debug(f"metadata iteration #{iterations}")
-
-            jobmsg = _("{b} ({i} iterations, {n} nodes, {r} reactors, {e} runs)").format(
+            jobmsg = _("{b} ({n} nodes, {r} reactors, {e} runs)").format(
                 b=bold(_("running metadata reactors")),
-                i=iterations,
                 n=len(self.__nodes_that_never_ran) + len(self.__nodes_that_ran_at_least_once),
                 r=len(self.__reactor_changes),
                 e=self.__reactors_run,
             )
-            with io.job(jobmsg):
-                try:
-                    node_name = self.__nodes_that_never_ran.pop()
-                except KeyError:
-                    pass
-                else:
-                    self.__nodes_that_ran_at_least_once.add(node_name)
-                    self.__initial_run_for_node(node_name)
-                    continue
-
-                # at this point, we have run all relevant nodes at least once
-
-                # if we have any triggered nodes from below, run their reactors
-                # with deps to see if they become unstable
-
-                try:
-                    node_name = self.__triggered_nodes.pop()
-                except KeyError:
-                    pass
-                else:
-                    io.debug(f"triggered metadata run for {node_name}")
-                    self.__run_reactors(
-                        self.get_node(node_name),
-                        with_deps=True,
-                        without_deps=False,
-                    )
-                    continue
-
-                # now (re)stabilize all nodes
-
-                encountered_unstable_node = False
-                for node, stable in self.__node_stable.items():
-                    if stable:
-                        continue
-                    self.__run_reactors(node, with_deps=False, without_deps=True)
-                    if self.__node_stable[node]:
-                        io.debug(f"metadata stabilized for {node_name}")
-                    else:
-                        io.debug(f"metadata remains unstable for {node_name}")
-                        encountered_unstable_node = True
-                if encountered_unstable_node:
-                    # start over until everything is stable
-                    continue
-
-                # at this point, all nodes should be stable except for their reactors with deps
-
-                encountered_unstable_node = False
-                for node in randomize_order(self.__node_stable.keys()):
-                    self.__run_reactors(node, with_deps=True, without_deps=False)
-                    if not self.__node_stable[node]:
-                        encountered_unstable_node = True
-                if encountered_unstable_node:
-                    # start over until everything is stable
-                    continue
-
-                # if we get here, we're done!
-                break
+            try:
+                with io.job(jobmsg):
+                    # Control flow here is a bit iffy. The functions in this block often raise
+                    # _StartOver in order to aggressively process new nodes first etc.
+                    # Each method represents a distinct stage of metadata processing that checks
+                    # for nodes in certain states as described below.
+
+                    # This checks for newly discovered nodes that haven't seen any processing at
+                    # all so far. It is important that we run them as early as possible, so their
+                    # static metadata becomes available to other nodes and we recursively discover
+                    # additional nodes as quickly as possible.
+                    self.__run_new_nodes()
+                    # At this point, we have run all relevant nodes at least once.
+
+                    # Nodes become "triggered" when they previously looked something up from a
+                    # different node and that second node changed. In this method, we try to figure
+                    # out if the change on the node we depend on actually has any effect on the
+                    # depending node.
+                    self.__run_triggered_nodes()
+
+                    # In this stage, we run all unstable nodes to the point where everything is
+                    # stable again, except for those reactors that depend on other nodes.
+                    self.__run_unstable_nodes()
+
+                    # The final step is to make sure nothing changes when we run reactors with
+                    # dependencies on other nodes. If anything changes, we need to start over so
+                    # local-only reactors on a node can react to changes caused by reactors looking
+                    # at other nodes.
+                    self.__run_nodes_with_deps()
+
+                    # if we get here, we're done!
+                    break
+
+            except _StartOver:
+                continue
 
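The monolithic loop body becomes four stage methods that raise _StartOver to restart scheduling from the top, replacing the earlier tangle of continue statements. The control flow, condensed into a runnable sketch (names shortened, not bundlewrap's actual classes):

    class StartOver(Exception):
        """Signals that the scheduling loop should restart at stage one."""

    def run_stages(stages):
        while True:
            try:
                for stage in stages:
                    stage()  # a stage either finishes quietly or raises StartOver
                break  # every stage passed without requesting a restart
            except StartOver:
                continue

    # toy usage: the first stage demands two restarts before settling
    state = {"runs": 0}
    def discover_new_nodes():
        state["runs"] += 1
        if state["runs"] < 3:
            raise StartOver

    run_stages([discover_new_nodes, lambda: None])
    print(state["runs"])  # 3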
         if self.__keyerrors and not QUIT_EVENT.is_set():
             msg = _(

                 msg += " " + line
             raise MetadataPersistentKeyError(msg)
 
+        io.debug("metadata generation for selected nodes finished")
+
     def __initial_run_for_node(self, node_name):
         io.debug(f"initial metadata run for {node_name}")
         node = self.get_node(node_name)

         )
         self.__metastacks[node_name]._cache_partition(0)
 
-        self.__reactors_with_deps[node_name] = set()
         # run all reactors once to get started
         self.__run_reactors(node, with_deps=True, without_deps=True)
 
+    def __check_iteration_count(self, node_name):
+        self.__node_iterations[node_name] += 1
+        if self.__node_iterations[node_name] > MAX_METADATA_ITERATIONS:
+            top_changers = Counter(self.__reactor_changes).most_common(25)
+            msg = _(
+                "MAX_METADATA_ITERATIONS({m}) exceeded for {node}, "
+                "likely an infinite loop between flip-flopping metadata reactors.\n"
+                "These are the reactors that changed most often:\n\n"
+            ).format(m=MAX_METADATA_ITERATIONS, node=node_name)
+            for reactor, count in top_changers:
+                msg += f" {count}\t{reactor[0]}\t{reactor[1]}\n"
+            raise RuntimeError(msg)
+
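The iteration limit is now tracked per node, and the failure message uses collections.Counter to name the reactors that changed most often, i.e. the likely flip-flop culprits. The reporting idea in isolation (reactor names invented):

    from collections import Counter, defaultdict

    reactor_changes = defaultdict(int)

    # simulate two reactors endlessly rewriting each other's output
    for _ in range(400):
        reactor_changes[("node1", "dns_records")] += 1
        reactor_changes[("node1", "firewall_rules")] += 1
    reactor_changes[("node2", "motd")] += 1

    for (node, reactor), count in Counter(reactor_changes).most_common(2):
        print(f" {count}\t{node}\t{reactor}")
    # 400	node1	dns_records
    # 400	node1	firewall_rules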
     def __run_reactors(self, node, with_deps=True, without_deps=True):
+        self.__check_iteration_count(node.name)
         any_reactor_changed = False
 
         for depsonly in (True, False):

                     self.__nodes_that_never_ran.add(required_node_name)
                 # this is so we know the current node needs to be run
                 # again if the required node changes
-                self.__node_deps.setdefault(required_node_name, set())
                 self.__node_deps[required_node_name].add(node.name)
 
         if any_reactor_changed:
             # something changed on this node, mark all dependent nodes as unstable
-            for required_node_name in self.__node_deps.get(node.name, set()):
+            for required_node_name in self.__node_deps[node.name]:
                 io.debug(f"{node.name} triggering metadata rerun on {required_node_name}")
                 self.__triggered_nodes.add(required_node_name)
 
             return False, set()
         self.__partial_metadata_accessed_for = set()
         self.__reactors_run += 1
-        self.__reactor_changes.setdefault((node_name, reactor_name), 0)
         # make sure the reactor doesn't react to its own output
         old_metadata = self.__metastacks[node_name]._pop_layer(1, reactor_name)
         self.__in_a_reactor = True

         except DoNotRunAgain:
             self.__do_not_run_again.add((node_name, reactor_name))
             # clear any previously stored exception
-            try:
+            with suppress(KeyError):
                 del self.__keyerrors[(node_name, reactor_name)]
-            except KeyError:
-                pass
             return False, set()
         except Exception as exc:
             io.stderr(_(

         self.__in_a_reactor = False
 
         # reactor terminated normally, clear any previously stored exception
-        try:
+        with suppress(KeyError):
             del self.__keyerrors[(node_name, reactor_name)]
-        except KeyError:
-            pass
 
         try:
             self.__metastacks[node_name]._set_layer(
+from contextlib import suppress
 from datetime import datetime, timedelta
 from hashlib import md5
 from os import environ, mkdir

     def cdict(self):
         node_dict = {}
         for item in self.items:
-            try:
+            with suppress(AttributeError):  # actions have no cdict
                 node_dict[item.id] = item.hash()
-            except AttributeError:  # actions have no cdict
-                pass
         return node_dict
 
     def covered_by_autoskip_selector(self, autoskip_selector):
+from contextlib import suppress
 from importlib.machinery import SourceFileLoader
 from inspect import isabstract
 from os import listdir, mkdir, walk

         for name, obj in self.get_all_attrs_from_file(filepath).items():
             if obj == items.Item or name.startswith("_"):
                 continue
-            try:
+            with suppress(TypeError):
                 if issubclass(obj, items.Item) and not isabstract(obj):
                     yield obj
-            except TypeError:
-                pass
 
     def _discover_root_path(self, path):
         while True:
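suppress(TypeError) is needed here because issubclass() raises TypeError whenever its first argument is not a class, which routinely happens when scanning every attribute of a user-supplied items file. For illustration (Item and File are stand-ins for bundlewrap's classes):

    from contextlib import suppress
    from inspect import isabstract

    class Item:
        pass

    class File(Item):
        pass

    module_attrs = {"File": File, "helper": lambda: None, "VERSION": "1.0"}

    for name, obj in module_attrs.items():
        with suppress(TypeError):  # issubclass() chokes on non-classes
            if issubclass(obj, Item) and not isabstract(obj):
                print("found item type:", name)  # prints: found item type: File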
7474 "text" in self.desc or
7575 self.desc in (
7676 "empty",
77 "JSON data",
7778 "OpenSSH ED25519 public key",
7879 "OpenSSH RSA public key",
7980 "OpenSSH DSA public key",
-from contextlib import contextmanager
+from contextlib import contextmanager, suppress
 from datetime import datetime
 from functools import wraps
 from os import _exit, environ, getpid, kill

         env=env,
         stdin=PIPE,
     )
-    try:
+    with suppress(BrokenPipeError):
         pager.stdin.write("\n".join(lines).encode('utf-8'))
-    except BrokenPipeError:
-        pass
     pager.stdin.close()
     pager.communicate()
     write_to_stream(STDOUT_WRITER, HIDE_CURSOR)

 
 def write_to_stream(stream, msg):
-    try:
+    with suppress(BrokenPipeError):
         if TTY:
             stream.write(msg)
         else:
             stream.write(ansi_clean(msg))
         stream.flush()
-    except BrokenPipeError:
-        pass
 
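All three BrokenPipeError hunks in this file guard against the reader side of a pipe going away early, typically the user quitting the pager before all output has been written. A minimal reproduction, using head as a stand-in pager that exits after one line:

    from contextlib import suppress
    from subprocess import PIPE, Popen

    pager = Popen(["head", "-n", "1"], stdin=PIPE)
    with suppress(BrokenPipeError):
        for i in range(100000):
            pager.stdin.write(f"line {i}\n".encode("utf-8"))
        pager.stdin.close()
    pager.communicate()  # completes cleanly instead of crashing mid-write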
 
 class DrainableStdin:

         ))
         for ssh_pid in self._child_pids:
             self.debug(_("killing SSH session with PID {pid}").format(pid=ssh_pid))
-            try:
+            with suppress(ProcessLookupError):
                 kill(ssh_pid, SIGTERM)
-            except ProcessLookupError:
-                pass
         self._clear_last_job()
         if TTY:
             write_to_stream(STDOUT_WRITER, SHOW_CURSOR)
+bundlewrap (4.1.1-1) unstable; urgency=medium
+
+  * New upstream release
+
+ -- Jonathan Carter <jcc@debian.org>  Fri, 14 Aug 2020 11:43:45 +0200
+
 bundlewrap (4.1.0-1) unstable; urgency=medium
 
   * New upstream release
 
 ## `BW_MAX_METADATA_ITERATIONS`
 
-Sets the limit of how often metadata reactors will be run before BundleWrap calls it a loop and terminates with an exception. Defaults to `100`.
+Sets the limit of how often metadata reactors will be run for a node before BundleWrap calls it a loop and terminates with an exception. Defaults to `1000`.
 
 <br>
 
 
 setup(
     name="bundlewrap",
-    version="4.1.0",
+    version="4.1.1",
     description="Config management with Python",
     long_description=(
         "By allowing for easy and low-overhead config management, BundleWrap fills the gap between complex deployments using Chef or Puppet and old school system administration over SSH.\n"