Imported Debian patch 3.0.1-1
Jonathan Carter
@@ ... @@
   - 3.6
 install:
   - pip install .
-  - pip install idna==2.5
 before_script:
   - ssh-keygen -f ~/.ssh/id_rsa -N ""
   - cp ~/.ssh/id_rsa.pub ~/.ssh/authorized_keys
@@ ... @@
+# 3.0.1
+
+2017-09-25
+
+* fixed `bw run`
+* fixed `bw test -e`
+
+
+# 3.0.0
+
+2017-09-24
+
+* new metadata processor API and options (BACKWARDS INCOMPATIBLE)
+* files, directories, and symlinks now have defaults for owner, group, and mode (BACKWARDS INCOMPATIBLE)
+* overhauled options and output of `bw groups` (BACKWARDS INCOMPATIBLE)
+* overhauled options and output of `bw nodes` (BACKWARDS INCOMPATIBLE)
+* overhauled options and output of `bw run` (BACKWARDS INCOMPATIBLE)
+* overhauled options of `bw test` (BACKWARDS INCOMPATIBLE)
+* svc_systemd services are now 'enabled' by default (BACKWARDS INCOMPATIBLE)
+* `bw items --file-preview` no longer uses a separate file path argument (BACKWARDS INCOMPATIBLE)
+* removed `bw apply --profiling` (BACKWARDS INCOMPATIBLE)
+* removed `Item.display_keys()` (BACKWARDS INCOMPATIBLE)
+* changed return value of `Item.display_dicts()` (BACKWARDS INCOMPATIBLE)
+* changed `Item.BLOCK_CONCURRENT` into a class method (BACKWARDS INCOMPATIBLE)
+* removed `repo.vault.format()` (BACKWARDS INCOMPATIBLE)
+* removed env vars: BWADDHOSTKEYS, BWCOLORS, BWITEMWORKERS, BWNODEWORKERS (BACKWARDS INCOMPATIBLE)
+
+
+# 2.20.1
+
+2017-09-21
+
+* improved performance of metadata processors
+* pkg_* and svc_* items no longer throw exceptions when their commands fail
+* fixed BW_DEBUG_LOG_DIR with `bw debug`
+* fixed 'precedes' attribute for actions
+
+
 # 2.20.0
 
 2017-08-15
@@ ... @@
 # -*- coding: utf-8 -*-
 from __future__ import unicode_literals
 
-VERSION = (2, 20, 0)
+VERSION = (3, 0, 1)
 VERSION_STRING = ".".join([str(v) for v in VERSION])
@@ ... @@
 from os.path import exists, join
 
 from .exceptions import NoSuchBundle, RepositoryError
+from .metadata import DEFAULTS, DONE, RUN_ME_AGAIN, OVERWRITE
 from .utils import cached_property, get_all_attrs_from_file
 from .utils.text import mark_for_translation as _
 from .utils.text import validate_name
@@ ... @@
 
 FILENAME_BUNDLE = "items.py"
 FILENAME_METADATA = "metadata.py"
+
+
+def metadata_processor(func):
+    """
+    Decorator that tags metadata processors.
+    """
+    func.__is_a_metadata_processor = True
+    return func
 
 
 class Bundle(object):
@@ ... @@
         for name, attr in get_all_attrs_from_file(
             self.metadata_file,
             base_env={
+                'DEFAULTS': DEFAULTS,
+                'DONE': DONE,
+                'RUN_ME_AGAIN': RUN_ME_AGAIN,
+                'OVERWRITE': OVERWRITE,
+                'metadata_processor': metadata_processor,
                 'node': self.node,
                 'repo': self.repo,
             },
         ).items():
-            if name.startswith("_") or not callable(attr):
-                continue
-            result.append(attr)
+            if getattr(attr, '__is_a_metadata_processor', False):
+                result.append(attr)
         return result
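The hunk above is the mechanics behind the changelog's "new metadata processor API" entry: a bundle's metadata.py now opts functions in explicitly via the decorator instead of having every callable picked up. A minimal sketch of such a file, assuming the 3.0 convention that a processor returns the metadata dict together with one of the injected option constants:

    # hypothetical metadata.py inside a bundle (illustrative only);
    # 'metadata_processor' and DONE arrive via base_env above, so no
    # imports are needed in this file
    @metadata_processor
    def add_defaults(metadata):
        # merge a default value into this node's metadata
        metadata.setdefault('myapp', {}).setdefault('port', 8080)
        # assumption: DONE signals this processor need not run again
        return metadata, DONE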
@@ ... @@
 # -*- coding: utf-8 -*-
 from __future__ import unicode_literals
 
+from cProfile import Profile
 from functools import wraps
 from os import environ
 from os.path import abspath, dirname
@@ ... @@
     if not hasattr(pargs, 'func'):
         parser_bw.print_help()
         exit(2)
+    if pargs.profile:
+        profile = Profile()
+        profile.enable()
 
     path = abspath(pargs.repo_path)
     io.debug_mode = pargs.debug
     io.activate()
     io.debug(_("invocation: {}").format(" ".join([force_text(arg) for arg in argv])))
-
-    if 'BWADDHOSTKEYS' in environ:  # TODO remove in 3.0.0
-        environ.setdefault('BW_ADD_HOST_KEYS', environ['BWADDHOSTKEYS'])
-    if 'BWCOLORS' in environ:  # TODO remove in 3.0.0
-        environ.setdefault('BW_COLORS', environ['BWCOLORS'])
-    if 'BWITEMWORKERS' in environ:  # TODO remove in 3.0.0
-        environ.setdefault('BW_ITEM_WORKERS', environ['BWITEMWORKERS'])
-    if 'BWNODEWORKERS' in environ:  # TODO remove in 3.0.0
-        environ.setdefault('BW_NODE_WORKERS', environ['BWNODEWORKERS'])
 
     environ.setdefault('BW_ADD_HOST_KEYS', "1" if pargs.add_ssh_host_keys else "0")
 
@@ ... @@
         pargs.func(repo, text_pargs)
     finally:
         io.deactivate()
+        if pargs.profile:
+            profile.disable()
+            profile.dump_stats(pargs.profile)
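Since the new option just hands a path to cProfile's dump_stats(), the dump can be read with any pstats-compatible tool (the option's definition later in this patch suggests SnakeViz). A quick sketch using only the standard library; the file name is illustrative:

    import pstats

    # load the dump written by e.g. `bw --profile bw.pstats apply ...`
    stats = pstats.Stats("bw.pstats")
    stats.sort_stats("cumulative").print_stats(10)  # ten most expensive calls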
@@ ... @@
             'interactive': args['interactive'],
             'skip_list': skip_list,
             'workers': args['item_workers'],
-            'profiling': args['profiling'],
         },
     }
 
@@ ... @@
             return
         skip_list.add(task_id)
         results.append(return_value)
-        if args['profiling']:
-            total_time = 0.0
-            io.stdout(_(" {}").format(bold(task_id)))
-            io.stdout(_(" {} BEGIN PROFILING DATA "
-                        "(most expensive items first)").format(bold(task_id)))
-            io.stdout(_(" {} seconds item").format(bold(task_id)))
-            for time_elapsed, item_id in return_value.profiling_info:
-                io.stdout(" {} {:10.3f} {}".format(
-                    bold(task_id),
-                    time_elapsed.total_seconds(),
-                    item_id,
-                ))
-                total_time += time_elapsed.total_seconds()
-            io.stdout(_(" {} {:10.3f} (total)").format(bold(task_id), total_time))
-            io.stdout(_(" {} END PROFILING DATA").format(bold(task_id)))
-            io.stdout(_(" {}").format(bold(task_id)))
 
     def handle_exception(task_id, exception, traceback):
         if isinstance(exception, ItemDependencyLoop):
@@ ... @@
     ], ROW_SEPARATOR]
 
     for result in results:
-        totals['items'] += len(result.profiling_info)
+        totals['items'] += result.total
         for metric in ('correct', 'fixed', 'skipped', 'failed'):
             totals[metric] += getattr(result, metric)
         rows.append([
             result.node_name,
-            str(len(result.profiling_info)),
+            str(result.total),
             str(result.correct),
             green_unless_zero(result.fixed),
             yellow_unless_zero(result.skipped),
@@ ... @@
 # -*- coding: utf-8 -*-
 from __future__ import unicode_literals
 
-from ..utils import names
+from ..group import GROUP_ATTR_DEFAULTS
+from ..utils.text import bold, mark_for_translation as _
 from ..utils.ui import io
+from .nodes import _attribute_table
+
+
+GROUP_ATTRS = sorted(list(GROUP_ATTR_DEFAULTS) + ['nodes'])
+GROUP_ATTRS_LISTS = ('nodes',)
 
 
 def bw_groups(repo, args):
-    for group in repo.groups:
-        line = group.name
-        if args['show_nodes']:
-            line += ": " + ", ".join(names(group.nodes))
-        io.stdout(line)
+    if not args['groups']:
+        for group in repo.groups:
+            io.stdout(group.name)
+    else:
+        groups = [repo.get_group(group.strip()) for group in args['groups'].split(",")]
+        if not args['attrs']:
+            subgroups = set(groups)
+            for group in groups:
+                subgroups = subgroups.union(group.subgroups)
+            for subgroup in sorted(subgroups):
+                io.stdout(subgroup.name)
+        else:
+            _attribute_table(
+                groups,
+                bold(_("group")),
+                args['attrs'],
+                GROUP_ATTRS,
+                GROUP_ATTRS_LISTS,
+                args['inline'],
+            )
@@ ... @@
 
 def bw_items(repo, args):
     node = get_node(repo, args['node'], adhoc_nodes=args['adhoc_nodes'])
-    if args['file_preview']:
-        item = get_item(node, "file:{}".format(args['file_preview']))
-        if (
-            item.attributes['content_type'] in ('any', 'base64', 'binary') or
-            item.attributes['delete'] is True
-        ):
-            io.stderr(_(
-                "{x} cannot preview {file} on {node} (unsuitable content_type or deleted)"
-            ).format(x=red("!!!"), file=item.id, node=node.name))
+    if args['file_preview'] and not args['item']:
+        io.stderr(_("{x} no ITEM given for file preview").format(x=red("!!!")))
+        exit(1)
+    elif args['file_preview_path']:
+        if args['item']:
+            io.stderr(_("{x} use --file-preview to preview single files").format(x=red("!!!")))
             exit(1)
-        else:
-            try:
-                io.stdout(item.content.decode(item.attributes['encoding']), append_newline=False)
-            except FaultUnavailable:
-                io.stderr(_(
-                    "{x} skipped {path} (Fault unavailable)"
-                ).format(x=yellow("»"), path=bold(item.name)))
-                exit(1)
-    elif args['file_preview_path']:
         if exists(args['file_preview_path']):
             io.stderr(_(
                 "not writing to existing path: {path}"
@@ ... @@
         ))
     elif args['item']:
         item = get_item(node, args['item'])
-        if args['show_sdict']:
-            statedict = item.sdict()
+        if args['file_preview']:
+            if item.ITEM_TYPE_NAME != 'file':
+                io.stderr(_(
+                    "{x} cannot preview {item} on {node} (not a file)"
+                ).format(x=red("!!!"), item=item.id, node=node.name))
+                exit(1)
+            if (
+                item.attributes['content_type'] in ('any', 'base64', 'binary') or
+                item.attributes['delete'] is True
+            ):
+                io.stderr(_(
+                    "{x} cannot preview {file} on {node} (unsuitable content_type or deleted)"
+                ).format(x=red("!!!"), file=item.id, node=node.name))
+                exit(1)
+            else:
+                try:
+                    io.stdout(
+                        item.content.decode(item.attributes['encoding']),
+                        append_newline=False,
+                    )
+                except FaultUnavailable:
+                    io.stderr(_(
+                        "{x} skipped {path} (Fault unavailable)"
+                    ).format(x=yellow("»"), path=bold(item.name)))
+                    exit(1)
         else:
-            statedict = item.cdict()
-        if statedict is None:
-            io.stdout("REMOVE")
-        else:
-            if args['attr']:
-                io.stdout(repr(statedict[args['attr']]))
+            if args['show_sdict']:
+                statedict = item.sdict()
             else:
-                io.stdout(statedict_to_json(statedict, pretty=True))
+                statedict = item.cdict()
+            if statedict is None:
+                io.stdout("REMOVE")
+            else:
+                if args['attr']:
+                    io.stdout(repr(statedict[args['attr']]))
+                else:
+                    io.stdout(statedict_to_json(statedict, pretty=True))
     else:
         for item in sorted(node.items):
             if args['show_repr']:
@@ ... @@
 from __future__ import unicode_literals
 
 from os import environ
+from sys import exit
 
 from ..utils import names
-from ..utils.cmdline import get_group, get_target_nodes
+from ..utils.cmdline import get_target_nodes
 from ..utils.table import ROW_SEPARATOR, render_table
-from ..utils.text import bold, mark_for_translation as _
+from ..utils.text import bold, mark_for_translation as _, red
 from ..utils.ui import io, page_lines
 from ..group import GROUP_ATTR_DEFAULTS
 
 
+NODE_ATTRS = sorted(list(GROUP_ATTR_DEFAULTS) + ['bundles', 'groups', 'hostname'])
+NODE_ATTRS_LISTS = ('bundles', 'groups')
+
+
+def _attribute_table(
+    entities,
+    entity_label,
+    selected_attrs,
+    available_attrs,
+    available_attrs_lists,
+    inline,
+):
+    rows = [[entity_label], ROW_SEPARATOR]
+    selected_attrs = [attr.strip() for attr in selected_attrs.split(",")]
+    if selected_attrs == ['all']:
+        selected_attrs = available_attrs
+    for attr in selected_attrs:
+        if attr not in available_attrs:
+            io.stderr(_("{x} unknown attribute: {attr}").format(x=red("!!!"), attr=attr))
+            exit(1)
+        rows[0].append(bold(attr))
+    has_list_attrs = False
+    for entity in entities:
+        attr_values = [[entity.name]]
+        for attr in selected_attrs:
+            if attr in available_attrs_lists:
+                if inline:
+                    attr_values.append([",".join(names(getattr(entity, attr)))])
+                else:
+                    has_list_attrs = True
+                    attr_values.append(list(names(getattr(entity, attr))))
+            else:
+                attr_values.append([str(getattr(entity, attr))])
+        number_of_lines = max([len(value) for value in attr_values])
+        if environ.get("BW_TABLE_STYLE") == 'grep':
+            # repeat entity name for each line
+            attr_values[0] = attr_values[0] * number_of_lines
+        for line in range(number_of_lines):
+            row = []
+            for attr_index in range(len(selected_attrs) + 1):
+                try:
+                    row.append(attr_values[attr_index][line])
+                except IndexError:
+                    row.append("")
+            rows.append(row)
+        if has_list_attrs:
+            rows.append(ROW_SEPARATOR)
+    if environ.get("BW_TABLE_STYLE") == 'grep':
+        rows = rows[2:]
+    page_lines(render_table(
+        rows[:-1] if has_list_attrs else rows,  # remove trailing ROW_SEPARATOR
+    ))
+
+
 def bw_nodes(repo, args):
-    if args['filter_group'] is not None:
-        nodes = get_group(repo, args['filter_group']).nodes
-    elif args['target'] is not None:
+    if args['target'] is not None:
         nodes = get_target_nodes(repo, args['target'], adhoc_nodes=args['adhoc_nodes'])
     else:
         nodes = repo.nodes
-
-    rows = [[
-        bold(_("node")),
-        bold(_("attribute")),
-        bold(_("value")),
-    ], ROW_SEPARATOR]
-
-    for node in nodes:
-        if args['show_attrs']:
-            first_attr = True
-            for attr in sorted(list(GROUP_ATTR_DEFAULTS) + ['hostname']):
-                rows.append([
-                    bold(node.name) if first_attr else "",
-                    bold(attr),
-                    str(getattr(node, attr)),
-                ])
-                first_attr = environ.get("BW_TABLE_STYLE") == 'grep'
-
-            if args['inline']:
-                rows.append([
-                    bold(node.name) if first_attr else "",
-                    bold("groups"),
-                    ", ".join(sorted([group.name for group in node.groups])),
-                ])
-                first_attr = environ.get("BW_TABLE_STYLE") == 'grep'
-            else:
-                rows.append([
-                    "",
-                    ROW_SEPARATOR,
-                    ROW_SEPARATOR,
-                ])
-                first_group = True
-                for group in sorted(node.groups):
-                    rows.append([
-                        bold(node.name) if first_attr else "",
-                        bold("groups") if first_group else "",
-                        group.name,
-                    ])
-                    first_group = environ.get("BW_TABLE_STYLE") == 'grep'
-                    first_attr = environ.get("BW_TABLE_STYLE") == 'grep'
-                rows.append([
-                    "",
-                    ROW_SEPARATOR,
-                    ROW_SEPARATOR,
-                ])
-
-            if args['inline']:
-                rows.append([
-                    bold(node.name) if first_attr else "",
-                    bold("bundles"),
-                    ", ".join(sorted([bundle.name for bundle in node.bundles])),
-                ])
-                first_attr = environ.get("BW_TABLE_STYLE") == 'grep'
-            else:
-                first_bundle = True
-                for bundle in sorted(node.bundles):
-                    rows.append([
-                        bold(node.name) if first_attr else "",
-                        bold("bundles") if first_bundle else "",
-                        bundle.name,
-                    ])
-                    first_bundle = environ.get("BW_TABLE_STYLE") == 'grep'
-                    first_attr = environ.get("BW_TABLE_STYLE") == 'grep'
-            rows.append(ROW_SEPARATOR)
-            continue
-        line = ""
-        if args['show_hostnames']:
-            line += node.hostname
-        else:
-            line += node.name
-        if args['show_bundles']:
-            line += ": " + ", ".join(sorted(names(node.bundles)))
-        elif args['show_groups']:
-            line += ": " + ", ".join(sorted(names(node.groups)))
-        elif args['show_os']:
-            line += ": " + node.os
-        io.stdout(line)
-
-    if len(rows) > 2:
-        page_lines(render_table(
-            rows[:-1],  # remove trailing ROW_SEPARATOR
-        ))
+    if not args['attrs']:
+        for node in nodes:
+            io.stdout(node.name)
+    else:
+        _attribute_table(
+            nodes,
+            bold(_("node")),
+            args['attrs'],
+            NODE_ATTRS,
+            NODE_ATTRS_LISTS,
+            args['inline'],
+        )
@@ ... @@
 # -*- coding: utf-8 -*-
 from __future__ import unicode_literals
 
-from argparse import ArgumentParser
+from argparse import ArgumentParser, SUPPRESS
 from os import environ, getcwd
 
 from .. import VERSION_STRING
@@ ... @@
         action='store_true',
         default=False,
        dest='debug',
-        help=_("print debugging info (implies -v)"),
+        help=_("print debugging info"),
     )
     parser.add_argument(
         "-r",
@@ ... @@
         dest='repo_path',
         help=_("Look for repository at this path (defaults to current working directory)"),
         metavar=_("DIRECTORY"),
+        type=str,
+    )
+    # hidden option to dump profiling info, can be inspected with
+    # SnakeViz or whatever
+    parser.add_argument(
+        "--profile",
+        default=None,
+        dest='profile',
+        help=SUPPRESS,
+        metavar=_("FILE"),
         type=str,
     )
     parser.add_argument(
@@ ... @@
         help=_("number of items to apply simultaneously on each node "
               "(defaults to {})").format(bw_apply_p_items_default),
         type=int,
-    )
-    parser_apply.add_argument(
-        "--profiling",
-        action='store_true',
-        default=False,
-        dest='profiling',
-        help=_("print time elapsed for each item"),
     )
     parser_apply.add_argument(
         "-s",
@@ ... @@
     )
 
     # bw groups
-    help_groups = _("Lists groups in this repository (deprecated, use `bw nodes -a`)")
+    help_groups = _("Lists groups in this repository")
     parser_groups = subparsers.add_parser("groups", description=help_groups, help=help_groups)
     parser_groups.set_defaults(func=bw_groups)
     parser_groups.add_argument(
-        "-n",
-        "--nodes",
-        action='store_true',
-        dest='show_nodes',
-        help=_("show nodes for each group"),
+        "-i",
+        "--inline",
+        action='store_true',
+        dest='inline',
+        help=_("keep lists on a single line (for grep)"),
+    )
+    parser_groups.add_argument(
+        'groups',
+        default=None,
+        metavar=_("GROUP1,GROUP2..."),
+        nargs='?',
+        type=str,
+        help=_("show the given groups and their subgroups"),
+    )
+    parser_groups.add_argument(
+        'attrs',
+        default=None,
+        metavar=_("ATTR1,ATTR2..."),
+        nargs='?',
+        type=str,
+        help=_("show table with the given attributes for each group "
+               "(e.g. 'all', 'members', 'os', ...)"),
     )
 
     # bw hash
@@ ... @@
     parser_items.add_argument(
         "-f",
         "--file-preview",
+        action='store_true',
         dest='file_preview',
-        help=_("print preview of given file"),
-        metavar=_("FILE"),
-        required=False,
-        type=str,  # TODO 3.0 convert to bool and use ITEM arg
+        help=_("print preview of given file ITEM"),
     )
     parser_items.add_argument(
         "-w",
@@ ... @@
         metavar=_("KEY"),
         nargs='*',
         type=str,
-        help=_("print only partial metadata from the given space-separated key path"),
+        help=_("print only partial metadata from the given space-separated key path (e.g. `bw metadata mynode users jdoe` to show `mynode.metadata['users']['jdoe']`)"),
     )
     parser_metadata.add_argument(
         "-t",
@@ ... @@
     )
 
     # bw nodes
-    help_nodes = _("List all nodes in this repository")
+    help_nodes = _("List nodes in this repository")
     parser_nodes = subparsers.add_parser("nodes", description=help_nodes, help=help_nodes)
     parser_nodes.set_defaults(func=bw_nodes)
     parser_nodes.add_argument(
-        "-a",
-        "--attrs",
-        action='store_true',
-        dest='show_attrs',
-        help=_("show attributes for each node"),
-    )
-    parser_nodes.add_argument(
-        "--bundles",
-        action='store_true',
-        dest='show_bundles',
-        help=_("show bundles for each node (deprecated, use --attrs)"),
-    )
-    parser_nodes.add_argument(
-        "--hostnames",
-        action='store_true',
-        dest='show_hostnames',
-        help=_("show hostnames instead of node names (deprecated, use --attrs)"),
-    )
-    parser_nodes.add_argument(
-        "-g",
-        "--filter-group",
-        default=None,
-        dest='filter_group',
-        metavar=_("GROUP"),
-        required=False,
-        type=str,
-        help=_("show only nodes in the given group (deprecated)"),
-    )
-    parser_nodes.add_argument(
-        "--groups",
-        action='store_true',
-        dest='show_groups',
-        help=_("show group membership for each node (deprecated, use --attrs)"),
-    )
-    parser_nodes.add_argument(
         "-i",
         "--inline",
         action='store_true',
         dest='inline',
-        help=_("show multiple values on the same line (use with --attrs)"),
-    )
-    parser_nodes.add_argument(
-        "--os",
-        action='store_true',
-        dest='show_os',
-        help=_("show OS for each node (deprecated, use --attrs)"),
+        help=_("keep lists on a single line (for grep)"),
     )
     parser_nodes.add_argument(
         'target',
@@ ... @@
         nargs='?',
         type=str,
         help=_("filter according to nodes, groups and/or bundle selectors"),
+    )
+    parser_nodes.add_argument(
+        'attrs',
+        default=None,
+        metavar=_("ATTR1,ATTR2..."),
+        nargs='?',
+        type=str,
+        help=_("show table with the given attributes for each node "
+               "(e.g. 'all', 'groups', 'bundles', 'hostname', 'os', ...)"),
     )
 
     # bw plot
@@ ... @@
     parser_run.add_argument(
         'command',
         metavar=_("COMMAND"),
-        nargs='+',
         type=str,
         help=_("command to run"),
     )
     parser_run.add_argument(
-        "-f",
-        "--may-fail",
-        action='store_true',
-        dest='may_fail',
-        help=_("ignore non-zero exit codes"),
+        "--stderr-table",
+        action='store_true',
+        dest='stderr_table',
+        help=_("include command stderr in stats table"),
     )
     parser_run.add_argument(
-        "--force",
-        action='store_true',
-        dest='ignore_locks',
-        help=_("ignore soft locks on target nodes"),
+        "--stdout-table",
+        action='store_true',
+        dest='stdout_table',
+        help=_("include command stdout in stats table"),
     )
     bw_run_p_default = int(environ.get("BW_NODE_WORKERS", "1"))
     parser_run.add_argument(
@@ ... @@
         metavar=_("PATH"),
         type=str,
     )
+    parser_run.add_argument(
+        "-S",
+        "--no-summary",
+        action='store_false',
+        dest='summary',
+        help=_("don't show stats summary"),
+    )
 
     # bw stats
     help_stats = _("Show some statistics about your repository")
@@ ... @@
 
     # bw test
     help_test = _("Test your repository for consistency "
-                  "(you can use this with a CI tool like Jenkins)")
+                  "(you can use this with a CI tool like Jenkins). "
+                  "If *any* options other than -i are given, *only* the "
+                  "tests selected by those options will be run. Otherwise, a "
+                  "default selection of tests will be run (that selection may "
+                  "change in future releases). Currently, the default is -IJM "
+                  "if specific nodes are given and -HIJMS if testing the "
+                  "entire repo.")
     parser_test = subparsers.add_parser("test", description=help_test, help=help_test)
     parser_test.set_defaults(func=bw_test)
     parser_test.add_argument(
@@ ... @@
         metavar=_("NODE1,NODE2,GROUP1,bundle:BUNDLE1..."),
         nargs='?',
         type=str,
-        help=_("target nodes, groups and/or bundle selectors"),
+        help=_("target nodes, groups and/or bundle selectors (defaults to all)"),
     )
     parser_test.add_argument(
         "-c",
-        "--plugin-conflict-error",
-        action='store_true',
-        dest='plugin_conflict_error',
+        "--plugin-conflicts",
+        action='store_true',
+        dest='plugin_conflicts',
         help=_("check for local modifications to files installed by plugins"),
     )
     parser_test.add_argument(
@@ ... @@
         type=int,
     )
     parser_test.add_argument(
+        "-e",
+        "--empty-groups",
+        action='store_true',
+        dest='empty_groups',
+        help=_("check for empty groups"),
+    )
+    parser_test.add_argument(
+        "-H",
+        "--hooks-repo",
+        action='store_true',
+        dest='hooks_repo',
+        help=_("run repo-level test hooks"),
+    )
+    parser_test.add_argument(
         "-i",
         "--ignore-missing-faults",
         action='store_true',
         dest='ignore_missing_faults',
         help=_("do not fail when encountering a missing Fault"),
+    )
+    parser_test.add_argument(
+        "-I",
+        "--items",
+        action='store_true',
+        dest='items',
+        help=_("run item-level tests (like rendering templates)"),
+    )
+    parser_test.add_argument(
+        "-J",
+        "--hooks-node",
+        action='store_true',
+        dest='hooks_node',
+        help=_("run node-level test hooks"),
     )
     parser_test.add_argument(
         "-m",
@@ ... @@
         metavar="N",
         type=int,
     )
-    bw_test_p_default = int(environ.get("BW_NODE_WORKERS", "1"))
-    parser_test.add_argument(
-        "-p",
-        "--parallel-nodes",
-        default=bw_test_p_default,
-        dest='node_workers',
-        help=_("number of nodes to test simultaneously "
-               "(defaults to {})").format(bw_test_p_default),
-        type=int,
-    )
-    bw_test_p_items_default = int(environ.get("BW_ITEM_WORKERS", "4"))
-    parser_test.add_argument(
-        "-P",
-        "--parallel-items",
-        default=bw_test_p_items_default,
-        dest='item_workers',
-        help=_("number of items to test simultaneously for each node "
-               "(defaults to {})").format(bw_test_p_items_default),
-        type=int,
+    parser_test.add_argument(
+        "-M",
+        "--metadata-collisions",
+        action='store_true',
+        dest='metadata_collisions',
+        help=_("check for conflicting metadata keys in group metadata"),
+    )
+    parser_test.add_argument(
+        "-o",
+        "--orphaned-bundles",
+        action='store_true',
+        dest='orphaned_bundles',
+        help=_("check for bundles not assigned to any node"),
+    )
+    parser_test.add_argument(
+        "-S",
+        "--subgroup-loops",
+        action='store_true',
+        dest='subgroup_loops',
+        help=_("check for loops in subgroup hierarchies"),
     )
 
     # bw verify
@@ ... @@
     node = get_node(repo, args['node'], adhoc_nodes=args['adhoc_nodes'])
     for line in graph_for_items(
         node.name,
-        prepare_dependencies(node.items),
+        prepare_dependencies(node.items, node.os, node.os_version),
         cluster=args['cluster'],
         concurrency=args['depends_concurrency'],
         static=args['depends_static'],
@@ ... @@
 from __future__ import unicode_literals
 
 from datetime import datetime
+try:
+    from itertools import zip_longest
+except ImportError:  # Python 2
+    from itertools import izip_longest as zip_longest
+from sys import exit
 
 from ..concurrency import WorkerPool
-from ..exceptions import NodeLockedException
 from ..utils import SkipList
 from ..utils.cmdline import get_target_nodes
+from ..utils.table import ROW_SEPARATOR, render_table
 from ..utils.text import mark_for_translation as _
-from ..utils.text import bold, error_summary, green, red, yellow
+from ..utils.text import blue, bold, error_summary, green, red, yellow
 from ..utils.time import format_duration
 from ..utils.ui import io
 
 
-def run_on_node(node, command, may_fail, ignore_locks, log_output, skip_list):
+def run_on_node(node, command, skip_list):
     if node.dummy:
         io.stdout(_("{x} {node} is a dummy node").format(node=bold(node.name), x=yellow("»")))
         return None
@@ ... @@
         command,
     )
 
-    start = datetime.now()
     result = node.run(
         command,
-        may_fail=may_fail,
-        log_output=log_output,
+        may_fail=True,
+        log_output=True,
     )
-    end = datetime.now()
-    duration = end - start
 
     node.repo.hooks.node_run_end(
         node.repo,
         node,
         command,
-        duration=duration,
+        duration=result.duration,
         return_code=result.return_code,
         stdout=result.stdout,
         stderr=result.stderr,
     )
+    return result
 
-    if result.return_code == 0:
-        io.stdout("{x} {node} {msg}".format(
-            msg=_("completed successfully after {time}").format(
-                time=format_duration(duration, msec=True),
-            ),
-            node=bold(node.name),
-            x=green("✓"),
-        ))
-    else:
-        io.stderr("{x} {node} {msg}".format(
-            msg=_("failed after {time}s (return code {rcode})").format(
-                rcode=result.return_code,
-                time=format_duration(duration, msec=True),
-            ),
-            node=bold(node.name),
-            x=red("✘"),
-        ))
-    return result.return_code
+
+def stats_summary(results, include_stdout, include_stderr):
+    rows = [[
+        bold(_("node")),
+        bold(_("return code")),
+        bold(_("time")),
+    ], ROW_SEPARATOR]
+    if include_stdout:
+        rows[0].append(bold(_("stdout")))
+    if include_stderr:
+        rows[0].append(bold(_("stderr")))
+
+    for node_name, result in sorted(results.items()):
+        row = [node_name]
+        if result.return_code == 0:
+            row.append(green(str(result.return_code)))
+        else:
+            row.append(red(str(result.return_code)))
+        row.append(format_duration(result.duration, msec=True))
+        rows.append(row)
+        if include_stdout or include_stderr:
+            stdout = result.stdout.decode('utf-8', errors='replace').strip().split("\n")
+            stderr = result.stderr.decode('utf-8', errors='replace').strip().split("\n")
+            if include_stdout:
+                row.append(stdout[0])
+            if include_stderr:
+                row.append(stderr[0])
+            for stdout_line, stderr_line in list(zip_longest(stdout, stderr, fillvalue=""))[1:]:
+                continuation_row = ["", "", ""]
+                if include_stdout:
+                    continuation_row.append(stdout_line)
+                if include_stderr:
+                    continuation_row.append(stderr_line)
+                rows.append(continuation_row)
+            rows.append(ROW_SEPARATOR)
+
+    if include_stdout or include_stderr:
+        # remove last ROW_SEPARATOR
+        rows = rows[:-1]
+    for line in render_table(rows, alignments={1: 'right', 2: 'right'}):
+        io.stdout("{x} {line}".format(x=blue("i"), line=line))
 
 
 def bw_run(repo, args):
@@ ... @@
         args['command'],
     )
     start_time = datetime.now()
-
+    results = {}
     skip_list = SkipList(args['resume_file'])
 
     def tasks_available():
@@ ... @@
             'task_id': node.name,
             'args': (
                 node,
-                " ".join(args['command']),
-                args['may_fail'],
-                args['ignore_locks'],
-                True,
+                args['command'],
                 skip_list,
             ),
         }
 
     def handle_result(task_id, return_value, duration):
         io.progress_advance()
+        results[task_id] = return_value
         if return_value == 0:
             skip_list.add(task_id)
 
     def handle_exception(task_id, exception, traceback):
         io.progress_advance()
-        if isinstance(exception, NodeLockedException):
-            msg = _(
-                "{node_bold} locked by {user} "
-                "(see `bw lock show {node}` for details)"
-            ).format(
-                node_bold=bold(task_id),
-                node=task_id,
-                user=exception.args[0]['user'],
-            )
-        else:
-            msg = "{} {}".format(bold(task_id), exception)
-            io.stderr(traceback)
-            io.stderr(repr(exception))
+        msg = "{} {}".format(bold(task_id), exception)
+        io.stderr(traceback)
+        io.stderr(repr(exception))
         io.stderr("{} {}".format(red("!"), msg))
         errors.append(msg)
 
@@ ... @@
     )
     worker_pool.run()
 
+    if args['summary']:
+        stats_summary(results, args['stdout_table'], args['stderr_table'])
     error_summary(errors)
 
     repo.hooks.run_end(
@@ ... @@
         args['command'],
         duration=datetime.now() - start_time,
     )
+
+    exit(1 if errors else 0)
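The continuation rows in the new stats_summary() hinge on itertools.zip_longest padding the shorter of the two output streams, so stdout and stderr stay in their own columns however many lines each has. A self-contained illustration of that pairing:

    from itertools import zip_longest

    stdout = ["ok", "done", "one more line"]
    stderr = ["warning"]
    # the first lines go on the node's main table row; the rest become
    # continuation rows, padded with empty strings
    for out_line, err_line in list(zip_longest(stdout, stderr, fillvalue=""))[1:]:
        print(["", "", "", out_line, err_line])
    # prints ['', '', '', 'done', ''] then ['', '', '', 'one more line', '']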
@@ ... @@
 from copy import copy
 from sys import exit
 
-from ..exceptions import ItemDependencyLoop
-from ..concurrency import WorkerPool
+from ..deps import DummyItem
+from ..exceptions import FaultUnavailable, ItemDependencyLoop
+from ..itemqueue import ItemTestQueue
+from ..metadata import check_for_unsolvable_metadata_key_conflicts
 from ..plugins import PluginManager
 from ..repo import Repository
 from ..utils.cmdline import count_items, get_target_nodes
 from ..utils.plot import explain_item_dependency_loop
 from ..utils.text import bold, green, mark_for_translation as _, red, yellow
-from ..utils.ui import io
-
-
-def bw_test(repo, args):
-    if args['target']:
-        pending_nodes = get_target_nodes(repo, args['target'], adhoc_nodes=args['adhoc_nodes'])
-    else:
-        pending_nodes = copy(list(repo.nodes))
-
-    # Print warnings for unused bundles. Only do this if we are to
-    # test the entire repo, though.
-    # TODO 3.0 Orphaned bundles should be errors (maybe optionally)
-    orphaned_bundles = set(repo.bundle_names)
-    for node in repo.nodes:
-        for bundle in node.bundles:
-            orphaned_bundles.discard(bundle.name)
-    for bundle in sorted(orphaned_bundles):
-        io.stdout(_("{x} {bundle} is an unused bundle").format(
-            bundle=bold(bundle),
-            x=yellow("!"),
-        ))
-
-    io.progress_set_total(count_items(pending_nodes))
-
-    def tasks_available():
-        return bool(pending_nodes)
-
-    def next_task():
-        node = pending_nodes.pop()
-        return {
-            'target': node.test,
-            'task_id': node.name,
-            'kwargs': {
-                'ignore_missing_faults': args['ignore_missing_faults'],
-                'workers': args['item_workers'],
-            },
-        }
-
-    def handle_exception(task_id, exception, traceback):
-        if isinstance(exception, ItemDependencyLoop):
-            for line in explain_item_dependency_loop(exception, task_id):
+from ..utils.ui import io, QUIT_EVENT
+
+
+def test_items(nodes, ignore_missing_faults):
+    with io.job(_(" counting items...")):
+        io.progress_set_total(count_items(nodes))
+    for node in nodes:
+        if QUIT_EVENT.is_set():
+            break
+        if not node.items:
+            io.stdout(_("{x} {node} has no items").format(node=bold(node.name), x=yellow("!")))
+            continue
+        item_queue = ItemTestQueue(node.items, node.os, node.os_version)
+        while not QUIT_EVENT.is_set():
+            try:
+                item = item_queue.pop()
+            except IndexError:  # no items left
+                break
+            if isinstance(item, DummyItem):
+                continue
+            try:
+                item._test()
+            except FaultUnavailable:
+                if ignore_missing_faults:
+                    io.progress_advance()
+                    io.stderr(_("{x} {node} {bundle} {item} ({msg})").format(
+                        bundle=bold(item.bundle.name),
+                        item=item.id,
+                        msg=yellow(_("Fault unavailable")),
+                        node=bold(node.name),
+                        x=yellow("»"),
+                    ))
+                else:
+                    io.stderr(_("{x} {node} {bundle} {item} missing Fault:").format(
+                        bundle=bold(item.bundle.name),
+                        item=item.id,
+                        node=bold(node.name),
+                        x=red("!"),
+                    ))
+                    raise
+            except Exception:
+                io.stderr(_("{x} {node} {bundle} {item}").format(
+                    bundle=bold(item.bundle.name),
+                    item=item.id,
+                    node=bold(node.name),
+                    x=red("!"),
+                ))
+                raise
+            else:
+                if item.id.count(":") < 2:
+                    # don't count canned actions
+                    io.progress_advance()
+                io.stdout("{x} {node} {bundle} {item}".format(
+                    bundle=bold(item.bundle.name),
+                    item=item.id,
+                    node=bold(node.name),
+                    x=green("✓"),
+                ))
+        if item_queue.items_with_deps and not QUIT_EVENT.is_set():
+            exception = ItemDependencyLoop(item_queue.items_with_deps)
+            for line in explain_item_dependency_loop(exception, node.name):
                 io.stderr(line)
-            raise exception
-
-    worker_pool = WorkerPool(
-        tasks_available,
-        next_task,
-        handle_exception=handle_exception,
-        pool_id="test",
-        workers=args['node_workers'],
-    )
-    worker_pool.run()
-
-    io.progress_set_total(0)
-
+            exit(1)
+
+
+def test_subgroup_loops(repo):
     checked_groups = []
     for group in repo.groups:
         if group in checked_groups:
@@ ... @@
             group=bold(group.name),
         ))
 
-    # check for plugin inconsistencies
-    if args['plugin_conflict_error']:
-        pm = PluginManager(repo.path)
-        for plugin, version in pm.list():
-            local_changes = pm.local_modifications(plugin)
-            if local_changes:
-                io.stderr(_("{x} Plugin '{plugin}' has local modifications:").format(
-                    plugin=plugin,
-                    x=red("✘"),
+
+def test_metadata_collisions(node):
+    with io.job(_(" {node} checking for metadata collisions...").format(node=node.name)):
+        check_for_unsolvable_metadata_key_conflicts(node)
+    io.stdout(_("{x} {node} has no metadata collisions").format(
+        x=green("✓"),
+        node=bold(node.name),
+    ))
+
+
+def test_orphaned_bundles(repo):
+    orphaned_bundles = set(repo.bundle_names)
+    for node in repo.nodes:
+        for bundle in node.bundles:
+            orphaned_bundles.discard(bundle.name)
+    for bundle in sorted(orphaned_bundles):
+        io.stderr(_("{x} {bundle} is an unused bundle").format(
+            bundle=bold(bundle),
+            x=red("✘"),
+        ))
+    if orphaned_bundles:
+        exit(1)
+
+
+def test_empty_groups(repo):
+    empty_groups = set()
+    for group in repo.groups:
+        if not group.nodes:
+            empty_groups.add(group)
+    for group in sorted(empty_groups):
+        io.stderr(_("{x} {group} is an empty group").format(
+            group=bold(group),
+            x=red("✘"),
+        ))
+    if empty_groups:
+        exit(1)
+
+
+def test_plugin_conflicts(repo):
+    pm = PluginManager(repo.path)
+    for plugin, version in pm.list():
+        local_changes = pm.local_modifications(plugin)
+        if local_changes:
+            io.stderr(_("{x} Plugin '{plugin}' has local modifications:").format(
+                plugin=plugin,
+                x=red("✘"),
+            ))
+            for path, actual_checksum, should_checksum in local_changes:
+                io.stderr(_("\t{path} ({actual_checksum}) should be {should_checksum}").format(
+                    actual_checksum=actual_checksum,
+                    path=path,
+                    should_checksum=should_checksum,
                 ))
-            for path, actual_checksum, should_checksum in local_changes:
-                io.stderr(_("\t{path} ({actual_checksum}) should be {should_checksum}").format(
-                    actual_checksum=actual_checksum,
-                    path=path,
-                    should_checksum=should_checksum,
-                ))
+            exit(1)
+        else:
+            io.stdout(_("{x} Plugin '{plugin}' has no local modifications.").format(
+                plugin=plugin,
+                x=green("✓"),
+            ))
+
+
+def test_determinism_config(repo, nodes, iterations):
+    """
+    Generate configuration a couple of times for every node and see if
+    anything changes between iterations
+    """
+    hashes = {}
+    io.progress_set_total(len(nodes) * iterations)
+    for i in range(iterations):
+        if i == 0:
+            # optimization: for the first iteration, just use the repo
+            # we already have
+            iteration_repo = repo
+        else:
+            iteration_repo = Repository(repo.path)
+        iteration_nodes = [iteration_repo.get_node(node.name) for node in nodes]
+        for node in iteration_nodes:
+            with io.job(_(" {node} generating configuration ({i}/{n})...").format(
+                i=i + 1,
+                n=iterations,
+                node=node.name,
+            )):
+                result = node.hash()
+            hashes.setdefault(node.name, result)
+            if hashes[node.name] != result:
+                io.stderr(_(
+                    "{x} Configuration for node {node} changed when generated repeatedly "
+                    "(use `bw hash -d {node}` to debug)"
+                ).format(node=node.name, x=red("✘")))
                 exit(1)
-            else:
-                io.stdout(_("{x} Plugin '{plugin}' has no local modifications.").format(
-                    plugin=plugin,
-                    x=green("✓"),
-                ))
-
-    # generate metadata a couple of times for every node and see if
-    # anything changes between iterations
-    if args['determinism_metadata'] > 1:
-        hashes = {}
-        for i in range(args['determinism_metadata']):
-            repo = Repository(repo.path)
-            if args['target']:
-                nodes = get_target_nodes(repo, args['target'], adhoc_nodes=args['adhoc_nodes'])
-            else:
-                nodes = repo.nodes
-            for node in nodes:
-                with io.job(_(" {node} generating metadata ({i}/{n})... ").format(
-                    i=i + 1,
-                    n=args['determinism_metadata'],
-                    node=node.name,
-                )):
-                    result = node.metadata_hash()
-                hashes.setdefault(node.name, result)
-                if hashes[node.name] != result:
-                    io.stderr(_(
-                        "{x} Metadata for node {node} changed when generated repeatedly "
-                        "(use `bw hash -d {node}` to debug)"
-                    ).format(node=node.name, x=red("✘")))
-                    exit(1)
-        io.stdout(_("{x} Metadata remained the same after being generated {n} times").format(
-            n=args['determinism_metadata'],
-            x=green("✓"),
-        ))
-
-    # generate configuration a couple of times for every node and see if
-    # anything changes between iterations
-    if args['determinism_config'] > 1:
-        hashes = {}
-        for i in range(args['determinism_config']):
-            repo = Repository(repo.path)
-            if args['target']:
-                nodes = get_target_nodes(repo, args['target'], adhoc_nodes=args['adhoc_nodes'])
-            else:
-                nodes = repo.nodes
-            for node in nodes:
-                with io.job(_(" {node} generating configuration ({i}/{n})...").format(
-                    i=i + 1,
-                    n=args['determinism_config'],
-                    node=node.name,
-                )):
-                    result = node.hash()
-                hashes.setdefault(node.name, result)
-                if hashes[node.name] != result:
-                    io.stderr(_(
-                        "{x} Configuration for node {node} changed when generated repeatedly "
-                        "(use `bw hash -d {node}` to debug)"
-                    ).format(node=node.name, x=red("✘")))
-                    exit(1)
-        io.stdout(_("{x} Configuration remained the same after being generated {n} times").format(
-            n=args['determinism_config'],
-            x=green("✓"),
-        ))
-
-    if not args['target']:
+            io.progress_advance()
+    io.stdout(_("{x} Configuration remained the same after being generated {n} times").format(
+        n=iterations,
+        x=green("✓"),
+    ))
+
+
+def test_determinism_metadata(repo, nodes, iterations):
+    """
+    Generate metadata a couple of times for every node and see if
+    anything changes between iterations
+    """
+    hashes = {}
+    io.progress_set_total(len(nodes) * iterations)
+    for i in range(iterations):
+        if i == 0:
+            # optimization: for the first iteration, just use the repo
+            # we already have
+            iteration_repo = repo
+        else:
+            iteration_repo = Repository(repo.path)
+        iteration_nodes = [iteration_repo.get_node(node.name) for node in nodes]
+        for node in iteration_nodes:
+            with io.job(_(" {node} generating metadata ({i}/{n})... ").format(
+                i=i + 1,
+                n=iterations,
+                node=node.name,
+            )):
+                result = node.metadata_hash()
+            hashes.setdefault(node.name, result)
+            if hashes[node.name] != result:
+                io.stderr(_(
+                    "{x} Metadata for node {node} changed when generated repeatedly "
+                    "(use `bw hash -d {node}` to debug)"
+                ).format(node=node.name, x=red("✘")))
+                exit(1)
+            io.progress_advance()
+    io.stdout(_("{x} Metadata remained the same after being generated {n} times").format(
+        n=iterations,
+        x=green("✓"),
+    ))
+
+
+def bw_test(repo, args):
+    options_selected = (
+        args['determinism_config'] > 1 or
+        args['determinism_metadata'] > 1 or
+        args['hooks_node'] or
+        args['hooks_repo'] or
+        args['items'] or
+        args['metadata_collisions'] or
+        args['orphaned_bundles'] or
+        args['empty_groups'] or
+        args['plugin_conflicts'] or
+        args['subgroup_loops']
+    )
+    if args['target']:
+        nodes = get_target_nodes(repo, args['target'], adhoc_nodes=args['adhoc_nodes'])
+        if not options_selected:
+            args['hooks_node'] = True
+            args['items'] = True
+            args['metadata_collisions'] = True
+    else:
+        nodes = copy(list(repo.nodes))
+        if not options_selected:
+            args['hooks_node'] = True
+            args['hooks_repo'] = True
+            args['items'] = True
+            args['metadata_collisions'] = True
+            args['subgroup_loops'] = True
+
+    if args['plugin_conflicts'] and not QUIT_EVENT.is_set():
+        test_plugin_conflicts(repo)
+
+    if args['subgroup_loops'] and not QUIT_EVENT.is_set():
+        test_subgroup_loops(repo)
+
+    if args['empty_groups'] and not QUIT_EVENT.is_set():
+        test_empty_groups(repo)
+
+    if args['orphaned_bundles'] and not QUIT_EVENT.is_set():
+        test_orphaned_bundles(repo)
+
+    if args['metadata_collisions'] and not QUIT_EVENT.is_set():
+        io.progress_set_total(len(nodes))
+        for node in nodes:
+            test_metadata_collisions(node)
+            io.progress_advance()
+
+    if args['items']:
+        test_items(nodes, args['ignore_missing_faults'])
+
+    if args['determinism_metadata'] > 1 and not QUIT_EVENT.is_set():
+        test_determinism_metadata(repo, nodes, args['determinism_metadata'])
+
+    if args['determinism_config'] > 1 and not QUIT_EVENT.is_set():
+        test_determinism_config(repo, nodes, args['determinism_config'])
+
+    if args['hooks_node'] and not QUIT_EVENT.is_set():
+        io.progress_set_total(len(nodes))
+        for node in nodes:
+            repo.hooks.test_node(repo, node)
+            io.progress_advance()
+
+    if args['hooks_repo'] and not QUIT_EVENT.is_set():
         repo.hooks.test(repo)
@@ ... @@
     return items
 
 
-def _inject_concurrency_blockers(items):
+def _inject_concurrency_blockers(items, node_os, node_os_version):
     """
     Looks for items with BLOCK_CONCURRENT set and inserts daisy-chain
     dependencies to force a sequential apply.
@@ ... @@
         item._concurrency_deps = []  # used for DOT (graphviz) output only
         if (
             not isinstance(item, DummyItem) and
-            item.BLOCK_CONCURRENT
+            item.block_concurrent(node_os, node_os_version)
         ):
             item_types.add(item.__class__)
 
-    # Now that we have collected all types with BLOCK_CONCURRENT,
+    # Now that we have collected all relevant types,
     # we must group them together when they overlap. E.g.:
     #
-    # Type1.BLOCK_CONCURRENT = ["type1", "type2"]
-    # Type2.BLOCK_CONCURRENT = ["type2", "type3"]
-    # Type4.BLOCK_CONCURRENT = ["type4"]
+    # Type1.block_concurrent(...) == ["type1", "type2"]
+    # Type2.block_concurrent(...) == ["type2", "type3"]
+    # Type4.block_concurrent(...) == ["type4"]
     #
     # becomes
     #
@@ ... @@
 
     chain_groups = []
     for item_type in item_types:
-        block_concurrent = list(item_type.BLOCK_CONCURRENT) + [item_type.ITEM_TYPE_NAME]
+        block_concurrent = [item_type.ITEM_TYPE_NAME]
+        block_concurrent.extend(item_type.block_concurrent(node_os, node_os_version))
         found = False
         for blocked_types in chain_groups:
             for blocked_type in block_concurrent:
@@ ... @@
             depending_item = items[depending_item_id]
         except KeyError:
             raise ItemDependencyError(_(
-                "'{item}' in bundle '{bundle}' has a reverse dependency (needed_by)"
+                "'{item}' in bundle '{bundle}' has a reverse dependency (needed_by) "
                 "on '{dep}', which doesn't exist"
             ).format(
                 item=item.id,
@@ ... @@
     return items
 
 
-def prepare_dependencies(items):
+def prepare_dependencies(items, node_os, node_os_version):
     """
     Performs all dependency preprocessing on a list of items.
     """
@@ ... @@
     items = _inject_trigger_dependencies(items)
     items = _inject_preceded_by_dependencies(items)
     items = _flatten_dependencies(items)
-    items = _inject_concurrency_blockers(items)
+    items = _inject_concurrency_blockers(items, node_os, node_os_version)
 
     for item in items.values():
         if not isinstance(item, DummyItem):
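For item authors, this hunk turns the old BLOCK_CONCURRENT class attribute into a hook that can vary by target OS, which is why the node's os and os_version are now threaded through prepare_dependencies(). A hypothetical item type using the new class method (the names are illustrative, not part of this patch):

    from bundlewrap.items import Item

    class MyPkg(Item):
        ITEM_TYPE_NAME = "pkg_mymanager"

        @classmethod
        def block_concurrent(cls, node_os, node_os_version):
            # returning this type's own name serializes all pkg_mymanager
            # items on a node; adding other type names would chain those
            # in too, and an empty list allows fully parallel applies
            return ["pkg_mymanager"]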
@@ ... @@
 
 
 class BaseQueue(object):
-    def __init__(self, items):
-        self.items_with_deps = prepare_dependencies(items)
+    def __init__(self, items, node_os, node_os_version):
+        self.items_with_deps = prepare_dependencies(items, node_os, node_os_version)
         self.items_without_deps = []
         self._split()
         self.pending_items = []
@@ ... @@
         )
         self._split()
 
-    def pop(self, interactive=False):
+    def pop(self):
         """
         Gets the next item available for processing and moves it into
         self.pending_items. Will raise IndexError if no item is
-        available. Otherwise, it will return the item and a list of
-        items that have been skipped while looking for the item.
+        available.
         """
-        skipped_items = []
-
         if not self.items_without_deps:
             raise IndexError
 
-        while self.items_without_deps:
-            item = self.items_without_deps.pop()
-
-            if item._precedes_items:
-                if item._precedes_incorrect_item(interactive=interactive):
-                    item.has_been_triggered = True
-                else:
-                    # we do not have to cascade here at all because
-                    # all chained preceding items will be skipped by
-                    # this same mechanism
-                    io.debug(
-                        _("skipping {node}:{bundle}:{item} because its precede trigger "
-                          "did not fire").format(
-                            bundle=item.bundle.name,
-                            item=item.id,
-                            node=item.node.name,
-                        ),
-                    )
-                    self.items_with_deps = remove_dep_from_items(self.items_with_deps, item.id)
-                    self._split()
-                    skipped_items.append(item)
-                    item = None
-                    continue
-            break
-        assert item is not None
+        item = self.items_without_deps.pop()
         self.pending_items.append(item)
-        return (item, skipped_items)
+        return item
 
     def _fire_triggers_for_item(self, item):
         for triggered_item_id in item.triggers:
@@ ... @@
     STATUS_SKIPPED = 4
     STATUS_ACTION_SUCCEEDED = 5
     WHEN_CREATING_ATTRIBUTES = {}
+
+    @classmethod
+    def block_concurrent(cls, node_os, node_os_version):
+        """
+        Return a list of item types that cannot be applied in parallel
+        with this item type.
+        """
+        return []
 
     def __init__(
         self,
@@ ... @@
 
     @cached_property
     def cached_unless_result(self):
-        if self.unless and not self.cached_status.correct:
+        """
+        Returns True if 'unless' wants to skip this item.
+        """
+        if self.unless and (self.ITEM_TYPE_NAME == 'action' or not self.cached_status.correct):
             unless_result = self.node.run(self.unless, may_fail=True)
             return unless_result.return_code == 0
         else:
             return False
 
-    def _precedes_incorrect_item(self, interactive=False):
-        """
-        Returns True if this item precedes another and the triggering
-        item is in need of fixing.
-        """
-        for item in self._precedes_items:
-            if item._precedes_incorrect_item():
-                return True
+    def _triggers_preceding_items(self, interactive=False):
+        """
+        Preceding items will execute this to figure out if they're
+        triggered.
+        """
         if self.cached_unless_result:
-            # triggering item failed unless, so there is nothing to do
+            # 'unless' says we don't need to run
             return False
         if self.ITEM_TYPE_NAME == 'action':
+            # so we have an action where 'unless' says it must be run
+            # but the 'interactive' attribute might still override that
             if self.attributes['interactive'] != interactive or \
                     self.attributes['interactive'] is None:
+                return True
+            else:
                 return False
-        else:
-            return True
         return not self.cached_status.correct
 
     def _prepare_deps(self, items):
@@ ... @@
 
     @classmethod
     def _validate_attribute_names(cls, bundle, item_id, attributes):
+        if not isinstance(attributes, dict):
+            raise BundleError(_(
+                "invalid item '{item}' in bundle '{bundle}': not a dict"
+            ).format(
+                item=item_id,
+                bundle=bundle.name,
+            ))
         invalid_attributes = set(attributes.keys()).difference(
             set(cls.ITEM_ATTRIBUTES.keys()).union(
                 set(BUILTIN_ITEM_ATTRIBUTES.keys())
@@ ... @@
         if self._skip_with_soft_locks(my_soft_locks, other_peoples_soft_locks):
             status_code = self.STATUS_SKIPPED
             keys_to_fix = [_("soft locked")]
+
+        for item in self._precedes_items:
+            if item._triggers_preceding_items(interactive=interactive):
+                io.debug(_(
+                    "preceding item {item} on {node} has been triggered by {other_item}"
+                ).format(item=self.id, node=self.node.name, other_item=item.id))
+                self.has_been_triggered = True
+                break
+            else:
+                io.debug(_(
+                    "preceding item {item} on {node} has NOT been triggered by {other_item}"
+                ).format(item=self.id, node=self.node.name, other_item=item.id))
 
         if self.triggered and not self.has_been_triggered and status_code is None:
             io.debug(_(
@@ ... @@
             status_code = self.STATUS_OK
 
         if status_code is None:
-            keys_to_fix = self.display_keys(
-                copy(self.cached_cdict),
-                copy(status_before.sdict),
-                status_before.keys_to_fix[:],
-            )
+            keys_to_fix = status_before.keys_to_fix
             if not interactive:
                 with io.job(_(" {node} {bundle} {item} fixing...").format(
                     bundle=self.bundle.name,
@@ ... @@
             elif status_before.must_be_deleted:
                 question_text = _("Found on node. Will be removed.")
             else:
-                cdict, sdict = self.display_dicts(
+                cdict, sdict, display_keys_to_fix = self.display_dicts(
                     copy(self.cached_cdict),
                     copy(status_before.sdict),
-                    keys_to_fix,
+                    copy(keys_to_fix),
                 )
-                question_text = self.ask(cdict, sdict, keys_to_fix)
+                question_text = self.ask(cdict, sdict, display_keys_to_fix)
                 if self.comment:
                     question_text += format_comment(self.comment)
                 question = wrap_question(
@@ ... @@
         elif status_before.must_be_deleted:
             changes = False
         elif status_code == self.STATUS_FAILED:
-            changes = self.display_keys(
+            changes = self.display_dicts(
                 self.cached_cdict.copy(),
                 status_after.sdict.copy(),
                 status_after.keys_to_fix[:],
521 | ) | |
546 | )[2] | |
522 | 547 | else: |
523 | 548 | changes = keys_to_fix |
524 | 549 | |
631 | 656 | def display_dicts(self, cdict, sdict, keys): |
632 | 657 | """ |
633 | 658 | Given cdict and sdict as implemented above, modify them to |
634 | better suit interactive presentation. The keys parameter is the | |
635 | return value of display_keys (see below) and provided for | |
636 | reference only. | |
659 | better suit interactive presentation. The keys parameter is a | |
660 | list of keys whose values differ between cdict and sdict. | |
637 | 661 | |
638 | 662 | MAY be overridden by subclasses. |
639 | 663 | """ |
640 | return (cdict, sdict) | |
641 | ||
642 | def display_keys(self, cdict, sdict, keys): | |
643 | """ | |
644 | Given a list of keys whose values differ between cdict and | |
645 | sdict, modify them to better suit presentation to the user. | |
646 | ||
647 | MAY be overridden by subclasses. | |
648 | """ | |
649 | return keys | |
664 | return (cdict, sdict, keys) | |
650 | 665 | |
651 | 666 | def patch_attributes(self, attributes): |
652 | 667 | """ |
62 | 62 | if interactive is False and self.attributes['interactive'] is True: |
63 | 63 | return (self.STATUS_SKIPPED, [_("interactive only")]) |
64 | 64 | |
65 | for item in self._precedes_items: | |
66 | if item._triggers_preceding_items(interactive=interactive): | |
67 | io.debug(_( | |
68 | "preceding item {item} on {node} has been triggered by {other_item}" | |
69 | ).format(item=self.id, node=self.node.name, other_item=item.id)) | |
70 | self.has_been_triggered = True | |
71 | break | |
72 | else: | |
73 | io.debug(_( | |
74 | "preceding item {item} on {node} has NOT been triggered by {other_item}" | |
75 | ).format(item=self.id, node=self.node.name, other_item=item.id)) | |
76 | ||
65 | 77 | if self.triggered and not self.has_been_triggered: |
66 | 78 | io.debug(_("skipping {} because it wasn't triggered").format(self.id)) |
67 | return (self.STATUS_SKIPPED, [_("no trigger")]) | |
79 | return (self.STATUS_SKIPPED, [_("not triggered")]) | |
68 | 80 | |
69 | 81 | if self.unless: |
70 | 82 | with io.job(_(" {node} {bundle} {item} checking 'unless' condition...").format( |
32 | 32 | "mode for {item} should be three or four digits long, was: '{value}'" |
33 | 33 | ).format(item=item_id, value=value)) |
34 | 34 | |
35 | ||
35 | 36 | ATTRIBUTE_VALIDATORS = defaultdict(lambda: lambda id, value: None) |
36 | 37 | ATTRIBUTE_VALIDATORS.update({ |
37 | 38 | 'mode': validator_mode, |
44 | 45 | """ |
45 | 46 | BUNDLE_ATTRIBUTE_NAME = "directories" |
46 | 47 | ITEM_ATTRIBUTES = { |
47 | 'group': None, | |
48 | 'mode': None, | |
49 | 'owner': None, | |
48 | 'group': "root", | |
49 | 'mode': "0755", | |
50 | 'owner': "root", | |
50 | 51 | 'purge': False, |
51 | 52 | } |
52 | 53 | ITEM_TYPE_NAME = "directory" |
67 | 68 | return cdict |
68 | 69 | |
69 | 70 | def display_dicts(self, cdict, sdict, keys): |
70 | if UNMANAGED_PATH_DESC in keys: | |
71 | try: | |
72 | keys.remove('paths_to_purge') | |
73 | except ValueError: | |
74 | pass | |
75 | else: | |
76 | keys.append(UNMANAGED_PATH_DESC) | |
71 | 77 | cdict[UNMANAGED_PATH_DESC] = cdict['paths_to_purge'] |
72 | 78 | sdict[UNMANAGED_PATH_DESC] = sdict['paths_to_purge'] |
73 | 79 | del cdict['paths_to_purge'] |
74 | 80 | del sdict['paths_to_purge'] |
75 | return (cdict, sdict) | |
76 | ||
77 | def display_keys(self, cdict, sdict, keys): | |
78 | try: | |
79 | keys.remove('paths_to_purge') | |
80 | except ValueError: | |
81 | pass | |
82 | else: | |
83 | keys.append(UNMANAGED_PATH_DESC) | |
84 | return keys | |
81 | return (cdict, sdict, keys) | |
85 | 82 | |
86 | 83 | def fix(self, status): |
87 | 84 | if status.must_be_created or 'type' in status.keys_to_fix: |
182 | 179 | path=line, |
183 | 180 | )) |
184 | 181 | yield line |
185 | ||
186 | ||
187 | 182 | |
188 | 183 | def get_auto_deps(self, items): |
189 | 184 | deps = [] |
256 | 251 | def patch_attributes(self, attributes): |
257 | 252 | if 'mode' in attributes and attributes['mode'] is not None: |
258 | 253 | attributes['mode'] = str(attributes['mode']).zfill(4) |
254 | if 'group' not in attributes and self.node.os in self.node.OS_FAMILY_BSD: | |
255 | # BSD doesn't have a root group, so we have to use a | |
256 | # different default value here | |
257 | attributes['group'] = 'wheel' | |
259 | 258 | return attributes |
260 | 259 | |
261 | 260 | @classmethod |
178 | 178 | 'context': None, |
179 | 179 | 'delete': False, |
180 | 180 | 'encoding': "utf-8", |
181 | 'group': None, | |
182 | 'mode': None, | |
183 | 'owner': None, | |
181 | 'group': "root", | |
182 | 'mode': "0644", | |
183 | 'owner': "root", | |
184 | 184 | 'source': None, |
185 | 185 | 'verify_with': None, |
186 | 186 | } |
350 | 350 | } |
351 | 351 | |
352 | 352 | def display_dicts(self, cdict, sdict, keys): |
353 | if 'content' in keys: | |
354 | del cdict['content_hash'] | |
355 | del sdict['content_hash'] | |
356 | cdict['content'] = self.content | |
357 | sdict['content'] = get_remote_file_contents(self.node, self.name) | |
358 | return (cdict, sdict) | |
359 | ||
360 | def display_keys(self, cdict, sdict, keys): | |
361 | 353 | if ( |
362 | 354 | 'content_hash' in keys and |
363 | 355 | self.attributes['content_type'] not in ('base64', 'binary') and |
366 | 358 | ): |
367 | 359 | keys.remove('content_hash') |
368 | 360 | keys.append('content') |
369 | return keys | |
361 | del cdict['content_hash'] | |
362 | del sdict['content_hash'] | |
363 | cdict['content'] = self.content | |
364 | sdict['content'] = get_remote_file_contents(self.node, self.name) | |
365 | return (cdict, sdict, keys) | |
370 | 366 | |
371 | 367 | def patch_attributes(self, attributes): |
372 | 368 | if ( |
380 | 376 | attributes['context'] = {} |
381 | 377 | if 'mode' in attributes and attributes['mode'] is not None: |
382 | 378 | attributes['mode'] = str(attributes['mode']).zfill(4) |
379 | if 'group' not in attributes and self.node.os in self.node.OS_FAMILY_BSD: | |
380 | # BSD doesn't have a root group, so we have to use a | |
381 | # different default value here | |
382 | attributes['group'] = 'wheel' | |
383 | 383 | return attributes |
384 | 384 | |
385 | 385 | def test(self): |
31 | 31 | } |
32 | 32 | ITEM_TYPE_NAME = "group" |
33 | 33 | REQUIRED_ATTRIBUTES = [] |
34 | ||
35 | @classmethod | |
36 | def block_concurrent(cls, node_os, node_os_version): | |
37 | # https://github.com/bundlewrap/bundlewrap/issues/367 | |
38 | if node_os == 'openbsd': | |
39 | return [cls.ITEM_TYPE_NAME] | |
40 | else: | |
41 | return [] | |
34 | 42 | |
35 | 43 | def __repr__(self): |
36 | 44 | return "<Group name:{}>".format(self.name) |
17 | 17 | 'installed': True, |
18 | 18 | } |
19 | 19 | _pkg_install_cache = {} |
20 | ||
21 | @classmethod | |
22 | def block_concurrent(cls, node_os, node_os_version): | |
23 | return [cls.ITEM_TYPE_NAME] | |
20 | 24 | |
21 | 25 | def __repr__(self): |
22 | 26 | return "<{} name:{} installed:{}>".format( |
11 | 11 | """ |
12 | 12 | A package installed by apt. |
13 | 13 | """ |
14 | BLOCK_CONCURRENT = ["pkg_apt"] | |
15 | 14 | BUNDLE_ATTRIBUTE_NAME = "pkg_apt" |
16 | 15 | ITEM_TYPE_NAME = "pkg_apt" |
17 | 16 | WHEN_CREATING_ATTRIBUTES = { |
30 | 29 | runlevel + |
31 | 30 | "DEBIAN_FRONTEND=noninteractive " |
32 | 31 | "apt-get -qy -o Dpkg::Options::=--force-confold --no-install-recommends " |
33 | "install {}".format(quote(self.name.replace("_", ":"))) | |
32 | "install {}".format(quote(self.name.replace("_", ":"))), | |
33 | may_fail=True, | |
34 | 34 | ) |
35 | 35 | |
36 | 36 | def pkg_installed(self): |
9 | 9 | """ |
10 | 10 | A package installed by dnf. |
11 | 11 | """ |
12 | BLOCK_CONCURRENT = ["pkg_dnf", "pkg_yum"] | |
13 | 12 | BUNDLE_ATTRIBUTE_NAME = "pkg_dnf" |
14 | 13 | ITEM_TYPE_NAME = "pkg_dnf" |
14 | ||
15 | @classmethod | |
16 | def block_concurrent(cls, node_os, node_os_version): | |
17 | return ["pkg_dnf", "pkg_yum"] | |
15 | 18 | |
16 | 19 | def pkg_all_installed(self): |
17 | 20 | result = self.node.run("dnf -d0 -e0 list installed") |
19 | 22 | yield "{}:{}".format(self.ITEM_TYPE_NAME, line.split()[0].split(".")[0]) |
20 | 23 | |
21 | 24 | def pkg_install(self): |
22 | self.node.run("dnf -d0 -e0 -y install {}".format(quote(self.name))) | |
25 | self.node.run("dnf -d0 -e0 -y install {}".format(quote(self.name)), may_fail=True) | |
23 | 26 | |
24 | 27 | def pkg_installed(self): |
25 | 28 | result = self.node.run( |
29 | 32 | return result.return_code == 0 |
30 | 33 | |
31 | 34 | def pkg_remove(self): |
32 | self.node.run("dnf -d0 -e0 -y remove {}".format(quote(self.name))) | |
35 | self.node.run("dnf -d0 -e0 -y remove {}".format(quote(self.name)), may_fail=True) |
13 | 13 | |
14 | 14 | def pkg_install(node, pkgname, version): |
15 | 15 | full_name = "{}-{}".format(pkgname, version) if version else pkgname |
16 | return node.run("pkg_add -r -I {}".format(full_name)) | |
16 | return node.run("pkg_add -r -I {}".format(full_name), may_fail=True) | |
17 | 17 | |
18 | 18 | |
19 | 19 | def pkg_installed(node, pkgname): |
21 | 21 | "pkg_info | cut -f 1 -d ' '", |
22 | 22 | may_fail=True, |
23 | 23 | ) |
24 | for line in result.stdout.decode('utf-8').strip().split("\n"): | |
24 | for line in result.stdout.decode('utf-8').strip().splitlines(): | |
25 | 25 | installed_package, installed_version = PKGSPEC_REGEX.match(line).groups() |
26 | 26 | if installed_package == pkgname: |
27 | 27 | return installed_version |
29 | 29 | |
30 | 30 | |
31 | 31 | def pkg_remove(node, pkgname): |
32 | return node.run("pkg_delete -I -D dependencies {}".format(quote(pkgname))) | |
32 | return node.run("pkg_delete -I -D dependencies {}".format(quote(pkgname)), may_fail=True) | |
33 | 33 | |
34 | 34 | |
35 | 35 | class OpenBSDPkg(Item): |
36 | 36 | """ |
37 | 37 | A package installed by pkg_add/pkg_delete. |
38 | 38 | """ |
39 | BLOCK_CONCURRENT = ["pkg_openbsd"] | |
40 | 39 | BUNDLE_ATTRIBUTE_NAME = "pkg_openbsd" |
41 | 40 | ITEM_ATTRIBUTES = { |
42 | 41 | 'installed': True, |
10 | 10 | """ |
11 | 11 | A package installed by pacman. |
12 | 12 | """ |
13 | BLOCK_CONCURRENT = ["pkg_pacman"] | |
14 | 13 | BUNDLE_ATTRIBUTE_NAME = "pkg_pacman" |
15 | 14 | ITEM_ATTRIBUTES = { |
16 | 15 | 'installed': True, |
34 | 33 | local_file = join(self.item_dir, self.attributes['tarball']) |
35 | 34 | remote_file = "/tmp/{}".format(basename(local_file)) |
36 | 35 | self.node.upload(local_file, remote_file) |
37 | self.node.run("pacman --noconfirm -U {}".format(quote(remote_file))) | |
36 | self.node.run("pacman --noconfirm -U {}".format(quote(remote_file)), may_fail=True) | |
38 | 37 | self.node.run("rm -- {}".format(quote(remote_file))) |
39 | 38 | else: |
40 | self.node.run("pacman --noconfirm -S {}".format(quote(self.name))) | |
39 | self.node.run("pacman --noconfirm -S {}".format(quote(self.name)), may_fail=True) | |
41 | 40 | |
42 | 41 | def pkg_installed(self): |
43 | 42 | result = self.node.run( |
47 | 46 | return result.return_code == 0 |
48 | 47 | |
49 | 48 | def pkg_remove(self): |
50 | self.node.run("pacman --noconfirm -Rs {}".format(quote(self.name))) | |
49 | self.node.run("pacman --noconfirm -Rs {}".format(quote(self.name)), may_fail=True) |
12 | 12 | if version: |
13 | 13 | pkgname = "{}=={}".format(pkgname, version) |
14 | 14 | pip_path, pkgname = split_path(pkgname) |
15 | return node.run("{} install -U {}".format(quote(pip_path), quote(pkgname))) | |
15 | return node.run("{} install -U {}".format(quote(pip_path), quote(pkgname)), may_fail=True) | |
16 | 16 | |
17 | 17 | |
18 | 18 | def pkg_installed(node, pkgname): |
29 | 29 | |
30 | 30 | def pkg_remove(node, pkgname): |
31 | 31 | pip_path, pkgname = split_path(pkgname) |
32 | return node.run("{} uninstall -y {}".format(quote(pip_path), quote(pkgname))) | |
32 | return node.run("{} uninstall -y {}".format(quote(pip_path), quote(pkgname)), may_fail=True) | |
33 | 33 | |
34 | 34 | |
35 | 35 | class PipPkg(Item): |
36 | 36 | """ |
37 | 37 | A package installed by pip. |
38 | 38 | """ |
39 | BLOCK_CONCURRENT = ["pkg_pip"] | |
40 | 39 | BUNDLE_ATTRIBUTE_NAME = "pkg_pip" |
41 | 40 | ITEM_ATTRIBUTES = { |
42 | 41 | 'installed': True, |
9 | 9 | """ |
10 | 10 | A package installed by snap. |
11 | 11 | """ |
12 | BLOCK_CONCURRENT = ["pkg_snap"] | |
13 | 12 | BUNDLE_ATTRIBUTE_NAME = "pkg_snap" |
14 | 13 | ITEM_TYPE_NAME = "pkg_snap" |
15 | 14 | |
19 | 18 | yield "{}:{}".format(self.ITEM_TYPE_NAME, line.split()[0].split(" ")[0]) |
20 | 19 | |
21 | 20 | def pkg_install(self): |
22 | self.node.run("snap install {}".format(quote(self.name))) | |
21 | self.node.run("snap install {}".format(quote(self.name)), may_fail=True) | |
23 | 22 | |
24 | 23 | def pkg_installed(self): |
25 | 24 | result = self.node.run( |
29 | 28 | return result.return_code == 0 |
30 | 29 | |
31 | 30 | def pkg_remove(self): |
32 | self.node.run("snap remove {}".format(quote(self.name))) | |
31 | self.node.run("snap remove {}".format(quote(self.name)), may_fail=True) |
9 | 9 | """ |
10 | 10 | A package installed by yum. |
11 | 11 | """ |
12 | BLOCK_CONCURRENT = ["pkg_dnf", "pkg_yum"] | |
13 | 12 | BUNDLE_ATTRIBUTE_NAME = "pkg_yum" |
14 | 13 | ITEM_TYPE_NAME = "pkg_yum" |
14 | ||
15 | @classmethod | |
16 | def block_concurrent(cls, node_os, node_os_version): | |
17 | return ["pkg_dnf", "pkg_yum"] | |
15 | 18 | |
16 | 19 | def pkg_all_installed(self): |
17 | 20 | result = self.node.run("yum -d0 -e0 list installed") |
19 | 22 | yield "{}:{}".format(self.ITEM_TYPE_NAME, line.split()[0].split(".")[0]) |
20 | 23 | |
21 | 24 | def pkg_install(self): |
22 | self.node.run("yum -d0 -e0 -y install {}".format(quote(self.name))) | |
25 | self.node.run("yum -d0 -e0 -y install {}".format(quote(self.name)), may_fail=True) | |
23 | 26 | |
24 | 27 | def pkg_installed(self): |
25 | 28 | result = self.node.run( |
29 | 32 | return result.return_code == 0 |
30 | 33 | |
31 | 34 | def pkg_remove(self): |
32 | self.node.run("yum -d0 -e0 -y remove {}".format(quote(self.name))) | |
35 | self.node.run("yum -d0 -e0 -y remove {}".format(quote(self.name)), may_fail=True) |
13 | 13 | |
14 | 14 | |
15 | 15 | def pkg_install(node, pkgname): |
16 | return node.run("zypper {} install {}".format(ZYPPER_OPTS, quote(pkgname))) | |
16 | return node.run("zypper {} install {}".format(ZYPPER_OPTS, quote(pkgname)), may_fail=True) | |
17 | 17 | |
18 | 18 | |
19 | 19 | def pkg_installed(node, pkgname): |
29 | 29 | |
30 | 30 | |
31 | 31 | def pkg_remove(node, pkgname): |
32 | return node.run("zypper {} remove {}".format(ZYPPER_OPTS, quote(pkgname))) | |
32 | return node.run("zypper {} remove {}".format(ZYPPER_OPTS, quote(pkgname)), may_fail=True) | |
33 | 33 | |
34 | 34 | |
35 | 35 | class ZypperPkg(Item): |
36 | 36 | """ |
37 | 37 | A package installed by zypper. |
38 | 38 | """ |
39 | BLOCK_CONCURRENT = ["pkg_zypper"] | |
40 | 39 | BUNDLE_ATTRIBUTE_NAME = "pkg_zypper" |
41 | 40 | ITEM_ATTRIBUTES = { |
42 | 41 | 'installed': True, |
43 | 42 | } |
44 | 43 | ITEM_TYPE_NAME = "pkg_zypper" |
44 | ||
45 | @classmethod | |
46 | def block_concurrent(cls, node_os, node_os_version): | |
47 | return [cls.ITEM_TYPE_NAME] | |
45 | 48 | |
46 | 49 | def __repr__(self): |
47 | 50 | return "<ZypperPkg name:{} installed:{}>".format( |
8 | 8 | |
9 | 9 | |
10 | 10 | def svc_start(node, svcname): |
11 | return node.run("/etc/rc.d/{} start".format(quote(svcname))) | |
11 | return node.run("/etc/rc.d/{} start".format(quote(svcname)), may_fail=True) | |
12 | 12 | |
13 | 13 | |
14 | 14 | def svc_running(node, svcname): |
17 | 17 | |
18 | 18 | |
19 | 19 | def svc_stop(node, svcname): |
20 | return node.run("/etc/rc.d/{} stop".format(quote(svcname))) | |
20 | return node.run("/etc/rc.d/{} stop".format(quote(svcname)), may_fail=True) | |
21 | 21 | |
22 | 22 | |
23 | 23 | def svc_enable(node, svcname): |
24 | return node.run("rcctl set {} status on".format(quote(svcname))) | |
24 | return node.run("rcctl set {} status on".format(quote(svcname)), may_fail=True) | |
25 | 25 | |
26 | 26 | |
27 | 27 | def svc_enabled(node, svcname): |
33 | 33 | |
34 | 34 | |
35 | 35 | def svc_disable(node, svcname): |
36 | return node.run("rcctl set {} status off".format(quote(svcname))) | |
36 | return node.run("rcctl set {} status off".format(quote(svcname)), may_fail=True) | |
37 | 37 | |
38 | 38 | |
39 | 39 | class SvcOpenBSD(Item): |
8 | 8 | |
9 | 9 | |
10 | 10 | def svc_start(node, svcname): |
11 | return node.run("systemctl start -- {}".format(quote(svcname))) | |
11 | return node.run("systemctl start -- {}".format(quote(svcname)), may_fail=True) | |
12 | 12 | |
13 | 13 | |
14 | 14 | def svc_running(node, svcname): |
20 | 20 | |
21 | 21 | |
22 | 22 | def svc_stop(node, svcname): |
23 | return node.run("systemctl stop -- {}".format(quote(svcname))) | |
23 | return node.run("systemctl stop -- {}".format(quote(svcname)), may_fail=True) | |
24 | 24 | |
25 | 25 | |
26 | 26 | def svc_enable(node, svcname): |
27 | return node.run("systemctl enable -- {}".format(quote(svcname))) | |
27 | return node.run("systemctl enable -- {}".format(quote(svcname)), may_fail=True) | |
28 | 28 | |
29 | 29 | |
30 | 30 | def svc_enabled(node, svcname): |
36 | 36 | |
37 | 37 | |
38 | 38 | def svc_disable(node, svcname): |
39 | return node.run("systemctl disable -- {}".format(quote(svcname))) | |
39 | return node.run("systemctl disable -- {}".format(quote(svcname)), may_fail=True) | |
40 | 40 | |
41 | 41 | |
42 | 42 | class SvcSystemd(Item): |
44 | 44 | A service managed by systemd. |
45 | 45 | """ |
46 | 46 | BUNDLE_ATTRIBUTE_NAME = "svc_systemd" |
47 | # bw 3.0: Both should default to True. | |
48 | 47 | ITEM_ATTRIBUTES = { |
49 | 'enabled': None, | |
48 | 'enabled': True, | |
50 | 49 | 'running': True, |
51 | 50 | } |
52 | 51 | ITEM_TYPE_NAME = "svc_systemd" |
8 | 8 | |
9 | 9 | |
10 | 10 | def svc_start(node, svcname): |
11 | return node.run("/etc/init.d/{} start".format(quote(svcname))) | |
11 | return node.run("/etc/init.d/{} start".format(quote(svcname)), may_fail=True) | |
12 | 12 | |
13 | 13 | |
14 | 14 | def svc_running(node, svcname): |
20 | 20 | |
21 | 21 | |
22 | 22 | def svc_stop(node, svcname): |
23 | return node.run("/etc/init.d/{} stop".format(quote(svcname))) | |
23 | return node.run("/etc/init.d/{} stop".format(quote(svcname)), may_fail=True) | |
24 | 24 | |
25 | 25 | |
26 | 26 | class SvcSystemV(Item): |
8 | 8 | |
9 | 9 | |
10 | 10 | def svc_start(node, svcname): |
11 | return node.run("initctl start --no-wait -- {}".format(quote(svcname))) | |
11 | return node.run("initctl start --no-wait -- {}".format(quote(svcname)), may_fail=True) | |
12 | 12 | |
13 | 13 | |
14 | 14 | def svc_running(node, svcname): |
19 | 19 | |
20 | 20 | |
21 | 21 | def svc_stop(node, svcname): |
22 | return node.run("initctl stop --no-wait -- {}".format(quote(svcname))) | |
22 | return node.run("initctl stop --no-wait -- {}".format(quote(svcname)), may_fail=True) | |
23 | 23 | |
24 | 24 | |
25 | 25 | class SvcUpstart(Item): |
20 | 20 | """ |
21 | 21 | BUNDLE_ATTRIBUTE_NAME = "symlinks" |
22 | 22 | ITEM_ATTRIBUTES = { |
23 | 'group': None, | |
24 | 'owner': None, | |
23 | 'group': "root", | |
24 | 'owner': "root", | |
25 | 25 | 'target': None, |
26 | 26 | } |
27 | 27 | ITEM_TYPE_NAME = "symlink" |
60 | 60 | group = self.attributes['group'] or "" |
61 | 61 | if group: |
62 | 62 | group = ":" + quote(group) |
63 | self.node.run("chown -h {}{} -- {}".format( | |
63 | if self.node.os in self.node.OS_FAMILY_BSD: | |
64 | command = "chown -h {}{} {}" | |
65 | else: | |
66 | command = "chown -h {}{} -- {}" | |
67 | self.node.run(command.format( | |
64 | 68 | quote(self.attributes['owner'] or ""), |
65 | 69 | group, |
66 | 70 | quote(self.name), |
132 | 136 | deps.append(item.id) |
133 | 137 | return deps |
134 | 138 | |
139 | def patch_attributes(self, attributes): | |
140 | if 'group' not in attributes and self.node.os in self.node.OS_FAMILY_BSD: | |
141 | # BSD doesn't have a root group, so we have to use a | |
142 | # different default value here | |
143 | attributes['group'] = 'wheel' | |
144 | return attributes | |
145 | ||
135 | 146 | def sdict(self): |
136 | 147 | path_info = PathInfo(self.node, self.name) |
137 | 148 | if not path_info.exists: |
106 | 106 | 'use_shadow': None, |
107 | 107 | } |
108 | 108 | ITEM_TYPE_NAME = "user" |
109 | ||
110 | @classmethod | |
111 | def block_concurrent(cls, node_os, node_os_version): | |
112 | # https://github.com/bundlewrap/bundlewrap/issues/367 | |
113 | if node_os == 'openbsd': | |
114 | return [cls.ITEM_TYPE_NAME] | |
115 | else: | |
116 | return [] | |
109 | 117 | |
110 | 118 | def __repr__(self): |
111 | 119 | return "<User name:{}>".format(self.name) |
25 | 25 | type(None), |
26 | 26 | ) |
27 | 27 | |
28 | # constants returned as options by metadata processors | |
29 | DONE = 1 | |
30 | RUN_ME_AGAIN = 2 | |
31 | DEFAULTS = 3 | |
32 | OVERWRITE = 4 | |
33 | ||
28 | 34 | |
29 | 35 | def atomic(obj): |
30 | 36 | """ |
39 | 45 | "(not: {})".format(repr(obj))) |
40 | 46 | else: |
41 | 47 | return cls(obj) |
48 | ||
49 | ||
50 | def check_metadata_processor_result(result, node_name, metadata_processor_name): | |
51 | """ | |
52 | Validates the return value of a metadata processor and splits it | |
53 | into metadata and options. | |
54 | """ | |
55 | if not isinstance(result, tuple) or not len(result) >= 2: | |
56 | raise ValueError(_( | |
57 | "metadata processor {metaproc} for node {node} did not return " | |
58 | "a tuple of length 2 or greater" | |
59 | ).format( | |
60 | metaproc=metadata_processor_name, | |
61 | node=node_name, | |
62 | )) | |
63 | result_dict, options = result[0], result[1:] | |
64 | if not isinstance(result_dict, dict): | |
65 | raise ValueError(_( | |
66 | "metadata processor {metaproc} for node {node} did not return " | |
67 | "a dict as the first element" | |
68 | ).format( | |
69 | metaproc=metadata_processor_name, | |
70 | node=node_name, | |
71 | )) | |
72 | for option in options: | |
73 | if option not in (DEFAULTS, DONE, RUN_ME_AGAIN, OVERWRITE): | |
74 | raise ValueError(_( | |
75 | "metadata processor {metaproc} for node {node} returned an " | |
76 | "invalid option: {opt}" | |
77 | ).format( | |
78 | metaproc=metadata_processor_name, | |
79 | node=node_name, | |
80 | opt=repr(option), | |
81 | )) | |
82 | if DONE in options and RUN_ME_AGAIN in options: | |
83 | raise ValueError(_( | |
84 | "metadata processor {metaproc} for node {node} cannot return both " | |
85 | "DONE and RUN_ME_AGAIN" | |
86 | ).format( | |
87 | metaproc=metadata_processor_name, | |
88 | node=node_name, | |
89 | )) | |
90 | if DONE not in options and RUN_ME_AGAIN not in options: | |
91 | raise ValueError(_( | |
92 | "metadata processor {metaproc} for node {node} must return either " | |
93 | "DONE or RUN_ME_AGAIN" | |
94 | ).format( | |
95 | metaproc=metadata_processor_name, | |
96 | node=node_name, | |
97 | )) | |
98 | if DEFAULTS in options and OVERWRITE in options: | |
99 | raise ValueError(_( | |
100 | "metadata processor {metaproc} for node {node} cannot return both " | |
101 | "DEFAULTS and OVERWRITE" | |
102 | ).format( | |
103 | metaproc=metadata_processor_name, | |
104 | node=node_name, | |
105 | )) | |
106 | return result_dict, options | |
42 | 107 | |
43 | 108 | |
44 | 109 | def check_for_unsolvable_metadata_key_conflicts(node): |
153 | 218 | Our own version of deepcopy.copy that doesn't pickle and ensures |
154 | 219 | a limited range of types is used in metadata. |
155 | 220 | """ |
156 | if isinstance(obj, dict): | |
221 | if isinstance(obj, METADATA_TYPES): | |
222 | return obj | |
223 | elif isinstance(obj, dict): | |
157 | 224 | new_obj = {} |
158 | 225 | for key, value in obj.items(): |
159 | 226 | if not isinstance(key, METADATA_TYPES): |
168 | 235 | new_obj = set() |
169 | 236 | for member in obj: |
170 | 237 | new_obj.add(deepcopy_metadata(member)) |
171 | elif isinstance(obj, METADATA_TYPES): | |
172 | return obj | |
173 | 238 | else: |
174 | 239 | raise ValueError(_("illegal metadata value type: {}").format(repr(obj))) |
175 | 240 | return new_obj |
3 | 3 | from datetime import datetime, timedelta |
4 | 4 | from hashlib import md5 |
5 | 5 | from os import environ |
6 | from sys import exit | |
7 | 6 | from threading import Lock |
8 | 7 | |
9 | 8 | from . import operations |
15 | 14 | ) |
16 | 15 | from .exceptions import ( |
17 | 16 | DontCache, |
18 | FaultUnavailable, | |
19 | 17 | ItemDependencyLoop, |
20 | 18 | NodeLockedException, |
21 | 19 | NoSuchBundle, |
22 | 20 | RepositoryError, |
23 | 21 | ) |
24 | 22 | from .group import GROUP_ATTR_DEFAULTS |
25 | from .itemqueue import ItemQueue, ItemTestQueue | |
23 | from .itemqueue import ItemQueue | |
26 | 24 | from .items import Item |
27 | 25 | from .lock import NodeLock |
28 | from .metadata import check_for_unsolvable_metadata_key_conflicts, hash_metadata | |
26 | from .metadata import hash_metadata | |
29 | 27 | from .utils import cached_property, names |
30 | 28 | from .utils.statedict import hash_statedict |
31 | 29 | from .utils.text import blue, bold, cyan, green, red, validate_name, yellow |
44 | 42 | self.fixed = 0 |
45 | 43 | self.skipped = 0 |
46 | 44 | self.failed = 0 |
47 | self.profiling_info = [] | |
48 | ||
49 | for item_id, result, time_elapsed in item_results: | |
50 | self.profiling_info.append((time_elapsed, item_id)) | |
45 | self.total = 0 | |
46 | ||
47 | for item_id, result, duration in item_results: | |
48 | self.total += 1 | |
51 | 49 | if result == Item.STATUS_ACTION_SUCCEEDED: |
52 | 50 | self.correct += 1 |
53 | 51 | elif result == Item.STATUS_OK: |
63 | 61 | "can't make sense of results for {} on {}: {}" |
64 | 62 | ).format(item_id, self.node_name, result)) |
65 | 63 | |
66 | self.profiling_info.sort() | |
67 | self.profiling_info.reverse() | |
68 | ||
69 | 64 | self.start = None |
70 | 65 | self.end = None |
71 | 66 | |
119 | 114 | other_peoples_soft_locks=(), |
120 | 115 | workers=1, |
121 | 116 | interactive=False, |
122 | profiling=False, | |
123 | 117 | ): |
124 | 118 | with io.job(_(" {node} processing dependencies...").format(node=node.name)): |
125 | item_queue = ItemQueue(node.items) | |
119 | item_queue = ItemQueue(node.items, node.os, node.os_version) | |
126 | 120 | |
127 | 121 | results = [] |
128 | 122 | |
130 | 124 | return bool(item_queue.items_without_deps) |
131 | 125 | |
132 | 126 | def next_task(): |
133 | item, skipped_items = item_queue.pop() | |
134 | for skipped_item in skipped_items: | |
135 | io.progress_advance() | |
136 | handle_apply_result( | |
137 | node, | |
138 | skipped_item, | |
139 | Item.STATUS_SKIPPED, | |
140 | interactive, | |
141 | changes=[_("no pre-trigger")], | |
142 | ) | |
143 | results.append((skipped_item.id, Item.STATUS_SKIPPED, timedelta(0))) | |
144 | ||
127 | item = item_queue.pop() | |
145 | 128 | return { |
146 | 129 | 'task_id': "{}:{}".format(node.name, item.id), |
147 | 130 | 'target': item.apply, |
532 | 515 | def magic_number(self): |
533 | 516 | return int(md5(self.name.encode('UTF-8')).hexdigest(), 16) |
534 | 517 | |
535 | @property | |
536 | def _static_items(self): | |
537 | for bundle in self.bundles: | |
538 | for item in bundle._static_items: | |
539 | yield item | |
540 | ||
541 | 518 | def apply( |
542 | 519 | self, |
543 | 520 | autoskip_selector="", |
545 | 522 | force=False, |
546 | 523 | skip_list=tuple(), |
547 | 524 | workers=4, |
548 | profiling=False, | |
549 | 525 | ): |
550 | 526 | if not list(self.items): |
551 | 527 | io.stdout(_("{x} {node} has no items").format( |
591 | 567 | other_peoples_soft_locks=lock.other_peoples_soft_locks, |
592 | 568 | workers=workers, |
593 | 569 | interactive=interactive, |
594 | profiling=profiling, | |
595 | 570 | ) |
596 | 571 | except NodeLockedException as e: |
597 | 572 | if not interactive: |
726 | 701 | wrapper_outer=self.cmd_wrapper_outer, |
727 | 702 | ) |
728 | 703 | |
729 | def test(self, ignore_missing_faults=False, workers=4): | |
730 | with io.job(_(" {node} checking for metadata collisions...").format(node=self.name)): | |
731 | check_for_unsolvable_metadata_key_conflicts(self) | |
732 | io.stdout(_("{x} {node} has no metadata collisions").format( | |
733 | x=green("✓"), | |
734 | node=bold(self.name), | |
735 | )) | |
736 | if self.items: | |
737 | test_items(self, ignore_missing_faults=ignore_missing_faults, workers=workers) | |
738 | else: | |
739 | io.stdout(_("{x} {node} has no items").format(node=bold(self.name), x=yellow("!"))) | |
740 | ||
741 | self.repo.hooks.test_node(self.repo, self) | |
742 | ||
743 | 704 | def upload(self, local_path, remote_path, mode=None, owner="", group=""): |
744 | 705 | return operations.upload( |
745 | 706 | self.hostname, |
811 | 772 | |
812 | 773 | for attr, default in GROUP_ATTR_DEFAULTS.items(): |
813 | 774 | setattr(Node, attr, build_attr_property(attr, default)) |
814 | ||
815 | ||
816 | def test_items(node, ignore_missing_faults=False, workers=1): | |
817 | item_queue = ItemTestQueue(node.items) | |
818 | ||
819 | def tasks_available(): | |
820 | return bool(item_queue.items_without_deps) | |
821 | ||
822 | def next_task(): | |
823 | try: | |
824 | # Get the next non-DummyItem in the queue. | |
825 | while True: | |
826 | item = item_queue.pop() | |
827 | if not isinstance(item, DummyItem): | |
828 | break | |
829 | except IndexError: # no more items available right now | |
830 | return None | |
831 | else: | |
832 | return { | |
833 | 'task_id': item.node.name + ":" + item.bundle.name + ":" + item.id, | |
834 | 'target': item._test, | |
835 | } | |
836 | ||
837 | def handle_result(task_id, return_value, duration): | |
838 | node_name, bundle_name, item_id = task_id.split(":", 2) | |
839 | if item_id.count(":") < 2: | |
840 | # don't count canned actions | |
841 | io.progress_advance() | |
842 | io.stdout("{x} {node} {bundle} {item}".format( | |
843 | bundle=bold(bundle_name), | |
844 | item=item_id, | |
845 | node=bold(node_name), | |
846 | x=green("✓"), | |
847 | )) | |
848 | ||
849 | def handle_exception(task_id, exception, traceback): | |
850 | io.progress_advance() | |
851 | node_name, bundle_name, item_id = task_id.split(":", 2) | |
852 | if ignore_missing_faults and isinstance(exception, FaultUnavailable): | |
853 | io.stderr(_("{x} {node} {bundle} {item} ({msg})").format( | |
854 | bundle=bold(bundle_name), | |
855 | item=item_id, | |
856 | msg=yellow(_("Fault unavailable")), | |
857 | node=bold(node_name), | |
858 | x=yellow("»"), | |
859 | )) | |
860 | else: | |
861 | io.stderr("{x} {node} {bundle} {item}".format( | |
862 | bundle=bold(bundle_name), | |
863 | item=item_id, | |
864 | node=bold(node_name), | |
865 | x=red("!"), | |
866 | )) | |
867 | io.stderr(traceback) | |
868 | io.stderr("{}: {}".format(type(exception), str(exception))) | |
869 | exit(1) | |
870 | ||
871 | worker_pool = WorkerPool( | |
872 | tasks_available, | |
873 | next_task, | |
874 | handle_result=handle_result, | |
875 | handle_exception=handle_exception, | |
876 | pool_id="test_{}".format(node.name), | |
877 | workers=workers, | |
878 | ) | |
879 | worker_pool.run() | |
880 | ||
881 | if item_queue.items_with_deps: | |
882 | raise ItemDependencyLoop(item_queue.items_with_deps) | |
883 | 775 | |
884 | 776 | |
885 | 777 | def verify_items(node, show_all=False, workers=1): |
0 | 0 | # -*- coding: utf-8 -*- |
1 | 1 | from __future__ import unicode_literals |
2 | 2 | |
3 | from datetime import datetime | |
3 | 4 | from pipes import quote |
4 | 5 | from select import select |
5 | 6 | from shlex import split |
72 | 73 | |
73 | 74 | class RunResult(object): |
74 | 75 | def __init__(self): |
76 | self.duration = None | |
75 | 77 | self.return_code = None |
76 | 78 | self.stderr = None |
77 | 79 | self.stdout = None |
110 | 112 | |
111 | 113 | cmd_id = randstr(length=4).upper() |
112 | 114 | io.debug("running command with ID {}: {}".format(cmd_id, " ".join(command))) |
115 | start = datetime.utcnow() | |
113 | 116 | |
114 | 117 | # Launch the child process. It's important that SSH gets a dummy |
115 | 118 | # stdin, i.e. it must *not* read from the terminal. Otherwise, it |
180 | 183 | )) |
181 | 184 | |
182 | 185 | result = RunResult() |
186 | result.duration = datetime.utcnow() - start | |
183 | 187 | result.stdout = stdout_lb.record.getvalue() |
184 | 188 | result.stderr = stderr_lb.record.getvalue() |
185 | 189 | result.return_code = child_process.returncode |
11 | 11 | from . import items, utils, VERSION_STRING |
12 | 12 | from .bundle import FILENAME_BUNDLE |
13 | 13 | from .exceptions import ( |
14 | BundleError, | |
15 | 14 | NoSuchGroup, |
16 | 15 | NoSuchNode, |
17 | 16 | NoSuchRepository, |
19 | 18 | RepositoryError, |
20 | 19 | ) |
21 | 20 | from .group import Group |
22 | from .metadata import deepcopy_metadata | |
21 | from .metadata import check_metadata_processor_result, deepcopy_metadata, DEFAULTS, DONE, OVERWRITE | |
23 | 22 | from .node import _flatten_group_hierarchy, Node |
24 | 23 | from .secrets import FILENAME_SECRETS, generate_initial_secrets_cfg, SecretProxy |
25 | 24 | from .utils import cached_property, merge_dict, names |
89 | 88 | FILENAME_REQUIREMENTS: "bundlewrap>={}\n".format(VERSION_STRING), |
90 | 89 | FILENAME_SECRETS: generate_initial_secrets_cfg, |
91 | 90 | } |
92 | META_PROC_MAX_ITER = 1000 # maximum iterations for metadata processors | |
93 | 91 | |
94 | 92 | |
95 | 93 | def groups_from_file(filepath, libs, repo_path, vault): |
465 | 463 | Builds complete metadata for all nodes that appear in |
466 | 464 | self._node_metadata_partial.keys(). |
467 | 465 | """ |
468 | iterations = {} | |
469 | 466 | # these processors have indicated that they do not need to be run again |
470 | 467 | blacklisted_metaprocs = set() |
471 | while ( | |
472 | not iterations or max(iterations.values()) <= META_PROC_MAX_ITER | |
473 | ) and not QUIT_EVENT.is_set(): | |
468 | while not QUIT_EVENT.is_set(): | |
474 | 469 | # First, get the static metadata out of the way |
475 | 470 | for node_name in list(self._node_metadata_partial): |
476 | 471 | if QUIT_EVENT.is_set(): |
491 | 486 | ) |
492 | 487 | |
493 | 488 | with io.job(_(" {node} merging node metadata...").format(node=node.name)): |
494 | self._node_metadata_partial[node.name] = merge_dict( | |
489 | # deepcopy_metadata is important here because up to this point | |
490 | # different nodes from the same group might still share objects | |
491 | # nested deeply in their metadata. This becomes a problem if we | |
492 | # start messing with these objects in metadata processors: any | 
493 | # edit to one of these shared objects would silently change the | 
494 | # metadata of multiple nodes at once. | 
495 | self._node_metadata_partial[node.name] = deepcopy_metadata(merge_dict( | |
495 | 496 | self._node_metadata_partial[node.name], |
496 | 497 | node._node_metadata, |
497 | ) | |
498 | )) | |
498 | 499 | |
499 | 500 | # Now for the interesting part: We run all metadata processors |
500 | # in sequence until none of them return changed metadata. | |
501 | modified = False | |
501 | # until none of them return DONE anymore (at that point, the | 
502 | # remaining ones are just waiting for another metaproc to insert | 
503 | # new data, which can no longer happen once none return DONE) | 
504 | metaproc_returned_DONE = False | |
502 | 505 | for node_name in list(self._node_metadata_partial): |
503 | 506 | if QUIT_EVENT.is_set(): |
504 | 507 | break |
507 | 510 | for metadata_processor_name, metadata_processor in node.metadata_processors: |
508 | 511 | if (node_name, metadata_processor_name) in blacklisted_metaprocs: |
509 | 512 | continue |
510 | iterations.setdefault((node.name, metadata_processor_name), 1) | |
511 | 513 | io.debug(_( |
512 | "running metadata processor {metaproc} for node {node}, " | |
513 | "iteration #{i}" | |
514 | "running metadata processor {metaproc} for node {node}" | |
514 | 515 | ).format( |
515 | 516 | metaproc=metadata_processor_name, |
516 | 517 | node=node.name, |
517 | i=iterations[(node.name, metadata_processor_name)], | |
518 | 518 | )) |
519 | 519 | try: |
520 | processed = metadata_processor( | |
521 | deepcopy_metadata(self._node_metadata_partial[node.name]), | |
522 | ) | |
520 | processed = metadata_processor(self._node_metadata_partial[node.name]) | |
523 | 521 | except Exception as exc: |
524 | 522 | io.stderr(_( |
525 | 523 | "{x} Exception while executing metadata processor " |
530 | 528 | node=node.name, |
531 | 529 | )) |
532 | 530 | raise exc |
533 | iterations[(node.name, metadata_processor_name)] += 1 | |
534 | if isinstance(processed, tuple) and len(processed) == 2: | |
535 | if processed[1] is True: | |
536 | io.debug(_( | |
537 | "metadata processor {metaproc} for node {node} " | |
538 | "has indicated that it need not be run again" | |
539 | ).format( | |
540 | metaproc=metadata_processor_name, | |
541 | node=node.name, | |
542 | )) | |
543 | blacklisted_metaprocs.add((node_name, metadata_processor_name)) | |
544 | processed = processed[0] | |
545 | if not isinstance(processed, dict): | |
546 | raise ValueError(_( | |
547 | "metadata processor {metaproc} for node {node} did not return " | |
548 | "a dictionary or tuple of (dict, bool)" | |
531 | processed_dict, options = check_metadata_processor_result( | |
532 | processed, | |
533 | node.name, | |
534 | metadata_processor_name, | |
535 | ) | |
536 | if DONE in options: | |
537 | io.debug(_( | |
538 | "metadata processor {metaproc} for node {node} " | |
539 | "has indicated that it need NOT be run again" | |
549 | 540 | ).format( |
550 | 541 | metaproc=metadata_processor_name, |
551 | 542 | node=node.name, |
552 | 543 | )) |
553 | if processed != self._node_metadata_partial[node.name]: | |
544 | blacklisted_metaprocs.add((node_name, metadata_processor_name)) | |
545 | metaproc_returned_DONE = True | |
546 | else: | |
554 | 547 | io.debug(_( |
555 | "metadata processor {metaproc} for node {node} changed metadata, " | |
556 | "rerunning all metadata processors for this node" | |
548 | "metadata processor {metaproc} for node {node} " | |
549 | "has indicated that it must be run again" | |
557 | 550 | ).format( |
558 | 551 | metaproc=metadata_processor_name, |
559 | 552 | node=node.name, |
560 | 553 | )) |
561 | self._node_metadata_partial[node.name] = processed | |
562 | modified = True | |
563 | if not modified: | |
554 | ||
555 | if DEFAULTS in options: | |
556 | self._node_metadata_partial[node.name] = merge_dict( | |
557 | processed_dict, | |
558 | self._node_metadata_partial[node.name], | |
559 | ) | |
560 | elif OVERWRITE in options: | |
561 | self._node_metadata_partial[node.name] = merge_dict( | |
562 | self._node_metadata_partial[node.name], | |
563 | processed_dict, | |
564 | ) | |
565 | else: | |
566 | self._node_metadata_partial[node.name] = processed_dict | |
567 | ||
568 | if not metaproc_returned_DONE: | |
564 | 569 | if self._node_metadata_static_complete != set(self._node_metadata_partial.keys()): |
565 | 570 | # During metadata processor execution, partial metadata may |
566 | 571 | # have been requested for nodes we did not previously |
572 | 577 | continue |
573 | 578 | else: |
574 | 579 | break |
575 | ||
576 | for culprit, number_of_iterations in iterations.items(): | |
577 | if number_of_iterations >= META_PROC_MAX_ITER: | |
578 | node, metadata_processor = culprit | |
579 | raise BundleError(_( | |
580 | "Metadata processor '{proc}' stopped after too many iterations " | |
581 | "({max_iter}) for node '{node}' to prevent infinite loop. " | |
582 | "This usually means one of two things: " | |
583 | "1) You have two metadata processors that keep overwriting each other's " | |
584 | "data or 2) You have a single metadata processor that keeps changing its own " | |
585 | "data. " | |
586 | "To fix this, use `bw --debug metadata {node}` and look for repeated messages " | |
587 | "indicating that the same metadata processor keeps changing metadata. Then " | |
588 | "rewrite that metadata processor to eventually stop changing metadata.".format( | |
589 | max_iter=META_PROC_MAX_ITER, | |
590 | node=node, | |
591 | proc=metadata_processor, | |
592 | ), | |
593 | )) | |
594 | 580 | |
595 | 581 | def metadata_hash(self): |
596 | 582 | repo_dict = {} |
307 | 307 | f.write(fernet.encrypt(plaintext)) |
308 | 308 | return target_file |
309 | 309 | |
310 | def _format(self, format_str=None, faults=None): | |
311 | return format_str.format(*[fault.value for fault in faults]) | |
312 | ||
313 | def format(self, format_str, *faults): | |
314 | """ | |
315 | Returns a Fault for a string formatted with the given Faults, | |
316 | e.g.: | |
317 | ||
318 | vault.format("password: {}", vault.password_for("something")) | |
319 | ||
320 | DEPRECATED, remove in 3.0, use Fault.format_into instead. | |
321 | """ | |
322 | return Fault( | |
323 | self._format, | |
324 | format_str=format_str, | |
325 | faults=faults, | |
326 | ) | |
327 | ||
328 | 310 | def human_password_for( |
329 | 311 | self, identifier, digits=2, key='generate', per_word=3, words=4, |
330 | 312 | ): |
14 | 14 | |
15 | 15 | from ..exceptions import DontCache, FaultUnavailable |
16 | 16 | |
17 | __GETATTR_CACHE = {} | |
17 | __GETATTR_CODE_CACHE = {} | |
18 | __GETATTR_RESULT_CACHE = {} | |
18 | 19 | __GETATTR_NODEFAULT = "very_unlikely_default_value" |
19 | 20 | |
20 | 21 | |
150 | 151 | return content |
151 | 152 | |
152 | 153 | |
153 | def get_all_attrs_from_file(path, cache=True, base_env=None): | |
154 | def get_all_attrs_from_file(path, base_env=None): | |
154 | 155 | """ |
155 | 156 | Reads all 'attributes' (if it were a module) from a source file. |
156 | 157 | """ |
157 | 158 | if base_env is None: |
158 | 159 | base_env = {} |
159 | if base_env: | |
160 | ||
161 | if not base_env and path in __GETATTR_RESULT_CACHE: | |
160 | 162 | # do not allow caching when passing in a base env because that |
161 | 163 | # breaks repeated calls with different base envs for the same |
162 | 164 | # file |
163 | cache = False | |
164 | if path not in __GETATTR_CACHE or not cache: | |
165 | return __GETATTR_RESULT_CACHE[path] | |
166 | ||
167 | if path not in __GETATTR_CODE_CACHE: | |
165 | 168 | source = get_file_contents(path) |
166 | env = base_env.copy() | |
167 | try: | |
168 | exec(source, env) | |
169 | except: | |
170 | from .ui import io | |
171 | io.stderr("Exception while executing {}".format(path)) | |
172 | raise | |
173 | if cache: | |
174 | __GETATTR_CACHE[path] = env | |
175 | else: | |
176 | env = __GETATTR_CACHE[path] | |
169 | __GETATTR_CODE_CACHE[path] = compile(source, path, mode='exec') | |
170 | ||
171 | code = __GETATTR_CODE_CACHE[path] | |
172 | env = base_env.copy() | |
173 | try: | |
174 | exec(code, env) | |
175 | except: | |
176 | from .ui import io | |
177 | io.stderr("Exception while executing {}".format(path)) | |
178 | raise | |
179 | ||
180 | if not base_env: | |
181 | __GETATTR_RESULT_CACHE[path] = env | |
182 | ||
177 | 183 | return env |
178 | 184 | |
179 | 185 | |
180 | def getattr_from_file(path, attrname, base_env=None, cache=True, default=__GETATTR_NODEFAULT): | |
186 | def getattr_from_file(path, attrname, base_env=None, default=__GETATTR_NODEFAULT): | |
181 | 187 | """ |
182 | 188 | Reads a specific 'attribute' (if it were a module) from a source |
183 | 189 | file. |
184 | 190 | """ |
185 | env = get_all_attrs_from_file(path, base_env=base_env, cache=cache) | |
191 | env = get_all_attrs_from_file(path, base_env=base_env) | |
186 | 192 | if default == __GETATTR_NODEFAULT: |
187 | 193 | return env[attrname] |
188 | 194 | else: |
4 | 4 | from ..exceptions import NoSuchGroup, NoSuchItem, NoSuchNode |
5 | 5 | from . import names |
6 | 6 | from .text import mark_for_translation as _, red |
7 | from .ui import io | |
7 | from .ui import io, QUIT_EVENT | |
8 | 8 | |
9 | 9 | |
10 | 10 | def count_items(nodes): |
11 | 11 | count = 0 |
12 | 12 | for node in nodes: |
13 | if QUIT_EVENT.is_set(): | |
14 | return 0 | |
13 | 15 | count += len(node.items) |
14 | 16 | return count |
15 | 17 |
52 | 52 | def capture_for_debug_logfile(f): |
53 | 53 | @wraps(f) |
54 | 54 | def wrapped(self, msg, **kwargs): |
55 | if self.debug_log_file: | |
55 | if self.debug_log_file and self._active: | |
56 | 56 | self.debug_log_file.write( |
57 | 57 | datetime.now().strftime("[%Y-%m-%d %H:%M:%S.%f] ") + |
58 | 58 | ansi_clean(msg).rstrip("\n") + "\n" |
0 | bundlewrap (3.0.1-1) unstable; urgency=medium | |
1 | ||
2 | * New upstream release | |
3 | * Update standards version to 4.1.0 | |
4 | ||
5 | -- Jonathan Carter <jcc@debian.org> Wed, 27 Sep 2017 09:38:20 +0200 | |
6 | ||
0 | 7 | bundlewrap (2.20.0-1) unstable; urgency=medium |
1 | 8 | |
2 | 9 | * New upstream release |
6 | 6 | dh-python, |
7 | 7 | python, |
8 | 8 | python-setuptools |
9 | Standards-Version: 4.0.1 | |
9 | Standards-Version: 4.1.0 | |
10 | 10 | Homepage: http://bundlewrap.org/ |
11 | 11 | Vcs-Svn: svn://anonscm.debian.org/python-apps/packages/bundlewrap/trunk |
12 | 12 | Vcs-Browser: https://anonscm.debian.org/viewvc/python-apps/packages/bundlewrap/trunk/ |
119 | 119 | **`.items`** |
120 | 120 | |
121 | 121 | A list of items on this node (instances of subclasses of `bundlewrap.items.Item`) |
122 | ||
123 | <br> | |
124 | ||
125 | **`.magic_number`** | |
126 | ||
127 | A large number derived from the node's name. This number is very likely to be unique for your entire repository. You can, for example, use this number to easily "jitter" cronjobs: | |
128 | ||
129 | '{} {} * * * root /my/script'.format( | |
130 | node.magic_number % 60, | |
131 | node.magic_number % 2 + 4, | |
132 | ) | |
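In this example, `node.magic_number % 60` yields a minute between 0 and 59 and `node.magic_number % 2 + 4` yields an hour of 4 or 5, so each node runs the job at a time that is stable for that node but scattered across the repository.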
122 | 133 | |
123 | 134 | <br> |
124 | 135 |
28 | 28 | """ |
29 | 29 | A foo. |
30 | 30 | """ |
31 | BLOCK_CONCURRENT = [] | |
32 | 31 | BUNDLE_ATTRIBUTE_NAME = "foo" |
33 | 32 | ITEM_ATTRIBUTES = { |
34 | 33 | 'attribute': "default value", |
35 | 34 | } |
36 | 35 | ITEM_TYPE_NAME = "foo" |
37 | 36 | REQUIRED_ATTRIBUTES = ['attribute'] |
37 | ||
38 | @classmethod | |
39 | def block_concurrent(cls, node_os, node_os_version): | |
40 | """ | |
41 | Return a list of item types that cannot be applied in parallel | |
42 | with this item type. | |
43 | """ | |
44 | return [] | |
38 | 45 | |
39 | 46 | def __repr__(self): |
40 | 47 | return "<Foo attribute:{}>".format(self.attributes['attribute']) |
63 | 70 | |
64 | 71 | def display_dicts(self, cdict, sdict, keys): |
65 | 72 | """ |
66 | Given cdict and sdict as implemented above, modify them to better | |
67 | suit interactive presentation. The keys parameter is the return | |
68 | value of display_keys (see below) and provided for reference only. | |
73 | Given cdict and sdict as implemented above, modify them to | |
74 | better suit interactive presentation. The keys parameter is a | |
75 | list of keys whose values differ between cdict and sdict. | |
69 | 76 | |
70 | 77 | Implementing this method is optional. |
71 | 78 | """ |
72 | return (cdict, sdict) | |
73 | ||
74 | def display_keys(self, cdict, sdict, keys): | |
75 | """ | |
76 | Given a list of keys whose values differ between cdict and sdict, | |
77 | modify them to better suit presentation to the user. | |
78 | ||
79 | Implementing this method is optional. | |
80 | """ | |
81 | return keys | |
79 | return (cdict, sdict, keys) | |
82 | 80 | |
83 | 81 | def fix(self, status): |
84 | 82 | """ |
112 | 110 | ITEM_TYPE_NAME = "foo" |
113 | 111 | |
114 | 112 | |
115 | `BLOCK_CONCURRENT` is a list of item types (e.g. `pkg_apt`), that cannot be applied in parallel with this type of item. May include this very item type itself. For most items this is not an issue (e.g. creating multiple files at the same time), but some types of items have to be applied sequentially (e.g. package managers usually employ locks to ensure only one package is installed at a time): | |
116 | ||
117 | BLOCK_CONCURRENT = ["pkg_apt"] | |
118 | ||
119 | ||
120 | 113 | `REQUIRED_ATTRIBUTES` is a list of attribute names that must be set on each item of this type. If BundleWrap encounters an item without all these attributes during bundle inspection, an exception will be raised. Example: |
121 | 114 | |
122 | 115 | REQUIRED_ATTRIBUTES = ['attr1', 'attr2'] |
130 | 123 | |
131 | 124 | The only other method you have to implement is `fix`. It doesn't have to return anything and just uses `self.node.run()` to fix the item. To do this efficiently, it may use the provided parameters indicating which keys differ between the should-be sdict and the actual one. Both sdicts are also provided in case you need to know their values. |
132 | 125 | |
126 | `block_concurrent()` must return a list of item types (e.g. `['pkg_apt']`) that cannot be applied in parallel with this type of item; the list may include this very item type itself. For most items this is not an issue (e.g. creating multiple files at the same time), but some types of items have to be applied sequentially (e.g. package managers usually employ locks to ensure only one package is installed at a time). | 
127 | ||
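For example, the built-in package items return their own type here, since most package managers hold a global lock. A minimal sketch for the hypothetical `foo` item above:

    @classmethod
    def block_concurrent(cls, node_os, node_os_version):
        # foo's package manager holds a global lock, so two foo
        # items must never be applied at the same time
        return [cls.ITEM_TYPE_NAME]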
133 | 128 | If you're having trouble, try looking at the [source code for the items that come with BundleWrap](https://github.com/bundlewrap/bundlewrap/tree/master/bundlewrap/items). The `pkg_*` items are pretty simple and easy to understand while `files` is the most complex to date. Or just drop by on [IRC](irc://chat.freenode.net/bundlewrap), we're glad to help. |
0 | # Migrating from BundleWrap 2.x to 3.x | |
1 | ||
2 | As per [semver](http://semver.org), BundleWrap 3.0 breaks compatibility with repositories created for BundleWrap 2.x. This document provides a guide on how to upgrade your repositories to BundleWrap 3.x. Please read the entire document before proceeding. | |
3 | ||
4 | <br> | |
5 | ||
6 | ## metadata.py | |
7 | ||
8 | BundleWrap 2.x simply used all functions in `metadata.py` whose names don't start with an underscore as metadata processors. This led to awkward imports like `from foo import bar as _bar`. BundleWrap 3.x requires a decorator for explicitly designating functions as metadata processors: | |
9 | ||
10 | @metadata_processor | |
11 | def myproc(metadata): | |
12 | return metadata, DONE | |
13 | ||
14 | You will have to add `@metadata_processor` to each metadata processor function. There is no need to import it; it is provided automatically, just like `node` and `repo`. | |
15 | ||
16 | The accepted return values of metadata processors have changed as well. Metadata processors now always have to return a tuple whose first element is a dictionary of metadata; the remaining elements are options that tell BundleWrap what to do with that dictionary. In most cases, you will want to return the `DONE` option as in the example above. There is no need to import the options; they're always available. | 
17 | ||
18 | When you previously returned `metadata, False` from a metadata processor, you will now have to return `metadata, RUN_ME_AGAIN`. For a more detailed description of the available options, see [the documentation](../repo/bundles.md#metadatapy). | |
19 | ||
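As a sketch of how the options combine (the metadata keys below are made up; the options are real): every metadata processor must return exactly one of `DONE` or `RUN_ME_AGAIN`, optionally combined with `DEFAULTS` (merge the returned dict in with lower priority than existing metadata) or `OVERWRITE` (merge it in with higher priority):

    @metadata_processor
    def firewall_defaults(metadata):
        # merged with lower priority, so explicit node/group
        # metadata wins over these values
        return {"firewall": {"enabled": True}}, DEFAULTS, DONE

    @metadata_processor
    def pick_primary_interface(metadata):
        if "interfaces" not in metadata:
            # wait for another metadata processor to provide this
            return metadata, RUN_ME_AGAIN
        metadata["primary_interface"] = sorted(metadata["interfaces"])[0]
        return metadata, DONE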
20 | <br> | |
21 | ||
22 | ## File and directory ownership defaults | |
23 | ||
24 | [Files](../items/file.md), [directories](../items/directory.md), and [symlinks](../items/symlink.md) now have default values for the ownership and mode attributes. Previously the default was to ignore them. It's very likely that you won't have to do anything here; just be aware of the change. | 
25 | ||
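If you did rely on these attributes being ignored for certain items, you can opt out per item by setting them to `None` explicitly (a sketch; the path and source are placeholders):

    files = {
        "/etc/foo.conf": {
            'source': "foo.conf",
            'owner': None,  # leave owner/group/mode as found on the node
            'group': None,
            'mode': None,
        },
    }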
26 | <br> | |
27 | ||
28 | ## systemd services enabled by default | |
29 | ||
30 | Again, just be [aware](../items/svc_systemd.md); it's probably what you intended anyway. | 
31 | ||
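To restore the old behavior for an individual service, set `enabled` back to `None` (ignore) or explicitly to `False` (a sketch with a placeholder unit name):

    svc_systemd = {
        "foo.service": {
            'enabled': None,  # BundleWrap will not touch enablement
            'running': True,
        },
    }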
32 | <br> | |
33 | ||
34 | ## Environment variables | |
35 | ||
36 | The following [env vars](env.md) have been renamed (though the new names have already been available for a while, so chances are you're already using them): | |
37 | ||
38 | <table> | |
39 | <tr><th>Old</th><th>New</th></tr> | |
40 | <tr><td><code>BWADDHOSTKEYS</code></td><td><code>BW_ADD_HOST_KEYS</code></td></tr> | |
41 | <tr><td><code>BWCOLORS</code></td><td><code>BW_COLORS</code></td></tr> | |
42 | <tr><td><code>BWITEMWORKERS</code></td><td><code>BW_ITEM_WORKERS</code></td></tr> | |
43 | <tr><td><code>BWNODEWORKERS</code></td><td><code>BW_NODE_WORKERS</code></td></tr> | |
44 | </table> | |
45 | ||
46 | <br> | |
47 | ||
48 | ## Item.display_keys and Item.display_dicts | |
49 | ||
50 | If you've written your own items and used the `display_keys()` or `display_dicts()` methods or the `BLOCK_CONCURRENT` attribute, you will have to update them to the [new API](dev_item.md). |
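A sketch of the mechanical changes: `display_dicts()` now receives the list of differing keys and must return it as the third element of its tuple (absorbing what `display_keys()` used to do), and the `BLOCK_CONCURRENT` attribute becomes the `block_concurrent()` classmethod:

    # BundleWrap 2.x
    BLOCK_CONCURRENT = ["pkg_apt"]

    def display_dicts(self, cdict, sdict, keys):
        return (cdict, sdict)

    # BundleWrap 3.x
    @classmethod
    def block_concurrent(cls, node_os, node_os_version):
        return ["pkg_apt"]

    def display_dicts(self, cdict, sdict, keys):
        return (cdict, sdict, keys)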
36 | 36 | - hate Python and/or JSON |
37 | 37 | - like to use community-maintained configuration templates |
38 | 38 | - need unattended bootstrapping of nodes |
39 | - need to manage non-Linux systems | |
40 | 39 | - don’t trust your coworkers |
41 | ||
42 | We have also prepared a [comparison with other popular config management systems](misc/alternatives.md). |
1 | 1 | |
2 | 2 | directories = { |
3 | 3 | "/path/to/directory": { |
4 | "mode": "0644", | |
4 | "mode": "0755", | |
5 | 5 | "owner": "root", |
6 | 6 | "group": "root", |
7 | 7 | }, |
15 | 15 | |
16 | 16 | ### group |
17 | 17 | |
18 | Name of the group this directory belongs to. Defaults to `None` (don't care about group). | |
18 | Name of the group this directory belongs to. Defaults to `'root'`. Set to `None` if you don't want BundleWrap to change whatever is set on the node. | |
19 | 19 | |
20 | 20 | <br> |
21 | 21 | |
22 | 22 | ### mode |
23 | 23 | |
24 | Directory mode as returned by `stat -c %a <directory>`. Defaults to `None` (don't care about mode). | |
24 | Directory mode as returned by `stat -c %a <directory>`. Defaults to `755`. Set to `None` if you don't want BundleWrap to change whatever is set on the node. | |
25 | 25 | |
26 | 26 | <br> |
27 | 27 | |
28 | 28 | ### owner |
29 | 29 | |
30 | Username of the directory's owner. Defaults to `None` (don't care about owner). | |
30 | Username of the directory's owner. Defaults to `'root'`. Set to `None` if you don't want BundleWrap to change whatever is set on the node. | |
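| ||
| A minimal sketch of opting out of ownership management entirely (the path is made up): | |
| ||
|     directories = { | |
|         "/path/to/directory": { | |
|             "owner": None,  # leave whatever owner is set on the node | |
|             "group": None,  # leave whatever group is set on the node | |
|         }, | |
|     } | |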
31 | 31 | |
32 | 32 | <br> |
33 | 33 |
63 | 63 | |
64 | 64 | ### group |
65 | 65 | |
66 | Name of the group this file belongs to. Defaults to `None` (don't care about group). | |
66 | Name of the group this file belongs to. Defaults to `'root'`. Set to `None` if you don't want BundleWrap to change whatever is set on the node. | |
67 | 67 | |
68 | 68 | <br> |
69 | 69 | |
70 | 70 | ### mode |
71 | 71 | |
72 | File mode as returned by `stat -c %a <file>`. Defaults to `None` (don't care about mode). | |
72 | File mode as returned by `stat -c %a <file>`. Defaults to `644`. Set to `None` if you don't want BundleWrap to change whatever is set on the node. | |
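| ||
| A minimal sketch (paths and content are made up): | |
| ||
|     files = { | |
|         "/path/to/file": { | |
|             "content": "hello\n", | |
|             "mode": "0644",  # explicit, same as the default | |
|         }, | |
|         "/path/to/other_file": { | |
|             "content": "hello\n", | |
|             "mode": None,  # don't manage the mode at all | |
|         }, | |
|     } | |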
73 | 73 | |
74 | 74 | <br> |
75 | 75 | |
76 | 76 | ### owner |
77 | 77 | |
78 | Username of the file's owner. Defaults to `None` (don't care about owner). | |
78 | Username of the file's owner. Defaults to `'root'`. Set to `None` if you don't want BundleWrap to change whatever is set on the node. | |
79 | 79 | |
80 | 80 | <br> |
81 | 81 |
3 | 3 | |
4 | 4 | svc_systemd = { |
5 | 5 | "fcron.service": { |
6 | "enabled": True, | |
6 | "enabled": True, # default | |
7 | 7 | "running": True, # default |
8 | 8 | }, |
9 | 9 | "sgopherd.socket": { |
21 | 21 | |
22 | 22 | ### enabled |
23 | 23 | |
24 | `True` if the service shall be automatically started during system bootup; `False` otherwise. `None`, the default value, makes BundleWrap ignore this setting. | |
24 | `True` if the service shall be automatically started during system bootup; `False` otherwise. `None` makes BundleWrap ignore this setting. | |
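| ||
| A short sketch, assuming you only want BundleWrap to manage the running state: | |
| ||
|     svc_systemd = { | |
|         "fcron.service": { | |
|             "enabled": None,  # leave enablement alone | |
|             "running": True, | |
|         }, | |
|     } | |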
25 | 25 | |
26 | 26 | <br> |
27 | 27 |
23 | 23 | |
24 | 24 | ### group |
25 | 25 | |
26 | Name of the group this symlink belongs to. Defaults to `root`. Defaults to `None` (don't care about group). | |
26 | Name of the group this symlink belongs to. Defaults to `'root'`. Set to `None` if you don't want BundleWrap to change whatever is set on the node. | |
27 | 27 | |
28 | 28 | <br> |
29 | 29 | |
30 | 30 | ### owner |
31 | 31 | |
32 | Username of the symlink's owner. Defaults to `root`. Defaults to `None` (don't care about owner). | |
32 | Username of the symlink's owner. Defaults to `'root'`. Set to `None` if you don't want BundleWrap to change whatever is set on the node. |
0 | # Alternatives | |
1 | ||
2 | <div class="alert alert-info">This page is an effort to compare BundleWrap to other config management systems. It is very hard to keep this information complete and up to date, so please feel free to raise issues or create pull requests if something is amiss.</div> | |
3 | ||
4 | BundleWrap has the following properties that are unique to it or at least not common among other solutions: | |
5 | ||
6 | * server- and agent-less architecture | |
7 | * item-level parallelism to speed up convergence of complex nodes | |
8 | * interactive mode to review configuration as it is being applied | |
9 | * [Mako file templates](../items/file_templates) | |
10 | * verifies that each action taken actually fixed the item in question | |
11 | * verify mode to assess the state of your configuration without mutating it | |
12 | * useful and actionable error messages | |
13 | * can apply actions (and other items) prior to fixing an item (and only then) | |
14 | * built-in visualization of node configuration | |
15 | * nice [Python API](../guide/api.md) | |
16 | * designed to be mastered quickly and easily remembered | |
17 | * for better or worse: no commercial agenda/support | |
18 | * no support for non-Linux target nodes (BundleWrap itself can be run from Mac OS as well) | |
19 | ||
20 | ||
21 | ## Ansible | |
22 | ||
23 | [Ansible](http://ansible.com) is very similar to BundleWrap in how it communicates with nodes. Neither system uses server or agent processes; both rely on SSH. Ansible can optionally use OpenSSH instead of a Python SSH implementation to improve performance. On the other hand, BundleWrap will always use the Python implementation, but with multiple connections to each node. This should give BundleWrap a performance advantage on very complex systems with many items, since each connection can work on a different item simultaneously. | |
24 | ||
25 | To apply configuration, Ansible uploads pieces of code called modules to each node and runs them there. Many Ansible modules depend on the node having a Python 2.x interpreter installed. In some cases, third-party Python libraries are needed as well, increasing the footprint on the node. BundleWrap runs commands on the target node just as you would in an interactive SSH session. Most of the [commands needed](../guide/installation.md#requirements-for-managed-systems) by BundleWrap are provided by coreutils and should be present on all standard Linux systems. | |
26 | ||
27 | Ansible ships with loads of modules, while BundleWrap only gives you the essential primitives to work with. For example, we will not add an item type for remote downloads because you can easily build that yourself using an [action](../items/action.md) with `wget`. | |
28 | ||
29 | Ansible's playbooks roughly correspond to BundleWrap's bundles, but are written in YAML using a special playbook language. BundleWrap uses Python for this purpose, so if you know some basic Python you only need to learn the schema of the dictionaries you're building. This also means that you will never run into a problem the playbook language cannot solve. Anything you can do in Python, you can do in BundleWrap. | |
30 | ||
31 | While you can automate application deployments in BundleWrap, Ansible is much more capable in that regard as it combines config management and sophisticated deployment mechanisms (multi-stage, rolling updates). | |
32 | ||
33 | File templates in Ansible are [Jinja2](http://jinja2.pocoo.org), while BundleWrap offers both [Mako](http://makotemplates.org) and Jinja2. | |
34 | ||
35 | Ansible, Inc. offers paid support for Ansible and an optional web-based addon called [Ansible Tower](http://ansible.com/tower). No such offerings are available for BundleWrap. | |
36 | ||
37 | ||
38 | ## BCFG2 | |
40 | ||
41 | BCFG2's bundles obviously were an inspiration for BundleWrap. One important difference is that BundleWrap's bundles are usually completely isolated and self-contained within their directory while BCFG2 bundles may need resources (e.g. file templates) from elsewhere in the repository. | |
42 | ||
43 | On a practical level BundleWrap prefers pure Python and Mako over the XML- and text-variants of Genshi used for bundle and file templating in BCFG2. | |
44 | ||
45 | And of course, BCFG2 has a very traditional client/server model, while BundleWrap runs only on the operator's computer. | |
46 | ||
47 | ||
48 | ## Chef | |
50 | ||
51 | [Chef](http://www.getchef.com) has basically two modes of operation: The most widely used one involves a server component and the `chef-client` agent. The second option is `chef-solo`, which will apply configuration from a local repository to the node the repository is located on. BundleWrap supports neither of these modes and always applies configuration over SSH. | |
52 | ||
53 | Overall, Chef is harder to get into, but will scale to thousands of nodes. | |
54 | ||
55 | The community around Chef is quite large and probably the largest of all config management systems. This means lots of community-maintained cookbooks to choose from. BundleWrap does have a [plugin system](../repo/plugins.md) to provide almost anything in a repository, but there aren't many plugins to choose from yet. | |
56 | ||
57 | Chef is written in Ruby and uses the popular [ERB](http://www.kuwata-lab.com/erubis/) template language. BundleWrap is heavily invested in Python and offers support for Mako and Jinja2 templates. | |
58 | ||
59 | OpsCode offers paid support for Chef and SaaS hosting for the server component. [AWS OpsWorks](http://aws.amazon.com/opsworks/) also integrates Chef cookbooks. |
52 | 52 | While it sounds scary, Copyright assignment is used to improve the enforceability of the GPL. Even the FSF does it, [read their explanation why](http://www.gnu.org/licenses/why-assign.html). The agreement used by BundleWrap is from [harmonyagreements.org](http://harmonyagreements.org). |
53 | 53 | |
54 | 54 | If you're still concerned, please do not hesitate to contact [@trehn](https://twitter.com/trehn). |
55 | ||
56 | <br> | |
57 | ||
58 | ### Isn't this all very similar to Ansible? | |
59 | ||
60 | Some parts are, but there are significant differences as well. Check out the [alternatives page](alternatives.md#ansible) for a writeup of the details. | |
61 | ||
62 | <br> |
273 | 273 | |
274 | 274 | # metadata.py |
275 | 275 | |
276 | Alongside `items.py` you may create another file called `metadata.py`. It can be used to do advanced processing of the metadata you configured for your nodes and groups. Specifically, it allows each bundle to modify metadata before `items.py` is evaluated. To do that, you simply write any number of functions whose name doesn't start with an underscore and put them into `metadata.py`. | |
277 | ||
278 | <div class="alert alert-warning">Understand that <strong>any</strong> function will be used as a metadata processor, unless its name starts with an underscore. This is also true for imported functions, so you'll need to import them like this: <code>from module import func as _func</code>.</div> | |
279 | ||
280 | These functions take the metadata dictionary generated so far as their single argument. You must then return the same dictionary with any modifications you need to make. These functions are called metadata processors. Every metadata processor from every bundle is called *repeatedly* with the latest metadata dictionary until no more changes are made to the metadata. Here's an example for how a `metadata.py` could look like (note that you have access to `repo` and `node` just like in `items.py`): | |
281 | ||
276 | Alongside `items.py` you may create another file called `metadata.py`. It can be used to do advanced processing of the metadata you configured for your nodes and groups. Specifically, it allows each bundle to modify metadata before `items.py` is evaluated. | |
277 | ||
278 | This is accomplished through metadata processors. Metadata processors are functions that take the metadata dictionary generated so far as their single argument. You must then return a dictionary with any modifications you need to make plus at least one of several options: | |
279 | ||
280 | @metadata_processor | |
282 | 281 | def my_metadata_processor(metadata): |
283 | 282 | metadata["foo"] = node.name |
284 | return metadata | |
285 | ||
286 | <div class="alert alert-danger">To avoid deadlocks when accessing <strong>other</strong> nodes' metadata from within a metadata processor, use <code>other_node.partial_metadata</code> instead of <code>other_node.metadata</code>. For the same reason, always use the <code>metadata</code> parameter to access the current node's metadata, never <code>node.metadata</code>.</div> | |
287 | ||
288 | To improve performance, you can optionally return a tuple instead, with the first element being your metadata dict and the second one being a boolean indicating whether this metadata processor has done its work for this particular node and need not be run again: | |
289 | ||
290 | def my_metadata_processor(metadata): | |
291 | metadata["foo"] = node.name | |
292 | return metadata, True | |
293 | ||
294 | The example above is a typical case of a metadata processor that only needs to be run once: it always does the same thing anyway. If you depend on other metadata processors, you have to return `False` (or just the dict): | |
295 | ||
283 | return metadata, DONE | |
284 | ||
285 | You must always return the modified metadata dictionary as the first element, followed by at least one option. Every metadata processor from every bundle is called *repeatedly* with the latest metadata dictionary until it indicates that it is done by returning the `DONE` option or until *all* remaining metadata processors return `RUN_ME_AGAIN`. Each return value must include one of `DONE` or `RUN_ME_AGAIN`; use the latter if your metadata processor depends on metadata that is generated by another metadata processor (which may run after yours). Here is another example: | |
286 | ||
287 | @metadata_processor | |
296 | 288 | def first_metadata_processor(metadata): |
297 | 289 | metadata["foo"] = node.name |
298 | return metadata, True | |
299 | ||
300 | ||
290 | return metadata, DONE | |
291 | ||
292 | @metadata_processor | |
301 | 293 | def second_metadata_processor(metadata): |
302 | 294 | if "foo" in metadata: |
303 | 295 | metadata["bar"] = metadata["foo"] |
304 | # our job is done, returning True | |
305 | return metadata, True | |
296 | return metadata, DONE | |
306 | 297 | else: |
307 | # return False, so we get called again | |
308 | return metadata, False | |
309 | ||
310 | In this example, `second_metadata_processor` might be called before `first_metadata_processor`. But it can't do its job without `metadata["foo"]`, so it needs to be called again until it has become available (because `first_metadata_processor` has been called in the meantime). | |
298 | return metadata, RUN_ME_AGAIN | |
299 | ||
300 | In this example, `"bar"` can only be set once `"foo"` is available, so `second_metadata_processor` has to return `RUN_ME_AGAIN` until `first_metadata_processor` has run. This is necessary because the order in which metadata processors run is undefined. | |
301 | ||
302 | <div class="alert alert-danger">To avoid deadlocks when accessing <strong>other</strong> nodes' metadata from within a metadata processor, use <code>other_node.partial_metadata</code> instead of <code>other_node.metadata</code>. For the same reason, always use the <code>metadata</code> parameter to access the current node's metadata, never <code>node.metadata</code>.</div> | |
303 | ||
304 | Available options: | |
305 | ||
306 | <table> | |
307 | <tr><th>Option</th><th>Description</th></tr> | |
308 | <tr><td><code>DONE</code></td><td>Indicates that this metadata processor has done all it can and need not be called again. Return this whenever possible.</td></tr> | |
309 | <tr><td><code>RUN_ME_AGAIN</code></td><td>Indicates that this metadata processor is still waiting for metadata from another metadata processor to become available.</td></tr> | |
310 | <tr><td><code>DEFAULTS</code></td><td>The returned metadata dictionary will only be used to provide default values. The actual metadata generated so far will be recursively merged into the returned dict.</td></tr> | |
311 | <tr><td><code>OVERWRITE</code></td><td>The returned metadata dictionary will be recursively merged into the actual metadata generated so far (inverse of <code>DEFAULTS</code>).</td></tr> | |
312 | </table> | |
313 | ||
314 | Here is an example of how to use `DEFAULTS`: | |
315 | ||
316 | @metadata_processor | |
317 | def my_metadata_processor(metadata): | |
318 | return { | |
319 | "foo": { | |
320 | "bar": 47, | |
321 | }, | |
322 | }, DONE, DEFAULTS | |
323 | ||
324 | This means `node.metadata["foo"]["bar"]` will be 47 by default, but can also be overridden in static metadata at the node/group level. | |
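| ||
| For symmetry, here is the same sketch using `OVERWRITE` instead: | |
| ||
|     @metadata_processor | |
|     def my_metadata_processor(metadata): | |
|         return { | |
|             "foo": { | |
|                 "bar": 47, | |
|             }, | |
|         }, DONE, OVERWRITE | |
| ||
| In this case, `node.metadata["foo"]["bar"]` will end up as 47 even if static metadata at the node/group level says otherwise, because the returned dict is merged over the metadata generated so far. | |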
325 | ||
326 | For your convenience, you can access `repo`, `node`, `metadata_processor` and all the options in `metadata.py` without importing them. |
88 | 88 | |
89 | 89 | Cannot be set at group level. |
90 | 90 | |
91 | ||
92 | ### magic_number | |
93 | ||
94 | A large number derived from the node's name. This number is very likely to be unique for your entire repository. You can, for example, use this number to easily "jitter" cronjobs: | |
95 | ||
96 | '{} {} * * * root /my/script'.format( | |
97 | node.magic_number % 60, | |
98 | node.magic_number % 2 + 4, | |
99 | ) | |
100 | ||
91 | <br> | |
101 | 92 | |
102 | 93 | ### metadata |
103 | 94 |
19 | 19 | - Python API: guide/api.md |
20 | 20 | - OS compatibility: guide/os_compatibility.md |
21 | 21 | - Migrating to 2.0: guide/migrate_12.md |
22 | - Migrating to 3.0: guide/migrate_23.md | |
22 | 23 | - Repository: |
23 | 24 | - Overview: repo/layout.md |
24 | 25 | - nodes.py: repo/nodes.py.md |
52 | 53 | - About: misc/about.md |
53 | 54 | - Glossary: misc/glossary.md |
54 | 55 | - FAQ: misc/faq.md |
55 | - Alternatives: misc/alternatives.md | |
56 | 56 | - Contributing: misc/contributing.md |
15 | 15 | |
16 | 16 | setup( |
17 | 17 | name="bundlewrap", |
18 | version="2.20.0", | |
18 | version="3.0.1", | |
19 | 19 | description="Config management with Python", |
20 | 20 | long_description=( |
21 | 21 | "By allowing for easy and low-overhead config management, BundleWrap fills the gap between complex deployments using Chef or Puppet and old school system administration over SSH.\n" |
0 | 0 | # -*- coding: utf-8 -*- |
1 | 1 | from __future__ import unicode_literals |
2 | from os.path import join | |
2 | from os.path import exists, join | |
3 | 3 | |
4 | 4 | from bundlewrap.utils.testing import host_os, make_repo, run |
5 | 5 | |
40 | 40 | with open(join(str(tmpdir), "file")) as f: |
41 | 41 | content = f.read() |
42 | 42 | assert content == "1\n2\n3\n" |
43 | ||
44 | ||
45 | def test_precedes_unless(tmpdir): | |
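|     # action2 is skipped via 'unless', but action3 (also tagged tag1) still runs, | |
|     # so the preceding file item is triggered and applied first ("1" before "3") | |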
46 | make_repo( | |
47 | tmpdir, | |
48 | bundles={ | |
49 | "test": { | |
50 | 'files': { | |
51 | join(str(tmpdir), "file"): { | |
52 | 'content': "1\n", | |
53 | 'triggered': True, | |
54 | 'precedes': ["tag:tag1"], | |
55 | }, | |
56 | }, | |
57 | 'actions': { | |
58 | "action2": { | |
59 | 'command': "echo 2 >> {}".format(join(str(tmpdir), "file")), | |
60 | 'tags': ["tag1"], | |
61 | 'unless': 'true', | |
62 | }, | |
63 | "action3": { | |
64 | 'command': "echo 3 >> {}".format(join(str(tmpdir), "file")), | |
65 | 'tags': ["tag1"], | |
66 | 'needs': ["action:action2"], | |
67 | }, | |
68 | }, | |
69 | }, | |
70 | }, | |
71 | nodes={ | |
72 | "localhost": { | |
73 | 'bundles': ["test"], | |
74 | 'os': host_os(), | |
75 | }, | |
76 | }, | |
77 | ) | |
78 | run("bw apply localhost", path=str(tmpdir)) | |
79 | with open(join(str(tmpdir), "file")) as f: | |
80 | content = f.read() | |
81 | assert content == "1\n3\n" | |
82 | ||
83 | ||
84 | def test_precedes_unless2(tmpdir): | |
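|     # both actions are skipped via 'unless', so the preceding file item is | |
|     # never triggered and the file is never created | |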
85 | make_repo( | |
86 | tmpdir, | |
87 | bundles={ | |
88 | "test": { | |
89 | 'files': { | |
90 | join(str(tmpdir), "file"): { | |
91 | 'content': "1\n", | |
92 | 'triggered': True, | |
93 | 'precedes': ["tag:tag1"], | |
94 | }, | |
95 | }, | |
96 | 'actions': { | |
97 | "action2": { | |
98 | 'command': "echo 2 >> {}".format(join(str(tmpdir), "file")), | |
99 | 'tags': ["tag1"], | |
100 | 'unless': 'true', | |
101 | }, | |
102 | "action3": { | |
103 | 'command': "echo 3 >> {}".format(join(str(tmpdir), "file")), | |
104 | 'tags': ["tag1"], | |
105 | 'needs': ["action:action2"], | |
106 | 'unless': 'true', | |
107 | }, | |
108 | }, | |
109 | }, | |
110 | }, | |
111 | nodes={ | |
112 | "localhost": { | |
113 | 'bundles': ["test"], | |
114 | 'os': host_os(), | |
115 | }, | |
116 | }, | |
117 | ) | |
118 | run("bw apply localhost", path=str(tmpdir)) | |
119 | assert not exists(join(str(tmpdir), "file")) | |
120 | ||
121 | ||
122 | def test_precedes_unless3(tmpdir): | |
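|     # here the preceding file item itself is skipped via 'unless', | |
|     # so only the two actions write to the file | |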
123 | make_repo( | |
124 | tmpdir, | |
125 | bundles={ | |
126 | "test": { | |
127 | 'files': { | |
128 | join(str(tmpdir), "file"): { | |
129 | 'content': "1\n", | |
130 | 'triggered': True, | |
131 | 'precedes': ["tag:tag1"], | |
132 | 'unless': 'true', | |
133 | }, | |
134 | }, | |
135 | 'actions': { | |
136 | "action2": { | |
137 | 'command': "echo 2 >> {}".format(join(str(tmpdir), "file")), | |
138 | 'tags': ["tag1"], | |
139 | }, | |
140 | "action3": { | |
141 | 'command': "echo 3 >> {}".format(join(str(tmpdir), "file")), | |
142 | 'tags': ["tag1"], | |
143 | 'needs': ["action:action2"], | |
144 | }, | |
145 | }, | |
146 | }, | |
147 | }, | |
148 | nodes={ | |
149 | "localhost": { | |
150 | 'bundles': ["test"], | |
151 | 'os': host_os(), | |
152 | }, | |
153 | }, | |
154 | ) | |
155 | run("bw apply localhost", path=str(tmpdir)) | |
156 | with open(join(str(tmpdir), "file")) as f: | |
157 | content = f.read() | |
158 | assert content == "2\n3\n" | |
159 | ||
160 | ||
161 | def test_precedes_unless4(tmpdir): | |
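|     # the file is triggered because action3 is scheduled, but action2 fails, | |
|     # so action3 is skipped and only the file's own content remains | |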
162 | make_repo( | |
163 | tmpdir, | |
164 | bundles={ | |
165 | "test": { | |
166 | 'files': { | |
167 | join(str(tmpdir), "file"): { | |
168 | 'content': "1\n", | |
169 | 'triggered': True, | |
170 | 'precedes': ["action:action3"], | |
171 | }, | |
172 | }, | |
173 | 'actions': { | |
174 | "action2": { | |
175 | 'command': "false", | |
176 | 'needs': ["file:{}".format(join(str(tmpdir), "file"))], | |
177 | }, | |
178 | "action3": { | |
179 | 'command': "echo 3 >> {}".format(join(str(tmpdir), "file")), | |
180 | 'needs': ["action:action2"], | |
181 | }, | |
182 | }, | |
183 | }, | |
184 | }, | |
185 | nodes={ | |
186 | "localhost": { | |
187 | 'bundles': ["test"], | |
188 | 'os': host_os(), | |
189 | }, | |
190 | }, | |
191 | ) | |
192 | run("bw apply localhost", path=str(tmpdir)) | |
193 | with open(join(str(tmpdir), "file")) as f: | |
194 | content = f.read() | |
195 | assert content == "1\n" | |
196 | ||
197 | ||
198 | def test_precedes_action(tmpdir): | |
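|     # the triggered action1 precedes action2 and must therefore run first | |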
199 | make_repo( | |
200 | tmpdir, | |
201 | bundles={ | |
202 | "test": { | |
203 | 'actions': { | |
204 | "action1": { | |
205 | 'command': "echo 1 > {}".format(join(str(tmpdir), "file")), | |
206 | 'precedes': ["action:action2"], | |
207 | 'triggered': True, | |
208 | }, | |
209 | "action2": { | |
210 | 'command': "echo 2 >> {}".format(join(str(tmpdir), "file")), | |
211 | }, | |
212 | }, | |
213 | }, | |
214 | }, | |
215 | nodes={ | |
216 | "localhost": { | |
217 | 'bundles': ["test"], | |
218 | 'os': host_os(), | |
219 | }, | |
220 | }, | |
221 | ) | |
222 | run("bw apply localhost", path=str(tmpdir)) | |
223 | with open(join(str(tmpdir), "file")) as f: | |
224 | content = f.read() | |
225 | assert content == "1\n2\n" |
30 | 30 | }, |
31 | 31 | } |
32 | 32 | """) |
33 | stdout, stderr, rcode = run("bw groups -n", path=str(tmpdir)) | |
34 | assert stdout == b"""group1: node1 | |
35 | group2: node2, node3 | |
36 | group3: node1, node3 | |
37 | group4: node3 | |
33 | stdout, stderr, rcode = run("BW_TABLE_STYLE=grep bw groups -i group1,group2,group3,group4 nodes", path=str(tmpdir)) | |
34 | assert stdout == b"""group1\tnode1 | |
35 | group2\tnode2,node3 | |
36 | group3\tnode1,node3 | |
37 | group4\tnode3 | |
38 | 38 | """ |
39 | 39 | assert stderr == b"" |
40 | 40 | assert rcode == 0 |
69 | 69 | }, |
70 | 70 | } |
71 | 71 | """) |
72 | stdout, stderr, rcode = run("bw groups -n", path=str(tmpdir)) | |
73 | assert stdout == b"""group1: node3, node4 | |
74 | group2: node4 | |
75 | group3: node1, node2, node3, node4 | |
76 | group4: node1, node3, node4 | |
72 | stdout, stderr, rcode = run("BW_TABLE_STYLE=grep bw groups -i group1,group2,group3,group4 nodes", path=str(tmpdir)) | |
73 | assert stdout == b"""group1\tnode3,node4 | |
74 | group2\tnode4 | |
75 | group3\tnode1,node2,node3,node4 | |
76 | group4\tnode1,node3,node4 | |
77 | 77 | """ |
78 | 78 | assert stderr == b"" |
79 | 79 | assert rcode == 0 |
101 | 101 | }, |
102 | 102 | } |
103 | 103 | """) |
104 | stdout, stderr, rcode = run("bw groups -n", path=str(tmpdir)) | |
105 | assert stdout == b"""group1: node1 | |
106 | group2: node2 | |
104 | stdout, stderr, rcode = run("BW_TABLE_STYLE=grep bw groups -i group1,group2 nodes", path=str(tmpdir)) | |
105 | assert stdout == b"""group1\tnode1 | |
106 | group2\tnode2 | |
107 | 107 | """ |
108 | 108 | assert stderr == b"" |
109 | 109 | assert rcode == 0 |
136 | 136 | }, |
137 | 137 | } |
138 | 138 | """) |
139 | stdout, stderr, rcode = run("bw groups -n", path=str(tmpdir)) | |
140 | assert stdout == b"""group1: node1, node2 | |
141 | group2: node2 | |
142 | group3: node2 | |
139 | stdout, stderr, rcode = run("BW_TABLE_STYLE=grep bw groups -i group1,group2,group3 nodes", path=str(tmpdir)) | |
140 | assert stdout == b"""group1\tnode1,node2 | |
141 | group2\tnode2 | |
142 | group3\tnode2 | |
143 | 143 | """ |
144 | 144 | assert stderr == b"" |
145 | 145 | assert rcode == 0 |
183 | 183 | }, |
184 | 184 | } |
185 | 185 | """) |
186 | stdout, stderr, rcode = run("bw groups -n", path=str(tmpdir)) | |
187 | assert stdout == b"""inner_group: node_NOT_in_group, node_in_group | |
188 | intermediate_group: node_in_group | |
189 | super_group: node_in_group | |
186 | stdout, stderr, rcode = run("BW_TABLE_STYLE=grep bw groups -i inner_group,intermediate_group,super_group nodes", path=str(tmpdir)) | |
187 | assert stdout == b"""inner_group\tnode_NOT_in_group,node_in_group | |
188 | intermediate_group\tnode_in_group | |
189 | super_group\tnode_in_group | |
190 | 190 | """ |
191 | 191 | assert stderr == b"" |
192 | 192 | assert rcode == 0 |
51 | 51 | 'files': { |
52 | 52 | "/test": { |
53 | 53 | 'content': "${node.name}", |
54 | 'group': None, # BSD has a different default and we don't want to | |
55 | # deal with that here | |
54 | 56 | }, |
55 | 57 | }, |
56 | 58 | }, |
64 | 66 | hashes.add(stdout.strip()) |
65 | 67 | |
66 | 68 | assert len(hashes) == 1 |
67 | assert hashes.pop() == b"8c155b4e7056463eb2c8a8345f4f316f6d7359f6" | |
69 | assert hashes.pop() == b"2203e7acc35608bbff471c023b7b7498e5b385d9" | |
68 | 70 | |
69 | 71 | |
70 | 72 | def test_dict(tmpdir): |
80 | 82 | 'files': { |
81 | 83 | "/test": { |
82 | 84 | 'content': "yes please", |
85 | 'group': None, # BSD has a different default and we don't want to | |
86 | # deal with that here | |
83 | 87 | }, |
84 | 88 | }, |
85 | 89 | }, |
88 | 92 | |
89 | 93 | stdout, stderr, rcode = run("bw hash -d", path=str(tmpdir)) |
90 | 94 | assert rcode == 0 |
91 | assert stdout == b"8ab35c696b63a853ccf568b27a50e24a69964487 node1\n" | |
95 | assert stdout == b"93e7a2c6e8cdc71fb4df5426bc0d0bb978d84381 node1\n" | |
92 | 96 | |
93 | 97 | stdout, stderr, rcode = run("bw hash -d node1", path=str(tmpdir)) |
94 | 98 | assert rcode == 0 |
95 | assert stdout == b"503583964eadabacb18fda32cc9fb1e9f66e424b file:/test\n" | |
99 | assert stdout == b"59d1a7c79640ccdfd3700ab141698a9389fcd0b7 file:/test\n" | |
96 | 100 | |
97 | 101 | stdout, stderr, rcode = run("bw hash -d node1 file:/test", path=str(tmpdir)) |
98 | 102 | assert rcode == 0 |
99 | 103 | assert stdout == ( |
100 | 104 | b"content_hash\tc05a36d547e2b1682472f76985018038d1feebc5\n" |
105 | b"mode\t0644\n" | |
106 | b"owner\troot\n" | |
101 | 107 | b"type\tfile\n" |
102 | 108 | ) |
103 | 109 |
23 | 23 | }, |
24 | 24 | ) |
25 | 25 | |
26 | stdout, stderr, rcode = run("bw items -f /test node1", path=str(tmpdir)) | |
26 | stdout, stderr, rcode = run("bw items -f node1 file:/test", path=str(tmpdir)) | |
27 | 27 | assert stdout == "föö".encode('utf-8') # our output is always utf-8 |
28 | 28 | assert rcode == 0 |
29 | 29 | |
76 | 76 | }, |
77 | 77 | ) |
78 | 78 | |
79 | stdout, stderr, rcode = run("bw items -f /test node1", path=str(tmpdir)) | |
79 | stdout, stderr, rcode = run("bw items -f node1 file:/test", path=str(tmpdir)) | |
80 | 80 | assert rcode == 1 |
81 | 81 | |
82 | 82 |
90 | 90 | ) |
91 | 91 | with open(join(str(tmpdir), "bundles", "test", "metadata.py"), 'w') as f: |
92 | 92 | f.write( |
93 | """def foo(metadata): | |
93 | """@metadata_processor | |
94 | def foo(metadata): | |
94 | 95 | metadata["baz"] = node.name |
95 | return metadata | |
96 | return metadata, DONE | |
96 | 97 | """) |
97 | 98 | stdout, stderr, rcode = run("bw metadata node1", path=str(tmpdir)) |
98 | 99 | assert loads(stdout.decode()) == { |
103 | 104 | assert rcode == 0 |
104 | 105 | |
105 | 106 | |
106 | def test_metadatapy_loop(tmpdir): | |
107 | def test_metadatapy_defaults(tmpdir): | |
107 | 108 | make_repo( |
108 | 109 | tmpdir, |
109 | 110 | bundles={"test": {}}, |
110 | 111 | nodes={ |
111 | 112 | "node1": { |
112 | 113 | 'bundles': ["test"], |
113 | 'metadata': {"foo": 1}, | |
114 | 'metadata': {"foo": "bar"}, | |
114 | 115 | }, |
115 | 116 | }, |
116 | 117 | ) |
117 | 118 | with open(join(str(tmpdir), "bundles", "test", "metadata.py"), 'w') as f: |
118 | 119 | f.write( |
119 | """def foo(metadata): | |
120 | metadata["foo"] += 1 | |
121 | return metadata | |
120 | """@metadata_processor | |
121 | def foo(metadata): | |
122 | return { | |
123 | "foo": "baz", | |
124 | "baz": "foo", | |
125 | }, DONE, DEFAULTS | |
122 | 126 | """) |
123 | 127 | stdout, stderr, rcode = run("bw metadata node1", path=str(tmpdir)) |
124 | assert rcode == 1 | |
128 | assert loads(stdout.decode()) == { | |
129 | "baz": "foo", | |
130 | "foo": "bar", | |
131 | } | |
132 | assert stderr == b"" | |
133 | assert rcode == 0 | |
134 | ||
135 | ||
136 | def test_metadatapy_update(tmpdir): | |
137 | make_repo( | |
138 | tmpdir, | |
139 | bundles={"test": {}}, | |
140 | nodes={ | |
141 | "node1": { | |
142 | 'bundles': ["test"], | |
143 | 'metadata': {"foo": "bar"}, | |
144 | }, | |
145 | }, | |
146 | ) | |
147 | with open(join(str(tmpdir), "bundles", "test", "metadata.py"), 'w') as f: | |
148 | f.write( | |
149 | """@metadata_processor | |
150 | def foo(metadata): | |
151 | return { | |
152 | "foo": "baz", | |
153 | "baz": "foo", | |
154 | }, DONE, OVERWRITE | |
155 | """) | |
156 | stdout, stderr, rcode = run("bw metadata node1", path=str(tmpdir)) | |
157 | assert loads(stdout.decode()) == { | |
158 | "baz": "foo", | |
159 | "foo": "baz", | |
160 | } | |
161 | assert stderr == b"" | |
162 | assert rcode == 0 | |
125 | 163 | |
126 | 164 | |
127 | 165 | def test_table(tmpdir): |
0 | from os.path import join | |
1 | ||
2 | 0 | from bundlewrap.utils.testing import make_repo, run |
3 | 1 | |
4 | 2 | |
19 | 17 | |
20 | 18 | |
21 | 19 | def test_hostname(tmpdir): |
22 | make_repo(tmpdir, nodes={"node1": {'hostname': "node1.example.com"}}) | |
23 | stdout, stderr, rcode = run("BW_TABLE_STYLE=grep bw nodes --attrs | grep '\thostname' | cut -f 3", path=str(tmpdir)) | |
24 | assert stdout == b"node1.example.com\n" | |
25 | assert stderr == b"" | |
26 | assert rcode == 0 | |
27 | ||
28 | ||
29 | def test_inline(tmpdir): | |
30 | 20 | make_repo( |
31 | 21 | tmpdir, |
32 | nodes={ | |
33 | "node1": { | |
34 | 'bundles': ["bundle1", "bundle2"], | |
35 | }, | |
36 | "node2": { | |
37 | 'bundles': ["bundle1"], | |
38 | }, | |
39 | }, | |
40 | bundles={ | |
41 | "bundle1": {}, | |
42 | "bundle2": {}, | |
43 | }, | |
22 | groups={"all": {'members': ["node1"]}}, | |
23 | nodes={"node1": {'hostname': "node1.example.com"}}, | |
44 | 24 | ) |
45 | stdout, stderr, rcode = run("BW_TABLE_STYLE=grep bw nodes -ai | grep '\tbundles' | grep bundle2 | cut -f 1", path=str(tmpdir)) | |
46 | assert stdout == b"node1\n" | |
47 | assert stderr == b"" | |
48 | assert rcode == 0 | |
49 | ||
50 | stdout, stderr, rcode = run("BW_TABLE_STYLE=grep bw nodes -ai | grep '\tbundles' | grep -v bundle2 | cut -f 1", path=str(tmpdir)) | |
51 | assert stdout == b"node2\n" | |
52 | assert stderr == b"" | |
53 | assert rcode == 0 | |
54 | ||
55 | ||
56 | def test_in_group(tmpdir): | |
57 | make_repo( | |
58 | tmpdir, | |
59 | groups={ | |
60 | "group1": { | |
61 | 'members': ["node2"], | |
62 | }, | |
63 | }, | |
64 | nodes={ | |
65 | "node1": {}, | |
66 | "node2": {}, | |
67 | }, | |
68 | ) | |
69 | stdout, stderr, rcode = run("bw nodes -g group1", path=str(tmpdir)) | |
70 | assert stdout == b"node2\n" | |
25 | stdout, stderr, rcode = run("BW_TABLE_STYLE=grep bw nodes all hostname | cut -f 2", path=str(tmpdir)) | |
26 | assert stdout == b"node1.example.com\n" | |
71 | 27 | assert stderr == b"" |
72 | 28 | assert rcode == 0 |
73 | 29 | |
79 | 35 | "bundle1": {}, |
80 | 36 | "bundle2": {}, |
81 | 37 | }, |
38 | groups={"all": {'members': ["node1", "node2"]}}, | |
82 | 39 | nodes={ |
83 | 40 | "node1": {'bundles': ["bundle1", "bundle2"]}, |
84 | 41 | "node2": {'bundles': ["bundle2"]}, |
85 | 42 | }, |
86 | 43 | ) |
87 | stdout, stderr, rcode = run("bw nodes --bundles", path=str(tmpdir)) | |
88 | assert stdout.decode().strip().split("\n") == [ | |
89 | "node1: bundle1, bundle2", | |
90 | "node2: bundle2", | |
91 | ] | |
44 | stdout, stderr, rcode = run("BW_TABLE_STYLE=grep bw nodes all bundles | grep node1 | cut -f 2", path=str(tmpdir)) | |
45 | assert stdout.decode().strip().split("\n") == ["bundle1", "bundle2"] | |
92 | 46 | assert stderr == b"" |
93 | 47 | assert rcode == 0 |
94 | ||
95 | ||
96 | def test_groups(tmpdir): | |
97 | make_repo( | |
98 | tmpdir, | |
99 | groups={ | |
100 | "group1": { | |
101 | 'members': ["node2"], | |
102 | }, | |
103 | "group2": { | |
104 | 'members': ["node1"], | |
105 | }, | |
106 | "group3": { | |
107 | 'subgroup_patterns': ["p2"], | |
108 | }, | |
109 | "group4": { | |
110 | 'subgroups': ["group1"], | |
111 | }, | |
112 | }, | |
113 | nodes={ | |
114 | "node1": {}, | |
115 | "node2": {}, | |
116 | }, | |
117 | ) | |
118 | stdout, stderr, rcode = run("bw nodes --groups", path=str(tmpdir)) | |
119 | assert stdout.decode().strip().split("\n") == [ | |
120 | "node1: group2, group3", | |
121 | "node2: group1, group4", | |
122 | ] | |
123 | assert stderr == b"" | |
124 | assert rcode == 0 | |
125 | ||
126 | ||
127 | def test_group_members_remove_bundle(tmpdir): | |
128 | make_repo( | |
129 | tmpdir, | |
130 | bundles={ | |
131 | "bundle1": {}, | |
132 | "bundle2": {}, | |
133 | }, | |
134 | nodes={ | |
135 | "node1": {}, | |
136 | "node2": {}, | |
137 | }, | |
138 | ) | |
139 | with open(join(str(tmpdir), "groups.py"), 'w') as f: | |
140 | f.write(""" | |
141 | groups = { | |
142 | "group1": { | |
143 | 'bundles': ["bundle1"], | |
144 | 'members': ["node1", "node2"], | |
145 | }, | |
146 | "group2": { | |
147 | 'bundles': ["bundle1", "bundle2"], | |
148 | 'members': ["node1", "node2"], | |
149 | 'members_remove': lambda node: node.name == "node2", | |
150 | }, | |
151 | } | |
152 | """) | |
153 | stdout, stderr, rcode = run("BW_TABLE_STYLE=grep bw nodes -a node1 | grep '\tbundles' | cut -f 3", path=str(tmpdir)) | |
154 | assert stdout == b"bundle1\nbundle2\n" | |
155 | assert stderr == b"" | |
156 | assert rcode == 0 | |
157 | ||
158 | stdout, stderr, rcode = run("BW_TABLE_STYLE=grep bw nodes -a node2 | grep '\tbundles' | cut -f 3", path=str(tmpdir)) | |
159 | assert stdout == b"bundle1\n" | |
160 | assert stderr == b"" | |
161 | assert rcode == 0 |
6 | 6 | make_repo(tmpdir) |
7 | 7 | stdout, stderr, rcode = run("bw test", path=str(tmpdir)) |
8 | 8 | assert stdout == b"" |
9 | assert stderr == b"" | |
10 | assert rcode == 0 | |
9 | 11 | |
10 | 12 | |
11 | 13 | def test_bundle_not_found(tmpdir): |
36 | 38 | def test_node(repo, node, **kwargs): |
37 | 39 | io.stdout("BBB") |
38 | 40 | """) |
39 | assert b"AAA" in run("bw test", path=str(tmpdir))[0] | |
40 | assert b"BBB" in run("bw test", path=str(tmpdir))[0] | |
41 | assert b"AAA" in run("bw test -H", path=str(tmpdir))[0] | |
42 | assert b"BBB" in run("bw test -J", path=str(tmpdir))[0] | |
41 | 43 | |
42 | 44 | |
43 | 45 | def test_circular_dep_direct(tmpdir): |
61 | 63 | }, |
62 | 64 | }, |
63 | 65 | ) |
64 | assert run("bw test", path=str(tmpdir))[2] == 1 | |
66 | assert run("bw test -I", path=str(tmpdir))[2] == 1 | |
65 | 67 | |
66 | 68 | |
67 | 69 | def test_circular_dep_indirect(tmpdir): |
88 | 90 | }, |
89 | 91 | }, |
90 | 92 | ) |
91 | assert run("bw test", path=str(tmpdir))[2] == 1 | |
93 | assert run("bw test -I", path=str(tmpdir))[2] == 1 | |
92 | 94 | |
93 | 95 | |
94 | 96 | def test_circular_dep_self(tmpdir): |
109 | 111 | }, |
110 | 112 | }, |
111 | 113 | ) |
112 | assert run("bw test", path=str(tmpdir))[2] == 1 | |
114 | assert run("bw test -I", path=str(tmpdir))[2] == 1 | |
113 | 115 | |
114 | 116 | |
115 | 117 | def test_circular_trigger_self(tmpdir): |
130 | 132 | }, |
131 | 133 | }, |
132 | 134 | ) |
133 | assert run("bw test", path=str(tmpdir))[2] == 1 | |
135 | assert run("bw test -I", path=str(tmpdir))[2] == 1 | |
134 | 136 | |
135 | 137 | |
136 | 138 | def test_file_invalid_attribute(tmpdir): |
151 | 153 | }, |
152 | 154 | }, |
153 | 155 | ) |
154 | assert run("bw test", path=str(tmpdir))[2] == 1 | |
156 | assert run("bw test -I", path=str(tmpdir))[2] == 1 | |
155 | 157 | |
156 | 158 | |
157 | 159 | def test_file_template_error(tmpdir): |
173 | 175 | }, |
174 | 176 | }, |
175 | 177 | ) |
176 | assert run("bw test", path=str(tmpdir))[2] == 1 | |
178 | assert run("bw test -I", path=str(tmpdir))[2] == 1 | |
177 | 179 | |
178 | 180 | |
179 | 181 | def test_group_loop(tmpdir): |
191 | 193 | }, |
192 | 194 | }, |
193 | 195 | ) |
194 | assert run("bw test", path=str(tmpdir))[2] == 1 | |
196 | assert run("bw test -S", path=str(tmpdir))[2] == 1 | |
195 | 197 | |
196 | 198 | |
197 | 199 | def test_group_metadata_collision(tmpdir): |
222 | 224 | }, |
223 | 225 | }, |
224 | 226 | ) |
225 | assert run("bw test", path=str(tmpdir))[2] == 1 | |
227 | assert run("bw test -M", path=str(tmpdir))[2] == 1 | |
226 | 228 | |
227 | 229 | |
228 | 230 | def test_group_metadata_collision_subgroups(tmpdir): |
253 | 255 | }, |
254 | 256 | }, |
255 | 257 | ) |
256 | assert run("bw test", path=str(tmpdir))[2] == 0 | |
258 | assert run("bw test -M", path=str(tmpdir))[2] == 0 | |
257 | 259 | |
258 | 260 | |
259 | 261 | def test_group_metadata_collision_list(tmpdir): |
275 | 277 | }, |
276 | 278 | }, |
277 | 279 | ) |
278 | assert run("bw test", path=str(tmpdir))[2] == 1 | |
280 | assert run("bw test -M", path=str(tmpdir))[2] == 1 | |
279 | 281 | |
280 | 282 | |
281 | 283 | def test_group_metadata_collision_dict(tmpdir): |
297 | 299 | }, |
298 | 300 | }, |
299 | 301 | ) |
300 | assert run("bw test", path=str(tmpdir))[2] == 1 | |
302 | assert run("bw test -M", path=str(tmpdir))[2] == 1 | |
301 | 303 | |
302 | 304 | |
303 | 305 | def test_group_metadata_collision_dict_ok(tmpdir): |
319 | 321 | }, |
320 | 322 | }, |
321 | 323 | ) |
322 | assert run("bw test", path=str(tmpdir))[2] == 0 | |
324 | assert run("bw test -M", path=str(tmpdir))[2] == 0 | |
323 | 325 | |
324 | 326 | |
325 | 327 | def test_group_metadata_collision_set(tmpdir): |
341 | 343 | }, |
342 | 344 | }, |
343 | 345 | ) |
344 | assert run("bw test", path=str(tmpdir))[2] == 1 | |
346 | assert run("bw test -M", path=str(tmpdir))[2] == 1 | |
345 | 347 | |
346 | 348 | |
347 | 349 | def test_group_metadata_collision_set_ok(tmpdir): |
363 | 365 | }, |
364 | 366 | }, |
365 | 367 | ) |
366 | assert run("bw test", path=str(tmpdir))[2] == 0 | |
368 | assert run("bw test -M", path=str(tmpdir))[2] == 0 | |
367 | 369 | |
368 | 370 | |
369 | 371 | def test_fault_missing(tmpdir): |
385 | 387 | }, |
386 | 388 | }, |
387 | 389 | ) |
388 | assert run("bw test", path=str(tmpdir))[2] == 1 | |
389 | assert run("bw test --ignore-missing-faults", path=str(tmpdir))[2] == 0 | |
390 | assert run("bw test -I", path=str(tmpdir))[2] == 1 | |
391 | assert run("bw test -iI", path=str(tmpdir))[2] == 0 | |
390 | 392 | |
391 | 393 | |
392 | 394 | def test_metadata_determinism_ok(tmpdir): |
402 | 404 | }, |
403 | 405 | ) |
404 | 406 | with open(join(str(tmpdir), "bundles", "bundle1", "metadata.py"), 'w') as f: |
405 | f.write(""" | |
407 | f.write("""@metadata_processor | |
406 | 408 | def test(metadata): |
407 | 409 | metadata['test'] = 1 |
408 | return metadata | |
410 | return metadata, DONE | |
409 | 411 | """) |
410 | 412 | assert run("bw test -m 3", path=str(tmpdir))[2] == 0 |
411 | 413 | |
423 | 425 | }, |
424 | 426 | ) |
425 | 427 | with open(join(str(tmpdir), "bundles", "bundle1", "metadata.py"), 'w') as f: |
426 | f.write("""from random import randint as _randint | |
427 | ||
428 | f.write("""from random import randint | |
429 | ||
430 | @metadata_processor | |
428 | 431 | def test(metadata): |
429 | metadata.setdefault('test', _randint(1, 99999)) | |
430 | return metadata | |
432 | metadata.setdefault('test', randint(1, 99999)) | |
433 | return metadata, DONE | |
431 | 434 | """) |
432 | 435 | assert run("bw test -m 3", path=str(tmpdir))[2] == 1 |
433 | 436 | |
492 | 495 | assert run("bw test group2", path=str(tmpdir))[2] == 1 |
493 | 496 | |
494 | 497 | |
498 | def test_empty_group(tmpdir): | |
499 | make_repo( | |
500 | tmpdir, | |
501 | nodes={ | |
502 | "node1": {}, | |
503 | }, | |
504 | groups={ | |
505 | "group1": {}, | |
506 | "group2": {'members': ["node1"]}, | |
507 | }, | |
508 | ) | |
509 | assert run("bw test", path=str(tmpdir))[2] == 0 | |
510 | assert run("bw test -e", path=str(tmpdir))[2] == 1 | |
511 | ||
512 | ||
495 | 513 | def test_group_user_dep_deleted(tmpdir): |
496 | 514 | make_repo( |
497 | 515 | tmpdir, |
515 | 533 | }, |
516 | 534 | }, |
517 | 535 | ) |
518 | assert run("bw test", path=str(tmpdir))[2] == 1 | |
536 | assert run("bw test -I", path=str(tmpdir))[2] == 1 | |
519 | 537 | |
520 | 538 | |
521 | 539 | def test_group_user_dep_ok(tmpdir): |
538 | 556 | }, |
539 | 557 | }, |
540 | 558 | ) |
541 | assert run("bw test", path=str(tmpdir))[2] == 0 | |
559 | assert run("bw test -I", path=str(tmpdir))[2] == 0 | |
542 | 560 | |
543 | 561 | |
544 | 562 | def test_group_user_dep_deleted_gid(tmpdir): |
564 | 582 | }, |
565 | 583 | }, |
566 | 584 | ) |
567 | assert run("bw test", path=str(tmpdir))[2] == 1 | |
585 | assert run("bw test -I", path=str(tmpdir))[2] == 1 |
78 | 78 | def test_format_password(tmpdir): |
79 | 79 | make_repo(tmpdir) |
80 | 80 | |
81 | stdout, stderr, rcode = run("bw debug -c 'print(repo.vault.format(\"format: {}\", repo.vault.password_for(\"testing\")))'", path=str(tmpdir)) | |
81 | stdout, stderr, rcode = run("bw debug -c 'print(repo.vault.password_for(\"testing\").format_into(\"format: {}\"))'", path=str(tmpdir)) | |
82 | 82 | assert stdout == b"format: faCTT76kagtDuZE5wnoiD1CxhGKmbgiX\n" |
83 | 83 | assert stderr == b"" |
84 | 84 | assert rcode == 0 |