bundlewrap / debian/3.0.1-1
Imported Debian patch 3.0.1-1 (Jonathan Carter, 6 years ago)
67 changed file(s) with 1444 addition(s) and 1122 deletion(s).
66 - 3.6
77 install:
88 - pip install .
9 - pip install idna==2.5
109 before_script:
1110 - ssh-keygen -f ~/.ssh/id_rsa -N ""
1211 - cp ~/.ssh/id_rsa.pub ~/.ssh/authorized_keys
0 # 3.0.1
1
2 2017-09-25
3
4 * fixed `bw run`
5 * fixed `bw test -e`
6
7
8 # 3.0.0
9
10 2017-09-24
11
12 * new metadata processor API and options (BACKWARDS INCOMPATIBLE; see the sketch below)
13 * files, directories, and symlinks now have defaults for owner, group, and mode (BACKWARDS INCOMPATIBLE)
14 * overhauled options and output of `bw groups` (BACKWARDS INCOMPATIBLE)
15 * overhauled options and output of `bw nodes` (BACKWARDS INCOMPATIBLE)
16 * overhauled options and output of `bw run` (BACKWARDS INCOMPATIBLE)
17 * overhauled options of `bw test` (BACKWARDS INCOMPATIBLE)
18 * svc_systemd services are now 'enabled' by default (BACKWARDS INCOMPATIBLE)
19 * `bw items --file-preview` no longer uses a separate file path argument (BACKWARDS INCOMPATIBLE)
20 * removed `bw apply --profiling` (BACKWARDS INCOMPATIBLE)
21 * removed `Item.display_keys()` (BACKWARDS INCOMPATIBLE)
22 * changed return value of `Item.display_dicts()` (BACKWARDS INCOMPATIBLE)
23 * changed `Item.BLOCK_CONCURRENT` into a class method (BACKWARDS INCOMPATIBLE)
24 * removed `repo.vault.format()` (BACKWARDS INCOMPATIBLE)
25 * removed env vars: BWADDHOSTKEYS, BWCOLORS, BWITEMWORKERS, BWNODEWORKERS (BACKWARDS INCOMPATIBLE)
26
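A minimal sketch of the new metadata processor API (illustrative only: the bundle name `myapp` and the function are hypothetical, and the exact merge semantics of the option flags are described in the upstream docs). The names `metadata_processor`, `DEFAULTS`, `DONE`, `RUN_ME_AGAIN` and `OVERWRITE` are injected into each bundle's `metadata.py`, as the `bundle.py` hunk further below shows:

```python
# metadata.py of a hypothetical bundle -- no imports needed, the names
# used here are injected by BundleWrap (see base_env in bundle.py).

@metadata_processor
def port_defaults(metadata):
    # hypothetical keys, for illustration
    metadata.setdefault('myapp', {})
    metadata['myapp'].setdefault('port', 8080)
    # assumption: processors return the metadata dict plus option flags
    return metadata, DONE
```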
27
28 # 2.20.1
29
30 2017-09-21
31
32 * improved performance of metadata processors
33 * pkg_* and svc_* items no longer throw exceptions when their commands fail
34 * fixed BW_DEBUG_LOG_DIR with `bw debug`
35 * fixed 'precedes' attribute for actions
36
37
038 # 2.20.0
139
240 2017-08-15
00 # -*- coding: utf-8 -*-
11 from __future__ import unicode_literals
22
3 VERSION = (2, 20, 0)
3 VERSION = (3, 0, 1)
44 VERSION_STRING = ".".join([str(v) for v in VERSION])
33 from os.path import exists, join
44
55 from .exceptions import NoSuchBundle, RepositoryError
6 from .metadata import DEFAULTS, DONE, RUN_ME_AGAIN, OVERWRITE
67 from .utils import cached_property, get_all_attrs_from_file
78 from .utils.text import mark_for_translation as _
89 from .utils.text import validate_name
1112
1213 FILENAME_BUNDLE = "items.py"
1314 FILENAME_METADATA = "metadata.py"
15
16
17 def metadata_processor(func):
18 """
19 Decorator that tags metadata processors.
20 """
21 func.__is_a_metadata_processor = True
22 return func
1423
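# Illustration (hypothetical functions): only callables tagged by the
# decorator above are collected into Bundle.metadata_processors below.
#
#     @metadata_processor
#     def add_defaults(metadata):
#         return metadata, DONE        # collected
#
#     def _helper(metadata):
#         return metadata              # ignored: not tagged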
1524
1625 class Bundle(object):
91100 for name, attr in get_all_attrs_from_file(
92101 self.metadata_file,
93102 base_env={
103 'DEFAULTS': DEFAULTS,
104 'DONE': DONE,
105 'RUN_ME_AGAIN': RUN_ME_AGAIN,
106 'OVERWRITE': OVERWRITE,
107 'metadata_processor': metadata_processor,
94108 'node': self.node,
95109 'repo': self.repo,
96110 },
97111 ).items():
98 if name.startswith("_") or not callable(attr):
99 continue
100 result.append(attr)
112 if getattr(attr, '__is_a_metadata_processor', False):
113 result.append(attr)
101114 return result
00 # -*- coding: utf-8 -*-
11 from __future__ import unicode_literals
22
3 from cProfile import Profile
34 from functools import wraps
45 from os import environ
56 from os.path import abspath, dirname
100101 if not hasattr(pargs, 'func'):
101102 parser_bw.print_help()
102103 exit(2)
104 if pargs.profile:
105 profile = Profile()
106 profile.enable()
103107
104108 path = abspath(pargs.repo_path)
105109 io.debug_mode = pargs.debug
106110 io.activate()
107111 io.debug(_("invocation: {}").format(" ".join([force_text(arg) for arg in argv])))
108
109 if 'BWADDHOSTKEYS' in environ: # TODO remove in 3.0.0
110 environ.setdefault('BW_ADD_HOST_KEYS', environ['BWADDHOSTKEYS'])
111 if 'BWCOLORS' in environ: # TODO remove in 3.0.0
112 environ.setdefault('BW_COLORS', environ['BWCOLORS'])
113 if 'BWITEMWORKERS' in environ: # TODO remove in 3.0.0
114 environ.setdefault('BW_ITEM_WORKERS', environ['BWITEMWORKERS'])
115 if 'BWNODEWORKERS' in environ: # TODO remove in 3.0.0
116 environ.setdefault('BW_NODE_WORKERS', environ['BWNODEWORKERS'])
117112
118113 environ.setdefault('BW_ADD_HOST_KEYS', "1" if pargs.add_ssh_host_keys else "0")
119114
151146 pargs.func(repo, text_pargs)
152147 finally:
153148 io.deactivate()
149 if pargs.profile:
150 profile.disable()
151 profile.dump_stats(pargs.profile)
5757 'interactive': args['interactive'],
5858 'skip_list': skip_list,
5959 'workers': args['item_workers'],
60 'profiling': args['profiling'],
6160 },
6261 }
6362
6665 return
6766 skip_list.add(task_id)
6867 results.append(return_value)
69 if args['profiling']:
70 total_time = 0.0
71 io.stdout(_(" {}").format(bold(task_id)))
72 io.stdout(_(" {} BEGIN PROFILING DATA "
73 "(most expensive items first)").format(bold(task_id)))
74 io.stdout(_(" {} seconds item").format(bold(task_id)))
75 for time_elapsed, item_id in return_value.profiling_info:
76 io.stdout(" {} {:10.3f} {}".format(
77 bold(task_id),
78 time_elapsed.total_seconds(),
79 item_id,
80 ))
81 total_time += time_elapsed.total_seconds()
82 io.stdout(_(" {} {:10.3f} (total)").format(bold(task_id), total_time))
83 io.stdout(_(" {} END PROFILING DATA").format(bold(task_id)))
84 io.stdout(_(" {}").format(bold(task_id)))
8568
8669 def handle_exception(task_id, exception, traceback):
8770 if isinstance(exception, ItemDependencyLoop):
142125 ], ROW_SEPARATOR]
143126
144127 for result in results:
145 totals['items'] += len(result.profiling_info)
128 totals['items'] += result.total
146129 for metric in ('correct', 'fixed', 'skipped', 'failed'):
147130 totals[metric] += getattr(result, metric)
148131 rows.append([
149132 result.node_name,
150 str(len(result.profiling_info)),
133 str(result.total),
151134 str(result.correct),
152135 green_unless_zero(result.fixed),
153136 yellow_unless_zero(result.skipped),
00 # -*- coding: utf-8 -*-
11 from __future__ import unicode_literals
22
3 from ..utils import names
3 from ..group import GROUP_ATTR_DEFAULTS
4 from ..utils.text import bold, mark_for_translation as _
45 from ..utils.ui import io
6 from .nodes import _attribute_table
7
8
9 GROUP_ATTRS = sorted(list(GROUP_ATTR_DEFAULTS) + ['nodes'])
10 GROUP_ATTRS_LISTS = ('nodes',)
511
612
713 def bw_groups(repo, args):
8 for group in repo.groups:
9 line = group.name
10 if args['show_nodes']:
11 line += ": " + ", ".join(names(group.nodes))
12 io.stdout(line)
14 if not args['groups']:
15 for group in repo.groups:
16 io.stdout(group.name)
17 else:
18 groups = [repo.get_group(group.strip()) for group in args['groups'].split(",")]
19 if not args['attrs']:
20 subgroups = set(groups)
21 for group in groups:
22 subgroups = subgroups.union(group.subgroups)
23 for subgroup in sorted(subgroups):
24 io.stdout(subgroup.name)
25 else:
26 _attribute_table(
27 groups,
28 bold(_("group")),
29 args['attrs'],
30 GROUP_ATTRS,
31 GROUP_ATTRS_LISTS,
32 args['inline'],
33 )
2727
2828 def bw_items(repo, args):
2929 node = get_node(repo, args['node'], adhoc_nodes=args['adhoc_nodes'])
30 if args['file_preview']:
31 item = get_item(node, "file:{}".format(args['file_preview']))
32 if (
33 item.attributes['content_type'] in ('any', 'base64', 'binary') or
34 item.attributes['delete'] is True
35 ):
36 io.stderr(_(
37 "{x} cannot preview {file} on {node} (unsuitable content_type or deleted)"
38 ).format(x=red("!!!"), file=item.id, node=node.name))
30 if args['file_preview'] and not args['item']:
31 io.stderr(_("{x} no ITEM given for file preview").format(x=red("!!!")))
32 exit(1)
33 elif args['file_preview_path']:
34 if args['item']:
35 io.stderr(_("{x} use --file-preview to preview single files").format(x=red("!!!")))
3936 exit(1)
40 else:
41 try:
42 io.stdout(item.content.decode(item.attributes['encoding']), append_newline=False)
43 except FaultUnavailable:
44 io.stderr(_(
45 "{x} skipped {path} (Fault unavailable)"
46 ).format(x=yellow("»"), path=bold(item.name)))
47 exit(1)
48 elif args['file_preview_path']:
4937 if exists(args['file_preview_path']):
5038 io.stderr(_(
5139 "not writing to existing path: {path}"
8775 ))
8876 elif args['item']:
8977 item = get_item(node, args['item'])
90 if args['show_sdict']:
91 statedict = item.sdict()
78 if args['file_preview']:
79 if item.ITEM_TYPE_NAME != 'file':
80 io.stderr(_(
81 "{x} cannot preview {item} on {node} (not a file)"
82 ).format(x=red("!!!"), item=item.id, node=node.name))
83 exit(1)
84 if (
85 item.attributes['content_type'] in ('any', 'base64', 'binary') or
86 item.attributes['delete'] is True
87 ):
88 io.stderr(_(
89 "{x} cannot preview {file} on {node} (unsuitable content_type or deleted)"
90 ).format(x=red("!!!"), file=item.id, node=node.name))
91 exit(1)
92 else:
93 try:
94 io.stdout(
95 item.content.decode(item.attributes['encoding']),
96 append_newline=False,
97 )
98 except FaultUnavailable:
99 io.stderr(_(
100 "{x} skipped {path} (Fault unavailable)"
101 ).format(x=yellow("»"), path=bold(item.name)))
102 exit(1)
92103 else:
93 statedict = item.cdict()
94 if statedict is None:
95 io.stdout("REMOVE")
96 else:
97 if args['attr']:
98 io.stdout(repr(statedict[args['attr']]))
104 if args['show_sdict']:
105 statedict = item.sdict()
99106 else:
100 io.stdout(statedict_to_json(statedict, pretty=True))
107 statedict = item.cdict()
108 if statedict is None:
109 io.stdout("REMOVE")
110 else:
111 if args['attr']:
112 io.stdout(repr(statedict[args['attr']]))
113 else:
114 io.stdout(statedict_to_json(statedict, pretty=True))
101115 else:
102116 for item in sorted(node.items):
103117 if args['show_repr']:
11 from __future__ import unicode_literals
22
33 from os import environ
4 from sys import exit
45
56 from ..utils import names
6 from ..utils.cmdline import get_group, get_target_nodes
7 from ..utils.cmdline import get_target_nodes
78 from ..utils.table import ROW_SEPARATOR, render_table
8 from ..utils.text import bold, mark_for_translation as _
9 from ..utils.text import bold, mark_for_translation as _, red
910 from ..utils.ui import io, page_lines
1011 from ..group import GROUP_ATTR_DEFAULTS
1112
1213
14 NODE_ATTRS = sorted(list(GROUP_ATTR_DEFAULTS) + ['bundles', 'groups', 'hostname'])
15 NODE_ATTRS_LISTS = ('bundles', 'groups')
16
17
18 def _attribute_table(
19 entities,
20 entity_label,
21 selected_attrs,
22 available_attrs,
23 available_attrs_lists,
24 inline,
25 ):
26 rows = [[entity_label], ROW_SEPARATOR]
27 selected_attrs = [attr.strip() for attr in selected_attrs.split(",")]
28 if selected_attrs == ['all']:
29 selected_attrs = available_attrs
30 for attr in selected_attrs:
31 if attr not in available_attrs:
32 io.stderr(_("{x} unknown attribute: {attr}").format(x=red("!!!"), attr=attr))
33 exit(1)
34 rows[0].append(bold(attr))
35 has_list_attrs = False
36 for entity in entities:
37 attr_values = [[entity.name]]
38 for attr in selected_attrs:
39 if attr in available_attrs_lists:
40 if inline:
41 attr_values.append([",".join(names(getattr(entity, attr)))])
42 else:
43 has_list_attrs = True
44 attr_values.append(list(names(getattr(entity, attr))))
45 else:
46 attr_values.append([str(getattr(entity, attr))])
47 number_of_lines = max([len(value) for value in attr_values])
48 if environ.get("BW_TABLE_STYLE") == 'grep':
49 # repeat entity name for each line
50 attr_values[0] = attr_values[0] * number_of_lines
51 for line in range(number_of_lines):
52 row = []
53 for attr_index in range(len(selected_attrs) + 1):
54 try:
55 row.append(attr_values[attr_index][line])
56 except IndexError:
57 row.append("")
58 rows.append(row)
59 if has_list_attrs:
60 rows.append(ROW_SEPARATOR)
61 if environ.get("BW_TABLE_STYLE") == 'grep':
62 rows = rows[2:]
63 page_lines(render_table(
64 rows[:-1] if has_list_attrs else rows, # remove trailing ROW_SEPARATOR
65 ))
66
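# Illustration (hypothetical node names; exact borders depend on the
# table renderer): `bw nodes node1,node2 hostname,groups` yields a
# table along the lines of
#
#     node    hostname            groups
#     node1   node1.example.com   group1
#                                 group2
#     node2   node2.example.com   group1
#
# With BW_TABLE_STYLE=grep, the entity name is repeated on every line
# (see above), so multi-line list attributes remain greppable.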
67
1368 def bw_nodes(repo, args):
14 if args['filter_group'] is not None:
15 nodes = get_group(repo, args['filter_group']).nodes
16 elif args['target'] is not None:
69 if args['target'] is not None:
1770 nodes = get_target_nodes(repo, args['target'], adhoc_nodes=args['adhoc_nodes'])
1871 else:
1972 nodes = repo.nodes
20
21 rows = [[
22 bold(_("node")),
23 bold(_("attribute")),
24 bold(_("value")),
25 ], ROW_SEPARATOR]
26
27 for node in nodes:
28 if args['show_attrs']:
29 first_attr = True
30 for attr in sorted(list(GROUP_ATTR_DEFAULTS) + ['hostname']):
31 rows.append([
32 bold(node.name) if first_attr else "",
33 bold(attr),
34 str(getattr(node, attr)),
35 ])
36 first_attr = environ.get("BW_TABLE_STYLE") == 'grep'
37
38 if args['inline']:
39 rows.append([
40 bold(node.name) if first_attr else "",
41 bold("groups"),
42 ", ".join(sorted([group.name for group in node.groups])),
43 ])
44 first_attr = environ.get("BW_TABLE_STYLE") == 'grep'
45 else:
46 rows.append([
47 "",
48 ROW_SEPARATOR,
49 ROW_SEPARATOR,
50 ])
51 first_group = True
52 for group in sorted(node.groups):
53 rows.append([
54 bold(node.name) if first_attr else "",
55 bold("groups") if first_group else "",
56 group.name,
57 ])
58 first_group = environ.get("BW_TABLE_STYLE") == 'grep'
59 first_attr = environ.get("BW_TABLE_STYLE") == 'grep'
60 rows.append([
61 "",
62 ROW_SEPARATOR,
63 ROW_SEPARATOR,
64 ])
65
66 if args['inline']:
67 rows.append([
68 bold(node.name) if first_attr else "",
69 bold("bundles"),
70 ", ".join(sorted([bundle.name for bundle in node.bundles])),
71 ])
72 first_attr = environ.get("BW_TABLE_STYLE") == 'grep'
73 else:
74 first_bundle = True
75 for bundle in sorted(node.bundles):
76 rows.append([
77 bold(node.name) if first_attr else "",
78 bold("bundles") if first_bundle else "",
79 bundle.name,
80 ])
81 first_bundle = environ.get("BW_TABLE_STYLE") == 'grep'
82 first_attr = environ.get("BW_TABLE_STYLE") == 'grep'
83 rows.append(ROW_SEPARATOR)
84 continue
85 line = ""
86 if args['show_hostnames']:
87 line += node.hostname
88 else:
89 line += node.name
90 if args['show_bundles']:
91 line += ": " + ", ".join(sorted(names(node.bundles)))
92 elif args['show_groups']:
93 line += ": " + ", ".join(sorted(names(node.groups)))
94 elif args['show_os']:
95 line += ": " + node.os
96 io.stdout(line)
97
98 if len(rows) > 2:
99 page_lines(render_table(
100 rows[:-1], # remove trailing ROW_SEPARATOR
101 ))
73 if not args['attrs']:
74 for node in nodes:
75 io.stdout(node.name)
76 else:
77 _attribute_table(
78 nodes,
79 bold(_("node")),
80 args['attrs'],
81 NODE_ATTRS,
82 NODE_ATTRS_LISTS,
83 args['inline'],
84 )
00 # -*- coding: utf-8 -*-
11 from __future__ import unicode_literals
22
3 from argparse import ArgumentParser
3 from argparse import ArgumentParser, SUPPRESS
44 from os import environ, getcwd
55
66 from .. import VERSION_STRING
5353 action='store_true',
5454 default=False,
5555 dest='debug',
56 help=_("print debugging info (implies -v)"),
56 help=_("print debugging info"),
5757 )
5858 parser.add_argument(
5959 "-r",
6262 dest='repo_path',
6363 help=_("Look for repository at this path (defaults to current working directory)"),
6464 metavar=_("DIRECTORY"),
65 type=str,
66 )
67 # hidden option to dump profiling info, which can be inspected with
68 # SnakeViz or similar tools
69 parser.add_argument(
70 "--profile",
71 default=None,
72 dest='profile',
73 help=SUPPRESS,
74 metavar=_("FILE"),
6575 type=str,
6676 )
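# A minimal sketch of reading such a dump with the standard library
# (the file name "bw.prof" is only an example):
#
#     import pstats
#     pstats.Stats("bw.prof").sort_stats("cumulative").print_stats(10)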
6777 parser.add_argument(
119129 help=_("number of items to apply simultaneously on each node "
120130 "(defaults to {})").format(bw_apply_p_items_default),
121131 type=int,
122 )
123 parser_apply.add_argument(
124 "--profiling",
125 action='store_true',
126 default=False,
127 dest='profiling',
128 help=_("print time elapsed for each item"),
129132 )
130133 parser_apply.add_argument(
131134 "-s",
189192 )
190193
191194 # bw groups
192 help_groups = _("Lists groups in this repository (deprecated, use `bw nodes -a`)")
195 help_groups = _("Lists groups in this repository")
193196 parser_groups = subparsers.add_parser("groups", description=help_groups, help=help_groups)
194197 parser_groups.set_defaults(func=bw_groups)
195198 parser_groups.add_argument(
196 "-n",
197 "--nodes",
198 action='store_true',
199 dest='show_nodes',
200 help=_("show nodes for each group"),
199 "-i",
200 "--inline",
201 action='store_true',
202 dest='inline',
203 help=_("keep lists on a single line (for grep)"),
204 )
205 parser_groups.add_argument(
206 'groups',
207 default=None,
208 metavar=_("GROUP1,GROUP2..."),
209 nargs='?',
210 type=str,
211 help=_("show the given groups and their subgroups"),
212 )
213 parser_groups.add_argument(
214 'attrs',
215 default=None,
216 metavar=_("ATTR1,ATTR2..."),
217 nargs='?',
218 type=str,
219 help=_("show table with the given attributes for each group "
220 "(e.g. 'all', 'members', 'os', ...)"),
201221 )
202222
203223 # bw hash
270290 parser_items.add_argument(
271291 "-f",
272292 "--file-preview",
293 action='store_true',
273294 dest='file_preview',
274 help=_("print preview of given file"),
275 metavar=_("FILE"),
276 required=False,
277 type=str, # TODO 3.0 convert to bool and use ITEM arg
295 help=_("print preview of given file ITEM"),
278296 )
279297 parser_items.add_argument(
280298 "-w",
432450 metavar=_("KEY"),
433451 nargs='*',
434452 type=str,
435 help=_("print only partial metadata from the given space-separated key path"),
453 help=_("print only partial metadata from the given space-separated key path (e.g. `bw metadata mynode users jdoe` to show `mynode.metadata['users']['jdoe']`)"),
436454 )
437455 parser_metadata.add_argument(
438456 "-t",
447465 )
448466
449467 # bw nodes
450 help_nodes = _("List all nodes in this repository")
468 help_nodes = _("List nodes in this repository")
451469 parser_nodes = subparsers.add_parser("nodes", description=help_nodes, help=help_nodes)
452470 parser_nodes.set_defaults(func=bw_nodes)
453471 parser_nodes.add_argument(
454 "-a",
455 "--attrs",
456 action='store_true',
457 dest='show_attrs',
458 help=_("show attributes for each node"),
459 )
460 parser_nodes.add_argument(
461 "--bundles",
462 action='store_true',
463 dest='show_bundles',
464 help=_("show bundles for each node (deprecated, use --attrs)"),
465 )
466 parser_nodes.add_argument(
467 "--hostnames",
468 action='store_true',
469 dest='show_hostnames',
470 help=_("show hostnames instead of node names (deprecated, use --attrs)"),
471 )
472 parser_nodes.add_argument(
473 "-g",
474 "--filter-group",
475 default=None,
476 dest='filter_group',
477 metavar=_("GROUP"),
478 required=False,
479 type=str,
480 help=_("show only nodes in the given group (deprecated)"),
481 )
482 parser_nodes.add_argument(
483 "--groups",
484 action='store_true',
485 dest='show_groups',
486 help=_("show group membership for each node (deprecated, use --attrs)"),
487 )
488 parser_nodes.add_argument(
489472 "-i",
490473 "--inline",
491474 action='store_true',
492475 dest='inline',
493 help=_("show multiple values on the same line (use with --attrs)"),
494 )
495 parser_nodes.add_argument(
496 "--os",
497 action='store_true',
498 dest='show_os',
499 help=_("show OS for each node (deprecated, use --attrs)"),
476 help=_("keep lists on a single line (for grep)"),
500477 )
501478 parser_nodes.add_argument(
502479 'target',
505482 nargs='?',
506483 type=str,
507484 help=_("filter according to nodes, groups and/or bundle selectors"),
485 )
486 parser_nodes.add_argument(
487 'attrs',
488 default=None,
489 metavar=_("ATTR1,ATTR2..."),
490 nargs='?',
491 type=str,
492 help=_("show table with the given attributes for each node "
493 "(e.g. 'all', 'groups', 'bundles', 'hostname', 'os', ...)"),
508494 )
509495
510496 # bw plot
719705 parser_run.add_argument(
720706 'command',
721707 metavar=_("COMMAND"),
722 nargs='+',
723708 type=str,
724709 help=_("command to run"),
725710 )
726711 parser_run.add_argument(
727 "-f",
728 "--may-fail",
729 action='store_true',
730 dest='may_fail',
731 help=_("ignore non-zero exit codes"),
712 "--stderr-table",
713 action='store_true',
714 dest='stderr_table',
715 help=_("include command stderr in stats table"),
732716 )
733717 parser_run.add_argument(
734 "--force",
735 action='store_true',
736 dest='ignore_locks',
737 help=_("ignore soft locks on target nodes"),
718 "--stdout-table",
719 action='store_true',
720 dest='stdout_table',
721 help=_("include command stdout in stats table"),
738722 )
739723 bw_run_p_default = int(environ.get("BW_NODE_WORKERS", "1"))
740724 parser_run.add_argument(
758742 metavar=_("PATH"),
759743 type=str,
760744 )
745 parser_run.add_argument(
746 "-S",
747 "--no-summary",
748 action='store_false',
749 dest='summary',
750 help=_("don't show stats summary"),
751 )
761752
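# Hypothetical invocations using the options defined above (note that
# COMMAND is now a single argument and should be quoted):
#
#     bw run mygroup "systemctl restart myapp"    # with summary table
#     bw run -S mynode "uptime"                   # suppress the summary
#     bw run --stdout-table mygroup "uname -r"    # include stdout column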
762753 # bw stats
763754 help_stats = _("Show some statistics about your repository")
766757
767758 # bw test
768759 help_test = _("Test your repository for consistency "
769 "(you can use this with a CI tool like Jenkins)")
760 "(you can use this with a CI tool like Jenkins). "
761 "If *any* options other than -i are given, *only* the "
762 "tests selected by those options will be run. Otherwise, a "
763 "default selection of tests will be run (that selection may "
764 "change in future releases). Currently, the default is -IJM "
765 "if specific nodes are given and -HIJMS if testing the "
766 "entire repo.")
770767 parser_test = subparsers.add_parser("test", description=help_test, help=help_test)
771768 parser_test.set_defaults(func=bw_test)
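# Hypothetical invocations following the selection rules in help_test:
#
#     bw test                  # entire repo, default selection (-HIJMS)
#     bw test mynode,mygroup   # given nodes, default selection (-IJM)
#     bw test -o -e            # only orphaned-bundle and empty-group checks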
772769 parser_test.add_argument(
775772 metavar=_("NODE1,NODE2,GROUP1,bundle:BUNDLE1..."),
776773 nargs='?',
777774 type=str,
778 help=_("target nodes, groups and/or bundle selectors"),
775 help=_("target nodes, groups and/or bundle selectors (defaults to all)"),
779776 )
780777 parser_test.add_argument(
781778 "-c",
782 "--plugin-conflict-error",
783 action='store_true',
784 dest='plugin_conflict_error',
779 "--plugin-conflicts",
780 action='store_true',
781 dest='plugin_conflicts',
785782 help=_("check for local modifications to files installed by plugins"),
786783 )
787784 parser_test.add_argument(
795792 type=int,
796793 )
797794 parser_test.add_argument(
795 "-e",
796 "--empty-groups",
797 action='store_true',
798 dest='empty_groups',
799 help=_("check for empty groups"),
800 )
801 parser_test.add_argument(
802 "-H",
803 "--hooks-repo",
804 action='store_true',
805 dest='hooks_repo',
806 help=_("run repo-level test hooks"),
807 )
808 parser_test.add_argument(
798809 "-i",
799810 "--ignore-missing-faults",
800811 action='store_true',
801812 dest='ignore_missing_faults',
802813 help=_("do not fail when encountering a missing Fault"),
814 )
815 parser_test.add_argument(
816 "-I",
817 "--items",
818 action='store_true',
819 dest='items',
820 help=_("run item-level tests (like rendering templates)"),
821 )
822 parser_test.add_argument(
823 "-J",
824 "--hooks-node",
825 action='store_true',
826 dest='hooks_node',
827 help=_("run node-level test hooks"),
803828 )
804829 parser_test.add_argument(
805830 "-m",
811836 metavar="N",
812837 type=int,
813838 )
814 bw_test_p_default = int(environ.get("BW_NODE_WORKERS", "1"))
815 parser_test.add_argument(
816 "-p",
817 "--parallel-nodes",
818 default=bw_test_p_default,
819 dest='node_workers',
820 help=_("number of nodes to test simultaneously "
821 "(defaults to {})").format(bw_test_p_default),
822 type=int,
823 )
824 bw_test_p_items_default = int(environ.get("BW_ITEM_WORKERS", "4"))
825 parser_test.add_argument(
826 "-P",
827 "--parallel-items",
828 default=bw_test_p_items_default,
829 dest='item_workers',
830 help=_("number of items to test simultaneously for each node "
831 "(defaults to {})").format(bw_test_p_items_default),
832 type=int,
839 parser_test.add_argument(
840 "-M",
841 "--metadata-collisions",
842 action='store_true',
843 dest='metadata_collisions',
844 help=_("check for conflicting metadata keys in group metadata"),
845 )
846 parser_test.add_argument(
847 "-o",
848 "--orphaned-bundles",
849 action='store_true',
850 dest='orphaned_bundles',
851 help=_("check for bundles not assigned to any node"),
852 )
853 parser_test.add_argument(
854 "-S",
855 "--subgroup-loops",
856 action='store_true',
857 dest='subgroup_loops',
858 help=_("check for loops in subgroup hierarchies"),
833859 )
834860
835861 # bw verify
2828 node = get_node(repo, args['node'], adhoc_nodes=args['adhoc_nodes'])
2929 for line in graph_for_items(
3030 node.name,
31 prepare_dependencies(node.items),
31 prepare_dependencies(node.items, node.os, node.os_version),
3232 cluster=args['cluster'],
3333 concurrency=args['depends_concurrency'],
3434 static=args['depends_static'],
11 from __future__ import unicode_literals
22
33 from datetime import datetime
4 try:
5 from itertools import zip_longest
6 except ImportError: # Python 2
7 from itertools import izip_longest as zip_longest
8 from sys import exit
49
510 from ..concurrency import WorkerPool
6 from ..exceptions import NodeLockedException
711 from ..utils import SkipList
812 from ..utils.cmdline import get_target_nodes
13 from ..utils.table import ROW_SEPARATOR, render_table
914 from ..utils.text import mark_for_translation as _
10 from ..utils.text import bold, error_summary, green, red, yellow
15 from ..utils.text import blue, bold, error_summary, green, red, yellow
1116 from ..utils.time import format_duration
1217 from ..utils.ui import io
1318
1419
15 def run_on_node(node, command, may_fail, ignore_locks, log_output, skip_list):
20 def run_on_node(node, command, skip_list):
1621 if node.dummy:
1722 io.stdout(_("{x} {node} is a dummy node").format(node=bold(node.name), x=yellow("»")))
1823 return None
2732 command,
2833 )
2934
30 start = datetime.now()
3135 result = node.run(
3236 command,
33 may_fail=may_fail,
34 log_output=log_output,
37 may_fail=True,
38 log_output=True,
3539 )
36 end = datetime.now()
37 duration = end - start
3840
3941 node.repo.hooks.node_run_end(
4042 node.repo,
4143 node,
4244 command,
43 duration=duration,
45 duration=result.duration,
4446 return_code=result.return_code,
4547 stdout=result.stdout,
4648 stderr=result.stderr,
4749 )
50 return result
4851
49 if result.return_code == 0:
50 io.stdout("{x} {node} {msg}".format(
51 msg=_("completed successfully after {time}").format(
52 time=format_duration(duration, msec=True),
53 ),
54 node=bold(node.name),
55 x=green("✓"),
56 ))
57 else:
58 io.stderr("{x} {node} {msg}".format(
59 msg=_("failed after {time}s (return code {rcode})").format(
60 rcode=result.return_code,
61 time=format_duration(duration, msec=True),
62 ),
63 node=bold(node.name),
64 x=red("✘"),
65 ))
66 return result.return_code
52
53 def stats_summary(results, include_stdout, include_stderr):
54 rows = [[
55 bold(_("node")),
56 bold(_("return code")),
57 bold(_("time")),
58 ], ROW_SEPARATOR]
59 if include_stdout:
60 rows[0].append(bold(_("stdout")))
61 if include_stderr:
62 rows[0].append(bold(_("stderr")))
63
64 for node_name, result in sorted(results.items()):
65 row = [node_name]
66 if result.return_code == 0:
67 row.append(green(str(result.return_code)))
68 else:
69 row.append(red(str(result.return_code)))
70 row.append(format_duration(result.duration, msec=True))
71 rows.append(row)
72 if include_stdout or include_stderr:
73 stdout = result.stdout.decode('utf-8', errors='replace').strip().split("\n")
74 stderr = result.stderr.decode('utf-8', errors='replace').strip().split("\n")
75 if include_stdout:
76 row.append(stdout[0])
77 if include_stderr:
78 row.append(stderr[0])
79 for stdout_line, stderr_line in list(zip_longest(stdout, stderr, fillvalue=""))[1:]:
80 continuation_row = ["", "", ""]
81 if include_stdout:
82 continuation_row.append(stdout_line)
83 if include_stderr:
84 continuation_row.append(stderr_line)
85 rows.append(continuation_row)
86 rows.append(ROW_SEPARATOR)
87
88 if include_stdout or include_stderr:
89 # remove last ROW_SEPARATOR
90 rows = rows[:-1]
91 for line in render_table(rows, alignments={1: 'right', 2: 'right'}):
92 io.stdout("{x} {line}".format(x=blue("i"), line=line))
6793
6894
6995 def bw_run(repo, args):
79105 args['command'],
80106 )
81107 start_time = datetime.now()
82
108 results = {}
83109 skip_list = SkipList(args['resume_file'])
84110
85111 def tasks_available():
92118 'task_id': node.name,
93119 'args': (
94120 node,
95 " ".join(args['command']),
96 args['may_fail'],
97 args['ignore_locks'],
98 True,
121 args['command'],
99122 skip_list,
100123 ),
101124 }
102125
103126 def handle_result(task_id, return_value, duration):
104127 io.progress_advance()
128 if return_value is not None: # dummy nodes return no result
129 results[task_id] = return_value
130 if return_value.return_code == 0:
131 skip_list.add(task_id)
107131
108132 def handle_exception(task_id, exception, traceback):
109133 io.progress_advance()
110 if isinstance(exception, NodeLockedException):
111 msg = _(
112 "{node_bold} locked by {user} "
113 "(see `bw lock show {node}` for details)"
114 ).format(
115 node_bold=bold(task_id),
116 node=task_id,
117 user=exception.args[0]['user'],
118 )
119 else:
120 msg = "{} {}".format(bold(task_id), exception)
121 io.stderr(traceback)
122 io.stderr(repr(exception))
134 msg = "{} {}".format(bold(task_id), exception)
135 io.stderr(traceback)
136 io.stderr(repr(exception))
123137 io.stderr("{} {}".format(red("!"), msg))
124138 errors.append(msg)
125139
134148 )
135149 worker_pool.run()
136150
151 if args['summary']:
152 stats_summary(results, args['stdout_table'], args['stderr_table'])
137153 error_summary(errors)
138154
139155 repo.hooks.run_end(
143159 args['command'],
144160 duration=datetime.now() - start_time,
145161 )
162
163 exit(1 if errors else 0)
33 from copy import copy
44 from sys import exit
55
6 from ..exceptions import ItemDependencyLoop
7 from ..concurrency import WorkerPool
6 from ..deps import DummyItem
7 from ..exceptions import FaultUnavailable, ItemDependencyLoop
8 from ..itemqueue import ItemTestQueue
9 from ..metadata import check_for_unsolvable_metadata_key_conflicts
810 from ..plugins import PluginManager
911 from ..repo import Repository
1012 from ..utils.cmdline import count_items, get_target_nodes
1113 from ..utils.plot import explain_item_dependency_loop
1214 from ..utils.text import bold, green, mark_for_translation as _, red, yellow
13 from ..utils.ui import io
14
15
16 def bw_test(repo, args):
17 if args['target']:
18 pending_nodes = get_target_nodes(repo, args['target'], adhoc_nodes=args['adhoc_nodes'])
19 else:
20 pending_nodes = copy(list(repo.nodes))
21
22 # Print warnings for unused bundles. Only do this if we are to
23 # test the entire repo, though.
24 # TODO 3.0 Orphaned bundles should be errors (maybe optionally)
25 orphaned_bundles = set(repo.bundle_names)
26 for node in repo.nodes:
27 for bundle in node.bundles:
28 orphaned_bundles.discard(bundle.name)
29 for bundle in sorted(orphaned_bundles):
30 io.stdout(_("{x} {bundle} is an unused bundle").format(
31 bundle=bold(bundle),
32 x=yellow("!"),
33 ))
34
35 io.progress_set_total(count_items(pending_nodes))
36
37 def tasks_available():
38 return bool(pending_nodes)
39
40 def next_task():
41 node = pending_nodes.pop()
42 return {
43 'target': node.test,
44 'task_id': node.name,
45 'kwargs': {
46 'ignore_missing_faults': args['ignore_missing_faults'],
47 'workers': args['item_workers'],
48 },
49 }
50
51 def handle_exception(task_id, exception, traceback):
52 if isinstance(exception, ItemDependencyLoop):
53 for line in explain_item_dependency_loop(exception, task_id):
15 from ..utils.ui import io, QUIT_EVENT
16
17
18 def test_items(nodes, ignore_missing_faults):
19 with io.job(_(" counting items...")):
20 io.progress_set_total(count_items(nodes))
21 for node in nodes:
22 if QUIT_EVENT.is_set():
23 break
24 if not node.items:
25 io.stdout(_("{x} {node} has no items").format(node=bold(node.name), x=yellow("!")))
26 continue
27 item_queue = ItemTestQueue(node.items, node.os, node.os_version)
28 while not QUIT_EVENT.is_set():
29 try:
30 item = item_queue.pop()
31 except IndexError: # no items left
32 break
33 if isinstance(item, DummyItem):
34 continue
35 try:
36 item._test()
37 except FaultUnavailable:
38 if ignore_missing_faults:
39 io.progress_advance()
40 io.stderr(_("{x} {node} {bundle} {item} ({msg})").format(
41 bundle=bold(item.bundle.name),
42 item=item.id,
43 msg=yellow(_("Fault unavailable")),
44 node=bold(node.name),
45 x=yellow("»"),
46 ))
47 else:
48 io.stderr(_("{x} {node} {bundle} {item} missing Fault:").format(
49 bundle=bold(item.bundle.name),
50 item=item.id,
51 node=bold(node.name),
52 x=red("!"),
53 ))
54 raise
55 except Exception:
56 io.stderr(_("{x} {node} {bundle} {item}").format(
57 bundle=bold(item.bundle.name),
58 item=item.id,
59 node=bold(node.name),
60 x=red("!"),
61 ))
62 raise
63 else:
64 if item.id.count(":") < 2:
65 # don't count canned actions
66 io.progress_advance()
67 io.stdout("{x} {node} {bundle} {item}".format(
68 bundle=bold(item.bundle.name),
69 item=item.id,
70 node=bold(node.name),
71 x=green("✓"),
72 ))
73 if item_queue.items_with_deps and not QUIT_EVENT.is_set():
74 exception = ItemDependencyLoop(item_queue.items_with_deps)
75 for line in explain_item_dependency_loop(exception, node.name):
5476 io.stderr(line)
55 raise exception
56
57 worker_pool = WorkerPool(
58 tasks_available,
59 next_task,
60 handle_exception=handle_exception,
61 pool_id="test",
62 workers=args['node_workers'],
63 )
64 worker_pool.run()
65
66 io.progress_set_total(0)
67
77 exit(1)
78
79
80 def test_subgroup_loops(repo):
6881 checked_groups = []
6982 for group in repo.groups:
7083 if group in checked_groups:
7689 group=bold(group.name),
7790 ))
7891
79 # check for plugin inconsistencies
80 if args['plugin_conflict_error']:
81 pm = PluginManager(repo.path)
82 for plugin, version in pm.list():
83 local_changes = pm.local_modifications(plugin)
84 if local_changes:
85 io.stderr(_("{x} Plugin '{plugin}' has local modifications:").format(
86 plugin=plugin,
87 x=red("✘"),
92
93 def test_metadata_collisions(node):
94 with io.job(_(" {node} checking for metadata collisions...").format(node=node.name)):
95 check_for_unsolvable_metadata_key_conflicts(node)
96 io.stdout(_("{x} {node} has no metadata collisions").format(
97 x=green("✓"),
98 node=bold(node.name),
99 ))
100
101
102 def test_orphaned_bundles(repo):
103 orphaned_bundles = set(repo.bundle_names)
104 for node in repo.nodes:
105 for bundle in node.bundles:
106 orphaned_bundles.discard(bundle.name)
107 for bundle in sorted(orphaned_bundles):
108 io.stderr(_("{x} {bundle} is an unused bundle").format(
109 bundle=bold(bundle),
110 x=red("✘"),
111 ))
112 if orphaned_bundles:
113 exit(1)
114
115
116 def test_empty_groups(repo):
117 empty_groups = set()
118 for group in repo.groups:
119 if not group.nodes:
120 empty_groups.add(group)
121 for group in sorted(empty_groups):
122 io.stderr(_("{x} {group} is an empty group").format(
123 group=bold(group),
124 x=red("✘"),
125 ))
126 if empty_groups:
127 exit(1)
128
129
130 def test_plugin_conflicts(repo):
131 pm = PluginManager(repo.path)
132 for plugin, version in pm.list():
133 local_changes = pm.local_modifications(plugin)
134 if local_changes:
135 io.stderr(_("{x} Plugin '{plugin}' has local modifications:").format(
136 plugin=plugin,
137 x=red("✘"),
138 ))
139 for path, actual_checksum, should_checksum in local_changes:
140 io.stderr(_("\t{path} ({actual_checksum}) should be {should_checksum}").format(
141 actual_checksum=actual_checksum,
142 path=path,
143 should_checksum=should_checksum,
88144 ))
89 for path, actual_checksum, should_checksum in local_changes:
90 io.stderr(_("\t{path} ({actual_checksum}) should be {should_checksum}").format(
91 actual_checksum=actual_checksum,
92 path=path,
93 should_checksum=should_checksum,
94 ))
145 exit(1)
146 else:
147 io.stdout(_("{x} Plugin '{plugin}' has no local modifications.").format(
148 plugin=plugin,
149 x=green("✓"),
150 ))
151
152
153 def test_determinism_config(repo, nodes, iterations):
154 """
155 Generate configuration a couple of times for every node and see if
156 anything changes between iterations
157 """
158 hashes = {}
159 io.progress_set_total(len(nodes) * iterations)
160 for i in range(iterations):
161 if i == 0:
162 # optimization: for the first iteration, just use the repo
163 # we already have
164 iteration_repo = repo
165 else:
166 iteration_repo = Repository(repo.path)
167 iteration_nodes = [iteration_repo.get_node(node.name) for node in nodes]
168 for node in iteration_nodes:
169 with io.job(_(" {node} generating configuration ({i}/{n})...").format(
170 i=i + 1,
171 n=iterations,
172 node=node.name,
173 )):
174 result = node.hash()
175 hashes.setdefault(node.name, result)
176 if hashes[node.name] != result:
177 io.stderr(_(
178 "{x} Configuration for node {node} changed when generated repeatedly "
179 "(use `bw hash -d {node}` to debug)"
180 ).format(node=node.name, x=red("✘")))
95181 exit(1)
96 else:
97 io.stdout(_("{x} Plugin '{plugin}' has no local modifications.").format(
98 plugin=plugin,
99 x=green("✓"),
100 ))
101
102 # generate metadata a couple of times for every node and see if
103 # anything changes between iterations
104 if args['determinism_metadata'] > 1:
105 hashes = {}
106 for i in range(args['determinism_metadata']):
107 repo = Repository(repo.path)
108 if args['target']:
109 nodes = get_target_nodes(repo, args['target'], adhoc_nodes=args['adhoc_nodes'])
110 else:
111 nodes = repo.nodes
112 for node in nodes:
113 with io.job(_(" {node} generating metadata ({i}/{n})... ").format(
114 i=i + 1,
115 n=args['determinism_metadata'],
116 node=node.name,
117 )):
118 result = node.metadata_hash()
119 hashes.setdefault(node.name, result)
120 if hashes[node.name] != result:
121 io.stderr(_(
122 "{x} Metadata for node {node} changed when generated repeatedly "
123 "(use `bw hash -d {node}` to debug)"
124 ).format(node=node.name, x=red("✘")))
125 exit(1)
126 io.stdout(_("{x} Metadata remained the same after being generated {n} times").format(
127 n=args['determinism_metadata'],
128 x=green("✓"),
129 ))
130
131 # generate configuration a couple of times for every node and see if
132 # anything changes between iterations
133 if args['determinism_config'] > 1:
134 hashes = {}
135 for i in range(args['determinism_config']):
136 repo = Repository(repo.path)
137 if args['target']:
138 nodes = get_target_nodes(repo, args['target'], adhoc_nodes=args['adhoc_nodes'])
139 else:
140 nodes = repo.nodes
141 for node in nodes:
142 with io.job(_(" {node} generating configuration ({i}/{n})...").format(
143 i=i + 1,
144 n=args['determinism_config'],
145 node=node.name,
146 )):
147 result = node.hash()
148 hashes.setdefault(node.name, result)
149 if hashes[node.name] != result:
150 io.stderr(_(
151 "{x} Configuration for node {node} changed when generated repeatedly "
152 "(use `bw hash -d {node}` to debug)"
153 ).format(node=node.name, x=red("✘")))
154 exit(1)
155 io.stdout(_("{x} Configuration remained the same after being generated {n} times").format(
156 n=args['determinism_config'],
157 x=green("✓"),
158 ))
159
160 if not args['target']:
182 io.progress_advance()
183 io.stdout(_("{x} Configuration remained the same after being generated {n} times").format(
184 n=iterations,
185 x=green("✓"),
186 ))
187
188
189 def test_determinism_metadata(repo, nodes, iterations):
190 """
191 Generate metadata a couple of times for every node and see if
192 anything changes between iterations
193 """
194 hashes = {}
195 io.progress_set_total(len(nodes) * iterations)
196 for i in range(iterations):
197 if i == 0:
198 # optimization: for the first iteration, just use the repo
199 # we already have
200 iteration_repo = repo
201 else:
202 iteration_repo = Repository(repo.path)
203 iteration_nodes = [iteration_repo.get_node(node.name) for node in nodes]
204 for node in iteration_nodes:
205 with io.job(_(" {node} generating metadata ({i}/{n})... ").format(
206 i=i + 1,
207 n=iterations,
208 node=node.name,
209 )):
210 result = node.metadata_hash()
211 hashes.setdefault(node.name, result)
212 if hashes[node.name] != result:
213 io.stderr(_(
214 "{x} Metadata for node {node} changed when generated repeatedly "
215 "(use `bw hash -d {node}` to debug)"
216 ).format(node=node.name, x=red("✘")))
217 exit(1)
218 io.progress_advance()
219 io.stdout(_("{x} Metadata remained the same after being generated {n} times").format(
220 n=iterations,
221 x=green("✓"),
222 ))
223
224
225 def bw_test(repo, args):
226 options_selected = (
227 args['determinism_config'] > 1 or
228 args['determinism_metadata'] > 1 or
229 args['hooks_node'] or
230 args['hooks_repo'] or
231 args['items'] or
232 args['metadata_collisions'] or
233 args['orphaned_bundles'] or
234 args['empty_groups'] or
235 args['plugin_conflicts'] or
236 args['subgroup_loops']
237 )
238 if args['target']:
239 nodes = get_target_nodes(repo, args['target'], adhoc_nodes=args['adhoc_nodes'])
240 if not options_selected:
241 args['hooks_node'] = True
242 args['items'] = True
243 args['metadata_collisions'] = True
244 else:
245 nodes = copy(list(repo.nodes))
246 if not options_selected:
247 args['hooks_node'] = True
248 args['hooks_repo'] = True
249 args['items'] = True
250 args['metadata_collisions'] = True
251 args['subgroup_loops'] = True
252
253 if args['plugin_conflicts'] and not QUIT_EVENT.is_set():
254 test_plugin_conflicts(repo)
255
256 if args['subgroup_loops'] and not QUIT_EVENT.is_set():
257 test_subgroup_loops(repo)
258
259 if args['empty_groups'] and not QUIT_EVENT.is_set():
260 test_empty_groups(repo)
261
262 if args['orphaned_bundles'] and not QUIT_EVENT.is_set():
263 test_orphaned_bundles(repo)
264
265 if args['metadata_collisions'] and not QUIT_EVENT.is_set():
266 io.progress_set_total(len(nodes))
267 for node in nodes:
268 test_metadata_collisions(node)
269 io.progress_advance()
270
271 if args['items']:
272 test_items(nodes, args['ignore_missing_faults'])
273
274 if args['determinism_metadata'] > 1 and not QUIT_EVENT.is_set():
275 test_determinism_metadata(repo, nodes, args['determinism_metadata'])
276
277 if args['determinism_config'] > 1 and not QUIT_EVENT.is_set():
278 test_determinism_config(repo, nodes, args['determinism_config'])
279
280 if args['hooks_node'] and not QUIT_EVENT.is_set():
281 io.progress_set_total(len(nodes))
282 for node in nodes:
283 repo.hooks.test_node(repo, node)
284 io.progress_advance()
285
286 if args['hooks_repo'] and not QUIT_EVENT.is_set():
161287 repo.hooks.test(repo)
247247 return items
248248
249249
250 def _inject_concurrency_blockers(items):
250 def _inject_concurrency_blockers(items, node_os, node_os_version):
251251 """
252252 Looks for items with BLOCK_CONCURRENT set and inserts daisy-chain
253253 dependencies to force a sequential apply.
258258 item._concurrency_deps = [] # used for DOT (graphviz) output only
259259 if (
260260 not isinstance(item, DummyItem) and
261 item.BLOCK_CONCURRENT
261 item.block_concurrent(node_os, node_os_version)
262262 ):
263263 item_types.add(item.__class__)
264264
265 # Now that we have collected all types with BLOCK_CONCURRENT,
265 # Now that we have collected all relevant types,
266266 # we must group them together when they overlap. E.g.:
267267 #
268 # Type1.BLOCK_CONCURRENT = ["type1", "type2"]
269 # Type2.BLOCK_CONCURRENT = ["type2, "type3"]
270 # Type4.BLOCK_CONCURRENT = ["type4"]
268 # Type1.block_concurrent(...) == ["type1", "type2"]
269 # Type2.block_concurrent(...) == ["type2", "type3"]
270 # Type4.block_concurrent(...) == ["type4"]
271271 #
272272 # becomes
273273 #
283283
284284 chain_groups = []
285285 for item_type in item_types:
286 block_concurrent = list(item_type.BLOCK_CONCURRENT) + [item_type.ITEM_TYPE_NAME]
286 block_concurrent = [item_type.ITEM_TYPE_NAME]
287 block_concurrent.extend(item_type.block_concurrent(node_os, node_os_version))
287288 found = False
288289 for blocked_types in chain_groups:
289290 for blocked_type in block_concurrent:
421422 depending_item = items[depending_item_id]
422423 except KeyError:
423424 raise ItemDependencyError(_(
424 "'{item}' in bundle '{bundle}' has a reverse dependency (needed_by)"
425 "'{item}' in bundle '{bundle}' has a reverse dependency (needed_by) "
425426 "on '{dep}', which doesn't exist"
426427 ).format(
427428 item=item.id,
552553 return items
553554
554555
555 def prepare_dependencies(items):
556 def prepare_dependencies(items, node_os, node_os_version):
556557 """
557558 Performs all dependency preprocessing on a list of items.
558559 """
573574 items = _inject_trigger_dependencies(items)
574575 items = _inject_preceded_by_dependencies(items)
575576 items = _flatten_dependencies(items)
576 items = _inject_concurrency_blockers(items)
577 items = _inject_concurrency_blockers(items, node_os, node_os_version)
577578
578579 for item in items.values():
579580 if not isinstance(item, DummyItem):
1414
1515
1616 class BaseQueue(object):
17 def __init__(self, items):
18 self.items_with_deps = prepare_dependencies(items)
17 def __init__(self, items, node_os, node_os_version):
18 self.items_with_deps = prepare_dependencies(items, node_os, node_os_version)
1919 self.items_without_deps = []
2020 self._split()
2121 self.pending_items = []
8585 )
8686 self._split()
8787
88 def pop(self, interactive=False):
88 def pop(self):
8989 """
9090 Gets the next item available for processing and moves it into
9191 self.pending_items. Will raise IndexError if no item is
92 available. Otherwise, it will return the item and a list of
93 items that have been skipped while looking for the item.
92 available.
9493 """
95 skipped_items = []
96
9794 if not self.items_without_deps:
9895 raise IndexError
9996
100 while self.items_without_deps:
101 item = self.items_without_deps.pop()
102
103 if item._precedes_items:
104 if item._precedes_incorrect_item(interactive=interactive):
105 item.has_been_triggered = True
106 else:
107 # we do not have to cascade here at all because
108 # all chained preceding items will be skipped by
109 # this same mechanism
110 io.debug(
111 _("skipping {node}:{bundle}:{item} because its precede trigger "
112 "did not fire").format(
113 bundle=item.bundle.name,
114 item=item.id,
115 node=item.node.name,
116 ),
117 )
118 self.items_with_deps = remove_dep_from_items(self.items_with_deps, item.id)
119 self._split()
120 skipped_items.append(item)
121 item = None
122 continue
123 break
124 assert item is not None
97 item = self.items_without_deps.pop()
12598 self.pending_items.append(item)
126 return (item, skipped_items)
99 return item
127100
128101 def _fire_triggers_for_item(self, item):
129102 for triggered_item_id in item.triggers:
8686 STATUS_SKIPPED = 4
8787 STATUS_ACTION_SUCCEEDED = 5
8888 WHEN_CREATING_ATTRIBUTES = {}
89
90 @classmethod
91 def block_concurrent(cls, node_os, node_os_version):
92 """
93 Return a list of item types that cannot be applied in parallel
94 with this item type.
95 """
96 return []
8997
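# A sketch of a typical override (compare the pkg and group items later
# in this diff): an item type that must never run in parallel with
# items of its own type would return
#
#     @classmethod
#     def block_concurrent(cls, node_os, node_os_version):
#         return [cls.ITEM_TYPE_NAME]
#
# while the default above (an empty list) allows full concurrency.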
9098 def __init__(
9199 self,
241249
242250 @cached_property
243251 def cached_unless_result(self):
244 if self.unless and not self.cached_status.correct:
252 """
253 Returns True if 'unless' wants to skip this item.
254 """
255 if self.unless and (self.ITEM_TYPE_NAME == 'action' or not self.cached_status.correct):
245256 unless_result = self.node.run(self.unless, may_fail=True)
246257 return unless_result.return_code == 0
247258 else:
248259 return False
249260
250 def _precedes_incorrect_item(self, interactive=False):
251 """
252 Returns True if this item precedes another and the triggering
253 item is in need of fixing.
254 """
255 for item in self._precedes_items:
256 if item._precedes_incorrect_item():
257 return True
261 def _triggers_preceding_items(self, interactive=False):
262 """
263 Preceding items will execute this to figure out if they're
264 triggered.
265 """
258266 if self.cached_unless_result:
259 # triggering item failed unless, so there is nothing to do
267 # 'unless' says we don't need to run
260268 return False
261269 if self.ITEM_TYPE_NAME == 'action':
270 # so we have an action where 'unless' says it must be run
271 # but the 'interactive' attribute might still override that
262272 if self.attributes['interactive'] != interactive or \
263273 self.attributes['interactive'] is None:
274 return True
275 else:
264276 return False
265 else:
266 return True
267277 return not self.cached_status.correct
268278
269279 def _prepare_deps(self, items):
314324
315325 @classmethod
316326 def _validate_attribute_names(cls, bundle, item_id, attributes):
327 if not isinstance(attributes, dict):
328 raise BundleError(_(
329 "invalid item '{item}' in bundle '{bundle}': not a dict"
330 ).format(
331 item=item_id,
332 bundle=bundle.name,
333 ))
317334 invalid_attributes = set(attributes.keys()).difference(
318335 set(cls.ITEM_ATTRIBUTES.keys()).union(
319336 set(BUILTIN_ITEM_ATTRIBUTES.keys())
394411 if self._skip_with_soft_locks(my_soft_locks, other_peoples_soft_locks):
395412 status_code = self.STATUS_SKIPPED
396413 keys_to_fix = [_("soft locked")]
414
415 for item in self._precedes_items:
416 if item._triggers_preceding_items(interactive=interactive):
417 io.debug(_(
418 "preceding item {item} on {node} has been triggered by {other_item}"
419 ).format(item=self.id, node=self.node.name, other_item=item.id))
420 self.has_been_triggered = True
421 break
422 else:
423 io.debug(_(
424 "preceding item {item} on {node} has NOT been triggered by {other_item}"
425 ).format(item=self.id, node=self.node.name, other_item=item.id))
397426
398427 if self.triggered and not self.has_been_triggered and status_code is None:
399428 io.debug(_(
448477 status_code = self.STATUS_OK
449478
450479 if status_code is None:
451 keys_to_fix = self.display_keys(
452 copy(self.cached_cdict),
453 copy(status_before.sdict),
454 status_before.keys_to_fix[:],
455 )
480 keys_to_fix = status_before.keys_to_fix
456481 if not interactive:
457482 with io.job(_(" {node} {bundle} {item} fixing...").format(
458483 bundle=self.bundle.name,
466491 elif status_before.must_be_deleted:
467492 question_text = _("Found on node. Will be removed.")
468493 else:
469 cdict, sdict = self.display_dicts(
494 cdict, sdict, display_keys_to_fix = self.display_dicts(
470495 copy(self.cached_cdict),
471496 copy(status_before.sdict),
472 keys_to_fix,
497 copy(keys_to_fix),
473498 )
474 question_text = self.ask(cdict, sdict, keys_to_fix)
499 question_text = self.ask(cdict, sdict, display_keys_to_fix)
475500 if self.comment:
476501 question_text += format_comment(self.comment)
477502 question = wrap_question(
514539 elif status_before.must_be_deleted:
515540 changes = False
516541 elif status_code == self.STATUS_FAILED:
517 changes = self.display_keys(
542 changes = self.display_dicts(
518543 self.cached_cdict.copy(),
519544 status_after.sdict.copy(),
520545 status_after.keys_to_fix[:],
521 )
546 )[2]
522547 else:
523548 changes = keys_to_fix
524549
631656 def display_dicts(self, cdict, sdict, keys):
632657 """
633658 Given cdict and sdict as implemented above, modify them to
634 better suit interactive presentation. The keys parameter is the
635 return value of display_keys (see below) and provided for
636 reference only.
659 better suit interactive presentation. The keys parameter is a
660 list of keys whose values differ between cdict and sdict.
637661
638662 MAY be overridden by subclasses.
639663 """
640 return (cdict, sdict)
641
642 def display_keys(self, cdict, sdict, keys):
643 """
644 Given a list of keys whose values differ between cdict and
645 sdict, modify them to better suit presentation to the user.
646
647 MAY be overridden by subclasses.
648 """
649 return keys
664 return (cdict, sdict, keys)
650665
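# A sketch of a typical override (compare the directory item later in
# this diff, which swaps 'paths_to_purge' for a friendlier label):
#
#     def display_dicts(self, cdict, sdict, keys):
#         if 'paths_to_purge' in keys:
#             keys.remove('paths_to_purge')
#             keys.append('unmanaged paths')
#             cdict['unmanaged paths'] = cdict.pop('paths_to_purge')
#             sdict['unmanaged paths'] = sdict.pop('paths_to_purge')
#         return (cdict, sdict, keys)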
651666 def patch_attributes(self, attributes):
652667 """
6262 if interactive is False and self.attributes['interactive'] is True:
6363 return (self.STATUS_SKIPPED, [_("interactive only")])
6464
65 for item in self._precedes_items:
66 if item._triggers_preceding_items(interactive=interactive):
67 io.debug(_(
68 "preceding item {item} on {node} has been triggered by {other_item}"
69 ).format(item=self.id, node=self.node.name, other_item=item.id))
70 self.has_been_triggered = True
71 break
72 else:
73 io.debug(_(
74 "preceding item {item} on {node} has NOT been triggered by {other_item}"
75 ).format(item=self.id, node=self.node.name, other_item=item.id))
76
6577 if self.triggered and not self.has_been_triggered:
6678 io.debug(_("skipping {} because it wasn't triggered").format(self.id))
67 return (self.STATUS_SKIPPED, [_("no trigger")])
79 return (self.STATUS_SKIPPED, [_("not triggered")])
6880
6981 if self.unless:
7082 with io.job(_(" {node} {bundle} {item} checking 'unless' condition...").format(
3232 "mode for {item} should be three or four digits long, was: '{value}'"
3333 ).format(item=item_id, value=value))
3434
35
3536 ATTRIBUTE_VALIDATORS = defaultdict(lambda: lambda id, value: None)
3637 ATTRIBUTE_VALIDATORS.update({
3738 'mode': validator_mode,
4445 """
4546 BUNDLE_ATTRIBUTE_NAME = "directories"
4647 ITEM_ATTRIBUTES = {
47 'group': None,
48 'mode': None,
49 'owner': None,
48 'group': "root",
49 'mode': "0755",
50 'owner': "root",
5051 'purge': False,
5152 }
5253 ITEM_TYPE_NAME = "directory"
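# With these defaults, a bundle's items.py (hypothetical example) only
# needs to state what differs from root/root/0755:
#
#     directories = {
#         "/var/lib/myapp": {
#             "mode": "0700",   # owner and group fall back to root
#         },
#     }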
6768 return cdict
6869
6970 def display_dicts(self, cdict, sdict, keys):
70 if UNMANAGED_PATH_DESC in keys:
71 try:
72 keys.remove('paths_to_purge')
73 except ValueError:
74 pass
75 else:
76 keys.append(UNMANAGED_PATH_DESC)
7177 cdict[UNMANAGED_PATH_DESC] = cdict['paths_to_purge']
7278 sdict[UNMANAGED_PATH_DESC] = sdict['paths_to_purge']
7379 del cdict['paths_to_purge']
7480 del sdict['paths_to_purge']
75 return (cdict, sdict)
76
77 def display_keys(self, cdict, sdict, keys):
78 try:
79 keys.remove('paths_to_purge')
80 except ValueError:
81 pass
82 else:
83 keys.append(UNMANAGED_PATH_DESC)
84 return keys
81 return (cdict, sdict, keys)
8582
8683 def fix(self, status):
8784 if status.must_be_created or 'type' in status.keys_to_fix:
182179 path=line,
183180 ))
184181 yield line
185
186
187182
188183 def get_auto_deps(self, items):
189184 deps = []
256251 def patch_attributes(self, attributes):
257252 if 'mode' in attributes and attributes['mode'] is not None:
258253 attributes['mode'] = str(attributes['mode']).zfill(4)
254 if 'group' not in attributes and self.node.os in self.node.OS_FAMILY_BSD:
255 # BSD doesn't have a root group, so we have to use a
256 # different default value here
257 attributes['group'] = 'wheel'
259258 return attributes
260259
261260 @classmethod
178178 'context': None,
179179 'delete': False,
180180 'encoding': "utf-8",
181 'group': None,
182 'mode': None,
183 'owner': None,
181 'group': "root",
182 'mode': "0644",
183 'owner': "root",
184184 'source': None,
185185 'verify_with': None,
186186 }
350350 }
351351
352352 def display_dicts(self, cdict, sdict, keys):
353 if 'content' in keys:
354 del cdict['content_hash']
355 del sdict['content_hash']
356 cdict['content'] = self.content
357 sdict['content'] = get_remote_file_contents(self.node, self.name)
358 return (cdict, sdict)
359
360 def display_keys(self, cdict, sdict, keys):
361353 if (
362354 'content_hash' in keys and
363355 self.attributes['content_type'] not in ('base64', 'binary') and
366358 ):
367359 keys.remove('content_hash')
368360 keys.append('content')
369 return keys
361 del cdict['content_hash']
362 del sdict['content_hash']
363 cdict['content'] = self.content
364 sdict['content'] = get_remote_file_contents(self.node, self.name)
365 return (cdict, sdict, keys)
370366
371367 def patch_attributes(self, attributes):
372368 if (
380376 attributes['context'] = {}
381377 if 'mode' in attributes and attributes['mode'] is not None:
382378 attributes['mode'] = str(attributes['mode']).zfill(4)
379 if 'group' not in attributes and self.node.os in self.node.OS_FAMILY_BSD:
380 # BSD doesn't have a root group, so we have to use a
381 # different default value here
382 attributes['group'] = 'wheel'
383383 return attributes
384384
385385 def test(self):
3131 }
3232 ITEM_TYPE_NAME = "group"
3333 REQUIRED_ATTRIBUTES = []
34
35 @classmethod
36 def block_concurrent(cls, node_os, node_os_version):
37 # https://github.com/bundlewrap/bundlewrap/issues/367
38 if node_os == 'openbsd':
39 return [cls.ITEM_TYPE_NAME]
40 else:
41 return []
3442
3543 def __repr__(self):
3644 return "<Group name:{}>".format(self.name)
1717 'installed': True,
1818 }
1919 _pkg_install_cache = {}
20
21 @classmethod
22 def block_concurrent(cls, node_os, node_os_version):
23 return [cls.ITEM_TYPE_NAME]
2024
2125 def __repr__(self):
2226 return "<{} name:{} installed:{}>".format(
1111 """
1212 A package installed by apt.
1313 """
14 BLOCK_CONCURRENT = ["pkg_apt"]
1514 BUNDLE_ATTRIBUTE_NAME = "pkg_apt"
1615 ITEM_TYPE_NAME = "pkg_apt"
1716 WHEN_CREATING_ATTRIBUTES = {
3029 runlevel +
3130 "DEBIAN_FRONTEND=noninteractive "
3231 "apt-get -qy -o Dpkg::Options::=--force-confold --no-install-recommends "
33 "install {}".format(quote(self.name.replace("_", ":")))
32 "install {}".format(quote(self.name.replace("_", ":"))),
33 may_fail=True,
3434 )
3535
3636 def pkg_installed(self):
99 """
1010 A package installed by dnf.
1111 """
12 BLOCK_CONCURRENT = ["pkg_dnf", "pkg_yum"]
1312 BUNDLE_ATTRIBUTE_NAME = "pkg_dnf"
1413 ITEM_TYPE_NAME = "pkg_dnf"
14
15 @classmethod
16 def block_concurrent(cls, node_os, node_os_version):
17 return ["pkg_dnf", "pkg_yum"]
1518
1619 def pkg_all_installed(self):
1720 result = self.node.run("dnf -d0 -e0 list installed")
1922 yield "{}:{}".format(self.ITEM_TYPE_NAME, line.split()[0].split(".")[0])
2023
2124 def pkg_install(self):
22 self.node.run("dnf -d0 -e0 -y install {}".format(quote(self.name)))
25 self.node.run("dnf -d0 -e0 -y install {}".format(quote(self.name)), may_fail=True)
2326
2427 def pkg_installed(self):
2528 result = self.node.run(
2932 return result.return_code == 0
3033
3134 def pkg_remove(self):
32 self.node.run("dnf -d0 -e0 -y remove {}".format(quote(self.name)))
35 self.node.run("dnf -d0 -e0 -y remove {}".format(quote(self.name)), may_fail=True)
1313
1414 def pkg_install(node, pkgname, version):
1515 full_name = "{}-{}".format(pkgname, version) if version else pkgname
16 return node.run("pkg_add -r -I {}".format(full_name))
16 return node.run("pkg_add -r -I {}".format(full_name), may_fail=True)
1717
1818
1919 def pkg_installed(node, pkgname):
2121 "pkg_info | cut -f 1 -d ' '",
2222 may_fail=True,
2323 )
24 for line in result.stdout.decode('utf-8').strip().split("\n"):
24 for line in result.stdout.decode('utf-8').strip().splitlines():
2525 installed_package, installed_version = PKGSPEC_REGEX.match(line).groups()
2626 if installed_package == pkgname:
2727 return installed_version
2929
3030
3131 def pkg_remove(node, pkgname):
32 return node.run("pkg_delete -I -D dependencies {}".format(quote(pkgname)))
32 return node.run("pkg_delete -I -D dependencies {}".format(quote(pkgname)), may_fail=True)
3333
3434
3535 class OpenBSDPkg(Item):
3636 """
3737 A package installed by pkg_add/pkg_delete.
3838 """
39 BLOCK_CONCURRENT = ["pkg_openbsd"]
4039 BUNDLE_ATTRIBUTE_NAME = "pkg_openbsd"
4140 ITEM_ATTRIBUTES = {
4241 'installed': True,
1010 """
1111 A package installed by pacman.
1212 """
13 BLOCK_CONCURRENT = ["pkg_pacman"]
1413 BUNDLE_ATTRIBUTE_NAME = "pkg_pacman"
1514 ITEM_ATTRIBUTES = {
1615 'installed': True,
3433 local_file = join(self.item_dir, self.attributes['tarball'])
3534 remote_file = "/tmp/{}".format(basename(local_file))
3635 self.node.upload(local_file, remote_file)
37 self.node.run("pacman --noconfirm -U {}".format(quote(remote_file)))
36 self.node.run("pacman --noconfirm -U {}".format(quote(remote_file)), may_fail=True)
3837 self.node.run("rm -- {}".format(quote(remote_file)))
3938 else:
40 self.node.run("pacman --noconfirm -S {}".format(quote(self.name)))
39 self.node.run("pacman --noconfirm -S {}".format(quote(self.name)), may_fail=True)
4140
4241 def pkg_installed(self):
4342 result = self.node.run(
4746 return result.return_code == 0
4847
4948 def pkg_remove(self):
50 self.node.run("pacman --noconfirm -Rs {}".format(quote(self.name)))
49 self.node.run("pacman --noconfirm -Rs {}".format(quote(self.name)), may_fail=True)
1212 if version:
1313 pkgname = "{}=={}".format(pkgname, version)
1414 pip_path, pkgname = split_path(pkgname)
15 return node.run("{} install -U {}".format(quote(pip_path), quote(pkgname)))
15 return node.run("{} install -U {}".format(quote(pip_path), quote(pkgname)), may_fail=True)
1616
1717
1818 def pkg_installed(node, pkgname):
2929
3030 def pkg_remove(node, pkgname):
3131 pip_path, pkgname = split_path(pkgname)
32 return node.run("{} uninstall -y {}".format(quote(pip_path), quote(pkgname)))
32 return node.run("{} uninstall -y {}".format(quote(pip_path), quote(pkgname)), may_fail=True)
3333
3434
3535 class PipPkg(Item):
3636 """
3737 A package installed by pip.
3838 """
39 BLOCK_CONCURRENT = ["pkg_pip"]
4039 BUNDLE_ATTRIBUTE_NAME = "pkg_pip"
4140 ITEM_ATTRIBUTES = {
4241 'installed': True,
99 """
1010 A package installed by snap.
1111 """
12 BLOCK_CONCURRENT = ["pkg_snap"]
1312 BUNDLE_ATTRIBUTE_NAME = "pkg_snap"
1413 ITEM_TYPE_NAME = "pkg_snap"
1514
1918 yield "{}:{}".format(self.ITEM_TYPE_NAME, line.split()[0].split(" ")[0])
2019
2120 def pkg_install(self):
22 self.node.run("snap install {}".format(quote(self.name)))
21 self.node.run("snap install {}".format(quote(self.name)), may_fail=True)
2322
2423 def pkg_installed(self):
2524 result = self.node.run(
2928 return result.return_code == 0
3029
3130 def pkg_remove(self):
32 self.node.run("snap remove {}".format(quote(self.name)))
31 self.node.run("snap remove {}".format(quote(self.name)), may_fail=True)
99 """
1010 A package installed by yum.
1111 """
12 BLOCK_CONCURRENT = ["pkg_dnf", "pkg_yum"]
1312 BUNDLE_ATTRIBUTE_NAME = "pkg_yum"
1413 ITEM_TYPE_NAME = "pkg_yum"
14
15 @classmethod
16 def block_concurrent(cls, node_os, node_os_version):
17 return ["pkg_dnf", "pkg_yum"]
1518
1619 def pkg_all_installed(self):
1720 result = self.node.run("yum -d0 -e0 list installed")
1922 yield "{}:{}".format(self.ITEM_TYPE_NAME, line.split()[0].split(".")[0])
2023
2124 def pkg_install(self):
22 self.node.run("yum -d0 -e0 -y install {}".format(quote(self.name)))
25 self.node.run("yum -d0 -e0 -y install {}".format(quote(self.name)), may_fail=True)
2326
2427 def pkg_installed(self):
2528 result = self.node.run(
2932 return result.return_code == 0
3033
3134 def pkg_remove(self):
32 self.node.run("yum -d0 -e0 -y remove {}".format(quote(self.name)))
35 self.node.run("yum -d0 -e0 -y remove {}".format(quote(self.name)), may_fail=True)
1313
1414
1515 def pkg_install(node, pkgname):
16 return node.run("zypper {} install {}".format(ZYPPER_OPTS, quote(pkgname)))
16 return node.run("zypper {} install {}".format(ZYPPER_OPTS, quote(pkgname)), may_fail=True)
1717
1818
1919 def pkg_installed(node, pkgname):
2929
3030
3131 def pkg_remove(node, pkgname):
32 return node.run("zypper {} remove {}".format(ZYPPER_OPTS, quote(pkgname)))
32 return node.run("zypper {} remove {}".format(ZYPPER_OPTS, quote(pkgname)), may_fail=True)
3333
3434
3535 class ZypperPkg(Item):
3636 """
3737 A package installed by zypper.
3838 """
39 BLOCK_CONCURRENT = ["pkg_zypper"]
4039 BUNDLE_ATTRIBUTE_NAME = "pkg_zypper"
4140 ITEM_ATTRIBUTES = {
4241 'installed': True,
4342 }
4443 ITEM_TYPE_NAME = "pkg_zypper"
44
45 @classmethod
46 def block_concurrent(cls, node_os, node_os_version):
47 return [cls.ITEM_TYPE_NAME]
4548
4649 def __repr__(self):
4750 return "<ZypperPkg name:{} installed:{}>".format(
88
99
1010 def svc_start(node, svcname):
11 return node.run("/etc/rc.d/{} start".format(quote(svcname)))
11 return node.run("/etc/rc.d/{} start".format(quote(svcname)), may_fail=True)
1212
1313
1414 def svc_running(node, svcname):
1717
1818
1919 def svc_stop(node, svcname):
20 return node.run("/etc/rc.d/{} stop".format(quote(svcname)))
20 return node.run("/etc/rc.d/{} stop".format(quote(svcname)), may_fail=True)
2121
2222
2323 def svc_enable(node, svcname):
24 return node.run("rcctl set {} status on".format(quote(svcname)))
24 return node.run("rcctl set {} status on".format(quote(svcname)), may_fail=True)
2525
2626
2727 def svc_enabled(node, svcname):
3333
3434
3535 def svc_disable(node, svcname):
36 return node.run("rcctl set {} status off".format(quote(svcname)))
36 return node.run("rcctl set {} status off".format(quote(svcname)), may_fail=True)
3737
3838
3939 class SvcOpenBSD(Item):
88
99
1010 def svc_start(node, svcname):
11 return node.run("systemctl start -- {}".format(quote(svcname)))
11 return node.run("systemctl start -- {}".format(quote(svcname)), may_fail=True)
1212
1313
1414 def svc_running(node, svcname):
2020
2121
2222 def svc_stop(node, svcname):
23 return node.run("systemctl stop -- {}".format(quote(svcname)))
23 return node.run("systemctl stop -- {}".format(quote(svcname)), may_fail=True)
2424
2525
2626 def svc_enable(node, svcname):
27 return node.run("systemctl enable -- {}".format(quote(svcname)))
27 return node.run("systemctl enable -- {}".format(quote(svcname)), may_fail=True)
2828
2929
3030 def svc_enabled(node, svcname):
3636
3737
3838 def svc_disable(node, svcname):
39 return node.run("systemctl disable -- {}".format(quote(svcname)))
39 return node.run("systemctl disable -- {}".format(quote(svcname)), may_fail=True)
4040
4141
4242 class SvcSystemd(Item):
4444 A service managed by systemd.
4545 """
4646 BUNDLE_ATTRIBUTE_NAME = "svc_systemd"
47 # bw 3.0: Both should default to True.
4847 ITEM_ATTRIBUTES = {
49 'enabled': None,
48 'enabled': True,
5049 'running': True,
5150 }
5251 ITEM_TYPE_NAME = "svc_systemd"
88
99
1010 def svc_start(node, svcname):
11 return node.run("/etc/init.d/{} start".format(quote(svcname)))
11 return node.run("/etc/init.d/{} start".format(quote(svcname)), may_fail=True)
1212
1313
1414 def svc_running(node, svcname):
2020
2121
2222 def svc_stop(node, svcname):
23 return node.run("/etc/init.d/{} stop".format(quote(svcname)))
23 return node.run("/etc/init.d/{} stop".format(quote(svcname)), may_fail=True)
2424
2525
2626 class SvcSystemV(Item):
88
99
1010 def svc_start(node, svcname):
11 return node.run("initctl start --no-wait -- {}".format(quote(svcname)))
11 return node.run("initctl start --no-wait -- {}".format(quote(svcname)), may_fail=True)
1212
1313
1414 def svc_running(node, svcname):
1919
2020
2121 def svc_stop(node, svcname):
22 return node.run("initctl stop --no-wait -- {}".format(quote(svcname)))
22 return node.run("initctl stop --no-wait -- {}".format(quote(svcname)), may_fail=True)
2323
2424
2525 class SvcUpstart(Item):
2020 """
2121 BUNDLE_ATTRIBUTE_NAME = "symlinks"
2222 ITEM_ATTRIBUTES = {
23 'group': None,
24 'owner': None,
23 'group': "root",
24 'owner': "root",
2525 'target': None,
2626 }
2727 ITEM_TYPE_NAME = "symlink"
6060 group = self.attributes['group'] or ""
6161 if group:
6262 group = ":" + quote(group)
63 self.node.run("chown -h {}{} -- {}".format(
63 if self.node.os in self.node.OS_FAMILY_BSD:
64 command = "chown -h {}{} {}"
65 else:
66 command = "chown -h {}{} -- {}"
67 self.node.run(command.format(
6468 quote(self.attributes['owner'] or ""),
6569 group,
6670 quote(self.name),
132136 deps.append(item.id)
133137 return deps
134138
139 def patch_attributes(self, attributes):
140 if 'group' not in attributes and self.node.os in self.node.OS_FAMILY_BSD:
141 # BSD doesn't have a root group, so we have to use a
142 # different default value here
143 attributes['group'] = 'wheel'
144 return attributes
145
135146 def sdict(self):
136147 path_info = PathInfo(self.node, self.name)
137148 if not path_info.exists:
106106 'use_shadow': None,
107107 }
108108 ITEM_TYPE_NAME = "user"
109
110 @classmethod
111 def block_concurrent(cls, node_os, node_os_version):
112 # https://github.com/bundlewrap/bundlewrap/issues/367
113 if node_os == 'openbsd':
114 return [cls.ITEM_TYPE_NAME]
115 else:
116 return []
109117
110118 def __repr__(self):
111119 return "<User name:{}>".format(self.name)
2525 type(None),
2626 )
2727
28 # constants returned as options by metadata processors
29 DONE = 1
30 RUN_ME_AGAIN = 2
31 DEFAULTS = 3
32 OVERWRITE = 4
33
2834
2935 def atomic(obj):
3036 """
3945 "(not: {})".format(repr(obj)))
4046 else:
4147 return cls(obj)
48
49
50 def check_metadata_processor_result(result, node_name, metadata_processor_name):
51 """
52 Validates the return value of a metadata processor and splits it
53 into metadata and options.
54 """
55 if not isinstance(result, tuple) or not len(result) >= 2:
56 raise ValueError(_(
57 "metadata processor {metaproc} for node {node} did not return "
58 "a tuple of length 2 or greater"
59 ).format(
60 metaproc=metadata_processor_name,
61 node=node_name,
62 ))
63 result_dict, options = result[0], result[1:]
64 if not isinstance(result_dict, dict):
65 raise ValueError(_(
66 "metadata processor {metaproc} for node {node} did not return "
67 "a dict as the first element"
68 ).format(
69 metaproc=metadata_processor_name,
70 node=node_name,
71 ))
72 for option in options:
73 if option not in (DEFAULTS, DONE, RUN_ME_AGAIN, OVERWRITE):
74 raise ValueError(_(
75 "metadata processor {metaproc} for node {node} returned an "
76 "invalid option: {opt}"
77 ).format(
78 metaproc=metadata_processor_name,
79 node=node_name,
80 opt=repr(option),
81 ))
82 if DONE in options and RUN_ME_AGAIN in options:
83 raise ValueError(_(
84 "metadata processor {metaproc} for node {node} cannot return both "
85 "DONE and RUN_ME_AGAIN"
86 ).format(
87 metaproc=metadata_processor_name,
88 node=node_name,
89 ))
90 if DONE not in options and RUN_ME_AGAIN not in options:
91 raise ValueError(_(
92 "metadata processor {metaproc} for node {node} must return either "
93 "DONE or RUN_ME_AGAIN"
94 ).format(
95 metaproc=metadata_processor_name,
96 node=node_name,
97 ))
98 if DEFAULTS in options and OVERWRITE in options:
99 raise ValueError(_(
100 "metadata processor {metaproc} for node {node} cannot return both "
101 "DEFAULTS and OVERWRITE"
102 ).format(
103 metaproc=metadata_processor_name,
104 node=node_name,
105 ))
106 return result_dict, options
42107
43108
44109 def check_for_unsolvable_metadata_key_conflicts(node):
153218 Our own version of deepcopy.copy that doesn't pickle and ensures
154219 a limited range of types is used in metadata.
155220 """
156 if isinstance(obj, dict):
221 if isinstance(obj, METADATA_TYPES):
222 return obj
223 elif isinstance(obj, dict):
157224 new_obj = {}
158225 for key, value in obj.items():
159226 if not isinstance(key, METADATA_TYPES):
168235 new_obj = set()
169236 for member in obj:
170237 new_obj.add(deepcopy_metadata(member))
171 elif isinstance(obj, METADATA_TYPES):
172 return obj
173238 else:
174239 raise ValueError(_("illegal metadata value type: {}").format(repr(obj)))
175240 return new_obj
33 from datetime import datetime, timedelta
44 from hashlib import md5
55 from os import environ
6 from sys import exit
76 from threading import Lock
87
98 from . import operations
1514 )
1615 from .exceptions import (
1716 DontCache,
18 FaultUnavailable,
1917 ItemDependencyLoop,
2018 NodeLockedException,
2119 NoSuchBundle,
2220 RepositoryError,
2321 )
2422 from .group import GROUP_ATTR_DEFAULTS
25 from .itemqueue import ItemQueue, ItemTestQueue
23 from .itemqueue import ItemQueue
2624 from .items import Item
2725 from .lock import NodeLock
28 from .metadata import check_for_unsolvable_metadata_key_conflicts, hash_metadata
26 from .metadata import hash_metadata
2927 from .utils import cached_property, names
3028 from .utils.statedict import hash_statedict
3129 from .utils.text import blue, bold, cyan, green, red, validate_name, yellow
4442 self.fixed = 0
4543 self.skipped = 0
4644 self.failed = 0
47 self.profiling_info = []
48
49 for item_id, result, time_elapsed in item_results:
50 self.profiling_info.append((time_elapsed, item_id))
45 self.total = 0
46
47 for item_id, result, duration in item_results:
48 self.total += 1
5149 if result == Item.STATUS_ACTION_SUCCEEDED:
5250 self.correct += 1
5351 elif result == Item.STATUS_OK:
6361 "can't make sense of results for {} on {}: {}"
6462 ).format(item_id, self.node_name, result))
6563
66 self.profiling_info.sort()
67 self.profiling_info.reverse()
68
6964 self.start = None
7065 self.end = None
7166
119114 other_peoples_soft_locks=(),
120115 workers=1,
121116 interactive=False,
122 profiling=False,
123117 ):
124118 with io.job(_(" {node} processing dependencies...").format(node=node.name)):
125 item_queue = ItemQueue(node.items)
119 item_queue = ItemQueue(node.items, node.os, node.os_version)
126120
127121 results = []
128122
130124 return bool(item_queue.items_without_deps)
131125
132126 def next_task():
133 item, skipped_items = item_queue.pop()
134 for skipped_item in skipped_items:
135 io.progress_advance()
136 handle_apply_result(
137 node,
138 skipped_item,
139 Item.STATUS_SKIPPED,
140 interactive,
141 changes=[_("no pre-trigger")],
142 )
143 results.append((skipped_item.id, Item.STATUS_SKIPPED, timedelta(0)))
144
127 item = item_queue.pop()
145128 return {
146129 'task_id': "{}:{}".format(node.name, item.id),
147130 'target': item.apply,
532515 def magic_number(self):
533516 return int(md5(self.name.encode('UTF-8')).hexdigest(), 16)
534517
535 @property
536 def _static_items(self):
537 for bundle in self.bundles:
538 for item in bundle._static_items:
539 yield item
540
541518 def apply(
542519 self,
543520 autoskip_selector="",
545522 force=False,
546523 skip_list=tuple(),
547524 workers=4,
548 profiling=False,
549525 ):
550526 if not list(self.items):
551527 io.stdout(_("{x} {node} has no items").format(
591567 other_peoples_soft_locks=lock.other_peoples_soft_locks,
592568 workers=workers,
593569 interactive=interactive,
594 profiling=profiling,
595570 )
596571 except NodeLockedException as e:
597572 if not interactive:
726701 wrapper_outer=self.cmd_wrapper_outer,
727702 )
728703
729 def test(self, ignore_missing_faults=False, workers=4):
730 with io.job(_(" {node} checking for metadata collisions...").format(node=self.name)):
731 check_for_unsolvable_metadata_key_conflicts(self)
732 io.stdout(_("{x} {node} has no metadata collisions").format(
733 x=green("✓"),
734 node=bold(self.name),
735 ))
736 if self.items:
737 test_items(self, ignore_missing_faults=ignore_missing_faults, workers=workers)
738 else:
739 io.stdout(_("{x} {node} has no items").format(node=bold(self.name), x=yellow("!")))
740
741 self.repo.hooks.test_node(self.repo, self)
742
743704 def upload(self, local_path, remote_path, mode=None, owner="", group=""):
744705 return operations.upload(
745706 self.hostname,
811772
812773 for attr, default in GROUP_ATTR_DEFAULTS.items():
813774 setattr(Node, attr, build_attr_property(attr, default))
814
815
816 def test_items(node, ignore_missing_faults=False, workers=1):
817 item_queue = ItemTestQueue(node.items)
818
819 def tasks_available():
820 return bool(item_queue.items_without_deps)
821
822 def next_task():
823 try:
824 # Get the next non-DummyItem in the queue.
825 while True:
826 item = item_queue.pop()
827 if not isinstance(item, DummyItem):
828 break
829 except IndexError: # no more items available right now
830 return None
831 else:
832 return {
833 'task_id': item.node.name + ":" + item.bundle.name + ":" + item.id,
834 'target': item._test,
835 }
836
837 def handle_result(task_id, return_value, duration):
838 node_name, bundle_name, item_id = task_id.split(":", 2)
839 if item_id.count(":") < 2:
840 # don't count canned actions
841 io.progress_advance()
842 io.stdout("{x} {node} {bundle} {item}".format(
843 bundle=bold(bundle_name),
844 item=item_id,
845 node=bold(node_name),
846 x=green("✓"),
847 ))
848
849 def handle_exception(task_id, exception, traceback):
850 io.progress_advance()
851 node_name, bundle_name, item_id = task_id.split(":", 2)
852 if ignore_missing_faults and isinstance(exception, FaultUnavailable):
853 io.stderr(_("{x} {node} {bundle} {item} ({msg})").format(
854 bundle=bold(bundle_name),
855 item=item_id,
856 msg=yellow(_("Fault unavailable")),
857 node=bold(node_name),
858 x=yellow("»"),
859 ))
860 else:
861 io.stderr("{x} {node} {bundle} {item}".format(
862 bundle=bold(bundle_name),
863 item=item_id,
864 node=bold(node_name),
865 x=red("!"),
866 ))
867 io.stderr(traceback)
868 io.stderr("{}: {}".format(type(exception), str(exception)))
869 exit(1)
870
871 worker_pool = WorkerPool(
872 tasks_available,
873 next_task,
874 handle_result=handle_result,
875 handle_exception=handle_exception,
876 pool_id="test_{}".format(node.name),
877 workers=workers,
878 )
879 worker_pool.run()
880
881 if item_queue.items_with_deps:
882 raise ItemDependencyLoop(item_queue.items_with_deps)
883775
884776
885777 def verify_items(node, show_all=False, workers=1):
00 # -*- coding: utf-8 -*-
11 from __future__ import unicode_literals
22
3 from datetime import datetime
34 from pipes import quote
45 from select import select
56 from shlex import split
7273
7374 class RunResult(object):
7475 def __init__(self):
76 self.duration = None
7577 self.return_code = None
7678 self.stderr = None
7779 self.stdout = None
110112
111113 cmd_id = randstr(length=4).upper()
112114 io.debug("running command with ID {}: {}".format(cmd_id, " ".join(command)))
115 start = datetime.utcnow()
113116
114117 # Launch the child process. It's important that SSH gets a dummy
115118 # stdin, i.e. it must *not* read from the terminal. Otherwise, it
180183 ))
181184
182185 result = RunResult()
186 result.duration = datetime.utcnow() - start
183187 result.stdout = stdout_lb.record.getvalue()
184188 result.stderr = stderr_lb.record.getvalue()
185189 result.return_code = child_process.returncode
1111 from . import items, utils, VERSION_STRING
1212 from .bundle import FILENAME_BUNDLE
1313 from .exceptions import (
14 BundleError,
1514 NoSuchGroup,
1615 NoSuchNode,
1716 NoSuchRepository,
1918 RepositoryError,
2019 )
2120 from .group import Group
22 from .metadata import deepcopy_metadata
21 from .metadata import check_metadata_processor_result, deepcopy_metadata, DEFAULTS, DONE, OVERWRITE
2322 from .node import _flatten_group_hierarchy, Node
2423 from .secrets import FILENAME_SECRETS, generate_initial_secrets_cfg, SecretProxy
2524 from .utils import cached_property, merge_dict, names
8988 FILENAME_REQUIREMENTS: "bundlewrap>={}\n".format(VERSION_STRING),
9089 FILENAME_SECRETS: generate_initial_secrets_cfg,
9190 }
92 META_PROC_MAX_ITER = 1000 # maximum iterations for metadata processors
9391
9492
9593 def groups_from_file(filepath, libs, repo_path, vault):
465463 Builds complete metadata for all nodes that appear in
466464 self._node_metadata_partial.keys().
467465 """
468 iterations = {}
469466 # these processors have indicated that they do not need to be run again
470467 blacklisted_metaprocs = set()
471 while (
472 not iterations or max(iterations.values()) <= META_PROC_MAX_ITER
473 ) and not QUIT_EVENT.is_set():
468 while not QUIT_EVENT.is_set():
474469 # First, get the static metadata out of the way
475470 for node_name in list(self._node_metadata_partial):
476471 if QUIT_EVENT.is_set():
491486 )
492487
493488 with io.job(_(" {node} merging node metadata...").format(node=node.name)):
494 self._node_metadata_partial[node.name] = merge_dict(
489 # deepcopy_metadata is important here because up to this point
490 # different nodes from the same group might still share objects
491 # nested deeply in their metadata. This becomes a problem if we
492 # start messing with these objects in metadata processors. Every
493 # time we edited one of these objects, the change would be
494 # shared amongst multiple nodes.
495 self._node_metadata_partial[node.name] = deepcopy_metadata(merge_dict(
495496 self._node_metadata_partial[node.name],
496497 node._node_metadata,
497 )
498 ))
498499
499500 # Now for the interesting part: We run all metadata processors
500 # in sequence until none of them return changed metadata.
501 modified = False
501 # until none of them return DONE anymore (at that point, the
502 # remaining ones are just waiting for another metaproc to insert
503 # new data, which cannot happen if none of them return DONE)
504 metaproc_returned_DONE = False
502505 for node_name in list(self._node_metadata_partial):
503506 if QUIT_EVENT.is_set():
504507 break
507510 for metadata_processor_name, metadata_processor in node.metadata_processors:
508511 if (node_name, metadata_processor_name) in blacklisted_metaprocs:
509512 continue
510 iterations.setdefault((node.name, metadata_processor_name), 1)
511513 io.debug(_(
512 "running metadata processor {metaproc} for node {node}, "
513 "iteration #{i}"
514 "running metadata processor {metaproc} for node {node}"
514515 ).format(
515516 metaproc=metadata_processor_name,
516517 node=node.name,
517 i=iterations[(node.name, metadata_processor_name)],
518518 ))
519519 try:
520 processed = metadata_processor(
521 deepcopy_metadata(self._node_metadata_partial[node.name]),
522 )
520 processed = metadata_processor(self._node_metadata_partial[node.name])
523521 except Exception as exc:
524522 io.stderr(_(
525523 "{x} Exception while executing metadata processor "
530528 node=node.name,
531529 ))
532530 raise exc
533 iterations[(node.name, metadata_processor_name)] += 1
534 if isinstance(processed, tuple) and len(processed) == 2:
535 if processed[1] is True:
536 io.debug(_(
537 "metadata processor {metaproc} for node {node} "
538 "has indicated that it need not be run again"
539 ).format(
540 metaproc=metadata_processor_name,
541 node=node.name,
542 ))
543 blacklisted_metaprocs.add((node_name, metadata_processor_name))
544 processed = processed[0]
545 if not isinstance(processed, dict):
546 raise ValueError(_(
547 "metadata processor {metaproc} for node {node} did not return "
548 "a dictionary or tuple of (dict, bool)"
531 processed_dict, options = check_metadata_processor_result(
532 processed,
533 node.name,
534 metadata_processor_name,
535 )
536 if DONE in options:
537 io.debug(_(
538 "metadata processor {metaproc} for node {node} "
539 "has indicated that it need NOT be run again"
549540 ).format(
550541 metaproc=metadata_processor_name,
551542 node=node.name,
552543 ))
553 if processed != self._node_metadata_partial[node.name]:
544 blacklisted_metaprocs.add((node_name, metadata_processor_name))
545 metaproc_returned_DONE = True
546 else:
554547 io.debug(_(
555 "metadata processor {metaproc} for node {node} changed metadata, "
556 "rerunning all metadata processors for this node"
548 "metadata processor {metaproc} for node {node} "
549 "has indicated that it must be run again"
557550 ).format(
558551 metaproc=metadata_processor_name,
559552 node=node.name,
560553 ))
561 self._node_metadata_partial[node.name] = processed
562 modified = True
563 if not modified:
554
555 if DEFAULTS in options:
556 self._node_metadata_partial[node.name] = merge_dict(
557 processed_dict,
558 self._node_metadata_partial[node.name],
559 )
560 elif OVERWRITE in options:
561 self._node_metadata_partial[node.name] = merge_dict(
562 self._node_metadata_partial[node.name],
563 processed_dict,
564 )
565 else:
566 self._node_metadata_partial[node.name] = processed_dict
567
568 if not metaproc_returned_DONE:
564569 if self._node_metadata_static_complete != set(self._node_metadata_partial.keys()):
565570 # During metadata processor execution, partial metadata may
566571 # have been requested for nodes we did not previously
572577 continue
573578 else:
574579 break
575
576 for culprit, number_of_iterations in iterations.items():
577 if number_of_iterations >= META_PROC_MAX_ITER:
578 node, metadata_processor = culprit
579 raise BundleError(_(
580 "Metadata processor '{proc}' stopped after too many iterations "
581 "({max_iter}) for node '{node}' to prevent infinite loop. "
582 "This usually means one of two things: "
583 "1) You have two metadata processors that keep overwriting each other's "
584 "data or 2) You have a single metadata processor that keeps changing its own "
585 "data. "
586 "To fix this, use `bw --debug metadata {node}` and look for repeated messages "
587 "indicating that the same metadata processor keeps changing metadata. Then "
588 "rewrite that metadata processor to eventually stop changing metadata.".format(
589 max_iter=META_PROC_MAX_ITER,
590 node=node,
591 proc=metadata_processor,
592 ),
593 ))
594580
595581 def metadata_hash(self):
596582 repo_dict = {}
307307 f.write(fernet.encrypt(plaintext))
308308 return target_file
309309
310 def _format(self, format_str=None, faults=None):
311 return format_str.format(*[fault.value for fault in faults])
312
313 def format(self, format_str, *faults):
314 """
315 Returns a Fault for a string formatted with the given Faults,
316 e.g.:
317
318 vault.format("password: {}", vault.password_for("something"))
319
320 DEPRECATED, remove in 3.0, use Fault.format_into instead.
321 """
322 return Fault(
323 self._format,
324 format_str=format_str,
325 faults=faults,
326 )
327
328310 def human_password_for(
329311 self, identifier, digits=2, key='generate', per_word=3, words=4,
330312 ):
1414
1515 from ..exceptions import DontCache, FaultUnavailable
1616
17 __GETATTR_CACHE = {}
17 __GETATTR_CODE_CACHE = {}
18 __GETATTR_RESULT_CACHE = {}
1819 __GETATTR_NODEFAULT = "very_unlikely_default_value"
1920
2021
150151 return content
151152
152153
153 def get_all_attrs_from_file(path, cache=True, base_env=None):
154 def get_all_attrs_from_file(path, base_env=None):
154155 """
155156 Reads all 'attributes' (if it were a module) from a source file.
156157 """
157158 if base_env is None:
158159 base_env = {}
159 if base_env:
160
161 if not base_env and path in __GETATTR_RESULT_CACHE:
160162 # do not allow caching when passing in a base env because that
161163 # breaks repeated calls with different base envs for the same
162164 # file
163 cache = False
164 if path not in __GETATTR_CACHE or not cache:
165 return __GETATTR_RESULT_CACHE[path]
166
167 if path not in __GETATTR_CODE_CACHE:
165168 source = get_file_contents(path)
166 env = base_env.copy()
167 try:
168 exec(source, env)
169 except:
170 from .ui import io
171 io.stderr("Exception while executing {}".format(path))
172 raise
173 if cache:
174 __GETATTR_CACHE[path] = env
175 else:
176 env = __GETATTR_CACHE[path]
169 __GETATTR_CODE_CACHE[path] = compile(source, path, mode='exec')
170
171 code = __GETATTR_CODE_CACHE[path]
172 env = base_env.copy()
173 try:
174 exec(code, env)
175 except:
176 from .ui import io
177 io.stderr("Exception while executing {}".format(path))
178 raise
179
180 if not base_env:
181 __GETATTR_RESULT_CACHE[path] = env
182
177183 return env
178184
179185
180 def getattr_from_file(path, attrname, base_env=None, cache=True, default=__GETATTR_NODEFAULT):
186 def getattr_from_file(path, attrname, base_env=None, default=__GETATTR_NODEFAULT):
181187 """
182188 Reads a specific 'attribute' (if it were a module) from a source
183189 file.
184190 """
185 env = get_all_attrs_from_file(path, base_env=base_env, cache=cache)
191 env = get_all_attrs_from_file(path, base_env=base_env)
186192 if default == __GETATTR_NODEFAULT:
187193 return env[attrname]
188194 else:
44 from ..exceptions import NoSuchGroup, NoSuchItem, NoSuchNode
55 from . import names
66 from .text import mark_for_translation as _, red
7 from .ui import io
7 from .ui import io, QUIT_EVENT
88
99
1010 def count_items(nodes):
1111 count = 0
1212 for node in nodes:
13 if QUIT_EVENT.is_set():
14 return 0
1315 count += len(node.items)
1416 return count
1517
5252 def capture_for_debug_logfile(f):
5353 @wraps(f)
5454 def wrapped(self, msg, **kwargs):
55 if self.debug_log_file:
55 if self.debug_log_file and self._active:
5656 self.debug_log_file.write(
5757 datetime.now().strftime("[%Y-%m-%d %H:%M:%S.%f] ") +
5858 ansi_clean(msg).rstrip("\n") + "\n"
0 bundlewrap (3.0.1-1) unstable; urgency=medium
1
2 * New upstream release
3 * Update standards version to 4.1.0
4
5 -- Jonathan Carter <jcc@debian.org> Wed, 27 Sep 2017 09:38:20 +0200
6
07 bundlewrap (2.20.0-1) unstable; urgency=medium
18
29 * New upstream release
66 dh-python,
77 python,
88 python-setuptools
9 Standards-Version: 4.0.1
9 Standards-Version: 4.1.0
1010 Homepage: http://bundlewrap.org/
1111 Vcs-Svn: svn://anonscm.debian.org/python-apps/packages/bundlewrap/trunk
1212 Vcs-Browser: https://anonscm.debian.org/viewvc/python-apps/packages/bundlewrap/trunk/
119119 **`.items`**
120120
121121 A list of items on this node (instances of subclasses of `bundlewrap.items.Item`)
122
123 <br>
124
125 **`.magic_number`**
126
127 A large number derived from the node's name. This number is very likely to be unique for your entire repository. You can, for example, use this number to easily "jitter" cronjobs:
128
129 '{} {} * * * root /my/script'.format(
130 node.magic_number % 60,
131 node.magic_number % 2 + 4,
132 )
122133
123134 <br>
124135
2828 """
2929 A foo.
3030 """
31 BLOCK_CONCURRENT = []
3231 BUNDLE_ATTRIBUTE_NAME = "foo"
3332 ITEM_ATTRIBUTES = {
3433 'attribute': "default value",
3534 }
3635 ITEM_TYPE_NAME = "foo"
3736 REQUIRED_ATTRIBUTES = ['attribute']
37
38 @classmethod
39 def block_concurrent(cls, node_os, node_os_version):
40 """
41 Return a list of item types that cannot be applied in parallel
42 with this item type.
43 """
44 return []
3845
3946 def __repr__(self):
4047 return "<Foo attribute:{}>".format(self.attributes['attribute'])
6370
6471 def display_dicts(self, cdict, sdict, keys):
6572 """
66 Given cdict and sdict as implemented above, modify them to better
67 suit interactive presentation. The keys parameter is the return
68 value of display_keys (see below) and provided for reference only.
73 Given cdict and sdict as implemented above, modify them to
74 better suit interactive presentation. The keys parameter is a
75 list of keys whose values differ between cdict and sdict.
6976
7077 Implementing this method is optional.
7178 """
72 return (cdict, sdict)
73
74 def display_keys(self, cdict, sdict, keys):
75 """
76 Given a list of keys whose values differ between cdict and sdict,
77 modify them to better suit presentation to the user.
78
79 Implementing this method is optional.
80 """
81 return keys
79 return (cdict, sdict, keys)
8280
8381 def fix(self, status):
8482 """
112110 ITEM_TYPE_NAME = "foo"
113111
114112
115 `BLOCK_CONCURRENT` is a list of item types (e.g. `pkg_apt`), that cannot be applied in parallel with this type of item. May include this very item type itself. For most items this is not an issue (e.g. creating multiple files at the same time), but some types of items have to be applied sequentially (e.g. package managers usually employ locks to ensure only one package is installed at a time):
116
117 BLOCK_CONCURRENT = ["pkg_apt"]
118
119
120113 `REQUIRED_ATTRIBUTES` is a list of attribute names that must be set on each item of this type. If BundleWrap encounters an item without all these attributes during bundle inspection, an exception will be raised. Example:
121114
122115 REQUIRED_ATTRIBUTES = ['attr1', 'attr2']
130123
131124 The only other method you have to implement is `fix`. It doesn't have to return anything and just uses `self.node.run()` to fix the item. To do this efficiently, it may use the provided parameters indicating which keys differ between the should-be sdict and the actual one. Both sdicts are also provided in case you need to know their values.
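
A minimal sketch of `fix()` for the `Foo` item above, assuming a hypothetical `foo` command-line tool on the node:

    from pipes import quote

    def fix(self, status):
        if status.must_be_created or 'attribute' in status.keys_to_fix:
            # may_fail=True: don't raise on a non-zero exit code,
            # BundleWrap will verify the item again after the fix
            self.node.run(
                "foo --set {}".format(quote(self.attributes['attribute'])),
                may_fail=True,
            )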
132125
126 `block_concurrent()` must return a list of item types (e.g. `['pkg_apt']`) that cannot be applied in parallel with this type of item; the list may include this very item type itself. For most items this is not an issue (e.g. creating multiple files at the same time), but some types of items have to be applied sequentially (e.g. package managers usually employ locks to ensure only one package is installed at a time).
127
133128 If you're having trouble, try looking at the [source code for the items that come with BundleWrap](https://github.com/bundlewrap/bundlewrap/tree/master/bundlewrap/items). The `pkg_*` items are pretty simple and easy to understand while `files` is the most complex to date. Or just drop by on [IRC](irc://chat.freenode.net/bundlewrap), we're glad to help.
0 # Migrating from BundleWrap 2.x to 3.x
1
2 As per [semver](http://semver.org), BundleWrap 3.0 breaks compatibility with repositories created for BundleWrap 2.x. This document provides a guide on how to upgrade your repositories to BundleWrap 3.x. Please read the entire document before proceeding.
3
4 <br>
5
6 ## metadata.py
7
8 BundleWrap 2.x simply used all functions in `metadata.py` whose names don't start with an underscore as metadata processors. This led to awkward imports like `from foo import bar as _bar`. BundleWrap 3.x requires a decorator for explicitly designating functions as metadata processors:
9
10 @metadata_processor
11 def myproc(metadata):
12 return metadata, DONE
13
14 You will have to add `@metadata_processor` to each metadata processor function. There is no need to import it; it is provided automatically, just like `node` and `repo`.
15
16 The accepted return values of metadata processors have changed as well. Metadata processors now always have to return a tuple, with the first element being a dictionary of metadata and the remaining elements being options that tell BundleWrap what to do with that dictionary. In most cases, you will want to return the `DONE` option as in the example above. There is no need to import the options; they're always available.
17
18 When you previously returned `metadata, False` from a metadata processor, you will now have to return `metadata, RUN_ME_AGAIN`. For a more detailed description of the available options, see [the documentation](../repo/bundles.md#metadatapy).
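
For example, a 2.x metadata processor that asked to be run again might be migrated like this (`myproc` is just an illustrative name):

    # BundleWrap 2.x
    def myproc(metadata):
        if "foo" in metadata:
            metadata["bar"] = metadata["foo"]
            return metadata, True
        return metadata, False

    # BundleWrap 3.x
    @metadata_processor
    def myproc(metadata):
        if "foo" in metadata:
            metadata["bar"] = metadata["foo"]
            return metadata, DONE
        return metadata, RUN_ME_AGAIN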
19
20 <br>
21
22 ## File and directory ownership defaults
23
24 [Files](../items/file.md), [directories](../items/directory.md), and [symlinks](../items/symlink.md) now have default values for the ownership and mode attributes. Previously the default was to ignore them. It's very likely that you won't have to do anything here, just be aware.
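
If you relied on the old "don't care" behavior for a particular item, you can restore it by explicitly setting the attributes to `None` (the path and content below are placeholders):

    files = {
        "/etc/example": {
            'content': "...",
            'owner': None,  # 2.x behavior: leave ownership and mode alone
            'group': None,
            'mode': None,
        },
    }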
25
26 <br>
27
28 ## systemd services enabled by default
29
30 Again, just be [aware](../items/svc_systemd.md); it's probably what you intended anyway.
31
32 <br>
33
34 ## Environment variables
35
36 The following [env vars](env.md) have been renamed (though the new names have already been available for a while, so chances are you're already using them):
37
38 <table>
39 <tr><th>Old</th><th>New</th></tr>
40 <tr><td><code>BWADDHOSTKEYS</code></td><td><code>BW_ADD_HOST_KEYS</code></td></tr>
41 <tr><td><code>BWCOLORS</code></td><td><code>BW_COLORS</code></td></tr>
42 <tr><td><code>BWITEMWORKERS</code></td><td><code>BW_ITEM_WORKERS</code></td></tr>
43 <tr><td><code>BWNODEWORKERS</code></td><td><code>BW_NODE_WORKERS</code></td></tr>
44 </table>
45
46 <br>
47
48 ## Item.display_keys and Item.display_dicts
49
50 If you've written your own items and used the `display_keys()` or `display_dicts()` methods or the `BLOCK_CONCURRENT` attribute, you will have to update them to the [new API](dev_item.md).
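
In short, `display_keys()` is gone and its job has been folded into `display_dicts()`, which now receives and must return the `keys` list as well. A sketch of the change:

    # BundleWrap 2.x
    def display_dicts(self, cdict, sdict, keys):
        return (cdict, sdict)

    def display_keys(self, cdict, sdict, keys):
        return keys

    # BundleWrap 3.x
    def display_dicts(self, cdict, sdict, keys):
        return (cdict, sdict, keys)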
3636 - hate Python and/or JSON
3737 - like to use community-maintained configuration templates
3838 - need unattended bootstrapping of nodes
39 - need to manage non-Linux systems
4039 - don’t trust your coworkers
41
42 We have also prepared a [comparison with other popular config management systems](misc/alternatives.md).
11
22 directories = {
33 "/path/to/directory": {
4 "mode": "0644",
4 "mode": "0755",
55 "owner": "root",
66 "group": "root",
77 },
1515
1616 ### group
1717
18 Name of the group this directory belongs to. Defaults to `None` (don't care about group).
18 Name of the group this directory belongs to. Defaults to `'root'`. Set to `None` if you don't want BundleWrap to change whatever is set on the node.
1919
2020 <br>
2121
2222 ### mode
2323
24 Directory mode as returned by `stat -c %a <directory>`. Defaults to `None` (don't care about mode).
24 Directory mode as returned by `stat -c %a <directory>`. Defaults to `755`. Set to `None` if you don't want BundleWrap to change whatever is set on the node.
2525
2626 <br>
2727
2828 ### owner
2929
30 Username of the directory's owner. Defaults to `None` (don't care about owner).
30 Username of the directory's owner. Defaults to `'root'`. Set to `None` if you don't want BundleWrap to change whatever is set on the node.
3131
3232 <br>
3333
6363
6464 ### group
6565
66 Name of the group this file belongs to. Defaults to `None` (don't care about group).
66 Name of the group this file belongs to. Defaults to `'root'`. Set to `None` if you don't want BundleWrap to change whatever is set on the node.
6767
6868 <br>
6969
7070 ### mode
7171
72 File mode as returned by `stat -c %a <file>`. Defaults to `None` (don't care about mode).
72 File mode as returned by `stat -c %a <file>`. Defaults to `644`. Set to `None` if you don't want BundleWrap to change whatever is set on the node.
7373
7474 <br>
7575
7676 ### owner
7777
78 Username of the file's owner. Defaults to `None` (don't care about owner).
78 Username of the file's owner. Defaults to `'root'`. Set to `None` if you don't want BundleWrap to change whatever is set on the node.
7979
8080 <br>
8181
33
44 svc_systemd = {
55 "fcron.service": {
6 "enabled": True,
6 "enabled": True, # default
77 "running": True, # default
88 },
99 "sgopherd.socket": {
2121
2222 ### enabled
2323
24 `True` if the service shall be automatically started during system bootup; `False` otherwise. `None`, the default value, makes BundleWrap ignore this setting.
24 `True` (the default) if the service shall be automatically started during system bootup; `False` otherwise. `None` makes BundleWrap ignore this setting.
2525
2626 <br>
2727
2323
2424 ### group
2525
26 Name of the group this symlink belongs to. Defaults to `root`. Defaults to `None` (don't care about group).
26 Name of the group this symlink belongs to. Defaults to `'root'`. Set to `None` if you don't want BundleWrap to change whatever is set on the node.
2727
2828 <br>
2929
3030 ### owner
3131
32 Username of the symlink's owner. Defaults to `root`. Defaults to `None` (don't care about owner).
32 Username of the symlink's owner. Defaults to `'root'`. Set to `None` if you don't want BundleWrap to change whatever is set on the node.
+0
-60
docs/content/misc/alternatives.md
0 # Alternatives
1
2 <div class="alert alert-info">This page is an effort to compare BundleWrap to other config management systems. It is very hard to keep this information complete and up to date, so please feel free to raise issues or create pull requests if something is amiss.</div>
3
4 BundleWrap has the following properties that are unique to it or at least not common among other solutions:
5
6 * server- and agent-less architecture
7 * item-level parallelism to speed up convergence of complex nodes
8 * interactive mode to review configuration as it is being applied
9 * [Mako file templates](../items/file_templates)
10 * verifies that each action taken actually fixed the item in question
11 * verify mode to assess the state of your configuration without mutating it
12 * useful and actionable error messages
13 * can apply actions (and other items) prior to fixing an item (and only then)
14 * built-in visualization of node configuration
15 * nice [Python API](../guide/api.md)
16 * designed to be mastered quickly and easily remembered
17 * for better or worse: no commercial agenda/support
18 * no support for non-Linux target nodes (BundleWrap itself can be run from Mac OS as well)
19
20
21 ## Ansible
22
23 [Ansible](http://ansible.com) is very similar to BundleWrap in how it communicates with nodes. Neither system uses server or agent processes; both use SSH. Ansible can optionally use OpenSSH instead of a Python SSH implementation to speed up performance. On the other hand, BundleWrap will always use the Python implementation, but with multiple connections to each node. This should give BundleWrap a performance advantage on very complex systems with many items, since each connection can work on a different item simultaneously.
24
25 To apply configuration, Ansible uploads pieces of code called modules to each node and runs them there. Many Ansible modules depend on the node having a Python 2.x interpreter installed. In some cases, third-party Python libraries are needed as well, increasing the footprint on the node. BundleWrap runs commands on the target node just as you would in an interactive SSH session. Most of the [commands needed](../guide/installation.md#requirements-for-managed-systems) by BundleWrap are provided by coreutils and should be present on all standard Linux systems.
26
27 Ansible ships with loads of modules while BundleWrap will only give you the most needed primitives to work with. For example, we will not add an item type for remote downloads because you can easily build that yourself using an [action](../items/action.md) with `wget`.
28
29 Ansible's playbooks roughly correspond to BundleWrap's bundles, but are written in YAML using a special playbook language. BundleWrap uses Python for this purpose, so if you know some basic Python you only need to learn the schema of the dictionaries you're building. This also means that you will never run into a problem the playbook language cannot solve. Anything you can do in Python, you can do in BundleWrap.
30
31 While you can automate application deployments in BundleWrap, Ansible is much more capable in that regard as it combines config management and sophisticated deployment mechanisms (multi-stage, rolling updates).
32
33 File templates in Ansible are [Jinja2](http://jinja2.pocoo.org), while BundleWrap offers both [Mako](http://makotemplates.org) and Jinja2.
34
35 Ansible, Inc. offers paid support for Ansible and an optional web-based addon called [Ansible Tower](http://ansible.com/tower). No such offerings are available for BundleWrap.
36
37
38 BCFG2
39 -----
40
41 BCFG2's bundles obviously were an inspiration for BundleWrap. One important difference is that BundleWrap's bundles are usually completely isolated and self-contained within their directory while BCFG2 bundles may need resources (e.g. file templates) from elsewhere in the repository.
42
43 On a practical level BundleWrap prefers pure Python and Mako over the XML- and text-variants of Genshi used for bundle and file templating in BCFG2.
44
45 And of course BCFG2 has a very traditional client/server model while BundleWrap runs only on the operators computer.
46
47
48 Chef
49 ----
50
51 [Chef](http://www.getchef.com) has basically two modes of operation: The most widely used one involves a server component and the `chef-client` agent. The second option is `chef-solo`, which will apply configuration from a local repository to the node the repository is located on. BundleWrap supports neither of these modes and always applies configuration over SSH.
52
53 Overall, Chef is harder to get into, but will scale to thousands of nodes.
54
55 The community around Chef is quite large and probably the largest of all config management systems. This means lots of community-maintained cookbooks to choose from. BundleWrap does have a [plugin system](../repo/plugins.md) to provide almost anything in a repository, but there aren't many plugins to choose from yet.
56
57 Chef is written in Ruby and uses the popular [ERB](http://www.kuwata-lab.com/erubis/) template language. BundleWrap is heavily invested in Python and offers support for Mako and Jinja2 templates.
58
59 OpsCode offers paid support for Chef and SaaS hosting for the server component. [AWS OpsWorks](http://aws.amazon.com/opsworks/) also integrates Chef cookbooks.
5252 While it sounds scary, Copyright assignment is used to improve the enforceability of the GPL. Even the FSF does it, [read their explanation why](http://www.gnu.org/licenses/why-assign.html). The agreement used by BundleWrap is from [harmonyagreements.org](http://harmonyagreements.org).
5353
5454 If you're still concerned, please do not hesitate to contact [@trehn](https://twitter.com/trehn).
55
56 <br>
57
58 ### Isn't this all very similar to Ansible?
59
60 Some parts are, but there are significant differences as well. Check out the [alternatives page](alternatives.md#ansible) for a writeup of the details.
61
62 <br>
273273
274274 # metadata.py
275275
276 Alongside `items.py` you may create another file called `metadata.py`. It can be used to do advanced processing of the metadata you configured for your nodes and groups. Specifically, it allows each bundle to modify metadata before `items.py` is evaluated. To do that, you simply write any number of functions whose name doesn't start with an underscore and put them into `metadata.py`.
277
278 <div class="alert alert-warning">Understand that <strong>any</strong> function will be used as a metadata processor, unless its name starts with an underscore. This is also true for imported functions, so you'll need to import them like this: <code>from module import func as _func</code>.</div>
279
280 These functions take the metadata dictionary generated so far as their single argument. You must then return the same dictionary with any modifications you need to make. These functions are called metadata processors. Every metadata processor from every bundle is called *repeatedly* with the latest metadata dictionary until no more changes are made to the metadata. Here's an example for how a `metadata.py` could look like (note that you have access to `repo` and `node` just like in `items.py`):
281
276 Alongside `items.py` you may create another file called `metadata.py`. It can be used to do advanced processing of the metadata you configured for your nodes and groups. Specifically, it allows each bundle to modify metadata before `items.py` is evaluated.
277
278 This is accomplished through metadata processors. Metadata processors are functions that take the metadata dictionary generated so far as their single argument. They must return that dictionary (with any modifications you need to make) plus at least one of several options:
279
280 @metadata_processor
282281 def my_metadata_processor(metadata):
283282 metadata["foo"] = node.name
284 return metadata
285
286 <div class="alert alert-danger">To avoid deadlocks when accessing <strong>other</strong> nodes' metadata from within a metadata processor, use <code>other_node.partial_metadata</code> instead of <code>other_node.metadata</code>. For the same reason, always use the <code>metadata</code> parameter to access the current node's metadata, never <code>node.metadata</code>.</div>
287
288 To improve performance, you can optionally return a tuple instead, with the first element being your metadata dict and the second one being a boolean indicating whether this metadata processor has done its work for this particular node and need not be run again:
289
290 def my_metadata_processor(metadata):
291 metadata["foo"] = node.name
292 return metadata, True
293
294 The example above is a typical case of a metadata processor that only needs to be run once: it always does the same thing anyway. If you depend on other metadata processors, you have to return `False` (or just the dict):
295
283 return metadata, DONE
284
285 You must always return the modified metadata dictionary as the first element. After that, there are a few options you can return. Every metadata processor from every bundle is called *repeatedly* with the latest metadata dictionary until it indicates that it is done by returning the `DONE` option or until *all* remaining metadata processors return `RUN_ME_AGAIN`. You must always return one of `DONE` or `RUN_ME_AGAIN`. Use the latter if your metadata processor depends on metadata that is generated by another metadata processor (which may be called after yours). Here is another example:
286
287 @metadata_processor
296288 def first_metadata_processor(metadata):
297289 metadata["foo"] = node.name
298 return metadata, True
299
300
290 return metadata, DONE
291
292 @metadata_processor
301293 def second_metadata_processor(metadata):
302294 if "foo" in metadata:
303295 metadata["bar"] = metadata["foo"]
304 # our job is done, returning True
305 return metadata, True
296 return metadata, DONE
306297 else:
307 # return False, so we get called again
308 return metadata, False
309
310 In this example, `second_metadata_processor` might be called before `first_metadata_processor`. But it can't do its job without `metadata["foo"]`, so it needs to be called again until it has become available (because `first_metadata_processor` has been called in the meantime).
298 return metadata, RUN_ME_AGAIN
299
300 In this example, `"bar"` can only be set once `"foo"` is available, so `second_metadata_processor` has to wait and return `RUN_ME_AGAIN` until `first_metadata_processor` has run. This is necessary because the running order of metadata processors is undefined.
301
302 <div class="alert alert-danger">To avoid deadlocks when accessing <strong>other</strong> nodes' metadata from within a metadata processor, use <code>other_node.partial_metadata</code> instead of <code>other_node.metadata</code>. For the same reason, always use the <code>metadata</code> parameter to access the current node's metadata, never <code>node.metadata</code>.</div>
303
304 Available options:
305
306 <table>
307 <tr><th>Option</th><th>Description</th></tr>
308 <tr><td><code>DONE</code></td><td>Indicates that this metadata processor has done all it can and need not be called again. Return this whenever possible.</td></tr>
309 <tr><td><code>RUN_ME_AGAIN</code></td><td>Indicates that this metadata processor is still waiting for metadata from another metadata processor to become available.</td></tr>
310 <tr><td><code>DEFAULTS</code></td><td>The returned metadata dictionary will only be used to provide default values. The actual metadata generated so far will be recursively merged into the returned dict.</td></tr>
311 <tr><td><code>OVERWRITE</code></td><td>The returned metadata dictionary will be recursively merged into the actual metadata generated so far (inverse of <code>DEFAULTS</code>).</td></tr>
312 </table>
313
314 Here is an example of how to use `DEFAULTS`:
315
316 @metadata_processor
317 def my_metadata_processor(metadata):
318 return {
319 "foo": {
320 "bar": 47,
321 },
322 }, DONE, DEFAULTS
323
324 This means `node.metadata["foo"]["bar"]` will be 47 by default, but can also be overridden in static metadata at the node/group level.
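
`OVERWRITE` works the other way around. A sketch with illustrative values:

    @metadata_processor
    def my_metadata_processor(metadata):
        return {
            "foo": {
                "bar": 48,
            },
        }, DONE, OVERWRITE

Here the returned value for `node.metadata["foo"]["bar"]` takes precedence over whatever has been gathered so far, while keys not returned by this processor remain untouched.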
325
326 For your convenience, you can access `repo`, `node`, `metadata_processor` and all the options in `metadata.py` without importing them.
8888
8989 Cannot be set at group level.
9090
91
92 ### magic_number
93
94 A large number derived from the node's name. This number is very likely to be unique for your entire repository. You can, for example, use this number to easily "jitter" cronjobs:
95
96 '{} {} * * * root /my/script'.format(
97 node.magic_number % 60,
98 node.magic_number % 2 + 4,
99 )
100
91 <br>
10192
10293 ### metadata
10394
1919 - Python API: guide/api.md
2020 - OS compatibility: guide/os_compatibility.md
2121 - Migrating to 2.0: guide/migrate_12.md
22 - Migrating to 3.0: guide/migrate_23.md
2223 - Repository:
2324 - Overview: repo/layout.md
2425 - nodes.py: repo/nodes.py.md
5253 - About: misc/about.md
5354 - Glossary: misc/glossary.md
5455 - FAQ: misc/faq.md
55 - Alternatives: misc/alternatives.md
5656 - Contributing: misc/contributing.md
1515
1616 setup(
1717 name="bundlewrap",
18 version="2.20.0",
18 version="3.0.1",
1919 description="Config management with Python",
2020 long_description=(
2121 "By allowing for easy and low-overhead config management, BundleWrap fills the gap between complex deployments using Chef or Puppet and old school system administration over SSH.\n"
00 # -*- coding: utf-8 -*-
11 from __future__ import unicode_literals
2 from os.path import join
2 from os.path import exists, join
33
44 from bundlewrap.utils.testing import host_os, make_repo, run
55
4040 with open(join(str(tmpdir), "file")) as f:
4141 content = f.read()
4242 assert content == "1\n2\n3\n"
43
44
45 def test_precedes_unless(tmpdir):
46 make_repo(
47 tmpdir,
48 bundles={
49 "test": {
50 'files': {
51 join(str(tmpdir), "file"): {
52 'content': "1\n",
53 'triggered': True,
54 'precedes': ["tag:tag1"],
55 },
56 },
57 'actions': {
58 "action2": {
59 'command': "echo 2 >> {}".format(join(str(tmpdir), "file")),
60 'tags': ["tag1"],
61 'unless': 'true',
62 },
63 "action3": {
64 'command': "echo 3 >> {}".format(join(str(tmpdir), "file")),
65 'tags': ["tag1"],
66 'needs': ["action:action2"],
67 },
68 },
69 },
70 },
71 nodes={
72 "localhost": {
73 'bundles': ["test"],
74 'os': host_os(),
75 },
76 },
77 )
78 run("bw apply localhost", path=str(tmpdir))
79 with open(join(str(tmpdir), "file")) as f:
80 content = f.read()
81 assert content == "1\n3\n"
82
83
84 def test_precedes_unless2(tmpdir):
85 make_repo(
86 tmpdir,
87 bundles={
88 "test": {
89 'files': {
90 join(str(tmpdir), "file"): {
91 'content': "1\n",
92 'triggered': True,
93 'precedes': ["tag:tag1"],
94 },
95 },
96 'actions': {
97 "action2": {
98 'command': "echo 2 >> {}".format(join(str(tmpdir), "file")),
99 'tags': ["tag1"],
100 'unless': 'true',
101 },
102 "action3": {
103 'command': "echo 3 >> {}".format(join(str(tmpdir), "file")),
104 'tags': ["tag1"],
105 'needs': ["action:action2"],
106 'unless': 'true',
107 },
108 },
109 },
110 },
111 nodes={
112 "localhost": {
113 'bundles': ["test"],
114 'os': host_os(),
115 },
116 },
117 )
118 run("bw apply localhost", path=str(tmpdir))
119 assert not exists(join(str(tmpdir), "file"))
120
121
122 def test_precedes_unless3(tmpdir):
123 make_repo(
124 tmpdir,
125 bundles={
126 "test": {
127 'files': {
128 join(str(tmpdir), "file"): {
129 'content': "1\n",
130 'triggered': True,
131 'precedes': ["tag:tag1"],
132 'unless': 'true',
133 },
134 },
135 'actions': {
136 "action2": {
137 'command': "echo 2 >> {}".format(join(str(tmpdir), "file")),
138 'tags': ["tag1"],
139 },
140 "action3": {
141 'command': "echo 3 >> {}".format(join(str(tmpdir), "file")),
142 'tags': ["tag1"],
143 'needs': ["action:action2"],
144 },
145 },
146 },
147 },
148 nodes={
149 "localhost": {
150 'bundles': ["test"],
151 'os': host_os(),
152 },
153 },
154 )
155 run("bw apply localhost", path=str(tmpdir))
156 with open(join(str(tmpdir), "file")) as f:
157 content = f.read()
158 assert content == "2\n3\n"
159
160
161 def test_precedes_unless4(tmpdir):
162 make_repo(
163 tmpdir,
164 bundles={
165 "test": {
166 'files': {
167 join(str(tmpdir), "file"): {
168 'content': "1\n",
169 'triggered': True,
170 'precedes': ["action:action3"],
171 },
172 },
173 'actions': {
174 "action2": {
175 'command': "false",
176 'needs': ["file:{}".format(join(str(tmpdir), "file"))],
177 },
178 "action3": {
179 'command': "echo 3 >> {}".format(join(str(tmpdir), "file")),
180 'needs': ["action:action2"],
181 },
182 },
183 },
184 },
185 nodes={
186 "localhost": {
187 'bundles': ["test"],
188 'os': host_os(),
189 },
190 },
191 )
192 run("bw apply localhost", path=str(tmpdir))
193 with open(join(str(tmpdir), "file")) as f:
194 content = f.read()
195 assert content == "1\n"
196
197
198 def test_precedes_action(tmpdir):
199 make_repo(
200 tmpdir,
201 bundles={
202 "test": {
203 'actions': {
204 "action1": {
205 'command': "echo 1 > {}".format(join(str(tmpdir), "file")),
206 'precedes': ["action:action2"],
207 'triggered': True,
208 },
209 "action2": {
210 'command': "echo 2 >> {}".format(join(str(tmpdir), "file")),
211 },
212 },
213 },
214 },
215 nodes={
216 "localhost": {
217 'bundles': ["test"],
218 'os': host_os(),
219 },
220 },
221 )
222 run("bw apply localhost", path=str(tmpdir))
223 with open(join(str(tmpdir), "file")) as f:
224 content = f.read()
225 assert content == "1\n2\n"
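Taken together, these tests pin down the 'precedes' semantics for triggered items: the preceding item is applied right before its target, is skipped when every preceded item is skipped via 'unless' (test_precedes_unless2), honors its own 'unless' (test_precedes_unless3), and is not retroactively cancelled when the target is later skipped because of a failed dependency (test_precedes_unless4). A minimal bundle sketch restating the pattern; the file path, action name, and command are made up:

    # Sketch only -- names are illustrative, not from this diff.
    files = {
        "/etc/myapp.conf": {
            'content': "setting=1\n",
            'triggered': True,              # never applied on its own ...
            'precedes': ["action:reload"],  # ... only right before "reload"
        },
    }
    actions = {
        "reload": {
            'command': "myapp --reload",
            # if this action is skipped via 'unless', the file above
            # is skipped as well
        },
    }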
3030 },
3131 }
3232 """)
33 stdout, stderr, rcode = run("bw groups -n", path=str(tmpdir))
34 assert stdout == b"""group1: node1
35 group2: node2, node3
36 group3: node1, node3
37 group4: node3
33 stdout, stderr, rcode = run("BW_TABLE_STYLE=grep bw groups -i group1,group2,group3,group4 nodes", path=str(tmpdir))
34 assert stdout == b"""group1\tnode1
35 group2\tnode2,node3
36 group3\tnode1,node3
37 group4\tnode3
3838 """
3939 assert stderr == b""
4040 assert rcode == 0
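The rewritten assertions track the overhauled `bw groups` interface: instead of `-n`, node membership is requested as an attribute (`-i <groups> nodes`), and `BW_TABLE_STYLE=grep` selects tab-separated output meant for piping. A self-contained sketch of consuming that format; the sample bytes mirror the assertion above:

    # Parse the grep-style table: one "group<TAB>node,node" line per group.
    sample = b"group1\tnode1\ngroup2\tnode2,node3\n"
    memberships = {
        group: nodes.split(",")
        for group, nodes in (
            line.split("\t") for line in sample.decode().splitlines()
        )
    }
    assert memberships["group2"] == ["node2", "node3"]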
6969 },
7070 }
7171 """)
72 stdout, stderr, rcode = run("bw groups -n", path=str(tmpdir))
73 assert stdout == b"""group1: node3, node4
74 group2: node4
75 group3: node1, node2, node3, node4
76 group4: node1, node3, node4
72 stdout, stderr, rcode = run("BW_TABLE_STYLE=grep bw groups -i group1,group2,group3,group4 nodes", path=str(tmpdir))
73 assert stdout == b"""group1\tnode3,node4
74 group2\tnode4
75 group3\tnode1,node2,node3,node4
76 group4\tnode1,node3,node4
7777 """
7878 assert stderr == b""
7979 assert rcode == 0
101101 },
102102 }
103103 """)
104 stdout, stderr, rcode = run("bw groups -n", path=str(tmpdir))
105 assert stdout == b"""group1: node1
106 group2: node2
104 stdout, stderr, rcode = run("BW_TABLE_STYLE=grep bw groups -i group1,group2 nodes", path=str(tmpdir))
105 assert stdout == b"""group1\tnode1
106 group2\tnode2
107107 """
108108 assert stderr == b""
109109 assert rcode == 0
136136 },
137137 }
138138 """)
139 stdout, stderr, rcode = run("bw groups -n", path=str(tmpdir))
140 assert stdout == b"""group1: node1, node2
141 group2: node2
142 group3: node2
139 stdout, stderr, rcode = run("BW_TABLE_STYLE=grep bw groups -i group1,group2,group3 nodes", path=str(tmpdir))
140 assert stdout == b"""group1\tnode1,node2
141 group2\tnode2
142 group3\tnode2
143143 """
144144 assert stderr == b""
145145 assert rcode == 0
183183 },
184184 }
185185 """)
186 stdout, stderr, rcode = run("bw groups -n", path=str(tmpdir))
187 assert stdout == b"""inner_group: node_NOT_in_group, node_in_group
188 intermediate_group: node_in_group
189 super_group: node_in_group
186 stdout, stderr, rcode = run("BW_TABLE_STYLE=grep bw groups -i inner_group,intermediate_group,super_group nodes", path=str(tmpdir))
187 assert stdout == b"""inner_group\tnode_NOT_in_group,node_in_group
188 intermediate_group\tnode_in_group
189 super_group\tnode_in_group
190190 """
191191 assert stderr == b""
192192 assert rcode == 0
5151 'files': {
5252 "/test": {
5353 'content': "${node.name}",
54 'group': None, # BSD has a different default and we don't want to
55 # deal with that here
5456 },
5557 },
5658 },
6466 hashes.add(stdout.strip())
6567
6668 assert len(hashes) == 1
67 assert hashes.pop() == b"8c155b4e7056463eb2c8a8345f4f316f6d7359f6"
69 assert hashes.pop() == b"2203e7acc35608bbff471c023b7b7498e5b385d9"
6870
6971
7072 def test_dict(tmpdir):
8082 'files': {
8183 "/test": {
8284 'content': "yes please",
85 'group': None, # BSD has a different default and we don't want to
86 # deal with that here
8387 },
8488 },
8589 },
8892
8993 stdout, stderr, rcode = run("bw hash -d", path=str(tmpdir))
9094 assert rcode == 0
91 assert stdout == b"8ab35c696b63a853ccf568b27a50e24a69964487 node1\n"
95 assert stdout == b"93e7a2c6e8cdc71fb4df5426bc0d0bb978d84381 node1\n"
9296
9397 stdout, stderr, rcode = run("bw hash -d node1", path=str(tmpdir))
9498 assert rcode == 0
95 assert stdout == b"503583964eadabacb18fda32cc9fb1e9f66e424b file:/test\n"
99 assert stdout == b"59d1a7c79640ccdfd3700ab141698a9389fcd0b7 file:/test\n"
96100
97101 stdout, stderr, rcode = run("bw hash -d node1 file:/test", path=str(tmpdir))
98102 assert rcode == 0
99103 assert stdout == (
100104 b"content_hash\tc05a36d547e2b1682472f76985018038d1feebc5\n"
105 b"mode\t0644\n"
106 b"owner\troot\n"
101107 b"type\tfile\n"
102108 )
103109
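Every expected digest in this file changes because 3.0 introduces built-in defaults for file owner, group, and mode, so those attributes now appear in the hashed attribute dump (note the added `mode` and `owner` lines just above). A rough illustration of why that shifts every derived hash; this is not BundleWrap's actual hashing code:

    from hashlib import sha1

    # Adding attribute lines to the dump necessarily changes its SHA-1.
    old_dump = b"content_hash\tc05a36d5...\ntype\tfile\n"
    new_dump = b"content_hash\tc05a36d5...\nmode\t0644\nowner\troot\ntype\tfile\n"
    assert sha1(old_dump).hexdigest() != sha1(new_dump).hexdigest()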
2323 },
2424 )
2525
26 stdout, stderr, rcode = run("bw items -f /test node1", path=str(tmpdir))
26 stdout, stderr, rcode = run("bw items -f node1 file:/test", path=str(tmpdir))
2727 assert stdout == "föö".encode('utf-8') # our output is always utf-8
2828 assert rcode == 0
2929
7676 },
7777 )
7878
79 stdout, stderr, rcode = run("bw items -f /test node1", path=str(tmpdir))
79 stdout, stderr, rcode = run("bw items -f node1 file:/test", path=str(tmpdir))
8080 assert rcode == 1
8181
8282
9090 )
9191 with open(join(str(tmpdir), "bundles", "test", "metadata.py"), 'w') as f:
9292 f.write(
93 """def foo(metadata):
93 """@metadata_processor
94 def foo(metadata):
9495 metadata["baz"] = node.name
95 return metadata
96 return metadata, DONE
9697 """)
9798 stdout, stderr, rcode = run("bw metadata node1", path=str(tmpdir))
9899 assert loads(stdout.decode()) == {
103104 assert rcode == 0
104105
105106
106 def test_metadatapy_loop(tmpdir):
107 def test_metadatapy_defaults(tmpdir):
107108 make_repo(
108109 tmpdir,
109110 bundles={"test": {}},
110111 nodes={
111112 "node1": {
112113 'bundles': ["test"],
113 'metadata': {"foo": 1},
114 'metadata': {"foo": "bar"},
114115 },
115116 },
116117 )
117118 with open(join(str(tmpdir), "bundles", "test", "metadata.py"), 'w') as f:
118119 f.write(
119 """def foo(metadata):
120 metadata["foo"] += 1
121 return metadata
120 """@metadata_processor
121 def foo(metadata):
122 return {
123 "foo": "baz",
124 "baz": "foo",
125 }, DONE, DEFAULTS
122126 """)
123127 stdout, stderr, rcode = run("bw metadata node1", path=str(tmpdir))
124 assert rcode == 1
128 assert loads(stdout.decode()) == {
129 "baz": "foo",
130 "foo": "bar",
131 }
132 assert stderr == b""
133 assert rcode == 0
134
135
136 def test_metadatapy_update(tmpdir):
137 make_repo(
138 tmpdir,
139 bundles={"test": {}},
140 nodes={
141 "node1": {
142 'bundles': ["test"],
143 'metadata': {"foo": "bar"},
144 },
145 },
146 )
147 with open(join(str(tmpdir), "bundles", "test", "metadata.py"), 'w') as f:
148 f.write(
149 """@metadata_processor
150 def foo(metadata):
151 return {
152 "foo": "baz",
153 "baz": "foo",
154 }, DONE, OVERWRITE
155 """)
156 stdout, stderr, rcode = run("bw metadata node1", path=str(tmpdir))
157 assert loads(stdout.decode()) == {
158 "baz": "foo",
159 "foo": "baz",
160 }
161 assert stderr == b""
162 assert rcode == 0
125163
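These three tests exercise the new metadata processor contract: processors opt in via the `@metadata_processor` decorator and return a tuple of metadata plus flags, where DONE ends processing (RUN_ME_AGAIN requests another pass) and DEFAULTS or OVERWRITE controls whether the returned dict is merged underneath or on top of existing metadata. A condensed metadata.py sketch; the decorator and flag names are injected into metadata.py by BundleWrap, and only the key names here are made up:

    @metadata_processor
    def add_defaults(metadata):
        # merged underneath existing metadata -- node-level values win,
        # which is why test_metadatapy_defaults still sees "foo": "bar"
        return {"ntp_server": "pool.ntp.org"}, DONE, DEFAULTS

    @metadata_processor
    def force_value(metadata):
        # merged on top of existing metadata -- this value wins
        return {"managed": True}, DONE, OVERWRITE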
126164
127165 def test_table(tmpdir):
00 from os.path import join
11
20 from bundlewrap.utils.testing import make_repo, run
31
42
1917
2018
2119 def test_hostname(tmpdir):
22 make_repo(tmpdir, nodes={"node1": {'hostname': "node1.example.com"}})
23 stdout, stderr, rcode = run("BW_TABLE_STYLE=grep bw nodes --attrs | grep '\thostname' | cut -f 3", path=str(tmpdir))
24 assert stdout == b"node1.example.com\n"
25 assert stderr == b""
26 assert rcode == 0
27
28
29 def test_inline(tmpdir):
3020 make_repo(
3121 tmpdir,
32 nodes={
33 "node1": {
34 'bundles': ["bundle1", "bundle2"],
35 },
36 "node2": {
37 'bundles': ["bundle1"],
38 },
39 },
40 bundles={
41 "bundle1": {},
42 "bundle2": {},
43 },
22 groups={"all": {'members': ["node1"]}},
23 nodes={"node1": {'hostname': "node1.example.com"}},
4424 )
45 stdout, stderr, rcode = run("BW_TABLE_STYLE=grep bw nodes -ai | grep '\tbundles' | grep bundle2 | cut -f 1", path=str(tmpdir))
46 assert stdout == b"node1\n"
47 assert stderr == b""
48 assert rcode == 0
49
50 stdout, stderr, rcode = run("BW_TABLE_STYLE=grep bw nodes -ai | grep '\tbundles' | grep -v bundle2 | cut -f 1", path=str(tmpdir))
51 assert stdout == b"node2\n"
52 assert stderr == b""
53 assert rcode == 0
54
55
56 def test_in_group(tmpdir):
57 make_repo(
58 tmpdir,
59 groups={
60 "group1": {
61 'members': ["node2"],
62 },
63 },
64 nodes={
65 "node1": {},
66 "node2": {},
67 },
68 )
69 stdout, stderr, rcode = run("bw nodes -g group1", path=str(tmpdir))
70 assert stdout == b"node2\n"
25 stdout, stderr, rcode = run("BW_TABLE_STYLE=grep bw nodes all hostname | cut -f 2", path=str(tmpdir))
26 assert stdout == b"node1.example.com\n"
7127 assert stderr == b""
7228 assert rcode == 0
7329
7935 "bundle1": {},
8036 "bundle2": {},
8137 },
38 groups={"all": {'members': ["node1", "node2"]}},
8239 nodes={
8340 "node1": {'bundles': ["bundle1", "bundle2"]},
8441 "node2": {'bundles': ["bundle2"]},
8542 },
8643 )
87 stdout, stderr, rcode = run("bw nodes --bundles", path=str(tmpdir))
88 assert stdout.decode().strip().split("\n") == [
89 "node1: bundle1, bundle2",
90 "node2: bundle2",
91 ]
44 stdout, stderr, rcode = run("BW_TABLE_STYLE=grep bw nodes all bundles | grep node1 | cut -f 2", path=str(tmpdir))
45 assert stdout.decode().strip().split("\n") == ["bundle1", "bundle2"]
9246 assert stderr == b""
9347 assert rcode == 0
94
95
96 def test_groups(tmpdir):
97 make_repo(
98 tmpdir,
99 groups={
100 "group1": {
101 'members': ["node2"],
102 },
103 "group2": {
104 'members': ["node1"],
105 },
106 "group3": {
107 'subgroup_patterns': ["p2"],
108 },
109 "group4": {
110 'subgroups': ["group1"],
111 },
112 },
113 nodes={
114 "node1": {},
115 "node2": {},
116 },
117 )
118 stdout, stderr, rcode = run("bw nodes --groups", path=str(tmpdir))
119 assert stdout.decode().strip().split("\n") == [
120 "node1: group2, group3",
121 "node2: group1, group4",
122 ]
123 assert stderr == b""
124 assert rcode == 0
125
126
127 def test_group_members_remove_bundle(tmpdir):
128 make_repo(
129 tmpdir,
130 bundles={
131 "bundle1": {},
132 "bundle2": {},
133 },
134 nodes={
135 "node1": {},
136 "node2": {},
137 },
138 )
139 with open(join(str(tmpdir), "groups.py"), 'w') as f:
140 f.write("""
141 groups = {
142 "group1": {
143 'bundles': ["bundle1"],
144 'members': ["node1", "node2"],
145 },
146 "group2": {
147 'bundles': ["bundle1", "bundle2"],
148 'members': ["node1", "node2"],
149 'members_remove': lambda node: node.name == "node2",
150 },
151 }
152 """)
153 stdout, stderr, rcode = run("BW_TABLE_STYLE=grep bw nodes -a node1 | grep '\tbundles' | cut -f 3", path=str(tmpdir))
154 assert stdout == b"bundle1\nbundle2\n"
155 assert stderr == b""
156 assert rcode == 0
157
158 stdout, stderr, rcode = run("BW_TABLE_STYLE=grep bw nodes -a node2 | grep '\tbundles' | cut -f 3", path=str(tmpdir))
159 assert stdout == b"bundle1\n"
160 assert stderr == b""
161 assert rcode == 0
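The `members_remove` test is worth reading closely: the attribute takes a callable that receives each node, and a truthy return value removes that node from the group, which is why node2 above ends up with bundle1 only. A minimal groups.py sketch with made-up names:

    # Sketch: "webservers", "nginx", and the node names are illustrative.
    groups = {
        "webservers": {
            'bundles': ["nginx"],
            'members': ["web1", "web2"],
            # truthy return value evicts the node from this group
            'members_remove': lambda node: node.name == "web2",
        },
    }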
66 make_repo(tmpdir)
77 stdout, stderr, rcode = run("bw test", path=str(tmpdir))
88 assert stdout == b""
9 assert stderr == b""
10 assert rcode == 0
911
1012
1113 def test_bundle_not_found(tmpdir):
3638 def test_node(repo, node, **kwargs):
3739 io.stdout("BBB")
3840 """)
39 assert b"AAA" in run("bw test", path=str(tmpdir))[0]
40 assert b"BBB" in run("bw test", path=str(tmpdir))[0]
41 assert b"AAA" in run("bw test -H", path=str(tmpdir))[0]
42 assert b"BBB" in run("bw test -J", path=str(tmpdir))[0]
4143
4244
4345 def test_circular_dep_direct(tmpdir):
6163 },
6264 },
6365 )
64 assert run("bw test", path=str(tmpdir))[2] == 1
66 assert run("bw test -I", path=str(tmpdir))[2] == 1
6567
6668
6769 def test_circular_dep_indirect(tmpdir):
8890 },
8991 },
9092 )
91 assert run("bw test", path=str(tmpdir))[2] == 1
93 assert run("bw test -I", path=str(tmpdir))[2] == 1
9294
9395
9496 def test_circular_dep_self(tmpdir):
109111 },
110112 },
111113 )
112 assert run("bw test", path=str(tmpdir))[2] == 1
114 assert run("bw test -I", path=str(tmpdir))[2] == 1
113115
114116
115117 def test_circular_trigger_self(tmpdir):
130132 },
131133 },
132134 )
133 assert run("bw test", path=str(tmpdir))[2] == 1
135 assert run("bw test -I", path=str(tmpdir))[2] == 1
134136
135137
136138 def test_file_invalid_attribute(tmpdir):
151153 },
152154 },
153155 )
154 assert run("bw test", path=str(tmpdir))[2] == 1
156 assert run("bw test -I", path=str(tmpdir))[2] == 1
155157
156158
157159 def test_file_template_error(tmpdir):
173175 },
174176 },
175177 )
176 assert run("bw test", path=str(tmpdir))[2] == 1
178 assert run("bw test -I", path=str(tmpdir))[2] == 1
177179
178180
179181 def test_group_loop(tmpdir):
191193 },
192194 },
193195 )
194 assert run("bw test", path=str(tmpdir))[2] == 1
196 assert run("bw test -S", path=str(tmpdir))[2] == 1
195197
196198
197199 def test_group_metadata_collision(tmpdir):
222224 },
223225 },
224226 )
225 assert run("bw test", path=str(tmpdir))[2] == 1
227 assert run("bw test -M", path=str(tmpdir))[2] == 1
226228
227229
228230 def test_group_metadata_collision_subgroups(tmpdir):
253255 },
254256 },
255257 )
256 assert run("bw test", path=str(tmpdir))[2] == 0
258 assert run("bw test -M", path=str(tmpdir))[2] == 0
257259
258260
259261 def test_group_metadata_collision_list(tmpdir):
275277 },
276278 },
277279 )
278 assert run("bw test", path=str(tmpdir))[2] == 1
280 assert run("bw test -M", path=str(tmpdir))[2] == 1
279281
280282
281283 def test_group_metadata_collision_dict(tmpdir):
297299 },
298300 },
299301 )
300 assert run("bw test", path=str(tmpdir))[2] == 1
302 assert run("bw test -M", path=str(tmpdir))[2] == 1
301303
302304
303305 def test_group_metadata_collision_dict_ok(tmpdir):
319321 },
320322 },
321323 )
322 assert run("bw test", path=str(tmpdir))[2] == 0
324 assert run("bw test -M", path=str(tmpdir))[2] == 0
323325
324326
325327 def test_group_metadata_collision_set(tmpdir):
341343 },
342344 },
343345 )
344 assert run("bw test", path=str(tmpdir))[2] == 1
346 assert run("bw test -M", path=str(tmpdir))[2] == 1
345347
346348
347349 def test_group_metadata_collision_set_ok(tmpdir):
363365 },
364366 },
365367 )
366 assert run("bw test", path=str(tmpdir))[2] == 0
368 assert run("bw test -M", path=str(tmpdir))[2] == 0
367369
368370
369371 def test_fault_missing(tmpdir):
385387 },
386388 },
387389 )
388 assert run("bw test", path=str(tmpdir))[2] == 1
389 assert run("bw test --ignore-missing-faults", path=str(tmpdir))[2] == 0
390 assert run("bw test -I", path=str(tmpdir))[2] == 1
391 assert run("bw test -iI", path=str(tmpdir))[2] == 0
390392
391393
392394 def test_metadata_determinism_ok(tmpdir):
402404 },
403405 )
404406 with open(join(str(tmpdir), "bundles", "bundle1", "metadata.py"), 'w') as f:
405 f.write("""
407 f.write("""@metadata_processor
406408 def test(metadata):
407409 metadata['test'] = 1
408 return metadata
410 return metadata, DONE
409411 """)
410412 assert run("bw test -m 3", path=str(tmpdir))[2] == 0
411413
423425 },
424426 )
425427 with open(join(str(tmpdir), "bundles", "bundle1", "metadata.py"), 'w') as f:
426 f.write("""from random import randint as _randint
427
428 f.write("""from random import randint
429
430 @metadata_processor
428431 def test(metadata):
429 metadata.setdefault('test', _randint(1, 99999))
430 return metadata
432 metadata.setdefault('test', randint(1, 99999))
433 return metadata, DONE
431434 """)
432435 assert run("bw test -m 3", path=str(tmpdir))[2] == 1
433436
492495 assert run("bw test group2", path=str(tmpdir))[2] == 1
493496
494497
498 def test_empty_group(tmpdir):
499 make_repo(
500 tmpdir,
501 nodes={
502 "node1": {},
503 },
504 groups={
505 "group1": {},
506 "group2": {'members': ["node1"]},
507 },
508 )
509 assert run("bw test", path=str(tmpdir))[2] == 0
510 assert run("bw test -e", path=str(tmpdir))[2] == 1
511
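Across this file, catch-all `bw test` invocations have been replaced with per-check flags. An editor's summary pairing each short option with the tests that exercise it; the flag meanings are inferred from those tests, not from `bw test --help`:

    # Inferred from the tests in this file, not from bw's own documentation.
    BW_TEST_FLAGS = {
        "-I": "item dependency checks (test_circular_dep_*, test_group_user_dep_*)",
        "-M": "group metadata collision checks (test_group_metadata_collision*)",
        "-S": "subgroup loop checks (test_group_loop)",
        "-e": "empty group checks (test_empty_group)",
        "-i": "ignore missing faults, combined as -iI (test_fault_missing)",
        "-m N": "N rounds of metadata determinism (test_metadata_determinism_*)",
    }
    for flag, checks in BW_TEST_FLAGS.items():
        print(flag, "->", checks)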
512
495513 def test_group_user_dep_deleted(tmpdir):
496514 make_repo(
497515 tmpdir,
515533 },
516534 },
517535 )
518 assert run("bw test", path=str(tmpdir))[2] == 1
536 assert run("bw test -I", path=str(tmpdir))[2] == 1
519537
520538
521539 def test_group_user_dep_ok(tmpdir):
538556 },
539557 },
540558 )
541 assert run("bw test", path=str(tmpdir))[2] == 0
559 assert run("bw test -I", path=str(tmpdir))[2] == 0
542560
543561
544562 def test_group_user_dep_deleted_gid(tmpdir):
564582 },
565583 },
566584 )
567 assert run("bw test", path=str(tmpdir))[2] == 1
585 assert run("bw test -I", path=str(tmpdir))[2] == 1
7878 def test_format_password(tmpdir):
7979 make_repo(tmpdir)
8080
81 stdout, stderr, rcode = run("bw debug -c 'print(repo.vault.format(\"format: {}\", repo.vault.password_for(\"testing\")))'", path=str(tmpdir))
81 stdout, stderr, rcode = run("bw debug -c 'print(repo.vault.password_for(\"testing\").format_into(\"format: {}\"))'", path=str(tmpdir))
8282 assert stdout == b"format: faCTT76kagtDuZE5wnoiD1CxhGKmbgiX\n"
8383 assert stderr == b""
8484 assert rcode == 0
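The rewritten `bw debug` one-liner tracks the removal of `repo.vault.format()`: formatting now lives on the secret object itself via `format_into()`, so the result stays lazy until it is actually rendered. A condensed sketch, assuming it runs inside `bw debug`, where `repo` is predefined:

    # Inside a `bw debug` session (repo is provided by BundleWrap):
    secret = repo.vault.password_for("testing")   # lazy secret value
    formatted = secret.format_into("format: {}")  # still lazy
    print(formatted)                              # resolved on output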