bundlewrap / bd807d2
Merge previous changes (Jonathan Carter, 3 years ago)
126 changed files with 3645 additions and 3554 deletions.
00 language: python
11 python:
2 - 2.7
3 - 3.5
42 - 3.6
53 - 3.7
64 - 3.8
7 dist: bionic
8 services:
9 - postgresql
5 dist: focal
6 addons:
7 postgresql: "12"
8 apt:
9 packages:
10 - postgresql-12
1011 install:
1112 - pip install .
1213 before_script:
0 # 3.10.0
1
2 2020-05-17
3
4 * added metadata defaults and reactors
0 # 4.0.0
1
2 2020-06-22
3
4 * new metadata processor API (BACKWARDS INCOMPATIBLE)
5 * removed `template_node` node attribute (BACKWARDS INCOMPATIBLE)
6 * removed support for Python 2.7 (BACKWARDS INCOMPATIBLE)
7 * removed support for Python 3.4 (BACKWARDS INCOMPATIBLE)
8 * removed support for Python 3.5 (BACKWARDS INCOMPATIBLE)
9 * removed `members_add/remove` attribute for groups (BACKWARDS INCOMPATIBLE)
10 * removed `bw --adhoc-nodes` (BACKWARDS INCOMPATIBLE)
11 * added `locking_node` node attribute
512 * added `bw diff`
13 * added `bw metadata -b`
14 * added `bw metadata --hide-defaults`
15 * added `bw metadata --hide-reactors`
16 * added `bw metadata --hide-groups`
17 * added `bw metadata --hide-node`
18 * added `git_deploy` items (formerly a plugin)
19 * added paging and color-coding for metadata sources to `bw metadata`
20 * removed `bw metadata --table`, now done automatically (BACKWARDS INCOMPATIBLE)
21 * removed `bw repo plugin` (BACKWARDS INCOMPATIBLE)
22 * removed `bw test --secret-rotation` (BACKWARDS INCOMPATIBLE)
23 * renamed `bw test --metadata-collisions` to `bw test --metadata-conflicts` (BACKWARDS INCOMPATIBLE)
24 * reworked passing multi-value options on CLI (BACKWARDS INCOMPATIBLE)
25 * `bw apply` will now exit with return code 1 if even a single item fails
626 * `items/` is now searched recursively
27 * failed items will now show what commands they ran and what their output was
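
The headline change is the reactor-based metadata API. A minimal sketch of a bundle's metadata.py under 4.0 (bundle, keys, and values are hypothetical; `defaults`, `metadata_reactor`, and `node` are injected into metadata.py by BundleWrap, as the bundle.py hunk below shows):

    defaults = {
        "myapp": {
            "port": 8080,
        },
    }

    @metadata_reactor
    def add_url(metadata):
        # reactors return partial metadata that gets merged with
        # everything else; they may run repeatedly until stable
        return {
            "myapp": {
                "url": "http://{}:{}".format(
                    node.hostname,
                    metadata.get("myapp/port"),
                ),
            },
        }
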
728
829
930 # 3.9.0
2525
2626 ------------------------------------------------------------------------
2727
28 BundleWrap is © 2013 - 2019 [Torsten Rehn](mailto:torsten@rehn.email)
28 BundleWrap is © 2013 - 2020 [Torsten Rehn](mailto:torsten@rehn.email)
0 # -*- coding: utf-8 -*-
1 from __future__ import unicode_literals
2
3 VERSION = (3, 10, 0)
0 VERSION = (4, 0, 0)
41 VERSION_STRING = ".".join([str(v) for v in VERSION])
0 # -*- coding: utf-8 -*-
1 from __future__ import unicode_literals
2
30 from os.path import exists, join
41
52 from .exceptions import BundleError, NoSuchBundle, RepositoryError
6 from .metadata import DEFAULTS, DONE, RUN_ME_AGAIN, OVERWRITE, DoNotRunAgain
3 from .metadata import DoNotRunAgain
74 from .utils import cached_property
85 from .utils.text import bold, mark_for_translation as _
96 from .utils.text import validate_name
1411 FILENAME_METADATA = "metadata.py"
1512
1613
17 def metadata_processor_classic(func):
18 """
19 Decorator that tags metadata processors.
20 """
21 func._is_metadata_processor = True
22 func._is_classic_metadata_processor = True
23 return func
14 def metadata_reactor_for_bundle(bundle_name):
15 reactor_names = set()
16
17 def metadata_reactor(func):
18 """
19 Decorator that tags metadata reactors.
20 """
21 if func.__name__ == "defaults":
22 raise ValueError(_(
23 "metadata reactor in bundle '{}' cannot be named 'defaults'"
24 ).format(bundle_name))
25 if func.__name__ in reactor_names:
26 raise ValueError(_(
27 "duplicate metadata reactor '{reactor}' in bundle '{bundle}'"
28 ).format(bundle=bundle_name, reactor=func.__name__))
29 reactor_names.add(func.__name__)
30 func._is_metadata_reactor = True
31 return func
32
33 return metadata_reactor
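
Since reactors are told apart by `__name__`, the closure over `reactor_names` rejects reuse within a single bundle. A sketch of what the two checks catch (bundle name hypothetical):

    reactor = metadata_reactor_for_bundle("mybundle")

    @reactor
    def add_users(metadata):
        return {}

    # a second reactor reusing the name fails at decoration time:
    # @reactor
    # def add_users(metadata): ...  # ValueError: duplicate metadata reactor 'add_users'

    # and 'defaults' is reserved for the plain dict of default metadata:
    # @reactor
    # def defaults(metadata): ...   # ValueError: cannot be named 'defaults'
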
2434
2535
26 def metadata_reactor(func):
27 """
28 Decorator that tags metadata reactors.
29 """
30 func._is_metadata_processor = True
31 func._is_metadata_reactor = True
32 return func
33
34
35 class Bundle(object):
36 class Bundle:
3637 """
3738 A collection of config items, bound to a node.
3839 """
6162 if not exists(self.bundle_file):
6263 return {}
6364 else:
65 base_env={
66 'node': self.node,
67 'repo': self.repo,
68 }
69 for item_class in self.repo.item_classes:
70 base_env[item_class.BUNDLE_ATTRIBUTE_NAME] = {}
71
6472 return self.repo.get_all_attrs_from_file(
6573 self.bundle_file,
66 base_env={
67 'node': self.node,
68 'repo': self.repo,
69 },
74 base_env=base_env,
7075 )
7176
7277 @cached_property
97102 )
98103
99104 @cached_property
100 def _metadata_processors(self):
101 with io.job(_("{node} {bundle} collecting metadata processors").format(
105 def _metadata_defaults_and_reactors(self):
106 with io.job(_("{node} {bundle} collecting metadata reactors").format(
102107 node=bold(self.node.name),
103108 bundle=bold(self.name),
104109 )):
105110 if not exists(self.metadata_file):
106 return {}, set(), set()
111 return {}, set()
112
107113 defaults = {}
108114 reactors = set()
109 classic_processors = set()
110115 internal_names = set()
111116 for name, attr in self.repo.get_all_attrs_from_file(
112117 self.metadata_file,
113118 base_env={
114 'DEFAULTS': DEFAULTS,
115 'DONE': DONE,
116 'OVERWRITE': OVERWRITE,
117 'RUN_ME_AGAIN': RUN_ME_AGAIN,
118119 'DoNotRunAgain': DoNotRunAgain,
119 'metadata_processor': metadata_processor_classic,
120 'metadata_reactor': metadata_reactor,
120 'metadata_reactor': metadata_reactor_for_bundle(self.name),
121121 'node': self.node,
122122 'repo': self.repo,
123123 },
124124 ).items():
125125 if name == "defaults":
126126 defaults = attr
127 elif getattr(attr, '_is_metadata_processor', False):
127 elif getattr(attr, '_is_metadata_reactor', False):
128128 internal_name = getattr(attr, '__name__', name)
129129 if internal_name in internal_names:
130130 raise BundleError(_(
131 "Metadata processor '{name}' in bundle {bundle} for node {node} has "
131 "Metadata reactor '{name}' in bundle {bundle} for node {node} has "
132132 "__name__ '{internal_name}', which was previously used by another "
133 "metadata processor in the same metadata.py. BundleWrap uses __name__ "
134 "internally to tell metadata processors apart, so this is a problem. "
135 "Perhaps you used a decorator on your metadata processors that "
133 "metadata reactor in the same metadata.py. BundleWrap uses __name__ "
134 "internally to tell metadata reactors apart, so this is a problem. "
135 "Perhaps you used a decorator on your metadata reactors that "
136136 "doesn't use functools.wraps? You should use that."
137137 ).format(
138138 bundle=self.name,
141141 name=name,
142142 ))
143143 internal_names.add(internal_name)
144 if getattr(attr, '_is_metadata_reactor', False):
145 reactors.add(attr)
146 elif getattr(attr, '_is_classic_metadata_processor', False):
147 classic_processors.add(attr)
148 else:
149 # this should never happen
150 raise AssertionError
151 return defaults, reactors, classic_processors
144 reactors.add(attr)
145 return defaults, reactors
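
`DoNotRunAgain` stays in `base_env`, so a reactor that can never contribute on a given node can opt out of further passes. A sketch with assumed semantics (carried over from the 3.x processor API):

    @metadata_reactor
    def ssh_defaults(metadata):
        if node.os != "linux":
            raise DoNotRunAgain  # this reactor will never return anything useful here
        return {"ssh": {"port": metadata.get("ssh/port", 22)}}
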
0 # -*- coding: utf-8 -*-
1 from __future__ import unicode_literals
2
30 from cProfile import Profile
41 from functools import wraps
52 from os import environ
63 from os.path import abspath
7 from pipes import quote
4 from shlex import quote
85 from sys import argv, exit, stderr, stdout
96 from traceback import format_exc, print_exc
107
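
`pipes.quote` was never documented and `pipes` is deprecated; `shlex.quote` is the public equivalent with identical behavior for POSIX shells:

    from shlex import quote

    quote("it's")         # => 'it'"'"'s'
    quote("no_specials")  # => no_specials (returned unchanged when safe)
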
0 # -*- coding: utf-8 -*-
1 from __future__ import unicode_literals
2
30 from datetime import datetime
41 from sys import exit
52
2724
2825 def bw_apply(repo, args):
2926 errors = []
30 target_nodes = get_target_nodes(repo, args['target'], adhoc_nodes=args['adhoc_nodes'])
31 pending_nodes = target_nodes[:]
27 target_nodes = get_target_nodes(repo, args['targets'])
28 pending_nodes = target_nodes.copy()
3229
3330 io.progress_set_total(count_items(pending_nodes))
3431
3532 repo.hooks.apply_start(
3633 repo,
37 args['target'],
34 args['targets'],
3835 target_nodes,
3936 interactive=args['interactive'],
4037 )
9794 worker_pool.run()
9895
9996 total_duration = datetime.now() - start_time
97 totals = stats(results)
10098
10199 if args['summary'] and results:
102 stats_summary(results, total_duration)
100 stats_summary(results, totals, total_duration)
103101 error_summary(errors)
104102
105103 repo.hooks.apply_end(
106104 repo,
107 args['target'],
105 args['targets'],
108106 target_nodes,
109107 duration=total_duration,
110108 )
111109
112 exit(1 if errors else 0)
110 exit(1 if errors or totals['failed'] else 0)
113111
114112
115 def stats_summary(results, total_duration):
113 def stats(results):
116114 totals = {
117115 'items': 0,
118116 'correct': 0,
120118 'skipped': 0,
121119 'failed': 0,
122120 }
121 for result in results:
122 totals['items'] += result.total
123 for metric in ('correct', 'fixed', 'skipped', 'failed'):
124 totals[metric] += getattr(result, metric)
125 return totals
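
Splitting aggregation out of `stats_summary` lets the exit code consider item failures even when no hard errors occurred (per the changelog: any failed item now yields return code 1). With a toy result object carrying the fields used above:

    class FakeResult:
        total, correct, fixed, skipped, failed = 3, 1, 1, 0, 1

    stats([FakeResult()])
    # => {'items': 3, 'correct': 1, 'fixed': 1, 'skipped': 0, 'failed': 1}
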
123126
127
128 def stats_summary(results, totals, total_duration):
124129 rows = [[
125130 bold(_("node")),
126131 _("items"),
132137 ], ROW_SEPARATOR]
133138
134139 for result in results:
135 totals['items'] += result.total
136 for metric in ('correct', 'fixed', 'skipped', 'failed'):
137 totals[metric] += getattr(result, metric)
138140 rows.append([
139141 result.node_name,
140142 str(result.total),
0 # -*- coding: utf-8 -*-
1 from __future__ import unicode_literals
2
30 from code import interact
41
52 from .. import VERSION_STRING
245245 ).format(x=red("!!!")))
246246 exit(1)
247247
248 target_nodes = get_target_nodes(repo, args['target'], adhoc_nodes=args['adhoc_nodes'])
248 target_nodes = get_target_nodes(repo, args['targets'])
249249
250250 if args['branch'] or args['cmd_change'] or args['cmd_reset'] or args['prompt']:
251251 intermissions = []
0 # -*- coding: utf-8 -*-
1 from __future__ import unicode_literals
2
30 from ..group import GROUP_ATTR_DEFAULTS
41 from ..utils.text import bold, mark_for_translation as _
52 from ..utils.ui import io
129
1310 def bw_groups(repo, args):
1411 if not args['groups']:
15 for group in repo.groups:
12 for group in sorted(repo.groups):
1613 io.stdout(group.name)
1714 else:
18 groups = [repo.get_group(group.strip()) for group in args['groups'].split(",")]
15 groups = {repo.get_group(group.strip()) for group in args['groups']}
1916 if not args['attrs']:
20 subgroups = set(groups)
17 subgroups = groups.copy()
2118 for group in groups:
22 subgroups = subgroups.union(group.subgroups)
19 subgroups.update(group.subgroups)
2320 for subgroup in sorted(subgroups):
2421 io.stdout(subgroup.name)
2522 else:
0 # -*- coding: utf-8 -*-
1 from __future__ import unicode_literals
2
30 from sys import exit
41
52 from ..exceptions import NoSuchGroup, NoSuchNode
3027 target = repo.get_group(args['node_or_group'])
3128 target_type = 'group'
3229 except NoSuchGroup:
33 if args['adhoc_nodes']:
34 target = repo.create_node(args['node_or_group'])
35 target_type = 'node'
36 else:
37 io.stderr(_("{x} No such node or group: {node_or_group}").format(
38 node_or_group=args['node_or_group'],
39 x=red("!!!"),
40 ))
41 exit(1)
30 io.stderr(_("{x} No such node or group: {node_or_group}").format(
31 node_or_group=args['node_or_group'],
32 x=red("!!!"),
33 ))
34 exit(1)
4235 else:
4336 if args['item']:
4437 target = get_item(target, args['item'])
5750 if args['dict']:
5851 if args['group_membership']:
5952 if target_type in ('node', 'repo'):
60 for group in target.groups:
53 for group in sorted(target.groups):
6154 io.stdout(group.name)
6255 else:
63 for node in target.nodes:
56 for node in sorted(target.nodes):
6457 io.stdout(node.name)
6558 elif args['metadata']:
66 for node in target.nodes:
59 for node in sorted(target.nodes):
6760 io.stdout("{}\t{}".format(node.name, node.metadata_hash()))
6861 else:
6962 cdict = target.cached_cdict if args['item'] else target.cdict
7164 io.stdout("REMOVE")
7265 else:
7366 for key, value in sorted(cdict.items()):
74 io.stdout("{}\t{}".format(key, value) if args['item'] else "{} {}".format(value, key))
67 io.stdout(
68 "{}\t{}".format(key, value) if args['item']
69 else "{} {}".format(value, key)
70 )
7571 else:
7672 if args['group_membership']:
7773 io.stdout(target.group_membership_hash())
0 # -*- coding: utf-8 -*-
1 from __future__ import unicode_literals
2
30 from os import makedirs
41 from os.path import dirname, exists, join
52 from sys import exit
2623
2724
2825 def bw_items(repo, args):
29 node = get_node(repo, args['node'], adhoc_nodes=args['adhoc_nodes'])
26 node = get_node(repo, args['node'])
3027 if args['preview'] and not args['item']:
3128 io.stderr(_("{x} no ITEM given for preview").format(x=red("!!!")))
3229 exit(1)
0 # -*- coding: utf-8 -*-
1 from __future__ import unicode_literals
2
30 from os import environ
41
52 from ..concurrency import WorkerPool
3936
4037 def bw_lock_add(repo, args):
4138 errors = []
42 target_nodes = get_target_nodes(repo, args['target'], adhoc_nodes=args['adhoc_nodes'])
39 target_nodes = get_target_nodes(repo, args['targets'])
4340 target_nodes = remove_dummy_nodes(target_nodes)
4441 pending_nodes = target_nodes[:]
4542 max_node_name_length = max([len(node.name) for node in target_nodes])
5855 'kwargs': {
5956 'comment': args['comment'],
6057 'expiry': args['expiry'],
61 'item_selectors': args['items'].split(","),
58 'item_selectors': args['items'],
6259 },
6360 }
6461
9390
9491 def bw_lock_remove(repo, args):
9592 errors = []
96 target_nodes = get_target_nodes(repo, args['target'], adhoc_nodes=args['adhoc_nodes'])
93 target_nodes = get_target_nodes(repo, args['targets'])
9794 target_nodes = remove_dummy_nodes(target_nodes)
9895 pending_nodes = target_nodes[:]
9996 max_node_name_length = max([len(node.name) for node in target_nodes])
149146
150147 def bw_lock_show(repo, args):
151148 errors = []
152 target_nodes = get_target_nodes(repo, args['target'], adhoc_nodes=args['adhoc_nodes'])
149 target_nodes = get_target_nodes(repo, args['targets'])
153150 target_nodes = remove_dummy_nodes(target_nodes)
154151 pending_nodes = target_nodes[:]
155152 locks_on_node = {}
0 # -*- coding: utf-8 -*-
1 from __future__ import unicode_literals
0 from collections import OrderedDict
1 from decimal import Decimal
2 from sys import version_info
23
3 from decimal import Decimal
4
5 from ..metadata import metadata_to_json
4 from ..metadata import deepcopy_metadata, metadata_to_json
65 from ..utils import Fault
7 from ..utils.cmdline import get_node, get_target_nodes
8 from ..utils.dicts import value_at_key_path
6 from ..utils.cmdline import get_target_nodes
7 from ..utils.dicts import (
8 delete_key_at_path,
9 replace_key_at_path,
10 value_at_key_path,
11 )
912 from ..utils.table import ROW_SEPARATOR, render_table
10 from ..utils.text import bold, force_text, mark_for_translation as _, red
13 from ..utils.text import (
14 ansi_clean,
15 blue,
16 bold,
17 force_text,
18 green,
19 mark_for_translation as _,
20 red,
21 yellow,
22 )
1123 from ..utils.ui import io, page_lines
1224
1325
26 def _color_for_source(key, source):
27 if source.startswith("metadata_defaults:"):
28 return blue(key)
29 elif source.startswith("metadata_reactor:"):
30 return green(key)
31 elif source.startswith("group:"):
32 return yellow(key)
33 elif source.startswith("node:"):
34 return red(key)
35 else:
36 return key
37
38
39 def _colorize_path(
40 metadata,
41 path,
42 sources,
43 hide_defaults,
44 hide_reactors,
45 hide_groups,
46 hide_node,
47 ):
48 if not isinstance(value_at_key_path(metadata, path), (dict, list, tuple, set)):
49 # only last source relevant for atomic types
50 sources = [sources[-1]]
51 sources_filtered = False
52 for src in sources.copy():
53 if (
54 (src.startswith("metadata_defaults:") and hide_defaults) or
55 (src.startswith("metadata_reactor:") and hide_reactors) or
56 (src.startswith("group:") and hide_groups) or
57 (src.startswith("node:") and hide_node)
58 ):
59 sources.remove(src)
60 sources_filtered = True
61 if not sources:
62 delete_key_at_path(metadata, path)
63 return None
64 elif len(sources) == 1:
65 if sources_filtered:
66 # do not colorize if a key is really mixed-source
67 colorized_key = path[-1]
68 else:
69 colorized_key = _color_for_source(path[-1], sources[0])
70 replace_key_at_path(
71 metadata,
72 path,
73 colorized_key,
74 )
75 return colorized_key
76
77
78 def _sort_dict_colorblind(old_dict):
79 if version_info < (3, 7):
80 new_dict = OrderedDict()
81 else:
82 new_dict = {}
83
84 for key in sorted(old_dict.keys(), key=lambda k: ansi_clean(k)):
85 if isinstance(old_dict[key], dict):
86 new_dict[key] = _sort_dict_colorblind(old_dict[key])
87 else:
88 new_dict[key] = old_dict[key]
89
90 return new_dict
91
92
93 def _list_starts_with(list_a, list_b):
94 """
95 Returns True if list_a starts with list_b.
96 """
97 list_a = tuple(list_a)
98 list_b = tuple(list_b)
99 try:
100 return list_a[:len(list_b)] == list_b
101 except IndexError:
102 return False
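
The try/except is defensive only, since slicing never raises IndexError; the comparison alone gives:

    _list_starts_with(("users", "jdoe", "shell"), ["users", "jdoe"])  # True
    _list_starts_with(["users"], ["users", "jdoe"])                   # False
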
103
104
14105 def bw_metadata(repo, args):
15 if args['table']:
16 if not args['keys']:
17 io.stdout(_("{x} at least one key is required with --table").format(x=red("!!!")))
106 target_nodes = get_target_nodes(repo, args['targets'])
107 key_paths = sorted([path.strip().split("/") for path in args['keys']])
108 if len(target_nodes) > 1:
109 if not key_paths:
110 io.stdout(_("{x} at least one key is required when viewing multiple nodes").format(x=red("!!!")))
18111 exit(1)
19 target_nodes = get_target_nodes(repo, args['target'], adhoc_nodes=args['adhoc_nodes'])
20 key_paths = [path.strip().split(" ") for path in " ".join(args['keys']).split(",")]
21 table = [[bold(_("node"))] + [bold(" ".join(path)) for path in key_paths], ROW_SEPARATOR]
22 for node in target_nodes:
112 if args['blame']:
113 io.stdout(_("{x} blame information can only be shown for a single node").format(x=red("!!!")))
114 exit(1)
115
116 table = [[bold(_("node"))] + [bold("/".join(path)) for path in key_paths], ROW_SEPARATOR]
117 for node in sorted(target_nodes):
23118 values = []
24119 for key_path in key_paths:
25120 metadata = node.metadata
37132 table.append([bold(node.name)] + values)
38133 page_lines(render_table(table))
39134 else:
40 node = get_node(repo, args['target'], adhoc_nodes=args['adhoc_nodes'])
135 node = target_nodes.pop()
41136 if args['blame']:
42 key_paths = [path.strip() for path in " ".join(args['keys']).split(",")]
43137 table = [[bold(_("path")), bold(_("source"))], ROW_SEPARATOR]
44138 for path, blamed in sorted(node.metadata_blame.items()):
45 joined_path = " ".join(path)
139 joined_path = "/".join(path)
46140 for key_path in key_paths:
47141 if joined_path.startswith(key_path):
48142 table.append([joined_path, ", ".join(blamed)])
49143 break
50144 page_lines(render_table(table))
51145 else:
52 for line in metadata_to_json(
53 value_at_key_path(node.metadata, args['keys']),
54 ).splitlines():
55 io.stdout(force_text(line))
146 metadata = deepcopy_metadata(node.metadata)
147 blame = list(node.metadata_blame.items())
148 # sort descending by key path length since we will be replacing
149 # the keys and can't access paths beneath replaced keys anymore
150 blame.sort(key=lambda e: len(e[0]), reverse=True)
151
152 for path, blamed in blame:
153 if key_paths:
154 # remove all paths we did not ask to see
155 path_seen = False
156 for filtered_path in key_paths:
157 if (
158 _list_starts_with(path, filtered_path) or
159 _list_starts_with(filtered_path, path)
160 ):
161 path_seen = True
162 break
163 if not path_seen:
164 delete_key_at_path(metadata, path)
165 continue
166
167 colorized_key = _colorize_path(
168 metadata,
169 path,
170 blamed,
171 args['hide_defaults'],
172 args['hide_reactors'],
173 args['hide_groups'],
174 args['hide_node'],
175 )
176 for key_path in key_paths:
177 if colorized_key and list(path) == key_path[:len(path)]:
178 # we just replaced a key in the filtered path
179 key_path[len(path) - 1] = colorized_key
180
181 # now we need to recreate the dict, sorting the keys as if
182 # they were not colored (otherwise we'd end up sorted by
183 # color)
184 metadata_sorted = _sort_dict_colorblind(metadata)
185
186 page_lines([
187 force_text(line).replace("\\u001b", "\033")
188 for line in metadata_to_json(
189 metadata_sorted,
190 sort_keys=False,
191 ).splitlines()
192 ])
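
For reference, the path helpers imported at the top of this file operate on nested dicts keyed by path lists (assumed semantics, matching their use above):

    metadata = {"users": {"jdoe": {"shell": "/bin/zsh"}}}
    value_at_key_path(metadata, ["users", "jdoe", "shell"])        # "/bin/zsh"
    replace_key_at_path(metadata, ["users", "jdoe"], red("jdoe"))  # swap a key for its colorized form
    delete_key_at_path(metadata, ["users", "jdoe", "shell"])       # prune a filtered path
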
0 # -*- coding: utf-8 -*-
1 from __future__ import unicode_literals
2
30 from os import environ
41 from sys import exit
52
118 from ..group import GROUP_ATTR_DEFAULTS
129
1310
14 NODE_ATTRS = sorted(list(GROUP_ATTR_DEFAULTS) + ['bundles', 'groups', 'hostname'])
11 NODE_ATTRS = sorted(list(GROUP_ATTR_DEFAULTS) + ['bundles', 'file_path', 'groups', 'hostname'])
1512 NODE_ATTRS_LISTS = ('bundles', 'groups')
1613
1714
2421 inline,
2522 ):
2623 rows = [[entity_label], ROW_SEPARATOR]
27 selected_attrs = [attr.strip() for attr in selected_attrs.split(",")]
28 if selected_attrs == ['all']:
24 selected_attrs = {attr.strip() for attr in selected_attrs}
25
26 if selected_attrs == {'all'}:
2927 selected_attrs = available_attrs
28 elif 'all' in selected_attrs:
29 io.stderr(_(
30 "{x} invalid attribute list requested ('all' and extraneous): {attr}"
31 ).format(x=red("!!!"), attr=", ".join(sorted(selected_attrs))))
32 exit(1)
33
3034 for attr in selected_attrs:
3135 if attr not in available_attrs:
3236 io.stderr(_("{x} unknown attribute: {attr}").format(x=red("!!!"), attr=attr))
3337 exit(1)
3438 rows[0].append(bold(attr))
39
3540 has_list_attrs = False
36 for entity in entities:
41 for entity in sorted(entities):
3742 attr_values = [[entity.name]]
3843 for attr in selected_attrs:
3944 if attr in available_attrs_lists:
6671
6772
6873 def bw_nodes(repo, args):
69 if args['target'] is not None:
70 nodes = get_target_nodes(repo, args['target'], adhoc_nodes=args['adhoc_nodes'])
74 if args['targets']:
75 nodes = get_target_nodes(repo, args['targets'])
7176 else:
7277 nodes = repo.nodes
7378 if not args['attrs']:
74 for node in nodes:
79 for node in sorted(nodes):
7580 io.stdout(node.name)
7681 else:
7782 _attribute_table(
0 # -*- coding: utf-8 -*-
1 from __future__ import unicode_literals
2
3 from argparse import ArgumentParser, SUPPRESS
0 from argparse import ArgumentParser, RawTextHelpFormatter, SUPPRESS
41 from os import environ, getcwd
52
63 from .. import VERSION_STRING
1613 from .metadata import bw_metadata
1714 from .nodes import bw_nodes
1815 from .plot import bw_plot_group, bw_plot_node, bw_plot_node_groups
19 from .repo import bw_repo_bundle_create, bw_repo_create, bw_repo_plugin_install, \
20 bw_repo_plugin_list, bw_repo_plugin_search, bw_repo_plugin_remove, bw_repo_plugin_update
16 from .repo import bw_repo_bundle_create, bw_repo_create
2117 from .run import bw_run
2218 from .stats import bw_stats
2319 from .test import bw_test
3733 default=False,
3834 dest='add_ssh_host_keys',
3935 help=_("set StrictHostKeyChecking=no instead of yes for SSH"),
40 )
41 parser.add_argument(
42 "-A",
43 "--adhoc-nodes",
44 action='store_true',
45 default=False,
46 dest='adhoc_nodes',
47 help=_(
48 "treat unknown node names as adhoc 'virtual' nodes that receive configuration only "
49 "through groups whose member_patterns match the node name given on the command line "
50 "(which also has to be a resolvable hostname)"),
5136 )
5237 parser.add_argument(
5338 "-d",
8873
8974 # bw apply
9075 help_apply = _("Applies the configuration defined in your repository to your nodes")
91 parser_apply = subparsers.add_parser("apply", description=help_apply, help=help_apply)
76 parser_apply = subparsers.add_parser(
77 "apply",
78 description=help_apply,
79 help=help_apply,
80 formatter_class=RawTextHelpFormatter, # for HELP_get_target_nodes
81 )
9282 parser_apply.set_defaults(func=bw_apply)
9383 parser_apply.add_argument(
94 'target',
95 metavar=_("TARGETS"),
84 'targets',
85 metavar=_("TARGET"),
86 nargs='+',
9687 type=str,
9788 help=HELP_get_target_nodes,
9889 )
115106 parser_apply.add_argument(
116107 "-o",
117108 "--only",
118 default="",
109 default=[],
119110 dest='autoonly',
120 help=_(
121 "e.g. 'file:/foo,tag:foo,bundle:bar' "
122 "to skip EVERYTHING BUT all instances of file:/foo "
123 "and items with tag 'foo', "
124 "or in bundle 'bar', "
125 "or a dependency of any of these"
126 ),
111 help=_("""skip all items not matching any SELECTOR:
112
113 file:/my_path # this specific item
114 tag:my_tag # items with this tag
115 bundle:my_bundle # items in this bundle
116
117 dependencies of selected items will NOT be skipped
118 """),
127119 metavar=_("SELECTOR"),
120 nargs='+',
128121 type=str,
129122 )
130123 bw_apply_p_default = int(environ.get("BW_NODE_WORKERS", "4"))
150143 parser_apply.add_argument(
151144 "-s",
152145 "--skip",
153 default="",
146 default=[],
154147 dest='autoskip',
155 help=_(
156 "e.g. 'file:/foo,tag:foo,bundle:bar' "
157 "to skip all instances of file:/foo "
158 "and items with tag 'foo', "
159 "or in bundle 'bar'"
160 ),
148 help=_("""skip items matching any SELECTOR:
149
150 file:/my_path # this specific item
151 tag:my_tag # items with this tag
152 bundle:my_bundle # items in this bundle
153 """),
161154 metavar=_("SELECTOR"),
155 nargs='+',
162156 type=str,
163157 )
164158 parser_apply.add_argument(
208202
209203 # bw diff
210204 help_diff = _("Show differences between nodes")
211 parser_diff = subparsers.add_parser("diff", description=help_diff, help=help_diff)
205 parser_diff = subparsers.add_parser(
206 "diff",
207 description=help_diff,
208 help=help_diff,
209 formatter_class=RawTextHelpFormatter, # for HELP_get_target_nodes
210 )
212211 parser_diff.set_defaults(func=bw_diff)
213212 parser_diff.add_argument(
214213 "-b",
267266 help=_("compare metadata instead of configuration"),
268267 )
269268 parser_diff.add_argument(
270 'target',
271 metavar=_("TARGETS"),
269 'targets',
270 metavar=_("TARGET"),
271 nargs='+',
272272 type=str,
273273 help=HELP_get_target_nodes,
274274 )
278278 parser_groups = subparsers.add_parser("groups", description=help_groups, help=help_groups)
279279 parser_groups.set_defaults(func=bw_groups)
280280 parser_groups.add_argument(
281 "-a", "--attrs",
282 dest='attrs',
283 metavar=_("ATTR"),
284 nargs='+',
285 type=str,
286 help=_("show table with the given attributes for each group "
287 "(e.g. 'all', 'members', 'os', ...)"),
288 )
289 parser_groups.add_argument(
281290 "-i",
282291 "--inline",
283292 action='store_true',
287296 parser_groups.add_argument(
288297 'groups',
289298 default=None,
290 metavar=_("GROUP1,GROUP2..."),
291 nargs='?',
292 type=str,
293 help=_("show the given groups and their subgroups"),
294 )
295 parser_groups.add_argument(
296 'attrs',
297 default=None,
298 metavar=_("ATTR1,ATTR2..."),
299 nargs='?',
300 type=str,
301 help=_("show table with the given attributes for each group "
302 "(e.g. 'all', 'members', 'os', ...)"),
299 metavar=_("GROUP"),
300 nargs='*',
301 type=str,
302 help=_("show the given groups (and their subgroups, unless --attrs is used)"),
303303 )
304304
305305 # bw hash
402402
403403 # bw lock
404404 help_lock = _("Manage locks on nodes used to prevent collisions between BundleWrap users")
405 parser_lock = subparsers.add_parser("lock", description=help_lock, help=help_lock)
405 parser_lock = subparsers.add_parser(
406 "lock",
407 description=help_lock,
408 help=help_lock,
409 )
406410 parser_lock_subparsers = parser_lock.add_subparsers()
407411
408412 # bw lock add
411415 "add",
412416 description=help_lock_add,
413417 help=help_lock_add,
418 formatter_class=RawTextHelpFormatter, # for HELP_get_target_nodes
414419 )
415420 parser_lock_add.set_defaults(func=bw_lock_add)
416421 parser_lock_add.add_argument(
417 'target',
418 metavar=_("TARGETS"),
422 'targets',
423 metavar=_("TARGET"),
424 nargs='+',
419425 type=str,
420426 help=HELP_get_target_nodes,
421427 )
440446 parser_lock_add.add_argument(
441447 "-i",
442448 "--items",
443 default="*",
449 default=["*"],
444450 dest='items',
445 help=_("comma-separated list of item selectors the lock applies to "
446 "(defaults to \"*\" meaning all)"),
451 help=_("""lock only items matching any SELECTOR:
452
453 file:/my_path # this specific item
454 tag:my_tag # items with this tag
455 bundle:my_bundle # items in this bundle
456 """),
457 metavar=_("SELECTOR"),
458 nargs='+',
447459 type=str,
448460 )
449461 bw_lock_add_p_default = int(environ.get("BW_NODE_WORKERS", "4"))
463475 "remove",
464476 description=help_lock_remove,
465477 help=help_lock_remove,
478 formatter_class=RawTextHelpFormatter, # for HELP_get_target_nodes
466479 )
467480 parser_lock_remove.set_defaults(func=bw_lock_remove)
468481 parser_lock_remove.add_argument(
469 'target',
470 metavar=_("TARGETS"),
482 'targets',
483 metavar=_("TARGET"),
484 nargs='+',
471485 type=str,
472486 help=HELP_get_target_nodes,
473487 )
494508 "show",
495509 description=help_lock_show,
496510 help=help_lock_show,
511 formatter_class=RawTextHelpFormatter, # for HELP_get_target_nodes
497512 )
498513 parser_lock_show.set_defaults(func=bw_lock_show)
499514 parser_lock_show.add_argument(
500 'target',
515 'targets',
501516 metavar=_("TARGETS"),
517 nargs='+',
502518 type=str,
503519 help=HELP_get_target_nodes,
504520 )
514530 )
515531
516532 # bw metadata
517 help_metadata = ("View a JSON representation of a node's metadata")
533 help_metadata = ("View a JSON representation of a node's metadata (defaults blue, reactors green, groups yellow, node red) or a table of selected metadata keys from multiple nodes")
518534 parser_metadata = subparsers.add_parser(
519535 "metadata",
520536 description=help_metadata,
521537 help=help_metadata,
538 formatter_class=RawTextHelpFormatter,
522539 )
523540 parser_metadata.set_defaults(func=bw_metadata)
524541 parser_metadata.add_argument(
525 'target',
526 metavar=_("NODE"),
527 type=str,
528 help=_("node to print JSON-formatted metadata for"),
542 'targets',
543 metavar=_("TARGET"),
544 nargs='+',
545 type=str,
546 help=HELP_get_target_nodes,
529547 )
530548 parser_metadata.add_argument(
531 'keys',
549 "-k", "--keys",
532550 default=[],
551 dest='keys',
533552 metavar=_("KEY"),
534553 nargs='*',
535554 type=str,
536 help=_("print only partial metadata from the given space-separated key path (e.g. `bw metadata mynode users jdoe` to show `mynode.metadata['users']['jdoe']`)"),
555 help=_("show only partial metadata from the given key paths (e.g. `bw metadata mynode -k users/jdoe` to show `mynode.metadata['users']['jdoe']`)"),
537556 )
538557 parser_metadata.add_argument(
539 "--blame",
558 "-b", "--blame",
540559 action='store_true',
541560 dest='blame',
542561 help=_("show where each piece of metadata comes from"),
543562 )
544563 parser_metadata.add_argument(
545 "-t",
546 "--table",
547 action='store_true',
548 dest='table',
549 help=_(
550 "show a table of selected metadata values from multiple nodes instead; "
551 "allows for multiple comma-separated paths in KEY; "
552 "allows for node selectors in NODE (e.g. 'NODE1,NODE2,GROUP1,bundle:BUNDLE1...')"
553 ),
564 "-D", "--hide-defaults",
565 action='store_true',
566 dest='hide_defaults',
567 help=_("hide values set by defaults in metadata.py"),
568 )
569 parser_metadata.add_argument(
570 "-G", "--hide-groups",
571 action='store_true',
572 dest='hide_groups',
573 help=_("hide values set in groups.py"),
574 )
575 parser_metadata.add_argument(
576 "-N", "--hide-node",
577 action='store_true',
578 dest='hide_node',
579 help=_("hide values set in nodes.py"),
580 )
581 parser_metadata.add_argument(
582 "-R", "--hide-reactors",
583 action='store_true',
584 dest='hide_reactors',
585 help=_("hide values set by reactors in metadata.py"),
554586 )
555587
556588 # bw nodes
557589 help_nodes = _("List nodes in this repository")
558 parser_nodes = subparsers.add_parser("nodes", description=help_nodes, help=help_nodes)
590 parser_nodes = subparsers.add_parser(
591 "nodes",
592 description=help_nodes,
593 help=help_nodes,
594 formatter_class=RawTextHelpFormatter, # for HELP_get_target_nodes
595 )
559596 parser_nodes.set_defaults(func=bw_nodes)
560597 parser_nodes.add_argument(
561598 "-i",
565602 help=_("keep lists on a single line (for grep)"),
566603 )
567604 parser_nodes.add_argument(
568 'target',
569 default=None,
570 metavar=_("TARGETS"),
571 nargs='?',
605 'targets',
606 default=None,
607 metavar=_("TARGET"),
608 nargs='*',
572609 type=str,
573610 help=HELP_get_target_nodes,
574611 )
575612 parser_nodes.add_argument(
576 'attrs',
577 default=None,
578 metavar=_("ATTR1,ATTR2..."),
579 nargs='?',
613 "-a",
614 "--attrs",
615 default=None,
616 dest='attrs',
617 metavar=_("ATTR"),
618 nargs='+',
580619 type=str,
581620 help=_("show table with the given attributes for each node "
582621 "(e.g. 'all', 'groups', 'bundles', 'hostname', 'os', ...)"),
656695 dest='depends_reverse',
657696 help=_("do not show reverse dependencies ('needed_by')"),
658697 )
659 parser_plot_subparsers_node.add_argument(
660 "--no-depends-static",
661 action='store_false',
662 dest='depends_static',
663 help=_("do not show static dependencies"),
664 )
665698
666699 # bw plot groups-for-node
667700 help_plot_node_groups = _("Show where a specific node gets its groups from")
702735 parser_repo_subparsers_create = parser_repo_subparsers.add_parser("create")
703736 parser_repo_subparsers_create.set_defaults(func=bw_repo_create)
704737
705 # bw repo plugin
706 parser_repo_subparsers_plugin = parser_repo_subparsers.add_parser("plugin")
707 parser_repo_subparsers_plugin_subparsers = parser_repo_subparsers_plugin.add_subparsers()
708
709 # bw repo plugin install
710 parser_repo_subparsers_plugin_install = parser_repo_subparsers_plugin_subparsers.add_parser("install")
711 parser_repo_subparsers_plugin_install.set_defaults(func=bw_repo_plugin_install)
712 parser_repo_subparsers_plugin_install.add_argument(
713 'plugin',
714 metavar=_("PLUGIN_NAME"),
715 type=str,
716 help=_("name of plugin to install"),
717 )
718 parser_repo_subparsers_plugin_install.add_argument(
719 "-f",
720 "--force",
721 action='store_true',
722 dest='force',
723 help=_("overwrite existing files when installing"),
724 )
725
726 # bw repo plugin list
727 parser_repo_subparsers_plugin_list = parser_repo_subparsers_plugin_subparsers.add_parser("list")
728 parser_repo_subparsers_plugin_list.set_defaults(func=bw_repo_plugin_list)
729
730 # bw repo plugin remove
731 parser_repo_subparsers_plugin_remove = parser_repo_subparsers_plugin_subparsers.add_parser("remove")
732 parser_repo_subparsers_plugin_remove.set_defaults(func=bw_repo_plugin_remove)
733 parser_repo_subparsers_plugin_remove.add_argument(
734 'plugin',
735 metavar=_("PLUGIN_NAME"),
736 type=str,
737 help=_("name of plugin to remove"),
738 )
739 parser_repo_subparsers_plugin_remove.add_argument(
740 "-f",
741 "--force",
742 action='store_true',
743 dest='force',
744 help=_("remove files even if locally modified"),
745 )
746
747 # bw repo plugin search
748 parser_repo_subparsers_plugin_search = parser_repo_subparsers_plugin_subparsers.add_parser("search")
749 parser_repo_subparsers_plugin_search.set_defaults(func=bw_repo_plugin_search)
750 parser_repo_subparsers_plugin_search.add_argument(
751 'term',
752 metavar=_("SEARCH_STRING"),
753 nargs='?',
754 type=str,
755 help=_("look for this string in plugin names and descriptions"),
756 )
757
758 # bw repo plugin update
759 parser_repo_subparsers_plugin_update = parser_repo_subparsers_plugin_subparsers.add_parser("update")
760 parser_repo_subparsers_plugin_update.set_defaults(func=bw_repo_plugin_update)
761 parser_repo_subparsers_plugin_update.add_argument(
762 'plugin',
763 default=None,
764 metavar=_("PLUGIN_NAME"),
765 nargs='?',
766 type=str,
767 help=_("name of plugin to update"),
768 )
769 parser_repo_subparsers_plugin_update.add_argument(
770 "-c",
771 "--check-only",
772 action='store_true',
773 dest='check_only',
774 help=_("only show what would be updated"),
775 )
776 parser_repo_subparsers_plugin_update.add_argument(
777 "-f",
778 "--force",
779 action='store_true',
780 dest='force',
781 help=_("overwrite local modifications when updating"),
782 )
783
784738 # bw run
785739 help_run = _("Run a one-off command on a number of nodes")
786 parser_run = subparsers.add_parser("run", description=help_run, help=help_run)
740 parser_run = subparsers.add_parser(
741 "run",
742 description=help_run,
743 help=help_run,
744 formatter_class=RawTextHelpFormatter, # for HELP_get_target_nodes
745 )
787746 parser_run.set_defaults(func=bw_run)
788747 parser_run.add_argument(
789 'target',
790 metavar=_("TARGETS"),
748 'targets',
749 metavar=_("TARGET"),
750 nargs='+',
791751 type=str,
792752 help=HELP_get_target_nodes,
793753 )
853813 "change in future releases). Currently, the default is -IJKM "
854814 "if specific nodes are given and -HIJKMS if testing the "
855815 "entire repo.")
856 parser_test = subparsers.add_parser("test", description=help_test, help=help_test)
816 parser_test = subparsers.add_parser(
817 "test",
818 description=help_test,
819 help=help_test,
820 formatter_class=RawTextHelpFormatter, # for HELP_get_target_nodes
821 )
857822 parser_test.set_defaults(func=bw_test)
858823 parser_test.add_argument(
859 'target',
860 default=None,
861 metavar=_("TARGETS"),
862 nargs='?',
863 type=str,
864 help=HELP_get_target_nodes + _(" (defaults to all)"),
865 )
866 parser_test.add_argument(
867 "-c",
868 "--plugin-conflicts",
869 action='store_true',
870 dest='plugin_conflicts',
871 help=_("check for local modifications to files installed by plugins"),
824 'targets',
825 default=None,
826 metavar=_("TARGET"),
827 nargs='*',
828 type=str,
829 help=HELP_get_target_nodes + _("\n(defaults to all)"),
872830 )
873831 parser_test.add_argument(
874832 "-d",
934892 )
935893 parser_test.add_argument(
936894 "-M",
937 "--metadata-collisions",
938 action='store_true',
939 dest='metadata_collisions',
940 help=_("check for conflicting metadata keys in group metadata"),
895 "--metadata-conflicts",
896 action='store_true',
897 dest='metadata_conflicts',
898 help=_("check for conflicting metadata keys in group metadata, reactors, and defaults"),
941899 )
942900 parser_test.add_argument(
943901 "-o",
947905 help=_("check for bundles not assigned to any node"),
948906 )
949907 parser_test.add_argument(
950 "-s",
951 "--secret-rotation",
952 default=None,
953 dest='ignore_secret_identifiers',
954 help=_("ensure every string passed to repo.vault.[human_]password_for() is used at least "
955 "twice (using it only once means you're probably managing only one end of an "
956 "authentication, making it dangerous to rotate your .secrets.cfg); PATTERNS is a "
957 "comma-separated list of regex patterns for strings to ignore in this check "
958 "(just pass an empty string if you don't need to ignore anything)"),
959 metavar="PATTERNS",
960 type=str,
961 )
962 parser_test.add_argument(
963908 "-S",
964909 "--subgroup-loops",
965910 action='store_true',
969914
970915 # bw verify
971916 help_verify = _("Inspect the health or 'correctness' of a node without changing it")
972 parser_verify = subparsers.add_parser("verify", description=help_verify, help=help_verify)
917 parser_verify = subparsers.add_parser(
918 "verify",
919 description=help_verify,
920 help=help_verify,
921 formatter_class=RawTextHelpFormatter, # for HELP_get_target_nodes
922 )
973923 parser_verify.set_defaults(func=bw_verify)
974924 parser_verify.add_argument(
975 'target',
976 metavar=_("TARGETS"),
925 'targets',
926 metavar=_("TARGET"),
927 nargs='+',
977928 type=str,
978929 help=HELP_get_target_nodes,
979930 )
0 # -*- coding: utf-8 -*-
1 from __future__ import unicode_literals
2
30 from ..deps import prepare_dependencies
41 from ..utils.plot import graph_for_items, plot_group, plot_node_groups
52 from ..utils.cmdline import get_group, get_node
2522
2623
2724 def bw_plot_node(repo, args):
28 node = get_node(repo, args['node'], adhoc_nodes=args['adhoc_nodes'])
25 node = get_node(repo, args['node'])
2926 for line in graph_for_items(
3027 node.name,
3128 prepare_dependencies(node.items, node.os, node.os_version),
3229 cluster=args['cluster'],
3330 concurrency=args['depends_concurrency'],
34 static=args['depends_static'],
3531 regular=args['depends_regular'],
3632 reverse=args['depends_reverse'],
3733 auto=args['depends_auto'],
4036
4137
4238 def bw_plot_node_groups(repo, args):
43 node = get_node(repo, args['node'], adhoc_nodes=args['adhoc_nodes'])
39 node = get_node(repo, args['node'])
4440 for line in plot_node_groups(node):
4541 io.stdout(line)
0 # -*- coding: utf-8 -*-
1 from __future__ import unicode_literals
2
3 from sys import exit
4
5 from ..exceptions import NoSuchPlugin, PluginLocalConflict
6 from ..plugins import PluginManager
70 from ..repo import Repository
8 from ..utils.text import blue, bold, mark_for_translation as _, red
9 from ..utils.ui import io
101
112
123 def bw_repo_bundle_create(repo, args):
156
167 def bw_repo_create(path, args):
178 Repository.create(path)
18
19
20 def bw_repo_plugin_install(repo, args):
21 pm = PluginManager(repo.path)
22 try:
23 manifest = pm.install(args['plugin'], force=args['force'])
24 io.stdout(_("{x} Installed '{plugin}' (v{version})").format(
25 x=blue("i"),
26 plugin=args['plugin'],
27 version=manifest['version'],
28 ))
29 if 'help' in manifest:
30 io.stdout("")
31 for line in manifest['help'].split("\n"):
32 io.stdout(line)
33 except NoSuchPlugin:
34 io.stderr(_("{x} No such plugin: {plugin}").format(x=red("!!!"), plugin=args['plugin']))
35 exit(1)
36 except PluginLocalConflict as e:
37 io.stderr(_("{x} Plugin installation failed: {reason}").format(
38 reason=e.message,
39 x=red("!!!"),
40 ))
41 exit(1)
42
43
44 def bw_repo_plugin_list(repo, args):
45 pm = PluginManager(repo.path)
46 for plugin, version in pm.list():
47 io.stdout(_("{plugin} (v{version})").format(plugin=plugin, version=version))
48
49
50 def bw_repo_plugin_remove(repo, args):
51 pm = PluginManager(repo.path)
52 try:
53 pm.remove(args['plugin'], force=args['force'])
54 except NoSuchPlugin:
55 io.stdout(_("{x} Plugin '{plugin}' is not installed").format(
56 x=red("!!!"),
57 plugin=args['plugin'],
58 ))
59 exit(1)
60
61
62 def bw_repo_plugin_search(repo, args):
63 pm = PluginManager(repo.path)
64 for plugin, desc in pm.search(args['term']):
65 io.stdout(_("{plugin} {desc}").format(desc=desc, plugin=bold(plugin)))
66
67
68 def bw_repo_plugin_update(repo, args):
69 pm = PluginManager(repo.path)
70 if args['plugin']:
71 old_version, new_version = pm.update(
72 args['plugin'],
73 check_only=args['check_only'],
74 force=args['force'],
75 )
76 if old_version != new_version:
77 io.stdout(_("{plugin} {old_version} → {new_version}").format(
78 new_version=new_version,
79 old_version=old_version,
80 plugin=bold(args['plugin']),
81 ))
82 else:
83 for plugin, version in pm.list():
84 old_version, new_version = pm.update(
85 plugin,
86 check_only=args['check_only'],
87 force=args['force'],
88 )
89 if old_version != new_version:
90 io.stdout(_("{plugin} {old_version} → {new_version}").format(
91 new_version=new_version,
92 old_version=old_version,
93 plugin=bold(plugin),
94 ))
0 # -*- coding: utf-8 -*-
1 from __future__ import unicode_literals
2
30 from datetime import datetime
4 try:
5 from itertools import zip_longest
6 except ImportError: # Python 2
7 from itertools import izip_longest as zip_longest
1 from itertools import zip_longest
82 from sys import exit
93
104 from ..concurrency import WorkerPool
115109
116110 def bw_run(repo, args):
117111 errors = []
118 target_nodes = get_target_nodes(repo, args['target'], adhoc_nodes=args['adhoc_nodes'])
119 pending_nodes = target_nodes[:]
112 target_nodes = get_target_nodes(repo, args['targets'])
113 pending_nodes = target_nodes.copy()
120114 io.progress_set_total(len(pending_nodes))
121115
122116 repo.hooks.run_start(
123117 repo,
124 args['target'],
118 args['targets'],
125119 target_nodes,
126120 args['command'],
127121 )
175169
176170 repo.hooks.run_end(
177171 repo,
178 args['target'],
172 args['targets'],
179173 target_nodes,
180174 args['command'],
181175 duration=datetime.now() - start_time,
0 # -*- coding: utf-8 -*-
1 from __future__ import unicode_literals
2
30 from operator import itemgetter
41
52 from ..utils.table import ROW_SEPARATOR, render_table
107 def bw_stats(repo, args):
118 items = {}
129 metadata_defaults = set()
13 metadata_processors = set()
1410 metadata_reactors = set()
1511 for node in repo.nodes:
1612 for metadata_default_name, metadata_default in node.metadata_defaults:
1713 metadata_defaults.add(metadata_default_name)
18 # TODO remove this in 4.0
19 for metadata_processor_name, metadata_processor in node._metadata_processors[2]:
20 metadata_processors.add(metadata_processor_name)
2114 for metadata_reactor_name, metadata_reactor in node.metadata_reactors:
2215 metadata_reactors.add(metadata_reactor_name)
2316 for item in node.items:
3427 [str(len(repo.groups)), _("groups")],
3528 [str(len(repo.bundle_names)), _("bundles")],
3629 [str(len(metadata_defaults)), _("metadata defaults")],
37 [str(len(metadata_processors)), _("metadata processors")],
3830 [str(len(metadata_reactors)), _("metadata reactors")],
3931 [str(sum([len(list(node.items)) for node in repo.nodes])), _("items")],
4032 ROW_SEPARATOR,
0 # -*- coding: utf-8 -*-
1 from __future__ import unicode_literals
2
30 from copy import copy
4 from re import compile as compile_regex
51 from sys import exit
62
73 from ..deps import DummyItem
84 from ..exceptions import FaultUnavailable, ItemDependencyLoop
95 from ..itemqueue import ItemTestQueue
10 from ..metadata import check_for_unsolvable_metadata_key_conflicts, check_metadata_keys
11 from ..plugins import PluginManager
6 from ..metadata import check_for_metadata_conflicts
127 from ..repo import Repository
138 from ..utils.cmdline import count_items, get_target_nodes
149 from ..utils.plot import explain_item_dependency_loop
9388 ))
9489
9590
96 def test_metadata_collisions(node):
97 with io.job(_("{node} checking for metadata collisions").format(node=bold(node.name))):
98 check_for_unsolvable_metadata_key_conflicts(node)
99 io.stdout(_("{x} {node} has no metadata collisions").format(
100 x=green("✓"),
101 node=bold(node.name),
102 ))
103
104
105 def test_metadata_keys(node):
106 with io.job(_("{node} checking metadata keys").format(node=bold(node.name))):
107 check_metadata_keys(node)
108 io.stdout(_("{x} {node} has valid metadata keys").format(
91 def test_metadata_conflicts(node):
92 with io.job(_("{node} checking for metadata conflicts").format(node=bold(node.name))):
93 check_for_metadata_conflicts(node)
94 io.stdout(_("{x} {node} has no metadata conflicts").format(
10995 x=green("✓"),
11096 node=bold(node.name),
11197 ))
127113 ))
128114 if orphaned_bundles:
129115 exit(1)
130
131
132 def test_secret_identifiers(repo, ignore_patterns):
133 # create a new object to make sure we don't double-count any calls
134 # from previous tests
135 pristine_repo = Repository(repo.path)
136 pristine_repo.hash() # shortest way to create all configuration
137 patterns = set()
138 for raw_pattern in ignore_patterns.split(","):
139 if raw_pattern:
140 patterns.add(compile_regex(raw_pattern))
141 for identifier, call_count in pristine_repo.vault._call_log.items():
142 if call_count == 1:
143 for pattern in patterns:
144 if pattern.search(identifier):
145 break
146 else:
147 io.stderr(_(
148 "{x} identifier passed only once to repo.vault.[human_]password_for(): {i}"
149 ).format(
150 i=bold(identifier),
151 x=red("✘"),
152 ))
153 exit(1)
154 io.stdout(_(
155 "{x} all arguments to repo.vault.[human_]password_for() used at least twice"
156 ).format(x=green("✓")))
157116
158117
159118 def test_empty_groups(repo):
170129 ))
171130 if empty_groups:
172131 exit(1)
173
174
175 def test_plugin_conflicts(repo):
176 pm = PluginManager(repo.path)
177 for plugin, version in pm.list():
178 if QUIT_EVENT.is_set():
179 break
180 local_changes = pm.local_modifications(plugin)
181 if local_changes:
182 io.stderr(_("{x} Plugin '{plugin}' has local modifications:").format(
183 plugin=plugin,
184 x=red("✘"),
185 ))
186 for path, actual_checksum, should_checksum in local_changes:
187 io.stderr(_("\t{path} ({actual_checksum}) should be {should_checksum}").format(
188 actual_checksum=actual_checksum,
189 path=path,
190 should_checksum=should_checksum,
191 ))
192 exit(1)
193 else:
194 io.stdout(_("{x} Plugin '{plugin}' has no local modifications.").format(
195 plugin=plugin,
196 x=green("✓"),
197 ))
198132
199133
200134 def test_determinism_config(repo, nodes, iterations):
285219 args['determinism_metadata'] > 1 or
286220 args['hooks_node'] or
287221 args['hooks_repo'] or
288 args['ignore_secret_identifiers'] is not None or
289222 args['items'] or
290 args['metadata_keys'] or
291 args['metadata_collisions'] or
223 args['metadata_conflicts'] or
292224 args['orphaned_bundles'] or
293225 args['empty_groups'] or
294 args['plugin_conflicts'] or
295226 args['subgroup_loops']
296227 )
297 if args['target']:
298 nodes = get_target_nodes(repo, args['target'], adhoc_nodes=args['adhoc_nodes'])
228 if args['targets']:
229 nodes = get_target_nodes(repo, args['targets'])
299230 if not options_selected:
300231 args['hooks_node'] = True
301232 args['items'] = True
302 args['metadata_collisions'] = True
233 args['metadata_conflicts'] = True
303234 args['metadata_keys'] = True
304235 else:
305236 nodes = copy(list(repo.nodes))
307238 args['hooks_node'] = True
308239 args['hooks_repo'] = True
309240 args['items'] = True
310 args['metadata_collisions'] = True
241 args['metadata_conflicts'] = True
311242 args['metadata_keys'] = True
312243 args['subgroup_loops'] = True
313244
314 if args['ignore_secret_identifiers'] is not None and not QUIT_EVENT.is_set():
315 test_secret_identifiers(repo, args['ignore_secret_identifiers'])
316
317 if args['plugin_conflicts'] and not QUIT_EVENT.is_set():
318 test_plugin_conflicts(repo)
319
320245 if args['subgroup_loops'] and not QUIT_EVENT.is_set():
321246 test_subgroup_loops(repo)
322247
326251 if args['orphaned_bundles'] and not QUIT_EVENT.is_set():
327252 test_orphaned_bundles(repo)
328253
329 if args['metadata_keys'] and not QUIT_EVENT.is_set():
254 if args['metadata_conflicts'] and not QUIT_EVENT.is_set():
330255 io.progress_set_total(len(nodes))
331256 for node in nodes:
332257 if QUIT_EVENT.is_set():
333258 break
334 test_metadata_keys(node)
335 io.progress_advance()
336 io.progress_set_total(0)
337
338 if args['metadata_collisions'] and not QUIT_EVENT.is_set():
339 io.progress_set_total(len(nodes))
340 for node in nodes:
341 if QUIT_EVENT.is_set():
342 break
343 test_metadata_collisions(node)
259 test_metadata_conflicts(node)
344260 io.progress_advance()
345261 io.progress_set_total(0)
346262
0 # -*- coding: utf-8 -*-
1 from __future__ import unicode_literals
2
30 from datetime import datetime
41 from sys import exit
52
115112 def bw_verify(repo, args):
116113 errors = []
117114 node_stats = {}
118 pending_nodes = get_target_nodes(repo, args['target'], adhoc_nodes=args['adhoc_nodes'])
115 pending_nodes = get_target_nodes(repo, args['targets'])
119116 start_time = datetime.now()
120117 io.progress_set_total(count_items(pending_nodes))
121118
0 # -*- coding: utf-8 -*-
1 from __future__ import unicode_literals
2
30 from ..utils.text import mark_for_translation as _
41 from ..utils.ui import io
52
0 # -*- coding: utf-8 -*-
1 from __future__ import unicode_literals
2
30 from concurrent.futures import ThreadPoolExecutor, wait, FIRST_COMPLETED
41 from datetime import datetime
52 from random import randint
129 JOIN_TIMEOUT = 5 # seconds
1310
1411
15 class WorkerPool(object):
12 class WorkerPool:
1613 """
1714 Manages a bunch of worker threads.
1815 """
4845 io.debug(_("worker pool {pool} waiting for next task to complete").format(
4946 pool=self.pool_id,
5047 ))
51 while True:
52 # we must use a timeout here to allow Python <3.3 to call
53 # its SIGINT handler
54 # see also http://stackoverflow.com/q/25676835
55 completed, pending = wait(
56 self.pending_futures.keys(),
57 return_when=FIRST_COMPLETED,
58 timeout=0.1,
59 )
60 if completed:
61 break
48 completed, pending = wait(
49 self.pending_futures.keys(),
50 return_when=FIRST_COMPLETED,
51 )
6252 future = completed.pop()
6353
6454 start_time = self.pending_futures[future]['start_time']
7868 task=task_id,
7969 worker=worker_id,
8070 ))
81 if not hasattr(exception, '__traceback__'): # Python 2
82 exception.__traceback__ = future.exception_info()[1]
8371 exception.__task_id = task_id
8472 raise exception
8573 else:
0 # -*- coding: utf-8 -*-
1 from __future__ import unicode_literals
2
30 from .exceptions import BundleError, ItemDependencyError, NoSuchItem
41 from .items import Item
52 from .items.actions import Action
74 from .utils.ui import io
85
96
10 class DummyItem(object):
7 class DummyItem:
118 bundle = None
129 triggered = False
1310
140137 """
141138 item._flattened_deps = set(item._deps)
142139
143 for dep in item._deps:
140 for dep in item._deps.copy():
144141 try:
145142 dep_item = items[dep]
146143 except KeyError:
147 raise ItemDependencyError(_(
148 "'{item}' in bundle '{bundle}' has a dependency (needs) "
149 "on '{dep}', which doesn't exist"
150 ).format(
151 item=item.id,
152 bundle=item.bundle.name,
153 dep=dep,
154 ))
144 if dep.startswith("tag:"):
145 # sometimes it is useful to be able to depend on a tag
146 # without having to make sure it actually exists
147 item._deps.remove(dep)
148 continue
149 else:
150 raise ItemDependencyError(_(
151 "'{item}' in bundle '{bundle}' has a dependency (needs) "
152 "on '{dep}', which doesn't exist"
153 ).format(
154 item=item.id,
155 bundle=item.bundle.name,
156 dep=dep,
157 ))
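
This makes tag-only dependencies legal even when no item on the node carries the tag, which is handy for optional bundles. A sketch of a bundle relying on it (item and tag names hypothetical):

    svc_systemd = {
        "myapp.service": {
            "needs": [
                "tag:certificates",  # now fine even if nothing on this node has this tag
            ],
        },
    }
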
155158 # Don't recurse if we have already resolved nested dependencies
156159 # for this item. Also serves as a guard against infinite
157160 # recursion when there are loops.
0 # -*- coding: utf-8 -*-
1 from __future__ import unicode_literals
2
3 from sys import version_info
4
5
6 class UnicodeException(Exception):
7 def __init__(self, msg=""):
8 if version_info >= (3, 0):
9 super(UnicodeException, self).__init__(msg)
10 else:
11 super(UnicodeException, self).__init__(msg.encode('utf-8'))
12
13
14 class ActionFailure(UnicodeException):
0 class ActionFailure(Exception):
151 """
162 Raised when an action fails to meet the expected rcode/output.
173 """
2713 self.obj = obj
2814
2915
30 class FaultUnavailable(UnicodeException):
16 class FaultUnavailable(Exception):
3117 """
3218 Raised when a Fault object cannot be resolved.
3319 """
3420 pass
3521
3622
37 class GracefulApplyException(UnicodeException):
23 class GracefulApplyException(Exception):
3824 """
3925 Raised when a problem has been encountered in `bw apply`, but a more
4026 verbose error has already been printed.
4228 pass
4329
4430
45 class NoSuchBundle(UnicodeException):
31 class NoSuchBundle(Exception):
4632 """
4733 Raised when a bundle of unknown name is requested.
4834 """
4935 pass
5036
5137
52 class NoSuchGroup(UnicodeException):
38 class NoSuchGroup(Exception):
5339 """
5440 Raised when a group of unknown name is requested.
5541 """
5642 pass
5743
5844
59 class NoSuchItem(UnicodeException):
45 class NoSuchItem(Exception):
6046 """
6147 Raised when an item of unknown name is requested.
6248 """
6349 pass
6450
6551
66 class NoSuchNode(UnicodeException):
52 class NoSuchNode(Exception):
6753 """
6854 Raised when a node of unknown name is requested.
6955 """
7056 pass
7157
7258
73 class NoSuchPlugin(UnicodeException):
74 """
75 Raised when a plugin of unknown name is requested.
76 """
77 pass
78
79
80 class RemoteException(UnicodeException):
59 class RemoteException(Exception):
8160 """
8261 Raised when a shell command on a node fails.
8362 """
8463 pass
8564
8665
87 class RepositoryError(UnicodeException):
66 class RepositoryError(Exception):
8867 """
8968 Indicates that something is wrong with the current repository.
9069 """
134113 pass
135114
136115
137 class PluginError(RepositoryError):
138 """
139 Indicates an error related to a plugin.
140 """
141 pass
142
143
144 class PluginLocalConflict(PluginError):
145 """
146 Raised when a plugin tries to overwrite locally-modified files.
147 """
148 pass
149
150
151 class SkipNode(UnicodeException):
116 class SkipNode(Exception):
152117 """
153118 Can be raised by hooks to skip a node.
154119 """
162127 pass
163128
164129
165 class UsageException(UnicodeException):
130 class UsageException(Exception):
166131 """
167132 Raised when command line options don't make sense.
168133 """
0 # -*- coding: utf-8 -*-
1 from __future__ import unicode_literals
2
0 from os import mkdir
1 from os.path import exists, join
32 import re
43
4 from tomlkit import dumps as toml_dump, parse as toml_parse
5
56 from .exceptions import NoSuchGroup, NoSuchNode, RepositoryError
6 from .utils import cached_property, names
7 from .utils.dicts import hash_statedict
8 from .utils.text import mark_for_translation as _, validate_name
7 from .utils import cached_property, error_context, get_file_contents, names
8 from .utils.dicts import (
9 dict_to_toml,
10 hash_statedict,
11 set_key_at_path,
12 validate_dict,
13 COLLECTION_OF_STRINGS,
14 TUPLE_OF_INTS,
15 )
16 from .utils.text import mark_for_translation as _, toml_clean, validate_name
917
1018
1119 GROUP_ATTR_DEFAULTS = {
1321 'cmd_wrapper_outer': "sudo sh -c {}",
1422 'dummy': False,
1523 'kubectl_context': None,
24 'locking_node': None,
1625 'os': 'linux',
1726 # Setting os_version to 0 by default will probably yield fewer
1827 # surprises than setting it to max_int. Users will probably
3241 'use_shadow_passwords': True,
3342 }
3443
44 GROUP_ATTR_TYPES = {
45 'bundles': COLLECTION_OF_STRINGS,
46 'cmd_wrapper_inner': str,
47 'cmd_wrapper_outer': str,
48 'dummy': bool,
49 'file_path': str,
50 'kubectl_context': (str, type(None)),
51 'locking_node': (str, type(None)),
52 'member_patterns': COLLECTION_OF_STRINGS,
53 'members': COLLECTION_OF_STRINGS,
54 'metadata': dict,
55 'os': str,
56 'os_version': TUPLE_OF_INTS,
57 'subgroups': COLLECTION_OF_STRINGS,
58 'subgroup_patterns': COLLECTION_OF_STRINGS,
59 'use_shadow_passwords': bool,
60 }
61
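For illustration, a hypothetical groups.py entry that satisfies these type constraints (the attribute names are the real ones from GROUP_ATTR_TYPES above; the group itself is made up):

    groups = {
        'webservers': {
            'members': {"web1", "web2"},   # COLLECTION_OF_STRINGS
            'os_version': (10, 4),         # TUPLE_OF_INTS
            'kubectl_context': None,       # str or None
        },
    }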
3562
3663 def _build_error_chain(loop_node, last_node, nodes_in_between):
3764 """
5380 return error_chain
5481
5582
56 class Group(object):
83 class Group:
5784 """
5885 A group of nodes.
5986 """
60 def __init__(self, group_name, infodict=None):
61 if infodict is None:
62 infodict = {}
87 def __init__(self, group_name, attributes=None):
88 if attributes is None:
89 attributes = {}
6390
6491 if not validate_name(group_name):
6592 raise RepositoryError(_("'{}' is not a valid group name.").format(group_name))
6693
94 with error_context(group_name=group_name):
95 validate_dict(attributes, GROUP_ATTR_TYPES)
96
97 self._attributes = attributes
98 self._immediate_subgroup_patterns = {
99 re.compile(pattern) for pattern in
100 set(attributes.get('subgroup_patterns', set()))
101 }
102 self._member_patterns = {
103 re.compile(pattern) for pattern in
104 set(attributes.get('member_patterns', set()))
105 }
67106 self.name = group_name
68 self.bundle_names = infodict.get('bundles', [])
69 self.immediate_subgroup_names = infodict.get('subgroups', [])
70 self.immediate_subgroup_patterns = infodict.get('subgroup_patterns', [])
71 self.members_add = infodict.get('members_add', None)
72 self.members_remove = infodict.get('members_remove', None)
73 self.metadata = infodict.get('metadata', {})
74 self.node_patterns = infodict.get('member_patterns', [])
75 self.static_member_names = infodict.get('members', [])
107 self.file_path = attributes.get('file_path')
76108
77109 for attr in GROUP_ATTR_DEFAULTS:
78110 # defaults are applied in node.py
79 setattr(self, attr, infodict.get(attr))
111 setattr(self, attr, attributes.get(attr))
80112
81113 def __lt__(self, other):
82114 return self.name < other.name
113145 yield node
114146
115147 @cached_property
116 def _static_nodes(self):
117 result = set()
118 result.update(self._nodes_from_members)
119 result.update(self._nodes_from_patterns)
120 return result
121
122 @property
123 def _subgroup_names_from_patterns(self):
124 for pattern in self.immediate_subgroup_patterns:
125 compiled_pattern = re.compile(pattern)
126 for group in self.repo.groups:
127 if compiled_pattern.search(group.name) is not None and group != self:
128 yield group.name
129
130 @property
131148 def _nodes_from_members(self):
132 for node_name in self.static_member_names:
149 for node_name in self._attributes.get('members', set()):
133150 try:
134151 yield self.repo.get_node(node_name)
135152 except NoSuchNode:
142159 ))
143160
144161 @property
145 def _nodes_from_patterns(self):
146 for pattern in self.node_patterns:
147 compiled_pattern = re.compile(pattern)
148 for node in self.repo.nodes:
149 if not compiled_pattern.search(node.name) is None:
150 yield node
162 def _subgroup_names_from_patterns(self):
163 for pattern in self._immediate_subgroup_patterns:
164 for group in self.repo.groups:
165 if pattern.search(group.name) is not None and group != self:
166 yield group.name
151167
152168 def _check_subgroup_names(self, visited_names):
153169 """
154170 Recursively finds subgroups and checks for loops.
155171 """
156172 for name in set(
157 list(self.immediate_subgroup_names) +
173 list(self._attributes.get('subgroups', set())) +
158174 list(self._subgroup_names_from_patterns)
159175 ):
160176 if name not in visited_names:
209225 yield self.repo.get_group(group_name)
210226
211227 @cached_property
228 def toml(self):
229 if not self.file_path or not self.file_path.endswith(".toml"):
230 raise ValueError(_("group {} not in TOML format").format(self.name))
231 return toml_parse(get_file_contents(self.file_path))
232
233 def toml_save(self):
234 try:
235 toml_doc = self.toml
236 except ValueError:
237 attributes = self._attributes.copy()
238 attributes.pop('file_path', None)
239 toml_doc = dict_to_toml(attributes)
240 self.file_path = join(self.repo.path, "groups", self.name + ".toml")
241 if not exists(join(self.repo.path, "groups")):
242 mkdir(join(self.repo.path, "groups"))
243 with open(self.file_path, 'w') as f:
244 f.write(toml_clean(toml_dump(toml_doc)))
245
246 def toml_set(self, path, value):
247 if not isinstance(path, tuple):
248 path = path.split("/")
249 set_key_at_path(self.toml, path, value)
250
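Together, toml, toml_set and toml_save allow scripted edits to TOML groups. A minimal sketch, assuming a group loaded from groups/webservers.toml (group name and key path are hypothetical):

    group = repo.get_group("webservers")
    group.toml_set("metadata/nginx/worker_processes", 4)  # edits the parsed TOML document
    group.toml_save()                                     # writes it back to disk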
251 @cached_property
212252 def immediate_subgroups(self):
213253 """
214254 Iterator over all immediate subgroups as group objects.
215255 """
216256 for group_name in set(
217 list(self.immediate_subgroup_names) +
257 list(self._attributes.get('subgroups', set())) +
218258 list(self._subgroup_names_from_patterns)
219259 ):
220260 try:
0 # -*- coding: utf-8 -*-
1 from __future__ import unicode_literals
2
30 from .deps import (
41 DummyItem,
52 find_item,
1310 from .utils.ui import io
1411
1512
16 class BaseQueue(object):
13 class BaseQueue:
1714 def __init__(self, items, node_os, node_os_version):
1815 self.items_with_deps = prepare_dependencies(items, node_os, node_os_version)
1916 self.items_without_deps = []
0 # -*- coding: utf-8 -*-
10 """
21 Note that modules in this package have to use absolute imports because
32 Repository.item_classes loads them as files.
43 """
5 from __future__ import unicode_literals
64 from copy import copy
75 from datetime import datetime
86 from inspect import cleandoc
1513 from bundlewrap.utils.text import force_text, mark_for_translation as _
1614 from bundlewrap.utils.text import blue, bold, italic, wrap_question
1715 from bundlewrap.utils.ui import io
16 from bundlewrap.operations import run_local
1817
1918
2019 BUILTIN_ITEM_ATTRIBUTES = {
4948 return result
5049
5150
52 class ItemStatus(object):
51 class ItemStatus:
5352 """
5453 Holds information on a particular Item such as whether it needs
5554 fixing and what's broken.
8988 return copy
9089
9190
92 class Item(object):
91 class Item:
9392 """
9493 A single piece of configuration (e.g. a file, a package, a service).
9594 """
96 BLOCK_CONCURRENT = []
9795 BUNDLE_ATTRIBUTE_NAME = None
9896 ITEM_ATTRIBUTES = {}
9997 ITEM_TYPE_NAME = None
150148 self.name = name
151149 self.node = bundle.node
152150 self.when_creating = {}
151 self._command_results = []
153152 self._faults_missing_for_attributes = set()
154153 self._precedes_items = []
155154
349348 skipped based on the given set of locks.
350349 """
351350 for lock in mine:
352 for selector in lock['items']:
353 if self.covered_by_autoskip_selector(selector):
354 io.debug(_("{item} on {node} whitelisted by lock {lock}").format(
355 item=self.id,
356 lock=lock['id'],
357 node=self.node.name,
358 ))
359 return False
351 if self.covered_by_autoskip_selector(lock['items']):
352 io.debug(_("{item} on {node} whitelisted by lock {lock}").format(
353 item=self.id,
354 lock=lock['id'],
355 node=self.node.name,
356 ))
357 return False
360358 for lock in others:
361 for selector in lock['items']:
362 if self.covered_by_autoskip_selector(selector):
363 io.debug(_("{item} on {node} blacklisted by lock {lock}").format(
364 item=self.id,
365 lock=lock['id'],
366 node=self.node.name,
367 ))
368 return True
359 if self.covered_by_autoskip_selector(lock['items']):
360 io.debug(_("{item} on {node} blacklisted by lock {lock}").format(
361 item=self.id,
362 lock=lock['id'],
363 node=self.node.name,
364 ))
365 return True
369366 return False
370367
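For reference, a sketch of the soft lock data this method consumes; 'items' is now a list of selector strings rather than one comma-separated string (values hypothetical):

    lock = {
        'id': "ABCD1234",
        'items': ["tag:nginx", "bundle:webserver"],
    }
    # items matched by covered_by_autoskip_selector(lock['items']) are
    # whitelisted for our own locks and blacklisted for other people's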
371368 def _test(self):
424421 type=cls.ITEM_TYPE_NAME,
425422 ))
426423
424 @classmethod
427425 def _validate_required_attributes(cls, bundle, item_id, attributes):
428426 missing = []
429427 for attrname in cls.REQUIRED_ATTRIBUTES:
440438
441439 def apply(
442440 self,
443 autoskip_selector="",
444 autoonly_selector="",
441 autoskip_selector=[],
442 autoonly_selector=[],
445443 my_soft_locks=(),
446444 other_peoples_soft_locks=(),
447445 interactive=False,
616614 )
617615 return (status_code, details)
618616
617 def run_local(self, command, **kwargs):
618 result = run_local(command, **kwargs)
619 self._command_results.append({
620 'command': command,
621 'result': result,
622 })
623 return result
624
625 def run(self, command, **kwargs):
626 result = self.node.run(command, **kwargs)
627 self._command_results.append({
628 'command': command,
629 'result': result,
630 })
631 return result
632
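These wrappers are the hook behind the new failure output: item implementations call self.run() or self.run_local() instead of going through the node directly, so every command and its result land in _command_results and can be shown when the item fails. A sketch of the pattern used throughout the item types below (the command itself is hypothetical):

    def fix(self, status):
        # recorded in self._command_results and printed if this item fails
        self.run("systemctl restart {}".format(quote(self.name)), may_fail=True)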
619633 def ask(self, status_should, status_actual, relevant_keys):
620634 """
621635 Returns a string asking the user if this item should be
641655 True if this item should be skipped based on the given
642656 selectors (e.g. ["tag:foo", "bundle:bar"]).
643657 """
644 components = [c.strip() for c in autoskip_selector.split(",")]
658 components = [c.strip() for c in autoskip_selector]
645659 if (
646660 "*" in components or
647661 self.id in components or
661675 """
662676 if not autoonly_selector:
663677 return True
664 components = [c.strip() for c in autoonly_selector.split(",")]
678 components = [c.strip() for c in autoonly_selector]
665679 if (
666680 self.id in components or
667681 "bundle:{}".format(self.bundle.name) in components or
0 # -*- coding: utf-8 -*-
1 from __future__ import unicode_literals
2
30 from datetime import datetime
41
52 from bundlewrap.exceptions import ActionFailure, BundleError
182179 item=self.id,
183180 node=bold(self.node.name),
184181 )):
185 result = self.bundle.node.run(
182 result = super().run(
186183 self.attributes['command'],
187184 data_stdin=data_stdin,
188185 may_fail=True,
0 # -*- coding: utf-8 -*-
1 from __future__ import unicode_literals
2
30 from collections import defaultdict
41 from os.path import normpath
5 from pipes import quote
2 from shlex import quote
63
74 from bundlewrap.exceptions import BundleError
85 from bundlewrap.items import Item
9087 return
9188
9289 for path in status.sdict.get('paths_to_purge', []):
93 self.node.run("rm -rf -- {}".format(quote(path)))
90 self.run("rm -rf -- {}".format(quote(path)))
9491
9592 for fix_type in ('mode', 'owner', 'group'):
9693 if fix_type in status.keys_to_fix:
104101 chmod_command = "chmod {} {}"
105102 else:
106103 chmod_command = "chmod {} -- {}"
107 self.node.run(chmod_command.format(
104 self.run(chmod_command.format(
108105 self.attributes['mode'],
109106 quote(self.name),
110107 ))
129126 # one of the two special bits to be set.
130127 if status.sdict is not None and int(status.sdict['mode'], 8) & 0o6000:
131128 if not int(self.attributes['mode'], 8) & 0o4000:
132 self.node.run("chmod u-s {}".format(quote(self.name)))
129 self.run("chmod u-s {}".format(quote(self.name)))
133130 if not int(self.attributes['mode'], 8) & 0o2000:
134 self.node.run("chmod g-s {}".format(quote(self.name)))
131 self.run("chmod g-s {}".format(quote(self.name)))
135132
136133 def _fix_owner(self, status):
137134 group = self.attributes['group'] or ""
141138 command = "chown {}{} {}"
142139 else:
143140 command = "chown {}{} -- {}"
144 self.node.run(command.format(
141 self.run(command.format(
145142 quote(self.attributes['owner'] or ""),
146143 group,
147144 quote(self.name),
149146 _fix_group = _fix_owner
150147
151148 def _fix_type(self, status):
152 self.node.run("rm -rf -- {}".format(quote(self.name)))
153 self.node.run("mkdir -p -- {}".format(quote(self.name)))
149 self.run("rm -rf -- {}".format(quote(self.name)))
150 self.run("mkdir -p -- {}".format(quote(self.name)))
154151 if self.attributes['mode']:
155152 self._fix_mode(status)
156153 if self.attributes['owner'] or self.attributes['group']:
157154 self._fix_owner(status)
158155
159156 def _get_paths_to_purge(self):
160 result = self.node.run("find {} -maxdepth 1 -print0".format(quote(self.name)))
157 result = self.run("find {} -maxdepth 1 -print0".format(quote(self.name)))
161158 for line in result.stdout.split(b"\0"):
162159 line = line.decode('utf-8')
163160 for item_type in ('directory', 'file', 'symlink'):
0 # -*- coding: utf-8 -*-
1 from __future__ import unicode_literals
2
30 from base64 import b64decode
41 from collections import defaultdict
52 from contextlib import contextmanager
63 from datetime import datetime
74 from os.path import basename, dirname, exists, join, normpath
8 from pipes import quote
5 from shlex import quote
96 from subprocess import call
107 from sys import exc_info
118 from traceback import format_exception
257254 command = "chmod {} {}"
258255 else:
259256 command = "chmod {} -- {}"
260 self.node.run(command.format(
257 self.run(command.format(
261258 self.attributes['mode'],
262259 quote(self.name),
263260 ))
270267 command = "chown {}{} {}"
271268 else:
272269 command = "chown {}{} -- {}"
273 self.node.run(command.format(
270 self.run(command.format(
274271 quote(self.attributes['owner'] or ""),
275272 group,
276273 quote(self.name),
279276
280277 def _fix_type(self, status):
281278 if status.sdict:
282 self.node.run("rm -rf -- {}".format(quote(self.name)))
279 self.run("rm -rf -- {}".format(quote(self.name)))
283280 if not status.must_be_deleted:
284 self.node.run("mkdir -p -- {}".format(quote(dirname(self.name))))
281 self.run("mkdir -p -- {}".format(quote(dirname(self.name))))
285282 self._fix_content_hash(status)
286283
287284 def get_auto_deps(self, items):
0 from atexit import register as at_exit
1 from os import remove, setpgrp
2 from os.path import isfile, join
3 from shlex import quote
4 from shutil import rmtree
5 from subprocess import PIPE, Popen
6 from tempfile import mkdtemp, NamedTemporaryFile
7
8 from bundlewrap.exceptions import BundleError, RepositoryError
9 from bundlewrap.items import Item
10 from bundlewrap.utils import cached_property
11 from bundlewrap.utils.text import is_subdirectory, mark_for_translation as _, randstr
12 from bundlewrap.utils.ui import io
13
14
15 REPO_MAP_FILENAME = "git_deploy_repos"
16 REMOTE_STATE_FILENAME = ".bundlewrap_git_deploy"
17
18
19 def is_ref(rev):
20 """
21 Braindead check to see if our rev is a branch or tag name. False
22 negatives are OK since this is only used for optimization.
23 """
24 for char in rev:
25 if char not in "0123456789abcdef":
26 return True
27 return False
28
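In practice (examples only, not part of the module):

    is_ref("master")   # True: "m" is not a hex digit
    is_ref("4f9a21c")  # False: all hex, could be a commit hash
    is_ref("v4.0.0")   # True: tags with "v" or "." are caught as well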
29
30 def clone_to_dir(remote_url, rev):
31 """
32 Clones the given URL to a temporary directory, using a shallow clone
33 if the given revision is definitely not a commit hash.
34
35 Returns the path to the directory.
36 """
37 tmpdir = mkdtemp()
38 if is_ref(rev):
39 git_cmdline = ["clone", "--bare", "--depth", "1", "--no-single-branch", remote_url, "."]
40 else:
41 git_cmdline = ["clone", "--bare", remote_url, "."]
42 git_command(git_cmdline, tmpdir)
43 return tmpdir
44
45
46 def get_local_repo_path(bw_repo_path, repo_name):
47 """
48 From the given BundleWrap repo, get the filesystem path to the git
49 repo associated with the given internal repo name.
50 """
51 repo_map_path = join(bw_repo_path, REPO_MAP_FILENAME)
52 if not isfile(repo_map_path):
53 io.stderr(_("missing repo map for git_deploy at {}").format(repo_map_path))
54 io.stderr(_("you must create this file with the following format:"))
55 io.stderr(_(" <value of repo attribute on git_deploy item>: "
56 "<absolute path to local git repo>"))
57 io.stderr(_("since the path is local, you should also add the "
58 "{} file to your gitignore").format(REPO_MAP_FILENAME))
59 raise RepositoryError(_("missing repo map for git_deploy"))
60
61 with open(join(bw_repo_path, REPO_MAP_FILENAME)) as f:
62 repo_map = f.readlines()
63
64 for line in repo_map:
65 if not line.strip() or line.startswith("#"):
66 continue
67 try:
68 repo, path = line.split(":", 1)
69 except ValueError:
70 raise RepositoryError(_("unable to parse line from {path}: '{line}'").format(
71 line=line,
72 path=repo_map_path,
73 ))
74 if repo_name == repo:
75 return path.strip()
76
77 raise RepositoryError(_("no path found for repo '{repo}' in {path}").format(
78 path=repo_map_path,
79 repo=repo_name,
80 ))
81
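A matching git_deploy_repos file might look like this (names and paths hypothetical; remember to add the file to your gitignore as noted above):

    # <repo attribute>: <absolute path to local git repo>
    myapp: /home/alice/repos/myapp
    website: /home/alice/repos/website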
82
83 def git_command(cmdline, repo_dir):
84 """
85 Runs the given git command line in the given directory.
86
87 Returns stdout of the command.
88 """
89 cmdline = ["git"] + cmdline
90 io.debug(_("running '{}' in {}").format(
91 " ".join(cmdline),
92 repo_dir,
93 ))
94 git_process = Popen(
95 cmdline,
96 cwd=repo_dir,
97 preexec_fn=setpgrp,
98 stderr=PIPE,
99 stdout=PIPE,
100 )
101 stdout, stderr = git_process.communicate()
102 # FIXME integrate this into Item._command_results
103 if git_process.returncode != 0:
104 io.stderr(_("failed command: {}").format(" ".join(cmdline)))
105 io.stderr(_("stdout:\n{}").format(stdout))
106 io.stderr(_("stderr:\n{}").format(stderr))
107 raise RuntimeError(_("`git {command}` failed in {dir}").format(
108 command=cmdline[1],
109 dir=repo_dir,
110 ))
111 return stdout.decode('utf-8').strip()
112
113
114 class GitDeploy(Item):
115 """
116 Facilitates deployment of a given rev from a local git repo to a
117 node.
118 """
119 BUNDLE_ATTRIBUTE_NAME = "git_deploy"
120 ITEM_ATTRIBUTES = {
121 'repo': None,
122 'rev': None,
123 'use_xattrs': False,
124 }
125 ITEM_TYPE_NAME = "git_deploy"
126 REQUIRED_ATTRIBUTES = ['repo', 'rev']
127
128 def __repr__(self):
129 return "<GitDeploy path:{} repo:{} rev:{}>".format(
130 self.name,
131 self.attributes['repo'],
132 self.attributes['rev'],
133 )
134
135 @cached_property
136 def _expanded_rev(self):
137 git_cmdline = ["rev-parse", self.attributes['rev']]
138 return git_command(
139 git_cmdline,
140 self._repo_dir,
141 )
142
143 @cached_property
144 def _repo_dir(self):
145 if "://" in self.attributes['repo']:
146 repo_dir = clone_to_dir(self.attributes['repo'], self.attributes['rev'])
147 io.debug(_("registering {} for deletion on exit").format(repo_dir))
148 at_exit(rmtree, repo_dir)
149 else:
150 repo_dir = get_local_repo_path(self.node.repo.path, self.attributes['repo'])
151 return repo_dir
152
153 def cdict(self):
154 return {'rev': self._expanded_rev}
155
156 def get_auto_deps(self, items):
157 deps = set()
158 for item in items:
159 if item == self:
160 continue
161 if ((
162 item.ITEM_TYPE_NAME == "file" and
163 is_subdirectory(item.name, self.name)
164 ) or (
165 item.ITEM_TYPE_NAME in ("file", "symlink") and
166 item.name == self.name
167 )):
168 raise BundleError(_(
169 "{item1} (from bundle '{bundle1}') blocking path to "
170 "{item2} (from bundle '{bundle2}')"
171 ).format(
172 item1=item.id,
173 bundle1=item.bundle.name,
174 item2=self.id,
175 bundle2=self.bundle.name,
176 ))
177 if (
178 item.ITEM_TYPE_NAME == "directory" and
179 item.name == self.name
180 ):
181 if item.attributes['purge']:
182 raise BundleError(_(
183 "cannot git_deploy into purged directory {}"
184 ).format(item.name))
185 else:
186 deps.add(item.id)
187 return deps
188
189 def fix(self, status):
190 archive_local = NamedTemporaryFile(delete=False)
191 try:
192 archive_local.close()
193 git_command(
194 ["archive", "-o", archive_local.name, self._expanded_rev],
195 self._repo_dir,
196 )
197 temp_filename = ".bundlewrap_tmp_git_deploy_" + randstr()
198
199 try:
200 self.node.upload(
201 archive_local.name,
202 temp_filename,
203 )
204 self.run("find {} -mindepth 1 -delete".format(quote(self.name)))
205 self.run("tar -xf {} -C {}".format(temp_filename, quote(self.name)))
206 if self.attributes['use_xattrs']:
207 self.run("attr -q -s bw_git_deploy_rev -V {} {}".format(
208 self._expanded_rev,
209 quote(self.name),
210 ))
211 else:
212 self.run("echo {} > {}".format(
213 self._expanded_rev,
214 quote(join(self.name, REMOTE_STATE_FILENAME)),
215 ))
216 self.run("chmod 400 {}".format(
217 quote(join(self.name, REMOTE_STATE_FILENAME)),
218 ))
219 finally:
220 self.run("rm -f {}".format(temp_filename))
221 finally:
222 remove(archive_local.name)
223
224 def sdict(self):
225 if self.attributes['use_xattrs']:
226 status_result = self.run(
227 "attr -q -g bw_git_deploy_rev {}".format(quote(self.name)),
228 may_fail=True,
229 )
230 else:
231 status_result = self.run(
232 "cat {}".format(quote(join(self.name, REMOTE_STATE_FILENAME))),
233 may_fail=True,
234 )
235 if status_result.return_code != 0:
236 return None
237 else:
238 return {'rev': status_result.stdout.decode('utf-8').strip()}
239
240 # FIXME get_auto_deps for dir and ensure dir does not use purge
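To round the item off, a hypothetical bundle snippet using it (attribute names as defined in ITEM_ATTRIBUTES above):

    git_deploy = {
        "/opt/myapp": {
            'repo': "myapp",      # key from git_deploy_repos, or a URL
            'rev': "master",      # branch, tag, or commit hash
            'use_xattrs': False,  # track state in a file instead of xattrs
        },
    }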
0 # -*- coding: utf-8 -*-
1 from __future__ import unicode_literals
2
30 from bundlewrap.exceptions import BundleError
41 from bundlewrap.items import BUILTIN_ITEM_ATTRIBUTES, Item
52 from bundlewrap.items.users import _USERNAME_VALID_CHARACTERS
6057 gid=self.attributes['gid'],
6158 groupname=self.name,
6259 )
63 self.node.run(command, may_fail=True)
60 self.run(command, may_fail=True)
6461 elif status.must_be_deleted:
65 self.node.run("groupdel {}".format(self.name), may_fail=True)
62 self.run("groupdel {}".format(self.name), may_fail=True)
6663 else:
67 self.node.run(
64 self.run(
6865 "groupmod -g {gid} {groupname}".format(
6966 gid=self.attributes['gid'],
7067 groupname=self.name,
7471
7572 def sdict(self):
7673 # verify content of /etc/group
77 grep_result = self.node.run(
74 grep_result = self.run(
7875 "grep -e '^{}:' /etc/group".format(self.name),
7976 may_fail=True,
8077 )
0 # -*- coding: utf-8 -*-
1 from __future__ import unicode_literals
2
30 from abc import ABCMeta
41 import json
52 from os.path import exists, join
1310 from bundlewrap.utils.dicts import merge_dict, reduce_dict
1411 from bundlewrap.utils.ui import io
1512 from bundlewrap.utils.text import force_text, mark_for_translation as _
16 from six import add_metaclass
1713 import yaml
1814
1915
2319 io.debug(run_result.stderr.decode('utf-8'))
2420
2521
26 @add_metaclass(ABCMeta)
27 class KubernetesItem(Item):
22 class KubernetesItem(Item, metaclass=ABCMeta):
2823 """
2924 A generic Kubernetes item.
3025 """
3732 'context': None,
3833 }
3934 KIND = None
40 KUBERNETES_APIVERSION = "v1"
4135 NAME_REGEX = r"^[a-z0-9-\.]{1,253}/[a-z0-9-\.]{1,253}$"
4236 NAME_REGEX_COMPILED = re.compile(NAME_REGEX)
4337
6559
6660 def fix(self, status):
6761 if status.must_be_deleted:
68 result = run_local(self._kubectl + ["delete", self.KIND, self.resource_name])
62 result = self.run_local(self._kubectl + ["delete", self.KIND, self.resource_name])
6963 log_error(result)
7064 else:
71 result = run_local(
65 result = self.run_local(
7266 self._kubectl + ["apply", "-f", "-"],
7367 data_stdin=self.manifest.encode('utf-8'),
7468 )
130124
131125 merged_manifest = merge_dict(
132126 {
133 'apiVersion': self.KUBERNETES_APIVERSION,
134127 'kind': self.KIND,
135128 'metadata': {
136129 'name': self.name.split("/")[-1],
177170 return self._manifest_dict['metadata']['name']
178171
179172 def sdict(self):
180 result = run_local(self._kubectl + ["get", "-o", "json", self.KIND, self.resource_name])
173 result = self.run_local(self._kubectl + ["get", "-o", "json", self.KIND, self.resource_name])
181174 if result.return_code == 0:
182175 full_json_response = json.loads(result.stdout.decode('utf-8'))
183176 if full_json_response.get("status", {}).get("phase") == "Terminating":
231224 class KubernetesRawItem(KubernetesItem):
232225 BUNDLE_ATTRIBUTE_NAME = "k8s_raw"
233226 ITEM_TYPE_NAME = "k8s_raw"
234 KUBERNETES_APIVERSION = None
235227 NAME_REGEX = r"^([a-z0-9-\.]{1,253})?/[a-zA-Z0-9-\.]{1,253}/[a-z0-9-\.]{1,253}$"
236228 NAME_REGEX_COMPILED = re.compile(NAME_REGEX)
237229
270262 class KubernetesClusterRole(KubernetesItem):
271263 BUNDLE_ATTRIBUTE_NAME = "k8s_clusterroles"
272264 KIND = "ClusterRole"
273 KUBERNETES_APIVERSION = "rbac.authorization.k8s.io/v1"
274265 ITEM_TYPE_NAME = "k8s_clusterrole"
275266 NAME_REGEX = r"^[a-z0-9-\.]{1,253}$"
276267 NAME_REGEX_COMPILED = re.compile(NAME_REGEX)
283274 class KubernetesClusterRoleBinding(KubernetesItem):
284275 BUNDLE_ATTRIBUTE_NAME = "k8s_clusterrolebindings"
285276 KIND = "ClusterRoleBinding"
286 KUBERNETES_APIVERSION = "rbac.authorization.k8s.io/v1"
287277 ITEM_TYPE_NAME = "k8s_clusterrolebinding"
288278 NAME_REGEX = r"^[a-z0-9-\.]{1,253}$"
289279 NAME_REGEX_COMPILED = re.compile(NAME_REGEX)
301291 class KubernetesConfigMap(KubernetesItem):
302292 BUNDLE_ATTRIBUTE_NAME = "k8s_configmaps"
303293 KIND = "ConfigMap"
304 KUBERNETES_APIVERSION = "v1"
305294 ITEM_TYPE_NAME = "k8s_configmap"
306295
307296
308297 class KubernetesCronJob(KubernetesItem):
309298 BUNDLE_ATTRIBUTE_NAME = "k8s_cronjobs"
310299 KIND = "CronJob"
311 KUBERNETES_APIVERSION = "batch/v1beta1"
312300 ITEM_TYPE_NAME = "k8s_cronjob"
313301
314302
315303 class KubernetesCustomResourceDefinition(KubernetesItem):
316304 BUNDLE_ATTRIBUTE_NAME = "k8s_crd"
317305 KIND = "CustomResourceDefinition"
318 KUBERNETES_APIVERSION = "apiextensions.k8s.io/v1"
319306 ITEM_TYPE_NAME = "k8s_crd"
320307 NAME_REGEX = r"^[a-z0-9-\.]{1,253}$"
321308 NAME_REGEX_COMPILED = re.compile(NAME_REGEX)
331318 class KubernetesDaemonSet(KubernetesItem):
332319 BUNDLE_ATTRIBUTE_NAME = "k8s_daemonsets"
333320 KIND = "DaemonSet"
334 KUBERNETES_APIVERSION = "apps/v1"
335321 ITEM_TYPE_NAME = "k8s_daemonset"
336322
337323 def get_auto_deps(self, items):
348334 class KubernetesDeployment(KubernetesItem):
349335 BUNDLE_ATTRIBUTE_NAME = "k8s_deployments"
350336 KIND = "Deployment"
351 KUBERNETES_APIVERSION = "apps/v1"
352337 ITEM_TYPE_NAME = "k8s_deployment"
353338
354339 def get_auto_deps(self, items):
365350 class KubernetesIngress(KubernetesItem):
366351 BUNDLE_ATTRIBUTE_NAME = "k8s_ingresses"
367352 KIND = "Ingress"
368 KUBERNETES_APIVERSION = "networking.k8s.io/v1beta1"
369353 ITEM_TYPE_NAME = "k8s_ingress"
370354
371355 def get_auto_deps(self, items):
382366 class KubernetesNamespace(KubernetesItem):
383367 BUNDLE_ATTRIBUTE_NAME = "k8s_namespaces"
384368 KIND = "Namespace"
385 KUBERNETES_APIVERSION = "v1"
386369 ITEM_TYPE_NAME = "k8s_namespace"
387370 NAME_REGEX = r"^[a-z0-9-\.]{1,253}$"
388371 NAME_REGEX_COMPILED = re.compile(NAME_REGEX)
394377 class KubernetesNetworkPolicy(KubernetesItem):
395378 BUNDLE_ATTRIBUTE_NAME = "k8s_networkpolicies"
396379 KIND = "NetworkPolicy"
397 KUBERNETES_APIVERSION = "networking.k8s.io/v1"
398380 ITEM_TYPE_NAME = "k8s_networkpolicy"
399381 NAME_REGEX = r"^([a-z0-9-\.]{1,253})?/[a-z0-9-\.]{1,253}$"
400382 NAME_REGEX_COMPILED = re.compile(NAME_REGEX)
403385 class KubernetesPersistentVolumeClain(KubernetesItem):
404386 BUNDLE_ATTRIBUTE_NAME = "k8s_pvc"
405387 KIND = "PersistentVolumeClaim"
406 KUBERNETES_APIVERSION = "v1"
407388 ITEM_TYPE_NAME = "k8s_pvc"
408389
409390
410391 class KubernetesRole(KubernetesItem):
411392 BUNDLE_ATTRIBUTE_NAME = "k8s_roles"
412393 KIND = "Role"
413 KUBERNETES_APIVERSION = "rbac.authorization.k8s.io/v1"
414394 ITEM_TYPE_NAME = "k8s_role"
415395
416396
417397 class KubernetesRoleBinding(KubernetesItem):
418398 BUNDLE_ATTRIBUTE_NAME = "k8s_rolebindings"
419399 KIND = "RoleBinding"
420 KUBERNETES_APIVERSION = "rbac.authorization.k8s.io/v1"
421400 ITEM_TYPE_NAME = "k8s_rolebinding"
422401
423402 def get_auto_deps(self, items):
429408 class KubernetesSecret(KubernetesItem):
430409 BUNDLE_ATTRIBUTE_NAME = "k8s_secrets"
431410 KIND = "Secret"
432 KUBERNETES_APIVERSION = "v1"
433411 ITEM_TYPE_NAME = "k8s_secret"
434412
435413 def get_auto_deps(self, items):
439417 class KubernetesService(KubernetesItem):
440418 BUNDLE_ATTRIBUTE_NAME = "k8s_services"
441419 KIND = "Service"
442 KUBERNETES_APIVERSION = "v1"
443420 ITEM_TYPE_NAME = "k8s_service"
444421
445422
446423 class KubernetesServiceAccount(KubernetesItem):
447424 BUNDLE_ATTRIBUTE_NAME = "k8s_serviceaccounts"
448425 KIND = "ServiceAccount"
449 KUBERNETES_APIVERSION = "v1"
450426 ITEM_TYPE_NAME = "k8s_serviceaccount"
451427
452428
453429 class KubernetesStatefulSet(KubernetesItem):
454430 BUNDLE_ATTRIBUTE_NAME = "k8s_statefulsets"
455431 KIND = "StatefulSet"
456 KUBERNETES_APIVERSION = "apps/v1"
457432 ITEM_TYPE_NAME = "k8s_statefulset"
458433
459434 def get_auto_deps(self, items):
0 # -*- coding: utf-8 -*-
1 from __future__ import unicode_literals
2
30 from abc import ABCMeta, abstractmethod
41
52 from bundlewrap.exceptions import BundleError
63 from bundlewrap.items import Item
74 from bundlewrap.utils.text import mark_for_translation as _
8 from six import add_metaclass
95
106
11 @add_metaclass(ABCMeta)
12 class Pkg(Item):
7 class Pkg(Item, metaclass=ABCMeta):
138 """
149 A generic package.
1510 """
0 # -*- coding: utf-8 -*-
1 from __future__ import unicode_literals
2
3 from pipes import quote
0 from shlex import quote
41
52 from bundlewrap.exceptions import BundleError
63 from bundlewrap.items.pkg import Pkg
1815 }
1916
2017 def pkg_all_installed(self):
21 result = self.node.run("dpkg -l | grep '^ii'")
18 result = self.run("dpkg -l | grep '^ii'")
2219 for line in result.stdout.decode('utf-8').strip().split("\n"):
2320 pkg_name = line[4:].split()[0].replace(":", "_")
2421 yield "{}:{}".format(self.ITEM_TYPE_NAME, pkg_name)
2522
2623 def pkg_install(self):
2724 runlevel = "" if self.when_creating['start_service'] else "RUNLEVEL=1 "
28 self.node.run(
25 self.run(
2926 runlevel +
3027 "DEBIAN_FRONTEND=noninteractive "
3128 "apt-get -qy -o Dpkg::Options::=--force-confold --no-install-recommends "
3431 )
3532
3633 def pkg_installed(self):
37 result = self.node.run(
34 result = self.run(
3835 "dpkg -s {} | grep '^Status: '".format(quote(self.name.replace("_", ":"))),
3936 may_fail=True,
4037 )
5451 return False
5552
5653 def pkg_remove(self):
57 self.node.run(
54 self.run(
5855 "DEBIAN_FRONTEND=noninteractive "
5956 "apt-get -qy purge {}".format(quote(self.name.replace("_", ":")))
6057 )
0 # -*- coding: utf-8 -*-
1 from __future__ import unicode_literals
2
3 from pipes import quote
0 from shlex import quote
41
52 from bundlewrap.items.pkg import Pkg
63
1714 return ["pkg_dnf", "pkg_yum"]
1815
1916 def pkg_all_installed(self):
20 result = self.node.run("dnf -d0 -e0 list installed")
17 result = self.run("dnf -d0 -e0 list installed")
2118 for line in result.stdout.decode('utf-8').strip().split("\n"):
2219 yield "{}:{}".format(self.ITEM_TYPE_NAME, line.split()[0].split(".")[0])
2320
2421 def pkg_install(self):
25 self.node.run("dnf -d0 -e0 -y install {}".format(quote(self.name)), may_fail=True)
22 self.run("dnf -d0 -e0 -y install {}".format(quote(self.name)), may_fail=True)
2623
2724 def pkg_installed(self):
28 result = self.node.run(
25 result = self.run(
2926 "dnf -d0 -e0 list installed {}".format(quote(self.name)),
3027 may_fail=True,
3128 )
3229 return result.return_code == 0
3330
3431 def pkg_remove(self):
35 self.node.run("dnf -d0 -e0 -y remove {}".format(quote(self.name)), may_fail=True)
32 self.run("dnf -d0 -e0 -y remove {}".format(quote(self.name)), may_fail=True)
0 # -*- coding: utf-8 -*-
1 from __future__ import unicode_literals
2
3 from pipes import quote
0 from shlex import quote
41 import re
52
63 from bundlewrap.exceptions import BundleError
0 # -*- coding: utf-8 -*-
1 from __future__ import unicode_literals
2
3 from pipes import quote
0 from shlex import quote
41
52 from bundlewrap.items.pkg import Pkg
63
1714 return ["pkg_opkg"]
1815
1916 def pkg_all_installed(self):
20 result = self.node.run("opkg list-installed")
17 result = self.run("opkg list-installed")
2118 for line in result.stdout.decode('utf-8').strip().split("\n"):
2219 if line:
2320 yield "{}:{}".format(self.ITEM_TYPE_NAME, line.split()[0])
2421
2522 def pkg_install(self):
26 self.node.run("opkg install {}".format(quote(self.name)), may_fail=True)
23 self.run("opkg install {}".format(quote(self.name)), may_fail=True)
2724
2825 def pkg_installed(self):
29 result = self.node.run(
26 result = self.run(
3027 "opkg status {} | grep ^Status: | grep installed".format(quote(self.name)),
3128 may_fail=True,
3229 )
3330 return result.return_code == 0
3431
3532 def pkg_remove(self):
36 self.node.run("opkg remove {}".format(quote(self.name)), may_fail=True)
33 self.run("opkg remove {}".format(quote(self.name)), may_fail=True)
0 # -*- coding: utf-8 -*-
1 from __future__ import unicode_literals
2
30 from os.path import basename, join
4 from pipes import quote
1 from shlex import quote
52
63 from bundlewrap.items.pkg import Pkg
74
2421 return {'installed': self.attributes['installed']}
2522
2623 def pkg_all_installed(self):
27 pkgs = self.node.run("pacman -Qq").stdout.decode('utf-8')
24 pkgs = self.run("pacman -Qq").stdout.decode('utf-8')
2825 for line in pkgs.splitlines():
2926 yield "{}:{}".format(self.ITEM_TYPE_NAME, line.split()[0])
3027
3330 local_file = join(self.item_dir, self.attributes['tarball'])
3431 remote_file = "/tmp/{}".format(basename(local_file))
3532 self.node.upload(local_file, remote_file)
36 self.node.run("pacman --noconfirm -U {}".format(quote(remote_file)), may_fail=True)
37 self.node.run("rm -- {}".format(quote(remote_file)))
33 self.run("pacman --noconfirm -U {}".format(quote(remote_file)), may_fail=True)
34 self.run("rm -- {}".format(quote(remote_file)))
3835 else:
39 self.node.run("pacman --noconfirm -S {}".format(quote(self.name)), may_fail=True)
36 self.run("pacman --noconfirm -S {}".format(quote(self.name)), may_fail=True)
4037
4138 def pkg_installed(self):
42 result = self.node.run(
39 result = self.run(
4340 "pacman -Q {}".format(quote(self.name)),
4441 may_fail=True,
4542 )
4643 return result.return_code == 0
4744
4845 def pkg_remove(self):
49 self.node.run("pacman --noconfirm -Rs {}".format(quote(self.name)), may_fail=True)
46 self.run("pacman --noconfirm -Rs {}".format(quote(self.name)), may_fail=True)
0 # -*- coding: utf-8 -*-
1 from __future__ import unicode_literals
2
30 from os.path import join, split
4 from pipes import quote
1 from shlex import quote
52
63 from bundlewrap.exceptions import BundleError
74 from bundlewrap.items import Item
0 # -*- coding: utf-8 -*-
1 from __future__ import unicode_literals
2
3 from pipes import quote
0 from shlex import quote
41
52 from bundlewrap.items.pkg import Pkg
63
1310 ITEM_TYPE_NAME = "pkg_snap"
1411
1512 def pkg_all_installed(self):
16 result = self.node.run("snap list")
13 result = self.run("snap list")
1714 for line in result.stdout.decode('utf-8').strip().split("\n"):
1815 yield "{}:{}".format(self.ITEM_TYPE_NAME, line.split()[0].split(" ")[0])
1916
2017 def pkg_install(self):
21 self.node.run("snap install {}".format(quote(self.name)), may_fail=True)
18 self.run("snap install {}".format(quote(self.name)), may_fail=True)
2219
2320 def pkg_installed(self):
24 result = self.node.run(
21 result = self.run(
2522 "snap list {}".format(quote(self.name)),
2623 may_fail=True,
2724 )
2825 return result.return_code == 0
2926
3027 def pkg_remove(self):
31 self.node.run("snap remove {}".format(quote(self.name)), may_fail=True)
28 self.run("snap remove {}".format(quote(self.name)), may_fail=True)
0 # -*- coding: utf-8 -*-
1 from __future__ import unicode_literals
2
3 from pipes import quote
0 from shlex import quote
41
52 from bundlewrap.items.pkg import Pkg
63
1714 return ["pkg_dnf", "pkg_yum"]
1815
1916 def pkg_all_installed(self):
20 result = self.node.run("yum -d0 -e0 list installed")
17 result = self.run("yum -d0 -e0 list installed")
2118 for line in result.stdout.decode('utf-8').strip().split("\n"):
2219 yield "{}:{}".format(self.ITEM_TYPE_NAME, line.split()[0].split(".")[0])
2320
2421 def pkg_install(self):
25 self.node.run("yum -d0 -e0 -y install {}".format(quote(self.name)), may_fail=True)
22 self.run("yum -d0 -e0 -y install {}".format(quote(self.name)), may_fail=True)
2623
2724 def pkg_installed(self):
28 result = self.node.run(
25 result = self.run(
2926 "yum -d0 -e0 list installed {}".format(quote(self.name)),
3027 may_fail=True,
3128 )
3229 return result.return_code == 0
3330
3431 def pkg_remove(self):
35 self.node.run("yum -d0 -e0 -y remove {}".format(quote(self.name)), may_fail=True)
32 self.run("yum -d0 -e0 -y remove {}".format(quote(self.name)), may_fail=True)
0 # -*- coding: utf-8 -*-
1 from __future__ import unicode_literals
2
3 from pipes import quote
0 from shlex import quote
41
52 from bundlewrap.exceptions import BundleError
63 from bundlewrap.items import Item
0 # -*- coding: utf-8 -*-
1 from __future__ import unicode_literals
2
3 from pipes import quote
0 from shlex import quote
41
52 from bundlewrap.exceptions import BundleError
63 from bundlewrap.items import Item
0 # -*- coding: utf-8 -*-
1 from __future__ import unicode_literals
2
30 from passlib.apps import postgres_context
41
52 from bundlewrap.exceptions import BundleError
0 # -*- coding: utf-8 -*-
1 from __future__ import unicode_literals
2
3 from pipes import quote
0 from shlex import quote
41
52 from bundlewrap.exceptions import BundleError
63 from bundlewrap.items import Item
0 # -*- coding: utf-8 -*-
1 from __future__ import unicode_literals
2
3 from pipes import quote
0 from shlex import quote
41
52 from bundlewrap.exceptions import BundleError
63 from bundlewrap.items import Item
0 # -*- coding: utf-8 -*-
1 from __future__ import unicode_literals
2
3 from pipes import quote
0 from shlex import quote
41
52 from bundlewrap.exceptions import BundleError
63 from bundlewrap.items import Item
0 # -*- coding: utf-8 -*-
1 from __future__ import unicode_literals
2
3 from pipes import quote
0 from shlex import quote
41
52 from bundlewrap.exceptions import BundleError
63 from bundlewrap.items import Item
0 # -*- coding: utf-8 -*-
1 from __future__ import unicode_literals
2
30 from collections import defaultdict
41 from os.path import dirname, normpath
5 from pipes import quote
2 from shlex import quote
63
74 from bundlewrap.exceptions import BundleError
85 from bundlewrap.items import Item
6461 command = "chown -h {}{} {}"
6562 else:
6663 command = "chown -h {}{} -- {}"
67 self.node.run(command.format(
64 self.run(command.format(
6865 quote(self.attributes['owner'] or ""),
6966 group,
7067 quote(self.name),
7370
7471 def _fix_target(self, status):
7572 if self.node.os in self.node.OS_FAMILY_BSD:
76 self.node.run("ln -sfh -- {} {}".format(
73 self.run("ln -sfh -- {} {}".format(
7774 quote(self.attributes['target']),
7875 quote(self.name),
7976 ))
8077 else:
81 self.node.run("ln -sfT -- {} {}".format(
78 self.run("ln -sfT -- {} {}".format(
8279 quote(self.attributes['target']),
8380 quote(self.name),
8481 ))
8582
8683 def _fix_type(self, status):
87 self.node.run("rm -rf -- {}".format(quote(self.name)))
88 self.node.run("mkdir -p -- {}".format(quote(dirname(self.name))))
89 self.node.run("ln -s -- {} {}".format(
84 self.run("rm -rf -- {}".format(quote(self.name)))
85 self.run("mkdir -p -- {}".format(quote(dirname(self.name))))
86 self.run("ln -s -- {} {}".format(
9087 quote(self.attributes['target']),
9188 quote(self.name),
9289 ))
0 # -*- coding: utf-8 -*-
1 from __future__ import unicode_literals
2
30 from logging import ERROR, getLogger
4 from pipes import quote
1 from shlex import quote
52 from string import ascii_lowercase, digits
63
74 from passlib.hash import bcrypt, md5_crypt, sha256_crypt, sha512_crypt
136133
137134 def fix(self, status):
138135 if status.must_be_deleted:
139 self.node.run("userdel {}".format(self.name), may_fail=True)
136 self.run("userdel {}".format(self.name), may_fail=True)
140137 else:
141138 command = "useradd " if status.must_be_created else "usermod "
142139 for attr, option in sorted(_ATTRIBUTE_OPTIONS.items()):
148145 value = str(self.attributes[attr])
149146 command += "{} {} ".format(option, quote(value))
150147 command += self.name
151 self.node.run(command, may_fail=True)
148 self.run(command, may_fail=True)
152149
153150 def display_dicts(self, cdict, sdict, keys):
154151 for attr_name, attr_display_name in _ATTRIBUTE_NAMES.items():
199196 password_command = "grep -ae '^{}:' /etc/master.passwd"
200197 else:
201198 password_command = "grep -ae '^{}:' /etc/passwd"
202 passwd_grep_result = self.node.run(
199 passwd_grep_result = self.run(
203200 password_command.format(self.name),
204201 may_fail=True,
205202 )
230227 if self.attributes['password_hash'] is not None:
231228 if self.attributes['use_shadow'] and self.node.os not in self.node.OS_FAMILY_BSD:
232229 # verify content of /etc/shadow unless we are on OpenBSD
233 shadow_grep_result = self.node.run(
230 shadow_grep_result = self.run(
234231 "grep -e '^{}:' /etc/shadow".format(self.name),
235232 may_fail=True,
236233 )
0 # -*- coding: utf-8 -*-
1 from __future__ import unicode_literals
2
30 from datetime import datetime
41 from getpass import getuser
52 import json
63 from os import environ
7 from pipes import quote
4 from shlex import quote
85 from socket import gethostname
96 from time import time
107
11 from .exceptions import NodeLockedException, RemoteException
8 from .exceptions import NodeLockedException, NoSuchNode, RemoteException
129 from .utils import cached_property, tempfile
1310 from .utils.text import (
1411 blue,
2320 from .utils.ui import io
2421
2522
26 HARD_LOCK_PATH = "/tmp/bundlewrap.lock"
27 HARD_LOCK_FILE = HARD_LOCK_PATH + "/info"
28 SOFT_LOCK_PATH = "/tmp/bundlewrap.softlock.d"
29 SOFT_LOCK_FILE = "/tmp/bundlewrap.softlock.d/{id}"
30
31
32 def get_hard_lock_info(node, local_path):
33 try:
34 node.download(HARD_LOCK_FILE, local_path)
35 with open(local_path, 'r') as fp:
36 return json.load(fp)
37 except (RemoteException, ValueError):
38 io.stderr(_(
39 "{x} {node_bold} corrupted hard lock: "
40 "unable to read or parse lock file contents "
41 "(clear it with `bw run {node} 'rm -Rf {path}'`)"
42 ).format(
43 node_bold=bold(node.name),
44 node=node.name,
45 path=HARD_LOCK_PATH,
46 x=red("!"),
47 ))
48 return {}
23 LOCK_BASE = "/var/lib/bundlewrap"
4924
5025
5126 def identity():
5530 ))
5631
5732
58 class NodeLock(object):
33 class NodeLock:
5934 def __init__(self, node, interactive=False, ignore=False):
6035 self.node = node
6136 self.ignore = ignore
6237 self.interactive = interactive
38 self.locking_node = _get_locking_node(node)
6339
6440 def __enter__(self):
65 if self.node.os not in self.node.OS_FAMILY_UNIX:
41 if self.locking_node.os not in self.locking_node.OS_FAMILY_UNIX:
6642 # no locking required/possible
6743 return self
6844 with tempfile() as local_path:
45 self.locking_node.run("mkdir -p " + quote(LOCK_BASE))
6946 if not self.ignore:
7047 with io.job(_("{node} checking hard lock status").format(node=bold(self.node.name))):
71 result = self.node.run("mkdir " + quote(HARD_LOCK_PATH), may_fail=True)
48 result = self.locking_node.run("mkdir " + quote(self._hard_lock_dir()), may_fail=True)
7249 if result.return_code != 0:
73 info = get_hard_lock_info(self.node, local_path)
50 info = self._get_hard_lock_info(local_path)
7451 expired = False
7552 try:
7653 d = info['date']
9774
9875 with io.job(_("{node} uploading lock file").format(node=bold(self.node.name))):
9976 if self.ignore:
100 self.node.run("mkdir -p " + quote(HARD_LOCK_PATH))
77 self.locking_node.run("mkdir -p " + quote(self._hard_lock_dir()))
10178 with open(local_path, 'w') as f:
10279 f.write(json.dumps({
10380 'date': time(),
10481 'user': identity(),
10582 }))
106 self.node.upload(local_path, HARD_LOCK_FILE)
83 self.locking_node.upload(local_path, self._hard_lock_file())
10784
10885 return self
10986
11087 def __exit__(self, type, value, traceback):
111 if self.node.os not in self.node.OS_FAMILY_UNIX:
88 if self.locking_node.os not in self.locking_node.OS_FAMILY_UNIX:
11289 # no locking required/possible
11390 return
11491 with io.job(_("{node} removing hard lock").format(node=bold(self.node.name))):
115 result = self.node.run("rm -R {}".format(quote(HARD_LOCK_PATH)), may_fail=True)
92 result = self.locking_node.run("rm -R {}".format(quote(self._hard_lock_dir())), may_fail=True)
11693
11794 if result.return_code != 0:
11895 io.stderr(_("{x} {node} could not release hard lock").format(
11996 node=bold(self.node.name),
12097 x=red("!"),
12198 ))
99
100 def _get_hard_lock_info(self, local_path):
101 try:
102 self.locking_node.download(self._hard_lock_file(), local_path)
103 with open(local_path, 'r') as fp:
104 return json.load(fp)
105 except (RemoteException, ValueError):
106 io.stderr(_(
107 "{x} {node_bold} corrupted hard lock: "
108 "unable to read or parse lock file contents "
109 "(clear it with `bw run {node} 'rm -Rf {path}'`)"
110 ).format(
111 node_bold=bold(self.locking_node.name),
112 node=self.locking_node.name,
113 path=self._hard_lock_dir(),
114 x=red("!"),
115 ))
116 return {}
117
118 def _hard_lock_dir(self):
119 return LOCK_BASE + "/hard-" + quote(self.node.name)
120
121 def _hard_lock_file(self):
122 return self._hard_lock_dir() + "/info"
122123
123124 def _warning_message_hard(self, info):
124125 return wrap_question(
155156 yield lock
156157
157158
159 def _get_locking_node(node):
160 if node.locking_node is not None:
161 try:
162 return node.repo.get_node(node.locking_node)
163 except NoSuchNode:
164 raise Exception("Invalid locking_node {} for {}".format(
165 node.locking_node,
166 node.name,
167 ))
168 else:
169 return node
170
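The locking_node attribute resolved here is set per node or group; a hypothetical nodes.py entry:

    nodes = {
        'app1': {
            'hostname': "app1.example.com",
            'locking_node': "bastion",  # locks for app1 are kept on node "bastion"
        },
    }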
171
172 def _soft_lock_dir(node_name):
173 return LOCK_BASE + "/soft-" + quote(node_name)
174
175
176 def _soft_lock_file(node_name, lock_id):
177 return _soft_lock_dir(node_name) + "/" + lock_id
178
179
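With these helpers, lock locations under LOCK_BASE come out as follows (assuming a shell-safe node name; the lock id is made up):

    # hard lock info: /var/lib/bundlewrap/hard-app1/info
    # soft lock file: /var/lib/bundlewrap/soft-app1/ABCD1234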
158180 def softlock_add(node, lock_id, comment="", expiry="8h", item_selectors=None):
159 assert node.os in node.OS_FAMILY_UNIX
181 locking_node = _get_locking_node(node)
182 assert locking_node.os in locking_node.OS_FAMILY_UNIX
160183 if "\n" in comment:
161184 raise ValueError(_("Lock comments must not contain any newlines"))
162185 if not item_selectors:
178201 with tempfile() as local_path:
179202 with open(local_path, 'w') as f:
180203 f.write(content + "\n")
181 node.run("mkdir -p " + quote(SOFT_LOCK_PATH))
182 node.upload(local_path, SOFT_LOCK_FILE.format(id=lock_id), mode='0644')
204 locking_node.run("mkdir -p " + quote(_soft_lock_dir(node.name)))
205 locking_node.upload(local_path, _soft_lock_file(node.name, lock_id), mode='0644')
183206
184207 node.repo.hooks.lock_add(node.repo, node, lock_id, item_selectors, expiry_timestamp, comment)
185208
187210
188211
189212 def softlock_list(node):
190 if node.os not in node.OS_FAMILY_UNIX:
213 locking_node = _get_locking_node(node)
214 if locking_node.os not in locking_node.OS_FAMILY_UNIX:
191215 return []
192216 with io.job(_("{} checking soft locks").format(bold(node.name))):
193 cat = node.run("cat {}".format(SOFT_LOCK_FILE.format(id="*")), may_fail=True)
217 cat = locking_node.run("cat {}".format(_soft_lock_file(node.name, "*")), may_fail=True)
194218 if cat.return_code != 0:
195219 return []
196220 result = []
217241
218242
219243 def softlock_remove(node, lock_id):
220 assert node.os in node.OS_FAMILY_UNIX
244 locking_node = _get_locking_node(node)
245 assert locking_node.os in locking_node.OS_FAMILY_UNIX
221246 io.debug(_("removing soft lock {id} from node {node}").format(
222247 id=lock_id,
223248 node=node.name,
224249 ))
225 node.run("rm {}".format(SOFT_LOCK_FILE.format(id=lock_id)))
250 locking_node.run("rm {}".format(_soft_lock_file(node.name, lock_id)))
226251 node.repo.hooks.lock_remove(node.repo, node, lock_id)
0 # -*- coding: utf-8 -*-
1 from __future__ import unicode_literals
2
30 from copy import copy
41 from hashlib import sha1
52 from json import dumps, JSONEncoder
107 from .utils.text import force_text, mark_for_translation as _
118
129
13 try:
14 text_type = unicode
15 byte_type = str
16 except NameError:
17 text_type = str
18 byte_type = bytes
19
20 METADATA_TYPES = (
10 METADATA_TYPES = ( # only meant for natively atomic types
2111 bool,
22 byte_type,
12 bytes,
2313 Fault,
2414 int,
25 text_type,
15 str,
2616 type(None),
2717 )
2818
29 # constants returned as options by metadata processors
30 DONE = 1
31 RUN_ME_AGAIN = 2
32 DEFAULTS = 3
33 OVERWRITE = 4
34
3519
3620 class DoNotRunAgain(Exception):
3721 """
3822 Raised from metadata reactors to indicate they can be disregarded.
3923 """
4024 pass
25
26
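A sketch of how a metadata reactor might use this exception (the reactor and its keys are hypothetical, and the metadata API is simplified):

    @metadata_reactor
    def dhcp(metadata):
        if not metadata.get('dhcp/enabled', False):
            raise DoNotRunAgain  # nothing to contribute, skip on all future passes
        return {'dhcp': {'interface': "eth0"}}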
27 def deepcopy_metadata(obj):
28 """
29 Our own version of copy.deepcopy that doesn't pickle.
30 """
31 if isinstance(obj, METADATA_TYPES):
32 return obj
33 elif isinstance(obj, dict):
34 if isinstance(obj, ATOMIC_TYPES[dict]):
35 new_obj = atomic({})
36 else:
37 new_obj = {}
38 for key, value in obj.items():
39 new_key = copy(key)
40 new_obj[new_key] = deepcopy_metadata(value)
41 elif isinstance(obj, (list, tuple)):
42 if isinstance(obj, (ATOMIC_TYPES[list], ATOMIC_TYPES[tuple])):
43 new_obj = atomic([])
44 else:
45 new_obj = []
46 for member in obj:
47 new_obj.append(deepcopy_metadata(member))
48 elif isinstance(obj, set):
49 if isinstance(obj, ATOMIC_TYPES[set]):
50 new_obj = atomic(set())
51 else:
52 new_obj = set()
53 for member in obj:
54 new_obj.add(deepcopy_metadata(member))
55 else:
56 assert False # there should be no other types
57 return new_obj
4158
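For instance, atomic() wrappers survive the copy while containers are still copied deeply (a sketch using helpers already available in this module):

    original = {'ports': atomic({80, 443}), 'tags': ["a", "b"]}
    clone = deepcopy_metadata(original)
    assert isinstance(clone['ports'], ATOMIC_TYPES[set])  # atomicity preserved
    assert clone['tags'] is not original['tags']          # genuinely copied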
4259
4360 def validate_metadata(metadata, _top_level=True):
4562 raise TypeError(_("metadata must be a dict"))
4663 if isinstance(metadata, dict):
4764 for key, value in metadata.items():
48 if not isinstance(key, text_type):
65 if not isinstance(key, str):
4966 raise TypeError(_("metadata keys must be str, not: {}").format(repr(key)))
5067 validate_metadata(value, _top_level=False)
5168 elif isinstance(metadata, (tuple, list, set)):
7087 return cls(obj)
7188
7289
73 def blame_changed_paths(old_dict, new_dict, blame_dict, blame_name, defaults=False):
74 def is_mergeable(value1, value2):
75 if isinstance(value1, (list, set, tuple)) and isinstance(value2, (list, set, tuple)):
76 return True
77 elif isinstance(value1, dict) and isinstance(value2, dict):
78 return True
79 return False
80
81 new_paths = map_dict_keys(new_dict)
82
83 # clean up removed paths from blame_dict
84 for path in list(blame_dict.keys()):
85 if path not in new_paths:
86 del blame_dict[path]
87
88 for path in new_paths:
89 new_value = value_at_key_path(new_dict, path)
90 try:
91 old_value = value_at_key_path(old_dict, path)
92 except KeyError:
93 blame_dict[path] = (blame_name,)
94 else:
95 if old_value != new_value:
96 if defaults or is_mergeable(old_value, new_value):
97 blame_dict[path] += (blame_name,)
98 else:
99 blame_dict[path] = (blame_name,)
100 return blame_dict
101
102
103 def changes_metadata(existing_metadata, new_metadata):
104 """
105 Returns True if new_metadata contains any keys or values not present
106 in or different from existing_metadata.
107 """
108 for key, new_value in new_metadata.items():
109 if key not in existing_metadata:
110 return True
111 if isinstance(new_value, dict):
112 if not isinstance(existing_metadata[key], dict):
113 return True
114 if changes_metadata(existing_metadata[key], new_value):
115 return True
116 if isinstance(existing_metadata[key], Fault) and isinstance(new_value, Fault):
117 # Always consider Faults as equal. It would arguably be more correct to
118 # always assume them to be different, but that would mean that we could
119 # never do change detection between two dicts of metadata. So we have no
120 # choice but to warn users in docs that Faults will always be considered
121 # equal to one another.
122 continue
123 if new_value != existing_metadata[key]:
124 return True
125 return False
126
127
128 def check_metadata_keys(node):
129 try:
130 basestring
131 except NameError: # Python 2
132 basestring = str
133 for path in map_dict_keys(node.metadata):
134 value = path[-1]
135 if not isinstance(value, basestring):
136 raise TypeError(_("metadata key for {node} at path '{path}' is not a string").format(
137 node=node.name,
138 path="'->'".join(path[:-1]),
139 ))
140
141
142 def check_metadata_processor_result(input_metadata, result, node_name, metadata_processor_name):
143 """
144 Validates the return value of a metadata processor and splits it
145 into metadata and options.
146 """
147 if not isinstance(result, tuple) or not len(result) >= 2:
148 raise ValueError(_(
149 "metadata processor {metaproc} for node {node} did not return "
150 "a tuple of length 2 or greater"
151 ).format(
152 metaproc=metadata_processor_name,
153 node=node_name,
154 ))
155 result_dict, options = result[0], result[1:]
156 if not isinstance(result_dict, dict):
157 raise ValueError(_(
158 "metadata processor {metaproc} for node {node} did not return "
159 "a dict as the first element"
160 ).format(
161 metaproc=metadata_processor_name,
162 node=node_name,
163 ))
164 if (
165 (DEFAULTS in options or OVERWRITE in options) and
166 id(input_metadata) == id(result_dict)
167 ):
168 raise ValueError(_(
169 "metadata processor {metaproc} for node {node} returned original "
170 "metadata dict plus DEFAULTS or OVERWRITE"
171 ).format(
172 metaproc=metadata_processor_name,
173 node=node_name,
174 ))
175 for option in options:
176 if option not in (DEFAULTS, DONE, RUN_ME_AGAIN, OVERWRITE):
177 raise ValueError(_(
178 "metadata processor {metaproc} for node {node} returned an "
179 "invalid option: {opt}"
180 ).format(
181 metaproc=metadata_processor_name,
182 node=node_name,
183 opt=repr(option),
184 ))
185 if DONE in options and RUN_ME_AGAIN in options:
186 raise ValueError(_(
187 "metadata processor {metaproc} for node {node} cannot return both "
188 "DONE and RUN_ME_AGAIN"
189 ).format(
190 metaproc=metadata_processor_name,
191 node=node_name,
192 ))
193 if DONE not in options and RUN_ME_AGAIN not in options:
194 raise ValueError(_(
195 "metadata processor {metaproc} for node {node} must return either "
196 "DONE or RUN_ME_AGAIN"
197 ).format(
198 metaproc=metadata_processor_name,
199 node=node_name,
200 ))
201 if DEFAULTS in options and OVERWRITE in options:
202 raise ValueError(_(
203 "metadata processor {metaproc} for node {node} cannot return both "
204 "DEFAULTS and OVERWRITE"
205 ).format(
206 metaproc=metadata_processor_name,
207 node=node_name,
208 ))
209 return result_dict, options
210
211
212 def check_for_unsolvable_metadata_key_conflicts(node):
90 def check_for_metadata_conflicts(node):
91 check_for_metadata_conflicts_between_groups(node)
92 check_for_metadata_conflicts_between_defaults_and_reactors(node)
93
94
95 def check_for_metadata_conflicts_between_defaults_and_reactors(node):
96 """
97 Finds conflicting metadata keys in bundle defaults and reactors.
98
99 Dicts can be merged with dicts, sets can be merged with sets, but
100 any other combination is a conflict.
101 """
102 TYPE_DICT = 1
103 TYPE_SET = 2
104 TYPE_OTHER = 3
105
106 def paths_with_types(d):
107 for path in map_dict_keys(d):
108 value = value_at_key_path(d, path)
109 if isinstance(value, dict):
110 yield path, TYPE_DICT
111 elif isinstance(value, set):
112 yield path, TYPE_SET
113 else:
114 yield path, TYPE_OTHER
115
116 for prefix in ("metadata_defaults:", "metadata_reactor:"):
117 paths = {}
118 for identifier, layer in node._metadata_stack._layers.items():
119 if identifier.startswith(prefix):
120 for path, current_type in paths_with_types(layer):
121 try:
122 prev_type, prev_identifier = paths[path]
123 except KeyError:
124 paths[path] = current_type, identifier
125 else:
126 if (
127 prev_type == TYPE_DICT
128 and current_type == TYPE_DICT
129 ):
130 pass
131 elif (
132 prev_type == TYPE_SET
133 and current_type == TYPE_SET
134 ):
135 pass
136 else:
137 raise ValueError(_(
138 "{a} and {b} are clashing over this key path: {path}"
139 ).format(
140 a=identifier,
141 b=prev_identifier,
142 path="/".join(path),
143 ))
144
145
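Concretely, this is the kind of clash being rejected (the layer identifiers are sketched, not verbatim):

    # metadata_defaults:mybundle    provides {'ssh': {'port': 22}}  (dict at "ssh")
    # metadata_reactor:mybundle.ssh provides {'ssh': "disabled"}    (str at "ssh")
    # => ValueError: ... are clashing over this key path: ssh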
146 def check_for_metadata_conflicts_between_groups(node):
213147 """
214148 Finds metadata keys defined by two groups that are not part of a
215149 shared subgroup hierarchy.
279213 for chain in chains:
280214 metadata = {}
281215 for group in chain:
282 metadata = merge_dict(metadata, group.metadata)
216 metadata = merge_dict(metadata, group._attributes.get('metadata', {}))
283217 chain_metadata.append(metadata)
284218
285219 # create a "key path map" for each chain's metadata
316250 )
317251
318252
319 def deepcopy_metadata(obj):
320 """
321 Our own version of deepcopy.copy that doesn't pickle and ensures
322 a limited range of types is used in metadata.
323 """
324 if isinstance(obj, METADATA_TYPES):
325 return obj
326 elif isinstance(obj, dict):
327 if isinstance(obj, ATOMIC_TYPES[dict]):
328 new_obj = atomic({})
329 else:
330 new_obj = {}
331 for key, value in obj.items():
332 if not isinstance(key, METADATA_TYPES):
333 raise ValueError(_("illegal metadata key type: {}").format(repr(key)))
334 new_key = copy(key)
335 new_obj[new_key] = deepcopy_metadata(value)
336 elif isinstance(obj, (list, tuple)):
337 if isinstance(obj, (ATOMIC_TYPES[list], ATOMIC_TYPES[tuple])):
338 new_obj = atomic([])
339 else:
340 new_obj = []
341 for member in obj:
342 new_obj.append(deepcopy_metadata(member))
343 elif isinstance(obj, set):
344 if isinstance(obj, ATOMIC_TYPES[set]):
345 new_obj = atomic(set())
346 else:
347 new_obj = set()
348 for member in obj:
349 new_obj.add(deepcopy_metadata(member))
350 else:
351 raise ValueError(_("illegal metadata value type: {}").format(repr(obj)))
352 return new_obj
353
354
355253 def find_groups_causing_metadata_conflict(node_name, chain1, chain2, keypath):
356254 """
357255 Given two chains (lists of groups), find one group in each chain
358256 that has conflicting metadata with the other for the given key path.
359257 """
360 chain1_metadata = [list(map_dict_keys(group.metadata)) for group in chain1]
361 chain2_metadata = [list(map_dict_keys(group.metadata)) for group in chain2]
258 chain1_metadata = [
259 list(map_dict_keys(group._attributes.get('metadata', {}))) for group in chain1
260 ]
261 chain2_metadata = [
262 list(map_dict_keys(group._attributes.get('metadata', {}))) for group in chain2
263 ]
362264
363265 bad_keypath = None
364266
404306 raise ValueError(_("illegal metadata value type: {}").format(repr(obj)))
405307
406308
407 def metadata_to_json(metadata):
309 def metadata_to_json(metadata, sort_keys=True):
408310 return dumps(
409311 metadata,
410312 cls=MetadataJSONEncoder,
411313 indent=4,
412 sort_keys=True,
314 sort_keys=sort_keys,
413315 )
414316
415317
0 # -*- coding: utf-8 -*-
1 from __future__ import unicode_literals
2
30 from datetime import datetime, timedelta
41 from hashlib import md5
5 from os import environ
2 from os import environ, mkdir
3 from os.path import exists, join
64 from threading import Lock
5
6 from tomlkit import dumps as toml_dump, parse as toml_parse
77
88 from . import operations
99 from .bundle import Bundle
1313 find_item,
1414 )
1515 from .exceptions import (
16 DontCache,
1716 GracefulApplyException,
1817 ItemDependencyLoop,
1918 NodeLockedException,
2221 RepositoryError,
2322 SkipNode,
2423 )
25 from .group import GROUP_ATTR_DEFAULTS
24 from .group import GROUP_ATTR_DEFAULTS, GROUP_ATTR_TYPES
2625 from .itemqueue import ItemQueue
2726 from .items import Item
2827 from .lock import NodeLock
2928 from .metadata import hash_metadata
30 from .utils import cached_property, names
31 from .utils.dicts import hash_statedict
32 from .utils.metastack import Metastack
29 from .utils import cached_property, error_context, get_file_contents, names, NO_DEFAULT
30 from .utils.dicts import (
31 dict_to_toml,
32 hash_statedict,
33 set_key_at_path,
34 validate_dict,
35 value_at_key_path,
36 COLLECTION_OF_STRINGS,
37 )
3338 from .utils.text import (
3439 blue,
3540 bold,
3944 green,
4045 mark_for_translation as _,
4146 red,
47 toml_clean,
4248 validate_name,
4349 yellow,
4450 )
4551 from .utils.ui import io
4652
4753
48 class ApplyResult(object):
54 NODE_ATTR_TYPES = GROUP_ATTR_TYPES.copy()
55 NODE_ATTR_TYPES['groups'] = COLLECTION_OF_STRINGS
56 NODE_ATTR_TYPES['hostname'] = str
57
58
59 class ApplyResult:
4960 """
5061 Holds information about an apply run for a node.
5162 """
122133 if formatted_result is not None:
123134 if status_code == Item.STATUS_FAILED:
124135 io.stderr(formatted_result)
136 if item._command_results:
137 io.stderr(format_item_command_results(item._command_results))
138 # free up memory
139 del item._command_results
125140 else:
126141 io.stdout(formatted_result)
127142
268283 parent_groups[group].remove(top_level_group)
269284
270285 return order
286
287
288 def format_item_command_results(results):
289 output = ""
290
291 for i in range(len(results)):
292 stdout = results[i]['result'].stdout_text.strip()
293 stderr = results[i]['result'].stderr_text.strip()
294
295 # show command
296 output += "\n{b}".format(b=red('│'))
297 output += "\n{b} {command} (return code: {code}{no_output})".format(
298 b=red('├─'),
299 command=bold(results[i]['command']),
300 code=bold(results[i]['result'].return_code),
301 no_output='' if stdout or stderr else '; no output'
302 )
303
304 # show output
305 lines = []
306 if stdout or stderr:
307 output += "\n{b}".format(b=red("│ "))
308 if stdout:
309 lines += stdout.strip().split('\n')
310 if stderr:
311 lines += stderr.strip().split('\n')
312
313 for k in range(len(lines)):
314 output += "\n{b} {line}".format(b=red("│ "), line=lines[k])
315
316 output += red("\n╵ ")
317 return output.lstrip('\n')
271318
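Rendered, a failed item's command results might look roughly like this (command and output invented for illustration):

│
├─ systemctl restart nginx (return code: 1)
│
│  Job for nginx.service failed because the control process exited with error status.
╵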
272319
273320 def format_item_result(result, node, bundle, item, interactive=False, details=None):
318365 )
319366
320367
321 class Node(object):
368 class Node:
322369 OS_FAMILY_BSD = (
323370 'freebsd',
324371 'macos',
358405 if not validate_name(name):
359406 raise RepositoryError(_("'{}' is not a valid node name").format(name))
360407
408 with error_context(node_name=name):
409 validate_dict(attributes, NODE_ATTR_TYPES)
410
361411 self._add_host_keys = environ.get('BW_ADD_HOST_KEYS', False) == "1"
362 self._bundles = attributes.get('bundles', [])
363 self._compiling_metadata = Lock()
364 self._dynamic_group_lock = Lock()
365 self._dynamic_groups_resolved = False # None means we're currently doing it
366 self._groups = set(attributes.get('groups', set()))
367 self._metadata_so_far = {}
368 self._node_metadata = attributes.get('metadata', {})
412 self._attributes = attributes
369413 self._ssh_conn_established = False
370414 self._ssh_first_conn_lock = Lock()
371 self._template_node_name = attributes.get('template_node')
415 self.file_path = attributes.get('file_path')
372416 self.hostname = attributes.get('hostname', name)
373417 self.name = name
374418
383427
384428 @cached_property
385429 def bundles(self):
386 if self._dynamic_group_lock.acquire(False):
387 self._dynamic_group_lock.release()
388 else:
389 raise RepositoryError(_(
390 "node bundles cannot be queried with members_add/remove"
391 ))
392430 with io.job(_("{node} loading bundles").format(node=bold(self.name))):
393 added_bundles = []
394 found_bundles = []
431 bundle_names = set(self._attributes.get('bundles', set()))
432
395433 for group in self.groups:
396 for bundle_name in group.bundle_names:
397 found_bundles.append(bundle_name)
398
399 for bundle_name in found_bundles + list(self._bundles):
400 if bundle_name not in added_bundles:
401 added_bundles.append(bundle_name)
402 try:
403 yield Bundle(self, bundle_name)
404 except NoSuchBundle:
405 raise NoSuchBundle(_(
406 "Node '{node}' wants bundle '{bundle}', but it doesn't exist."
407 ).format(
408 bundle=bundle_name,
409 node=self.name,
410 ))
434 for bundle_name in set(group._attributes.get('bundles', set())):
435 bundle_names.add(bundle_name)
436
437 for bundle_name in bundle_names:
438 try:
439 yield Bundle(self, bundle_name)
440 except NoSuchBundle:
441 raise NoSuchBundle(_(
442 "Node '{node}' wants bundle '{bundle}', but it doesn't exist."
443 ).format(
444 bundle=bundle_name,
445 node=self.name,
446 ))
411447
412448 @cached_property
413449 def cdict(self):
424460 True if this node should be skipped based on the given selector
425461 (e.g. ["node:foo", "group:bar"]).
426462 """
427 components = [c.strip() for c in autoskip_selector.split(",")]
463 components = [c.strip() for c in autoskip_selector]
428464 if "node:{}".format(self.name) in components:
429465 return True
430466 for group in self.groups:
440476 def groups(self):
441477 _groups = set()
442478
443 for group_name in self._groups:
444 _groups.add(self.repo.get_group(group_name))
479 for group_name in set(self._attributes.get('groups', set())):
480 with error_context(node=self.name):
481 _groups.add(self.repo.get_group(group_name))
445482
446483 for group in self.repo.groups:
447 if self in group._static_nodes:
484 if group in _groups:
485 # we're already in this group, no need to check it again
486 continue
487 if self in group._nodes_from_members:
448488 _groups.add(group)
449
450 # lock to avoid infinite recursion when .members_add/remove
451 # use stuff like node.in_group() that in turn calls this function
452 if self._dynamic_group_lock.acquire(False):
453 cache_result = True
454 self._dynamic_groups_resolved = None
455 # first we remove ourselves from all static groups whose
456 # .members_remove matches us
457 for group in list(_groups):
458 if group.members_remove is not None and group.members_remove(self):
459 try:
460 _groups.remove(group)
461 except KeyError:
462 pass
463 # now add all groups whose .members_add (but not .members_remove)
464 # matches us
465 _groups = _groups.union(self._groups_dynamic)
466 self._dynamic_groups_resolved = True
467 self._dynamic_group_lock.release()
468 else:
469 cache_result = False
470
471 # we have to add parent groups at the very end, since we might
472 # have added or removed subgroups thru .members_add/remove
489 continue
490 for pattern in group._member_patterns:
491 if pattern.search(self.name) is not None:
492 _groups.add(group)
493
473494 while True:
474495 # Since we're only looking at *immediate* parent groups,
475496 # we have to keep doing this until we stop adding parent
477498 _original_groups = _groups.copy()
478499 for group in list(_groups):
479500 for parent_group in group.immediate_parent_groups:
480 if cache_result:
481 with self._dynamic_group_lock:
482 self._dynamic_groups_resolved = None
483 if (
484 not parent_group.members_remove or
485 not parent_group.members_remove(self)
486 ):
487 _groups.add(parent_group)
488 self._dynamic_groups_resolved = True
489 else:
490 _groups.add(parent_group)
501 _groups.add(parent_group)
491502 if _groups == _original_groups:
492503 # we didn't add any new parent groups, so we can stop
493504 break
494505
495 if cache_result:
496 return sorted(_groups)
497 else:
498 raise DontCache(sorted(_groups))
499
500 @property
501 def _groups_dynamic(self):
502 """
503 Returns all groups whose members_add matches this node.
504 """
505 _groups = set()
506 for group in self.repo.groups:
507 if group.members_add is not None and group.members_add(self):
508 _groups.add(group)
509 if group.members_remove is not None and group.members_remove(self):
510 try:
511 _groups.remove(group)
512 except KeyError:
513 pass
514506 return _groups
515507
516508 def has_any_bundle(self, bundle_list):
688680 Returns full metadata for a node. MUST NOT be used from inside a
689681 metadata reactor. Use .partial_metadata instead.
690682 """
691 if self._dynamic_groups_resolved is None:
692 # return only metadata set directly at the node level if
693 # we're still in the process of figuring out which groups
694 # we belong to
695 return self._node_metadata
696 else:
697 return self.repo._metadata_for_node(self.name, partial=False)
683 return self.repo._metadata_for_node(self.name, partial=False)
698684
699685 @property
700686 def metadata_blame(self):
701687 return self.repo._metadata_for_node(self.name, partial=False, blame=True)
702688
689 @property
690 def _metadata_stack(self):
691 return self.repo._metadata_for_node(self.name, partial=False, stack=True)
692
693 def metadata_get(self, path, default=NO_DEFAULT):
694 if not isinstance(path, (tuple, list)):
695 path = path.split("/")
696 try:
697 return value_at_key_path(self.metadata, path)
698 except KeyError:
699 if default != NO_DEFAULT:
700 return default
701 else:
702 raise
703
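A usage sketch for the new accessor; the node and key paths are invented:

node.metadata_get("nginx/port", 80)    # returns 80 if "nginx"/"port" is absent
node.metadata_get(("nginx", "port"))   # tuple paths work too; raises KeyError if absent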
703704 def metadata_hash(self):
704705 return hash_metadata(self.metadata)
705706
706707 @property
707708 def metadata_defaults(self):
708 return self._metadata_processors[0]
709
710 @property
711 def _metadata_processors(self):
712 def tuple_with_name(kind, bundle, metadata_processor):
713 return (
714 "{}:{}.{}".format(
715 kind,
716 bundle.name,
717 metadata_processor.__name__,
718 ),
719 metadata_processor,
720 )
721
722 defaults = []
723 reactors = set()
724 classic_metaprocs = set()
725
726709 for bundle in self.bundles:
727 if bundle._metadata_processors[0]:
728 defaults.append((
710 if bundle._metadata_defaults_and_reactors[0]:
711 yield (
729712 "metadata_defaults:{}".format(bundle.name),
730 bundle._metadata_processors[0],
731 ))
732 for reactor in bundle._metadata_processors[1]:
733 reactors.add(tuple_with_name("metadata_reactor", bundle, reactor))
734 for classic_metaproc in bundle._metadata_processors[2]:
735 classic_metaprocs.add(tuple_with_name("metadata_processor", bundle, classic_metaproc))
736
737 return defaults, reactors, classic_metaprocs
713 bundle._metadata_defaults_and_reactors[0],
714 )
738715
739716 @property
740717 def metadata_reactors(self):
741 return self._metadata_processors[1]
718 for bundle in self.bundles:
719 for reactor in bundle._metadata_defaults_and_reactors[1]:
720 yield (
721 "metadata_reactor:{}.{}".format(
722 bundle.name,
723 reactor.__name__,
724 ),
725 reactor,
726 )
742727
743728 @property
744729 def partial_metadata(self):
745730 """
746 Only to be used from inside metadata processors. Can't use the
731 Only to be used from inside metadata reactors. Can't use the
747732 normal .metadata there because it might deadlock when nodes
748733 have interdependent metadata.
749734
750 It's OK for metadata processors to work with partial metadata
735 It's OK for metadata reactors to work with partial metadata
751736 because they will be fed all metadata updates until no more
752 changes are made by any metadata processor.
737 changes are made by any metadata reactor.
753738 """
754
755 partial = self.repo._metadata_for_node(self.name, partial=True)
756
757 # TODO remove this mechanism in bw 4.0, always return Metastacks
758 if self.repo._in_new_metareactor:
759 return Metastack(partial)
760 else:
761 return partial
739 return self.repo._metadata_for_node(self.name, partial=True)
762740
763741 def run(self, command, data_stdin=None, may_fail=False, log_output=False):
764742 assert self.os in self.OS_FAMILY_UNIX
804782 wrapper_outer=self.cmd_wrapper_outer,
805783 )
806784
807 @property
808 def template_node(self):
809 if not self._template_node_name:
810 return None
811 else:
812 target_node = self.repo.get_node(self._template_node_name)
813 if target_node._template_node_name:
814 raise RepositoryError(_(
815 "{template_node} cannot use template_node because {node} uses {template_node} "
816 "as template_node"
817 ).format(node=self.name, template_node=target_node.name))
818 else:
819 return target_node
785 @cached_property
786 def toml(self):
787 if not self.file_path or not self.file_path.endswith(".toml"):
788 raise ValueError(_("node {} not in TOML format").format(self.name))
789 return toml_parse(get_file_contents(self.file_path))
790
791 def toml_save(self):
792 try:
793 toml_doc = self.toml
794 except ValueError:
795 attributes = self._attributes.copy()
796 del attributes['file_path']
797 toml_doc = dict_to_toml(attributes)
798 self.file_path = join(self.repo.path, "nodes", self.name + ".toml")
799 if not exists(join(self.repo.path, "nodes")):
800 mkdir(join(self.repo.path, "nodes"))
801 with open(self.file_path, 'w') as f:
802 f.write(toml_clean(toml_dump(toml_doc)))
803
804 def toml_set(self, path, value):
805 if not isinstance(path, tuple):
806 path = path.split("/")
807 set_key_at_path(self.toml, path, value)
820808
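A usage sketch, assuming a hypothetical node defined in nodes/<name>.toml:

node.toml_set("metadata/nginx/port", 8080)  # requires an existing TOML node, else ValueError
node.toml_save()  # converts dict-defined nodes to TOML on first save, then writes node.file_path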
821809 def upload(self, local_path, remote_path, mode=None, owner="", group="", may_fail=False):
822810 assert self.os in self.OS_FAMILY_UNIX
864852 attr_source = "group:{}".format(group.name)
865853 attr_value = getattr(group, attr)
866854
867 if self.template_node:
868 attr_source = "template_node"
869 attr_value = getattr(self.template_node, attr)
870
871855 if getattr(self, "_{}".format(attr)) is not None:
872856 attr_source = "node"
873857 attr_value = getattr(self, "_{}".format(attr))
881865 attr=attr,
882866 source=attr_source,
883867 ))
884 if self._dynamic_groups_resolved:
885 return attr_value
886 else:
887 raise DontCache(attr_value)
888 method.__name__ = str("_group_attr_{}".format(attr)) # required for cached_property
889 # str() for Python 2 compatibility
868 return attr_value
869 method.__name__ = "_group_attr_{}".format(attr) # required for cached_property
890870 return cached_property(method)
891871
892872 for attr, default in GROUP_ATTR_DEFAULTS.items():
0 # -*- coding: utf-8 -*-
1 from __future__ import unicode_literals
2
30 from datetime import datetime
4 from pipes import quote
1 from shlex import quote
52 from select import select
63 from shlex import split
74 from subprocess import Popen, PIPE
7168 ))
7269
7370
74 class RunResult(object):
71 class RunResult:
7572 def __init__(self):
7673 self.duration = None
7774 self.return_code = None
+0 -189 bundlewrap/plugins.py
0 # -*- coding: utf-8 -*-
1 from __future__ import unicode_literals
2
3 from json import dumps, loads
4 from os import chmod, remove
5 from os.path import exists, join
6 from stat import S_IREAD, S_IRGRP, S_IROTH
7
8 from requests import get
9
10 from .exceptions import NoSuchPlugin, PluginError, PluginLocalConflict
11 from .utils import download, hash_local_file
12 from .utils.text import mark_for_translation as _
13 from .utils.ui import io
14
15
16 BASE_URL = "https://raw.githubusercontent.com/bundlewrap/plugins/master"
17
18
19 class PluginManager(object):
20 def __init__(self, path, base_url=BASE_URL):
21 self.base_url = base_url
22 self.path = path
23 if exists(join(self.path, "plugins.json")):
24 with open(join(self.path, "plugins.json")) as f:
25 self.plugin_db = loads(f.read())
26 else:
27 self.plugin_db = {}
28
29 @property
30 def index(self):
31 return get(
32 "{}/index.json".format(self.base_url)
33 ).json()
34
35 def install(self, plugin, force=False):
36 if plugin in self.plugin_db:
37 raise PluginError(_("plugin '{plugin}' is already installed").format(plugin=plugin))
38
39 manifest = self.manifest_for_plugin(plugin)
40
41 for file in manifest['provides']:
42 target_path = join(self.path, file)
43 if exists(target_path) and not force:
44 raise PluginLocalConflict(_(
45 "cannot install '{plugin}' because it provides "
46 "'{path}' which already exists"
47 ).format(path=target_path, plugin=plugin))
48
49 url = "{}/{}/{}".format(self.base_url, plugin, file)
50 download(url, target_path)
51
52 # make file read-only to discourage users from editing them
53 # which will block future updates of the plugin
54 chmod(target_path, S_IREAD | S_IRGRP | S_IROTH)
55
56 self.record_as_installed(plugin, manifest)
57
58 return manifest
59
60 def list(self):
61 for plugin, info in self.plugin_db.items():
62 yield (plugin, info['version'])
63
64 def local_modifications(self, plugin):
65 try:
66 plugin_data = self.plugin_db[plugin]
67 except KeyError:
68 raise NoSuchPlugin(_(
69 "The plugin '{plugin}' is not installed."
70 ).format(plugin=plugin))
71 local_changes = []
72 for filename, checksum in plugin_data['files'].items():
73 target_path = join(self.path, filename)
74 actual_checksum = hash_local_file(target_path)
75 if actual_checksum != checksum:
76 local_changes.append((
77 target_path,
78 actual_checksum,
79 checksum,
80 ))
81 return local_changes
82
83 def manifest_for_plugin(self, plugin):
84 r = get(
85 "{}/{}/manifest.json".format(self.base_url, plugin)
86 )
87 if r.status_code == 404:
88 raise NoSuchPlugin(plugin)
89 else:
90 return r.json()
91
92 def record_as_installed(self, plugin, manifest):
93 file_hashes = {}
94
95 for file in manifest['provides']:
96 target_path = join(self.path, file)
97 file_hashes[file] = hash_local_file(target_path)
98
99 self.plugin_db[plugin] = {
100 'files': file_hashes,
101 'version': manifest['version'],
102 }
103 self.write_db()
104
105 def remove(self, plugin, force=False):
106 if plugin not in self.plugin_db:
107 raise NoSuchPlugin(_("plugin '{plugin}' is not installed").format(plugin=plugin))
108
109 for file, db_checksum in self.plugin_db[plugin]['files'].items():
110 file_path = join(self.path, file)
111 if not exists(file_path):
112 continue
113
114 current_checksum = hash_local_file(file_path)
115 if db_checksum != current_checksum and not force:
116 io.stderr(_(
117 "not removing '{path}' because it has been modified since installation"
118 ).format(path=file_path))
119 continue
120
121 remove(file_path)
122
123 del self.plugin_db[plugin]
124 self.write_db()
125
126 def search(self, term):
127 term = term.lower()
128 for plugin_name, plugin_data in self.index.items():
129 if term in plugin_name.lower() or term in plugin_data['desc'].lower():
130 yield (plugin_name, plugin_data['desc'])
131
132 def update(self, plugin, check_only=False, force=False):
133 if plugin not in self.plugin_db:
134 raise PluginError(_("plugin '{plugin}' is not installed").format(plugin=plugin))
135
136 # before updating anything, we need to check for local modifications
137 local_changes = self.local_modifications(plugin)
138 if local_changes and not force:
139 files = [path for path, c1, c2 in local_changes]
140 raise PluginLocalConflict(_(
141 "cannot update '{plugin}' because the following files have been modified locally:"
142 "\n{files}"
143 ).format(files="\n".join(files), plugin=plugin))
144
145 manifest = self.manifest_for_plugin(plugin)
146
147 for file in manifest['provides']:
148 file_path = join(self.path, file)
149 if exists(file_path) and file not in self.plugin_db[plugin]['files'] and not force:
150 # new version added a file that already existed locally
151 raise PluginLocalConflict(_(
152 "cannot update '{plugin}' because it would overwrite '{path}'"
153 ).format(path=file, plugin=plugin))
154
155 old_version = self.plugin_db[plugin]['version']
156 new_version = manifest['version']
157
158 if not check_only and old_version != new_version:
159 # actually install files
160 for file in manifest['provides']:
161 target_path = join(self.path, file)
162 url = "{}/{}/{}".format(self.base_url, plugin, file)
163 download(url, target_path)
164
165 # make file read-only to discourage users from editing them
166 # which will block future updates of the plugin
167 chmod(target_path, S_IREAD | S_IRGRP | S_IROTH)
168
169 # check for files that have been removed in the new version
170 for file, db_checksum in self.plugin_db[plugin]['files'].items():
171 if file not in manifest['provides']:
172 file_path = join(self.path, file)
173 current_checksum = hash_local_file(file_path)
174 if db_checksum != current_checksum and not force:
175 io.stderr(_(
176 "not removing '{path}' because it has been modified since installation"
177 ).format(path=file_path))
178 continue
179 remove(file_path)
180
181 self.record_as_installed(plugin, manifest)
182
183 return (old_version, new_version)
184
185 def write_db(self):
186 with open(join(self.path, "plugins.json"), 'w') as f:
187 f.write(dumps(self.plugin_db, indent=4, sort_keys=True))
188 f.write("\n")
0 # -*- coding: utf-8 -*-
1 from __future__ import unicode_literals
2
3 from imp import load_source
0 from importlib.machinery import SourceFileLoader
41 from inspect import isabstract
52 from os import environ, listdir, mkdir, walk
63 from os.path import abspath, dirname, isdir, isfile, join
74 from threading import Lock
85
96 from pkg_resources import DistributionNotFound, require, VersionConflict
7 from tomlkit import parse as parse_toml
108
119 from . import items, VERSION_STRING
1210 from .bundle import FILENAME_BUNDLE
1816 RepositoryError,
1917 )
2018 from .group import Group
21 from .metadata import (
22 blame_changed_paths,
23 changes_metadata,
24 check_metadata_processor_result,
25 deepcopy_metadata,
26 DEFAULTS,
27 DONE,
28 OVERWRITE,
29 DoNotRunAgain,
30 )
19 from .metadata import DoNotRunAgain
3120 from .node import _flatten_group_hierarchy, Node
3221 from .secrets import FILENAME_SECRETS, generate_initial_secrets_cfg, SecretProxy
33 from .utils import cached_property, get_file_contents, names
22 from .utils import (
23 cached_property,
24 error_context,
25 get_file_contents,
26 names,
27 randomize_order,
28 )
3429 from .utils.scm import get_git_branch, get_git_clean, get_rev
35 from .utils.dicts import hash_statedict, merge_dict
30 from .utils.dicts import hash_statedict
3631 from .utils.metastack import Metastack
3732 from .utils.text import bold, mark_for_translation as _, red, validate_name
3833 from .utils.ui import io, QUIT_EVENT
160155 self.__registered_hooks[filename].append(name)
161156
162157
163 class LibsProxy(object):
158 class LibsProxy:
164159 def __init__(self, path):
165160 self.__module_cache = {}
166161 self.__path = path
172167 filename = attrname + ".py"
173168 filepath = join(self.__path, filename)
174169 try:
175 m = load_source('bundlewrap.repo.libs_{}'.format(attrname), filepath)
170 m = SourceFileLoader(
171 'bundlewrap.repo.libs_{}'.format(attrname),
172 filepath,
173 ).load_module()
176174 except:
177175 io.stderr(_("Exception while trying to load {}:").format(filepath))
178176 raise
180178 return self.__module_cache[attrname]
181179
182180
183 class Repository(object):
181 class Repository:
184182 def __init__(self, repo_path=None):
185183 if repo_path is None:
186184 self.path = "/dev/null"
194192 self.node_dict = {}
195193 self._get_all_attr_code_cache = {}
196194 self._get_all_attr_result_cache = {}
197 self._node_metadata_blame = {}
198195 self._node_metadata_complete = {}
199 self._node_metadata_partial = {}
200 self._node_metadata_static_complete = set()
201196 self._node_metadata_lock = Lock()
202197
203198 if repo_path is not None:
302297
303298 open(join(bundle_dir, FILENAME_BUNDLE), 'a').close()
304299
305 def create_node(self, node_name):
306 """
307 Creates an adhoc node with the given name.
308 """
309 node = Node(node_name)
310 self.add_node(node)
311 return node
312
313300 def get_all_attrs_from_file(self, path, base_env=None):
314301 """
315302 Reads all 'attributes' (if it were a module) from a source file.
325312
326313 if path not in self._get_all_attr_code_cache:
327314 source = get_file_contents(path)
328 self._get_all_attr_code_cache[path] = \
329 compile(source, path, mode='exec')
315 with error_context(path=path):
316 self._get_all_attr_code_cache[path] = \
317 compile(source, path, mode='exec')
330318
331319 code = self._get_all_attr_code_cache[path]
332320 env = base_env.copy()
333 try:
321 with error_context(path=path):
334322 exec(code, env)
335 except:
336 io.stderr("Exception while executing {}".format(path))
337 raise
338323
339324 if not base_env:
340325 self._get_all_attr_result_cache[path] = env
341326
342327 return env
343328
344 def nodes_or_groups_from_file(self, path, attribute):
329 def nodes_or_groups_from_file(self, path, attribute, preexisting):
345330 try:
346331 flat_dict = self.get_all_attrs_from_file(
347332 path,
348333 base_env={
334 attribute: preexisting,
349335 'libs': self.libs,
350336 'repo_path': self.path,
351337 'vault': self.vault,
355341 raise RepositoryError(_(
356342 "{} must define a '{}' variable"
357343 ).format(path, attribute))
344 if not isinstance(flat_dict, dict):
345 raise ValueError(_("'{v}' in '{p}' must be a dict").format(
346 v=attribute,
347 p=path,
348 ))
358349 for name, infodict in flat_dict.items():
350 infodict.setdefault('file_path', path)
359351 yield (name, infodict)
352
353 def nodes_or_groups_from_dir(self, directory):
354 path = join(self.path, directory)
355 if not isdir(path):
356 return
357 for root_dir, _dirs, files in walk(path):
358 for filename in files:
359 filepath = join(root_dir, filename)
360 if not filename.endswith(".toml") or \
361 not isfile(filepath) or \
362 filename.startswith("_"):
363 continue
364 infodict = dict(parse_toml(get_file_contents(filepath)))
365 infodict['file_path'] = filepath
366 yield filename[:-5], infodict
360367
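For example, a hypothetical nodes/node1.toml containing the following would be yielded by this loader as ("node1", {...}):

hostname = "node1.example.com"
groups = ["webservers"]

[metadata]
foo = "bar"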
361368 def items_from_dir(self, path):
362369 """
452459 """
453460 return self.nodes_in_all_groups([group_name])
454461
455 def _metadata_for_node(self, node_name, partial=False, blame=False):
462 def _metadata_for_node(self, node_name, partial=False, blame=False, stack=False):
456463 """
457464 Returns full or partial metadata for this node.
458465
459466 Partial metadata may only be requested from inside a metadata
460 processor.
467 reactor.
461468
462469 If necessary, this method will build complete metadata for this
463470 node and all related nodes. Related meaning nodes that this node
464471 depends on in one of its metadata reactors.
465472 """
473 if partial:
474 if node_name in self._node_metadata_complete:
475 # We already completed metadata for this node, but partial must
476 # return a Metastack, so we build a single-layered one just for
477 # the interface.
478 metastack = Metastack()
479 metastack._set_layer(
480 "flattened",
481 self._node_metadata_complete[node_name],
482 )
483 return metastack
484 else:
485 # Return the WIP Metastack or an empty one if we didn't start
486 # yet.
487 self._nodes_we_need_metadata_for.add(node_name)
488 return self._metastacks.setdefault(node_name, Metastack())
489
490 if blame or stack:
491 # cannot return cached result here, force rebuild
492 try:
493 del self._node_metadata_complete[node_name]
494 except KeyError:
495 pass
496
466497 try:
467498 return self._node_metadata_complete[node_name]
468499 except KeyError:
469500 pass
470501
471 if partial:
472 self._node_metadata_partial.setdefault(node_name, {})
473 return self._node_metadata_partial[node_name]
502 # Different worker threads might request metadata at the same time.
503 # This creates problems for the following variables:
504 #
505 # self._metastacks
506 # self._nodes_we_need_metadata_for
507 #
508 # Chaos would ensue if we allowed multiple instances of
509 # _build_node_metadata() to run in parallel, messing with these
510 # vars. So we use a lock and reset the vars before and after.
474511
475512 with self._node_metadata_lock:
476513 try:
479516 except KeyError:
480517 pass
481518
482 self._node_metadata_partial[node_name] = {}
483 self._build_node_metadata(blame=blame)
484
519 # set up temporary vars
520 self._metastacks = {}
521 self._nodes_we_need_metadata_for = {node_name}
522
523 self._build_node_metadata()
524
525 io.debug("completed metadata for {} nodes".format(
526 len(self._nodes_we_need_metadata_for),
527 ))
485528 # now that we have completed all metadata for this
486529 # node and all related nodes, copy that data over
487530 # to the complete dict
488 self._node_metadata_complete.update(self._node_metadata_partial)
489
490 # reset temporary vars
491 self._node_metadata_partial = {}
492 self._node_metadata_static_complete = set()
531 for completed_node_name in self._nodes_we_need_metadata_for:
532 self._node_metadata_complete[completed_node_name] = \
533 self._metastacks[completed_node_name]._as_dict()
493534
494535 if blame:
495 return self._node_metadata_blame[node_name]
536 blame_result = self._metastacks[node_name]._as_blame()
537 elif stack:
538 stack_result = self._metastacks[node_name]
539
540 # reset temporary vars (this isn't strictly necessary, but might
541 # free up some memory and avoid confusion)
542 self._metastacks = {}
543 self._nodes_we_need_metadata_for = set()
544
545 if blame:
546 return blame_result
547 elif stack:
548 return stack_result
496549 else:
497550 return self._node_metadata_complete[node_name]
498551
499 def _build_node_metadata(self, blame=False):
552 def _build_node_metadata(self):
500553 """
501554 Builds complete metadata for all nodes that appear in
502 self._node_metadata_partial.keys().
503 """
504 # TODO remove this mechanism in bw 4.0
505 self._in_new_metareactor = False
506
507 # these processors have indicated that they do not need to be run again
508 blacklisted_metaprocs = set()
509
555 self._nodes_we_need_metadata_for.
556 """
557 # Prevents us from reassembling static metadata needlessly and
558 # helps us detect nodes pulled into self._nodes_we_need_metadata_for
559 # by node.partial_metadata.
560 nodes_with_completed_static_metadata = set()
561 # these reactors have indicated that they do not need to be run again
562 do_not_run_again = set()
563 # these reactors have raised KeyErrors
510564 keyerrors = {}
511
565 # loop detection
512566 iterations = 0
513 reactors_that_returned_something_in_last_iteration = set()
567 reactors_that_changed_something_in_last_iteration = set()
568
514569 while not QUIT_EVENT.is_set():
515570 iterations += 1
516571 if iterations > MAX_METADATA_ITERATIONS:
517 proclist = ""
518 for node, metaproc in sorted(reactors_that_returned_something_in_last_iteration):
519 proclist += node + " " + metaproc + "\n"
572 reactors = ""
573 for node, reactor in sorted(reactors_that_changed_something_in_last_iteration):
574 reactors += node + " " + reactor + "\n"
520575 raise ValueError(_(
521576 "Infinite loop detected between these metadata reactors:\n"
522 ) + proclist)
577 ) + reactors)
523578
524579 # First, get the static metadata out of the way
525 for node_name in list(self._node_metadata_partial):
580 for node_name in list(self._nodes_we_need_metadata_for):
526581 if QUIT_EVENT.is_set():
527582 break
528583 node = self.get_node(node_name)
529 node_blame = self._node_metadata_blame.setdefault(node_name, {})
530584 # check if static metadata for this node is already done
531 if node_name in self._node_metadata_static_complete:
585 if node_name in nodes_with_completed_static_metadata:
532586 continue
533
534 with io.job(_("{node} building group metadata").format(node=bold(node.name))):
587 self._metastacks[node_name] = Metastack()
588
589 with io.job(_("{node} adding metadata defaults").format(node=bold(node.name))):
590 # randomize order to increase chance of exposing clashing defaults
591 for defaults_name, defaults in randomize_order(node.metadata_defaults):
592 self._metastacks[node_name]._set_layer(
593 defaults_name,
594 defaults,
595 )
596
597 with io.job(_("{node} adding group metadata").format(node=bold(node.name))):
535598 group_order = _flatten_group_hierarchy(node.groups)
536599 for group_name in group_order:
537 new_metadata = merge_dict(
538 self._node_metadata_partial[node.name],
539 self.get_group(group_name).metadata,
600 self._metastacks[node_name]._set_layer(
601 "group:{}".format(group_name),
602 self.get_group(group_name)._attributes.get('metadata', {}),
540603 )
541 if blame:
542 blame_changed_paths(
543 self._node_metadata_partial[node.name],
544 new_metadata,
545 node_blame,
546 "group:{}".format(group_name),
547 )
548 self._node_metadata_partial[node.name] = new_metadata
549
550 with io.job(_("{node} merging node metadata").format(node=bold(node.name))):
551 # deepcopy_metadata is important here because up to this point
552 # different nodes from the same group might still share objects
553 # nested deeply in their metadata. This becomes a problem if we
554 # start messing with these objects in metadata processors. Every
555 # time we would edit one of these objects, the changes would be
556 # shared amongst multiple nodes.
557 for source_node in (node.template_node, node):
558 if not source_node: # template_node might be None
559 continue
560 new_metadata = deepcopy_metadata(merge_dict(
561 self._node_metadata_partial[node.name],
562 source_node._node_metadata,
563 ))
564 if blame:
565 blame_changed_paths(
566 self._node_metadata_partial[node.name],
567 new_metadata,
568 node_blame,
569 "node:{}".format(source_node.name),
570 )
571 self._node_metadata_partial[node.name] = new_metadata
572
573 # At this point, static metadata from groups and nodes has been merged.
574 # Next, we look at defaults from metadata.py.
575
576 for node_name in list(self._node_metadata_partial):
577 # check if static metadata for this node is already done
578 if node_name in self._node_metadata_static_complete:
579 continue
580
581 node_blame = self._node_metadata_blame[node_name]
582 with io.job(_("{node} running metadata defaults").format(node=bold(node.name))):
583 for defaults_name, defaults in node.metadata_defaults:
584 if blame:
585 blame_changed_paths(
586 self._node_metadata_partial[node.name],
587 defaults,
588 node_blame,
589 defaults_name,
590 defaults=True,
591 )
592 self._node_metadata_partial[node.name] = merge_dict(
593 defaults,
594 self._node_metadata_partial[node.name],
595 )
604
605 with io.job(_("{node} adding node metadata").format(node=bold(node.name))):
606 self._metastacks[node_name]._set_layer(
607 "node:{}".format(node_name),
608 node._attributes.get('metadata', {}),
609 )
596610
597611 # This will ensure node/group metadata and defaults are
598612 # skipped over in future iterations.
599 self._node_metadata_static_complete.add(node_name)
600
601 # TODO remove this in 4.0
602 # Now for the interesting part: We run all metadata processors
603 # until none of them return DONE anymore (indicating that they're
604 # just waiting for another metaproc to maybe insert new data,
605 # which isn't happening if none return DONE)
606 metaproc_returned_DONE = False
613 nodes_with_completed_static_metadata.add(node_name)
607614
608615 # Now for the interesting part: We run all metadata reactors
609616 # until none of them return changed metadata anymore.
610 reactor_returned_changed_metadata = False
611 reactors_that_returned_something_in_last_iteration = set()
612
613 for node_name in list(self._node_metadata_partial):
617 any_reactor_returned_changed_metadata = False
618 reactors_that_changed_something_in_last_iteration = set()
619
620 # randomize order to increase chance of exposing unintended
621 # non-deterministic effects of execution order
622 for node_name in randomize_order(self._nodes_we_need_metadata_for):
614623 if QUIT_EVENT.is_set():
615624 break
616625 node = self.get_node(node_name)
617 node_blame = self._node_metadata_blame[node_name]
618626
619627 with io.job(_("{node} running metadata reactors").format(node=bold(node.name))):
620 # TODO remove this mechanism in bw 4.0
621 self._in_new_metareactor = True
622
623 for metadata_reactor_name, metadata_reactor in node.metadata_reactors:
624 if (node_name, metadata_reactor_name) in blacklisted_metaprocs:
628 for reactor_name, reactor in randomize_order(node.metadata_reactors):
629 if (node_name, reactor_name) in do_not_run_again:
625630 continue
626 io.debug(_(
627 "running metadata reactor {metaproc} for node {node}"
628 ).format(
629 metaproc=metadata_reactor_name,
630 node=node.name,
631 ))
632 if blame:
633 # We need to deepcopy here because otherwise we have no chance of
634 # figuring out what changed...
635 input_metadata = deepcopy_metadata(
636 self._node_metadata_partial[node.name]
637 )
638 else:
639 # ...but we can't always do it for performance reasons.
640 input_metadata = self._node_metadata_partial[node.name]
641631 try:
642 stack = Metastack()
643 stack._set_layer("flattened", input_metadata)
644 new_metadata = metadata_reactor(stack)
632 new_metadata = reactor(self._metastacks[node.name])
645633 except KeyError as exc:
646 keyerrors[(node_name, metadata_reactor_name)] = exc
634 keyerrors[(node_name, reactor_name)] = exc
647635 except DoNotRunAgain:
648 blacklisted_metaprocs.add((node_name, metadata_reactor_name))
636 do_not_run_again.add((node_name, reactor_name))
649637 except Exception as exc:
650638 io.stderr(_(
651639 "{x} Exception while executing metadata reactor "
652640 "{metaproc} for node {node}:"
653641 ).format(
654642 x=red("!!!"),
655 metaproc=metadata_reactor_name,
643 metaproc=reactor_name,
656644 node=node.name,
657645 ))
658646 raise exc
659647 else:
660648 # reactor terminated normally, clear any previously stored exception
661649 try:
662 del keyerrors[(node_name, metadata_reactor_name)]
650 del keyerrors[(node_name, reactor_name)]
663651 except KeyError:
664652 pass
665 reactors_that_returned_something_in_last_iteration.add(
666 (node_name, metadata_reactor_name),
667 )
668 if not reactor_returned_changed_metadata:
669 reactor_returned_changed_metadata = changes_metadata(
670 self._node_metadata_partial[node.name],
653
654 try:
655 this_changed = self._metastacks[node_name]._set_layer(
656 reactor_name,
671657 new_metadata,
672658 )
673
674 if blame:
675 blame_changed_paths(
676 self._node_metadata_partial[node.name],
677 new_metadata,
678 node_blame,
679 "metadata_reactor:{}".format(metadata_reactor_name),
659 except TypeError as exc:
660 # TODO catch validation errors better
661 io.stderr(_(
662 "{x} Exception after executing metadata reactor "
663 "{metaproc} for node {node}:"
664 ).format(
665 x=red("!!!"),
666 metaproc=reactor_name,
667 node=node.name,
668 ))
669 raise exc
670 if this_changed:
671 reactors_that_changed_something_in_last_iteration.add(
672 (node_name, reactor_name),
680673 )
681 self._node_metadata_partial[node.name] = merge_dict(
682 self._node_metadata_partial[node.name],
683 new_metadata,
684 )
685
686 # TODO remove this mechanism in bw 4.0
687 self._in_new_metareactor = False
688
689 ### TODO remove this block in 4.0 BEGIN
690 with io.job(_("{node} running metadata processors").format(node=bold(node.name))):
691 for metadata_processor_name, metadata_processor in node._metadata_processors[2]:
692 if (node_name, metadata_processor_name) in blacklisted_metaprocs:
693 continue
694 io.debug(_(
695 "running metadata processor {metaproc} for node {node}"
696 ).format(
697 metaproc=metadata_processor_name,
698 node=node.name,
699 ))
700 if blame:
701 # We need to deepcopy here because otherwise we have no chance of
702 # figuring out what changed...
703 input_metadata = deepcopy_metadata(self._node_metadata_partial[node.name])
704 else:
705 # ...but we can't always do it for performance reasons.
706 input_metadata = self._node_metadata_partial[node.name]
707 try:
708 processed = metadata_processor(input_metadata)
709 except Exception as exc:
710 io.stderr(_(
711 "{x} Exception while executing metadata processor "
712 "{metaproc} for node {node}:"
713 ).format(
714 x=red("!!!"),
715 metaproc=metadata_processor_name,
716 node=node.name,
717 ))
718 raise exc
719 processed_dict, options = check_metadata_processor_result(
720 input_metadata,
721 processed,
722 node.name,
723 metadata_processor_name,
724 )
725 if DONE in options:
726 io.debug(_(
727 "metadata processor {metaproc} for node {node} "
728 "has indicated that it need NOT be run again"
729 ).format(
730 metaproc=metadata_processor_name,
731 node=node.name,
732 ))
733 blacklisted_metaprocs.add((node_name, metadata_processor_name))
734 metaproc_returned_DONE = True
735 else:
736 io.debug(_(
737 "metadata processor {metaproc} for node {node} "
738 "has indicated that it must be run again"
739 ).format(
740 metaproc=metadata_processor_name,
741 node=node.name,
742 ))
743
744 blame_defaults = False
745 if DEFAULTS in options:
746 processed_dict = merge_dict(
747 processed_dict,
748 self._node_metadata_partial[node.name],
749 )
750 blame_defaults = True
751 elif OVERWRITE in options:
752 processed_dict = merge_dict(
753 self._node_metadata_partial[node.name],
754 processed_dict,
755 )
756
757 if blame:
758 blame_changed_paths(
759 self._node_metadata_partial[node.name],
760 processed_dict,
761 node_blame,
762 "metadata_processor:{}".format(metadata_processor_name),
763 defaults=blame_defaults,
764 )
765
766 self._node_metadata_partial[node.name] = processed_dict
767 ### TODO remove this block in 4.0 END
768
769 if not metaproc_returned_DONE and not reactor_returned_changed_metadata:
770 if self._node_metadata_static_complete != set(self._node_metadata_partial.keys()):
674 any_reactor_returned_changed_metadata = True
675
676 if not any_reactor_returned_changed_metadata:
677 if nodes_with_completed_static_metadata != self._nodes_we_need_metadata_for:
771678 # During metadata reactor execution, partial metadata may
772679 # have been requested for nodes we did not previously
773 # consider. Since partial metadata may default to
774 # just an empty dict, we still need to make sure to
775 # generate static metadata for these new nodes, as
776 # that may trigger additional runs of metadata
777 # reactors.
680 # consider. We still need to make sure to generate static
681 # metadata for these new nodes, as that may trigger
682 # additional results from metadata reactors.
778683 continue
779684 else:
685 # Now that we're done, re-sort static metadata to
686 # overrule reactors.
687 for node_name, metastack in self._metastacks.items():
688 for identifier in list(metastack._layers.keys()):
689 if (
690 identifier.startswith("group:") or
691 identifier.startswith("node:")
692 ):
693 metastack._layers[identifier] = metastack._layers.pop(identifier)
780694 break
781695
782696 if keyerrors:
843757 self.bundle_names.append(dir_entry)
844758
845759 # populate groups
760 toml_groups = dict(self.nodes_or_groups_from_dir("groups"))
846761 self.group_dict = {}
847 for group in self.nodes_or_groups_from_file(self.groups_file, 'groups'):
762 for group in self.nodes_or_groups_from_file(self.groups_file, 'groups', toml_groups):
848763 self.add_group(Group(*group))
849764
850765 # populate items
853768 self.item_classes.append(item_class)
854769
855770 # populate nodes
771 toml_nodes = dict(self.nodes_or_groups_from_dir("nodes"))
856772 self.node_dict = {}
857 for node in self.nodes_or_groups_from_file(self.nodes_file, 'nodes'):
773 for node in self.nodes_or_groups_from_file(self.nodes_file, 'nodes', toml_nodes):
858774 self.add_node(Node(*node))
859775
860776 @cached_property
0 # -*- coding: utf-8 -*-
1 from __future__ import unicode_literals
2
30 from base64 import b64encode, urlsafe_b64decode
4 try:
5 from configparser import SafeConfigParser
6 except ImportError: # Python 2
7 from ConfigParser import SafeConfigParser
1 from configparser import ConfigParser
82 import hashlib
93 import hmac
104 from os import environ
6357 yield character
6458
6559
66 class SecretProxy(object):
60 class SecretProxy:
6761 @staticmethod
6862 def random_key():
6963 """
7468 def __init__(self, repo):
7569 self.repo = repo
7670 self.keys = self._load_keys()
77 self._call_log = {}
7871
7972 def _decrypt(self, cryptotext=None, key=None):
8073 """
242235 return random(h.digest())
243236
244237 def _load_keys(self):
245 config = SafeConfigParser()
238 config = ConfigParser()
246239 secrets_file = join(self.repo.path, FILENAME_SECRETS)
247240 try:
248241 config.read(secrets_file)
256249
257250 def decrypt(self, cryptotext, key=None):
258251 return Fault(
252 'bw secrets decrypt',
259253 self._decrypt,
260254 cryptotext=cryptotext,
261255 key=key,
263257
264258 def decrypt_file(self, source_path, key=None):
265259 return Fault(
260 'bw secrets decrypt_file',
266261 self._decrypt_file,
267262 source_path=source_path,
268263 key=key,
270265
271266 def decrypt_file_as_base64(self, source_path, key=None):
272267 return Fault(
268 'bw secrets decrypt_file_as_base64',
273269 self._decrypt_file_as_base64,
274270 source_path=source_path,
275271 key=key,
321317 def human_password_for(
322318 self, identifier, digits=2, key='generate', per_word=3, words=4,
323319 ):
324 self._call_log.setdefault(identifier, 0)
325 self._call_log[identifier] += 1
326 return Fault(
320 return Fault(
321 'bw secrets human_password_for',
327322 self._generate_human_password,
328323 identifier=identifier,
329324 digits=digits,
333328 )
334329
335330 def password_for(self, identifier, key='generate', length=32, symbols=False):
336 self._call_log.setdefault(identifier, 0)
337 self._call_log[identifier] += 1
338 return Fault(
331 return Fault(
332 'bw secrets password_for',
339333 self._generate_password,
340334 identifier=identifier,
341335 key=key,
345339
346340 def random_bytes_as_base64_for(self, identifier, key='generate', length=32):
347341 return Fault(
342 'bw secrets random_bytes_as_base64',
348343 self._generate_random_bytes_as_base64,
349344 identifier=identifier,
350345 key=key,
0 # -*- coding: utf-8 -*-
1 from __future__ import unicode_literals
2
30 from base64 import b64encode
41 from codecs import getwriter
52 from contextlib import contextmanager
74 from inspect import isgenerator
85 from os import chmod, close, makedirs, remove
96 from os.path import dirname, exists
7 from random import shuffle
108 import stat
119 from sys import stderr, stdout
1210 from tempfile import mkstemp
1513
1614 from ..exceptions import DontCache, FaultUnavailable
1715
18 __GETATTR_CODE_CACHE = {}
19 __GETATTR_RESULT_CACHE = {}
20 __GETATTR_NODEFAULT = "very_unlikely_default_value"
21
22
16
17 class NO_DEFAULT: pass
2318 MODE644 = stat.S_IRUSR | stat.S_IWUSR | stat.S_IRGRP | stat.S_IROTH
24
25 try:
26 STDERR_WRITER = getwriter('utf-8')(stderr.buffer)
27 STDOUT_WRITER = getwriter('utf-8')(stdout.buffer)
28 except AttributeError: # Python 2
29 STDERR_WRITER = getwriter('utf-8')(stderr)
30 STDOUT_WRITER = getwriter('utf-8')(stdout)
19 STDERR_WRITER = getwriter('utf-8')(stderr.buffer)
20 STDOUT_WRITER = getwriter('utf-8')(stdout.buffer)
3121
3222
3323 def cached_property(prop):
5343
5444
5545 def download(url, path):
56 if not exists(dirname(path)):
57 makedirs(dirname(path))
58 if exists(path):
59 chmod(path, MODE644)
60 with open(path, 'wb') as f:
61 r = get(url, stream=True)
62 r.raise_for_status()
63 for block in r.iter_content(1024):
64 if not block:
65 break
66 else:
67 f.write(block)
68
69
70 class Fault(object):
46 with error_context(url=url, path=path):
47 if not exists(dirname(path)):
48 makedirs(dirname(path))
49 if exists(path):
50 chmod(path, MODE644)
51 with open(path, 'wb') as f:
52 r = get(url, stream=True)
53 r.raise_for_status()
54 for block in r.iter_content(1024):
55 if not block:
56 break
57 else:
58 f.write(block)
59
60
61 class ErrorContext(Exception):
62 pass
63
64
65 @contextmanager
66 def error_context(**kwargs):
67 """
68 This can be used to provide context for critical exceptions. Since
69 we're processing lots of different dicts, a "KeyError: foo" will
70 often not be helpful, since it's not clear which dict is missing the
71 key.
72
73
74 >>> with error_context(arbitrary_kwarg="helpful hint"):
75 ... {}["foo"]
76 ...
77 Traceback (most recent call last):
78 [...]
79 KeyError: 'foo'
80
81 The above exception was the direct cause of the following exception:
82
83 Traceback (most recent call last):
84 [...]
85 bundlewrap.utils.ErrorContext: ACTUAL EXCEPTION ABOVE
86 {'arbitrary_kwarg': 'helpful hint'}
87
88
89 Careful though: Only use this in places where you don't expect
90 exceptions to occur, since they will indiscriminately be reraised as
91 ErrorContext.
92 """
93 try:
94 yield
95 except Exception as exc:
96 raise ErrorContext("ACTUAL EXCEPTION ABOVE\n" + repr(kwargs)) from exc
97
98
99 class Fault:
71100 """
72101 A proxy object for lazy access to things that may not really be
73102 available at the time of use.
75104 This lets us gracefully skip items that require information that's
76105 currently not available.
77106 """
78 def __init__(self, callback, **kwargs):
107 def __init__(self, fault_identifier, callback, **kwargs):
108 if isinstance(fault_identifier, list):
109 self.id_list = fault_identifier
110 else:
111 self.id_list = [fault_identifier]
112
113 for key, value in sorted(kwargs.items()):
114 self.id_list.append(hash(key))
115 self.id_list.append(hash(value))
116
79117 self._available = None
80118 self._exc = None
81119 self._value = None
97135 if isinstance(other, Fault):
98136 def callback():
99137 return self.value + other.value
100 return Fault(callback)
138 return Fault(self.id_list + other.id_list, callback)
101139 else:
102140 def callback():
103141 return self.value + other
104 return Fault(callback)
142 return Fault(self.id_list + ['raw {}'.format(repr(other))], callback)
143
144 def __eq__(self, other):
145 if not isinstance(other, Fault):
146 return False
147 else:
148 return self.id_list == other.id_list
149
150 def __hash__(self):
151 return hash(tuple(self.id_list))
105152
106153 def __len__(self):
107154 return len(self.value)
115162 def b64encode(self):
116163 def callback():
117164 return b64encode(self.value.encode('UTF-8')).decode('UTF-8')
118 return Fault(callback)
165 return Fault(self.id_list + ['b64encode'], callback)
119166
120167 def format_into(self, format_string):
121168 def callback():
122169 return format_string.format(self.value)
123 return Fault(callback)
170 return Fault(self.id_list + ['format_into ' + format_string], callback)
124171
125172 @property
126173 def is_available(self):
139186 def method(self, *args, **kwargs):
140187 def callback():
141188 return getattr(self.value, method_name)(*args, **kwargs)
142 return Fault(callback)
189 return Fault(self.id_list + [method_name], callback)
143190 return method
144191
145192
157204
158205
159206 def get_file_contents(path):
160 with open(path, 'rb') as f:
161 content = f.read()
207 with error_context(path=path):
208 with open(path, 'rb') as f:
209 content = f.read()
162210 return content
163211
164212
178226 """
179227 for obj in obj_list:
180228 yield obj.name
229
230
231 def randomize_order(obj):
232 if isinstance(obj, dict):
233 result = list(obj.items())
234 else:
235 result = list(obj)
236 shuffle(result)
237 return result
181238
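For illustration (hypothetical call):

randomize_order({'a': 1, 'b': 2})  # -> [('b', 2), ('a', 1)] or [('a', 1), ('b', 2)]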
182239
183240 def sha1(data):
189246 return hasher.hexdigest()
190247
191248
192 class SkipList(object):
249 class SkipList:
193250 """
194251 Used to maintain a list of nodes that have already been visited.
195252 """
0 # -*- coding: utf-8 -*-
1 from __future__ import unicode_literals
20 from sys import exit
31
42 from ..exceptions import NoSuchGroup, NoSuchItem, NoSuchNode
3937 exit(1)
4038
4139
42 def get_node(repo, node_name, adhoc_nodes=False):
40 def get_node(repo, node_name):
4341 try:
4442 return repo.get_node(node_name)
4543 except NoSuchNode:
46 if adhoc_nodes:
47 return repo.create_node(node_name)
48 else:
49 io.stderr(_("{x} No such node: {node}").format(
50 node=node_name,
51 x=red("!!!"),
52 ))
53 exit(1)
44 io.stderr(_("{x} No such node: {node}").format(
45 node=node_name,
46 x=red("!!!"),
47 ))
48 exit(1)
5449
5550
56 HELP_get_target_nodes = _("""expression to select target nodes, i.e.:
57 "node1,node2,group3,bundle:foo,!bundle:bar,!group:group4,lambda:node.metadata['magic']<3"
58 to select 'node1', 'node2', all nodes in 'group3', all nodes with the
59 bundle 'foo', all nodes without bundle 'bar', all nodes not in 'group4'
60 and all nodes whose 'magic' metadata is less than three (any exceptions
61 in lambda expressions are ignored)
51 HELP_get_target_nodes = _("""expression to select target nodes:
52
53 my_node # to select a single node
54 my_group # all nodes in this group
55 bundle:my_bundle # all nodes with this bundle
56 !bundle:my_bundle # all nodes without this bundle
57 !group:my_group # all nodes not in this group
58 "lambda:node.metadata_get('foo/magic', 47) < 3"
59 # all nodes whose metadata["foo"]["magic"] is less than three
6260 """)
6361
6462
65 def get_target_nodes(repo, target_string, adhoc_nodes=False):
66 targets = []
67 for name in target_string.split(","):
63 def get_target_nodes(repo, target_strings):
64 targets = set()
65 for name in target_strings:
6866 name = name.strip()
6967 if name.startswith("bundle:"):
7068 bundle_name = name.split(":", 1)[1]
7169 for node in repo.nodes:
7270 if bundle_name in names(node.bundles):
73 targets.append(node)
71 targets.add(node)
7472 elif name.startswith("!bundle:"):
7573 bundle_name = name.split(":", 1)[1]
7674 for node in repo.nodes:
7775 if bundle_name not in names(node.bundles):
78 targets.append(node)
76 targets.add(node)
7977 elif name.startswith("!group:"):
8078 group_name = name.split(":", 1)[1]
8179 for node in repo.nodes:
8280 if group_name not in names(node.groups):
83 targets.append(node)
81 targets.add(node)
8482 elif name.startswith("lambda:"):
8583 expression = eval("lambda node: " + name.split(":", 1)[1])
8684 for node in repo.nodes:
87 try:
88 if expression(node):
89 targets.append(node)
90 except:
91 pass
85 if expression(node):
86 targets.add(node)
9287 else:
9388 try:
94 targets.append(repo.get_node(name))
89 targets.add(repo.get_node(name))
9590 except NoSuchNode:
9691 try:
97 targets += list(repo.get_group(name).nodes)
92 group = repo.get_group(name)
9893 except NoSuchGroup:
99 if adhoc_nodes:
100 targets.append(repo.create_node(name))
101 else:
102 io.stderr(_("{x} No such node or group: {name}").format(
103 x=red("!!!"),
104 name=name,
105 ))
106 exit(1)
107 return sorted(set(targets))
94 io.stderr(_("{x} No such node or group: {name}").format(
95 x=red("!!!"),
96 name=name,
97 ))
98 exit(1)
99 else:
100 targets.update(group.nodes)
101 return targets
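A usage sketch; repo and selector values are hypothetical, and selectors now arrive as a collection of strings:

nodes = get_target_nodes(repo, ['webservers', '!bundle:nginx'])
nodes |= get_target_nodes(repo, ["lambda:node.metadata_get('env', None) == 'prod'"])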
0 # -*- coding: utf-8 -*-
1 from __future__ import unicode_literals
2
30 from difflib import unified_diff
41 from hashlib import sha1
52 from json import dumps, JSONEncoder
63
4 from tomlkit import document as toml_document
5
76 from . import Fault
87 from .text import bold, green, red
98 from .text import force_text, mark_for_translation as _
109
1110
12 try:
13 text_type = unicode
14 byte_type = str
15 except NameError:
16 text_type = str
17 byte_type = bytes
18
19 try:
20 from types import MappingProxyType
21 except ImportError:
22 # XXX Not available in Python 2, but that's EOL anyway and we're
23 # going to drop support for it very soon. The following at least
24 # creates a new object, so updates to it will not be persistent.
25 MappingProxyType = dict
26
2711 DIFF_MAX_INLINE_LENGTH = 36
2812 DIFF_MAX_LINE_LENGTH = 1024
2913
3014
31 class _Atomic(object):
15 class _Atomic:
3216 """
3317 This and the following related classes are used to mark objects as
3418 non-mergeable for the purposes of merge_dict().
3519 """
3620 pass
3721
38
39 class _AtomicDict(dict, _Atomic):
40 pass
41
42
43 class _AtomicList(list, _Atomic):
44 pass
45
46
47 class _AtomicSet(set, _Atomic):
48 pass
49
50
51 class _AtomicTuple(tuple, _Atomic):
52 pass
22 class _AtomicDict(dict, _Atomic): pass
23 class _AtomicList(list, _Atomic): pass
24 class _AtomicSet(set, _Atomic): pass
25 class _AtomicTuple(tuple, _Atomic): pass
5326
5427
5528 ATOMIC_TYPES = {
5831 set: _AtomicSet,
5932 tuple: _AtomicTuple,
6033 }
34
35
36 def dict_to_toml(dict_obj):
37 toml_doc = toml_document()
38 for key, value in dict_obj.items():
39 if isinstance(value, tuple):
40 toml_doc[key] = list(value)
41 elif isinstance(value, set):
42 toml_doc[key] = sorted(value)
43 elif isinstance(value, dict):
44 toml_doc[key] = dict_to_toml(value)
45 else:
46 toml_doc[key] = value
47 return toml_doc
6148
6249
6350 def diff_keys(sdict1, sdict2):
158145
159146 TYPE_DIFFS = {
160147 bool: diff_value_bool,
161 byte_type: diff_value_text,
148 bytes: diff_value_text,
162149 float: diff_value_int,
163150 int: diff_value_int,
164151 list: diff_value_list,
165152 set: diff_value_list,
166 text_type: diff_value_text,
153 str: diff_value_text,
167154 tuple: diff_value_list,
168155 }
169156
186173 return sorted(obj)
187174 else:
188175 return JSONEncoder.default(self, obj)
189
190
191 def freeze_object(obj):
192 """
193 Returns a read-only version of the given object (if possible).
194 """
195 if isinstance(obj, dict):
196 keys = set(obj.keys())
197 for k in keys:
198 obj[k] = freeze_object(obj[k])
199 return MappingProxyType(obj)
200 elif isinstance(obj, (list, tuple)):
201 result = []
202 for i in obj:
203 result.append(freeze_object(i))
204 return tuple(result)
205 elif isinstance(obj, set):
206 result = set()
207 for i in obj:
208 result.add(freeze_object(i))
209 return frozenset(result)
210 else:
211 return obj
212176
213177
214178 def hash_statedict(sdict):
355319 )
356320
357321
322 class COLLECTION_OF_STRINGS: pass
323 class TUPLE_OF_INTS: pass
324
325
326 def validate_dict(candidate, schema, required_keys=None):
327 if not isinstance(candidate, dict):
328 raise ValueError(_("not a dict: {}").format(repr(candidate)))
329 for key, value in candidate.items():
330 if key not in schema:
331 raise ValueError(_("illegal key: {}").format(key))
332 allowed_types = schema[key]
333 if allowed_types == COLLECTION_OF_STRINGS:
334 if not isinstance(value, (list, set, tuple)):
335 raise ValueError(_("key '{k}' is {i}, but should be one of: {t}").format(
336 k=key,
337 i=type(value),
338 t=(list, set, tuple),
339 ))
340 for inner_value in value:
341 if not isinstance(inner_value, str):
342 raise ValueError(_("non-string member in '{k}': {v}").format(
343 k=key,
344 v=repr(inner_value),
345 ))
346 elif allowed_types == TUPLE_OF_INTS:
347 if not isinstance(value, tuple):
348 raise ValueError(_("key '{k}' is {i}, but should be a tuple").format(
349 k=key,
350 i=type(value),
351 ))
352 for inner_value in value:
353 if not isinstance(inner_value, int):
354 raise ValueError(_("non-int member in '{k}': {v}").format(
355 k=key,
356 v=repr(inner_value),
357 ))
358 elif not isinstance(value, allowed_types):
359 raise ValueError(_("key '{k}' is {i}, but should be one of: {t}").format(
360 k=key,
361 i=type(value),
362 t=allowed_types,
363 ))
364 for key in required_keys or []:
365 if key not in candidate:
366 raise ValueError(_("missing required key: {}").format(key))
367
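# Hypothetical usage sketch (the values are invented for illustration):
#
#     validate_dict(
#         {'hostname': "example.com", 'bundles': ["foo", "bar"]},
#         {'hostname': str, 'bundles': COLLECTION_OF_STRINGS},
#         required_keys=['hostname'],
#     )
#
# This raises ValueError for unknown keys, values of the wrong type,
# non-string collection members, or missing required keys.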
368
358369 def validate_statedict(sdict):
359370 """
360371 Raises ValueError if the given statedict is invalid.
362373 if sdict is None:
363374 return
364375 for key, value in sdict.items():
365 if not isinstance(force_text(key), text_type):
376 if not isinstance(force_text(key), str):
366377 raise ValueError(_("non-text statedict key: {}").format(key))
367378
368379 if type(value) not in TYPE_DIFFS and value is not None:
382393 ))
383394
384395
396 def delete_key_at_path(d, path):
397 if len(path) == 1:
398 del d[path[0]]
399 else:
400 delete_key_at_path(d[path[0]], path[1:])
401
402
403 def replace_key_at_path(d, path, new_key):
404 if len(path) == 1:
405 value = d[path[0]]
406 del d[path[0]]
407 d[new_key] = value
408 else:
409 replace_key_at_path(d[path[0]], path[1:], new_key)
410
411
412 def set_key_at_path(d, path, value):
413 if len(path) == 1:
414 d[path[0]] = value
415 else:
416 if path[0] not in d: # setdefault doesn't work with tomlkit
417 d[path[0]] = {}
418 set_key_at_path(d[path[0]], path[1:], value)
419
420
385421 def value_at_key_path(dict_obj, path):
386422 """
387423 Given the list of keys in `path`, recursively traverse `dict_obj`
0 # -*- coding: utf-8 -*-
1 from __future__ import unicode_literals
2
30 from collections import OrderedDict
41 from sys import version_info
52
6 from ..metadata import validate_metadata, value_at_key_path
7 from .dicts import freeze_object, map_dict_keys, merge_dict
8
9
10 _NO_DEFAULT = "<NO METASTACK DEFAULT PROVIDED>"
3 from ..metadata import deepcopy_metadata, validate_metadata, value_at_key_path
4 from . import NO_DEFAULT
5 from .dicts import map_dict_keys, merge_dict
116
127
138 class Metastack:
2520 else:
2621 self._layers = {}
2722
28 def get(self, path, default=_NO_DEFAULT):
23 def get(self, path, default=NO_DEFAULT):
2924 """
3025 Get the value at the given path, merging all layers together.
26
3127 Path may either be string like
28
3229 'foo/bar'
30
3331 accessing the 'bar' key in the dict at the 'foo' key
3432 or a tuple like
33
3534 ('fo/o', 'bar')
35
3636 accessing the 'bar' key in the dict at the 'fo/o' key.
3737 """
3838 if not isinstance(path, (tuple, list)):
5555 result = merge_dict(result, {'data': value})
5656
5757 if undef:
58 if default != _NO_DEFAULT:
58 if default != NO_DEFAULT:
5959 return default
6060 else:
6161 raise KeyError('/'.join(path))
6262 else:
63 return freeze_object(result['data'])
63 return deepcopy_metadata(result['data'])
6464
6565 def _as_dict(self):
6666 final_dict = {}
0 import re
1
20 from . import names
31 from .text import mark_for_translation as _, red
42
3432 items,
3533 cluster=True,
3634 concurrency=True,
37 static=True,
3835 regular=True,
3936 reverse=True,
4037 auto=True,
113110
114111
115112 def plot_group(groups, nodes, show_nodes):
113 groups = sorted(groups)
114 nodes = sorted(nodes)
115
116116 yield "digraph bundlewrap"
117117 yield "{"
118118
132132 yield "\"{}\" [fontcolor=\"#303030\",shape=box,style=rounded];".format(node.name)
133133
134134 for group in groups:
135 for subgroup in group.immediate_subgroup_names:
135 for subgroup in sorted(group._attributes.get('subgroups', set())):
136136 yield "\"{}\" -> \"{}\" [color=\"#6BB753\",penwidth=2]".format(group.name, subgroup)
137 for subgroup in group._subgroup_names_from_patterns:
137 for subgroup in sorted(group._subgroup_names_from_patterns):
138138 yield "\"{}\" -> \"{}\" [color=\"#6BB753\",penwidth=2]".format(group.name, subgroup)
139139
140140 if show_nodes:
141141 for group in groups:
142 for node in group._nodes_from_members:
143 yield "\"{}\" -> \"{}\" [color=\"#D18C57\",penwidth=2]".format(
144 group.name, node.name)
145
146 for node in group._nodes_from_patterns:
147 yield "\"{}\" -> \"{}\" [color=\"#714D99\",penwidth=2]".format(
148 group.name, node.name)
149
150142 for node in nodes:
151 if group in node._groups_dynamic:
152 yield "\"{}\" -> \"{}\" [color=\"#FF0000\",penwidth=2]".format(
143 if group in set(node._attributes.get('groups', set())):
144 yield "\"{}\" -> \"{}\" [color=\"#D18C57\",penwidth=2]".format(
145 node.name, group.name)
146 elif node in group._nodes_from_members:
147 yield "\"{}\" -> \"{}\" [color=\"#D18C57\",penwidth=2]".format(
153148 group.name, node.name)
154
149 else:
150 for pattern in sorted(group._member_patterns):
151 if pattern.search(node.name) is not None:
152 yield "\"{}\" -> \"{}\" [color=\"#714D99\",penwidth=2]".format(
153 group.name, node.name)
154 break
155155 yield "}"
156156
157157
168168 "fontname=Helvetica]")
169169 yield "edge [arrowhead=vee]"
170170
171 for group in node.groups:
171 for group in sorted(node.groups):
172172 yield "\"{}\" [fontcolor=white,style=filled];".format(group.name)
173173
174174 yield "\"{}\" [fontcolor=\"#303030\",shape=box,style=rounded];".format(node.name)
175175
176 for group in node.groups:
177 for subgroup in group.immediate_subgroup_names:
176 for group in sorted(node.groups):
177 for subgroup in sorted(group._attributes.get('subgroups', set())):
178178 if subgroup in names(node.groups):
179 yield "\"{}\" -> \"{}\" [color=\"#6BB753\",penwidth=2]".format(group.name, subgroup)
180 for pattern in group.immediate_subgroup_patterns:
181 compiled_pattern = re.compile(pattern)
182 for group2 in node.groups:
183 if compiled_pattern.search(group2.name) is not None and group2 != group:
184 yield "\"{}\" -> \"{}\" [color=\"#6BB753\",penwidth=2]".format(group.name, group2.name)
185
186 for group in node.groups:
187 if node in group._nodes_from_members:
179 yield "\"{}\" -> \"{}\" [color=\"#6BB753\",penwidth=2]".format(
180 group.name, subgroup)
181 for pattern in sorted(group._immediate_subgroup_patterns):
182 for group2 in sorted(node.groups):
183 if pattern.search(group2.name) is not None and group2 != group:
184 yield "\"{}\" -> \"{}\" [color=\"#6BB753\",penwidth=2]".format(
185 group.name, group2.name)
186
187 if group in node._attributes.get('groups', set()):
188 yield "\"{}\" -> \"{}\" [color=\"#D18C57\",penwidth=2]".format(
189 node.name, group.name)
190 elif node in group._nodes_from_members:
188191 yield "\"{}\" -> \"{}\" [color=\"#D18C57\",penwidth=2]".format(
189192 group.name, node.name)
190 elif node in group._nodes_from_patterns:
191 yield "\"{}\" -> \"{}\" [color=\"#714D99\",penwidth=2]".format(
192 group.name, node.name)
193 elif group in node._groups_dynamic:
194 yield "\"{}\" -> \"{}\" [color=\"#FF0000\",penwidth=2]".format(
195 group.name, node.name)
196
193 else:
194 for pattern in sorted(group._member_patterns):
195 if pattern.search(node.name) is not None:
196 yield "\"{}\" -> \"{}\" [color=\"#714D99\",penwidth=2]".format(
197 group.name, node.name)
197198 yield "}"
198199
199200
0 # -*- coding: utf-8 -*-
1 from __future__ import unicode_literals
2
3 from pipes import quote
0 from shlex import quote
41
52 from . import cached_property
63 from .text import force_text, mark_for_translation as _
3835 return file_stat
3936
4037
41 class PathInfo(object):
38 class PathInfo:
4239 """
4340 Serves as a proxy to get_path_type.
4441 """
5855 @property
5956 def group(self):
6057 return self.stat['group']
61
62 @property
63 def is_binary_file(self):
64 return self.is_file and not self.is_text_file
6558
6659 @property
6760 def is_directory(self):
113106 # contains backslash-escaped characters – we must lstrip() that
114107 return force_text(result.stdout).strip().lstrip("\\").split()[0]
115108
116 @cached_property
117 def sha256(self):
118 if self.node.os == 'macos':
119 result = self.node.run("shasum -a 256 -- {}".format(quote(self.path)))
120 elif self.node.os in self.node.OS_FAMILY_BSD:
121 result = self.node.run("sha256 -q -- {}".format(quote(self.path)))
122 else:
123 result = self.node.run("sha256sum -- {}".format(quote(self.path)))
124 return force_text(result.stdout).strip().split()[0]
125
126109 @property
127110 def size(self):
128111 return self.stat['size']
0 # -*- coding: utf-8 -*-
1 from __future__ import unicode_literals
2
3 from pipes import quote
0 from shlex import quote
41 from subprocess import CalledProcessError, check_output, STDOUT
52
63 from .text import mark_for_translation as _
0 # -*- coding: utf-8 -*-
1 from __future__ import unicode_literals
2
30 from os import environ
41
52 from .text import ansi_clean
7875 return result
7976
8077
81 def _border_center(column_widths):
78 def _border_center(column_widths): # FIXME unused?
8279 result = FRAME_CENTER_LEFT
8380 result += FRAME_CENTER_COLUMN_SEPARATOR.join(
8481 [FRAME_COLUMN_FILLER * width for width in column_widths]
0 # -*- coding: utf-8 -*-
1 from __future__ import unicode_literals
2
30 import platform
41 from subprocess import Popen, PIPE
52
3431
3532 tmpdir.mkdir("data")
3633 tmpdir.mkdir("hooks")
34 tmpdir.mkdir("libs")
3735
3836 groupspy = tmpdir.join("groups.py")
39 groupspy.write("# -*- coding: utf-8 -*-\ngroups = {}\n".format(repr(groups)))
37 groupspy.write("groups = {}\n".format(repr(groups)))
4038
4139 nodespy = tmpdir.join("nodes.py")
42 nodespy.write("# -*- coding: utf-8 -*-\nnodes = {}\n".format(repr(nodes)))
40 nodespy.write("nodes = {}\n".format(repr(nodes)))
4341
4442 secrets = tmpdir.join(FILENAME_SECRETS)
4543 secrets.write("[generate]\nkey = {}\n\n[encrypt]\nkey = {}\n".format(
0 # -*- coding: utf-8 -*-
1 from __future__ import unicode_literals
2
30 from datetime import datetime, timedelta
41 from io import BytesIO
52 from os import environ
4441
4542
4643 @ansi_wrapper
47 def inverse(text):
48 return "\033[0m\033[7m{}\033[0m".format(text)
49
50
51 @ansi_wrapper
5244 def italic(text):
5345 return "\033[3m{}\033[0m".format(text)
5446
121113 def force_text(data):
122114 """
123115 Try to return a text aka unicode object from the given data.
124 Also has Python 2/3 compatibility baked in. Oh the humanity.
125116 """
126117 if isinstance(data, bytes):
127118 return data.decode('utf-8', 'replace')
186177 return output
187178
188179
189 class LineBuffer(object):
180 class LineBuffer:
190181 def __init__(self, target):
191182 self.buffer = b""
192183 self.record = BytesIO()
257248 else:
258249 raise ValueError(_("{} is not a valid duration string").format(repr(duration)))
259250 return timedelta(days=days, seconds=seconds)
251
252
253 def toml_clean(s):
254 """
255 Removes duplicate sections from TOML, e.g.:
256
257 [foo] <--- this line will be removed since it's redundant
258 [foo.bar]
259 baz = 1
260 """
261 lines = list(s.splitlines())
262 result = []
263 previous = ""
264 for line in lines.copy():
265 if line.startswith("[") and line.endswith("]"):
266 if line[1:].startswith(previous + "."):
267 result.pop()
268 previous = line[1:-1]
269 else:
270 previous = ""
271 result.append(line)
272 return "\n".join(result) + "\n"
0 # -*- coding: utf-8 -*-
1 from __future__ import unicode_literals
2
30 from contextlib import contextmanager
41 from datetime import datetime
5 from errno import EPIPE
62 import fcntl
73 from functools import wraps
84 from os import _exit, environ, getpid, kill
3632 TTY = STDOUT_WRITER.isatty()
3733
3834
39 if sys.version_info >= (3, 0):
40 broken_pipe_exception = BrokenPipeError
41 else:
42 broken_pipe_exception = IOError
43
44
4535 def add_debug_indicator(f):
4636 @wraps(f)
4737 def wrapped(self, msg, **kwargs):
125115 View the given list of Unicode lines in a pager (e.g. `less`).
126116 """
127117 lines = list(lines)
128 line_width = max([len(ansi_clean(line)) for line in lines])
129 if TTY and line_width > term_width():
130 pager = Popen([environ.get("PAGER", "/usr/bin/less")], stdin=PIPE)
118 if TTY:
119 write_to_stream(STDOUT_WRITER, SHOW_CURSOR)
120 env = environ.copy()
121 env["LESS"] = env.get("LESS", "") + " -FR"
122 pager = Popen(
123 [environ.get("PAGER", "/usr/bin/less")],
124 env=env,
125 stdin=PIPE,
126 )
131127 pager.stdin.write("\n".join(lines).encode('utf-8'))
132128 pager.stdin.close()
133129 pager.communicate()
130 write_to_stream(STDOUT_WRITER, HIDE_CURSOR)
134131 else:
135132 for line in lines:
136133 io.stdout(line)
143140 else:
144141 stream.write(ansi_clean(msg))
145142 stream.flush()
146 except broken_pipe_exception as e:
147 if broken_pipe_exception == IOError:
148 if e.errno != EPIPE:
149 raise
150
151
152 class DrainableStdin(object):
143 except BrokenPipeError:
144 pass
145
146
147 class DrainableStdin:
153148 def get_input(self):
154149 while True:
155150 if QUIT_EVENT.is_set():
162157 termios.tcflush(sys.stdin, termios.TCIFLUSH)
163158
164159
165 class IOManager(object):
160 class IOManager:
166161 """
167162 Threadsafe singleton class that handles all IO.
168163 """
0 bundlewrap (4.0.0-1) unstable; urgency=medium
1
2 bundlewrap 4.0.0 makes several backwards-incompatible changes, and depending
3 on the features you use, you might have to update your repository.
4
5 For full details, the upstream changelog that contains additional
6 information is available at:
7
8 /usr/share/doc/bundlewrap/changelog.gz
9
10 A full guide on how to migrate is available at:
11
12 https://docs.bundlewrap.org/guide/migrate_34/
13
14 -- Jonathan Carter <jcc@debian.org> Sun, 19 Jul 2020 19:22:41 +0200
0 bundlewrap (4.0.0-1) unstable; urgency=medium
1
2 * New upstream release
3 * Add python3-tomlkit to build-dependencies (needed for tests)
4
5 -- Jonathan Carter <jcc@debian.org> Tue, 30 Jun 2020 20:38:26 +0200
6
07 bundlewrap (3.10.0-1) unstable; urgency=medium
18
29 * New upstream release
119119 Step 3: Implement methods
120120 -------------------------
121121
122 You should probably start with `sdict()`. Use `self.node.run("command")` to run shell commands on the current node and check the `stdout` property of the returned object.
122 You should probably start with `sdict()`. Use `self.run("command")` to run shell commands on the current node and check the `stdout` property of the returned object.
123123
124 The only other method you have to implement is `fix`. It doesn't have to return anything and just uses `self.node.run()` to fix the item. To do this efficiently, it may use the provided parameters indicating which keys differ between the should-be sdict and the actual one. Both sdicts are also provided in case you need to know their values.
124 The only other method you have to implement is `fix`. It doesn't have to return anything and just uses `self.run()` to fix the item. To do this efficiently, it may use the provided parameters indicating which keys differ between the should-be sdict and the actual one. Both sdicts are also provided in case you need to know their values.
125125
126126 `block_concurrent()` must return a list of item types (e.g. `['pkg_apt']`) that cannot be applied in parallel with this type of item. May include this very item type itself. For most items this is not an issue (e.g. creating multiple files at the same time), but some types of items have to be applied sequentially (e.g. package managers usually employ locks to ensure only one package is installed at a time).
127127
+0
-71
docs/content/guide/dev_plugin.md
0 # Writing your own plugins
1
2 [Plugins](../repo/plugins.md) can provide almost any file in a BundleWrap repository: bundles, custom items, hooks, libs, etc.
3
4 Notable exceptions are `nodes.py` and `groups.py`. If your plugin wants to extend those, use a [lib](../repo/libs.md) instead and ask users to add the result of a function call in your lib to their nodes or groups dicts.
5
6 <div class="alert alert-warning">If your plugin depends on other libraries, make sure that it catches ImportErrors in a way that makes it obvious for the user what's missing. Keep in mind that people will often just <code>git pull</code> their repo and not install your plugin themselves.</div>
7
8 <br>
9
10 ## Starting a new plugin
11
12 ### Step 1: Clone the plugins repo
13
14 Create a clone of the [official plugins repo](https://github.com/bundlewrap/plugins) on GitHub.
15
16 ### Step 2: Create a branch
17
18 You should work on a branch specific to your plugin.
19
20 ### Step 3: Copy your plugin files
21
22 Now take the files that make up your plugin and move them into a subfolder of the plugins repo. The subfolder must be named like your plugin.
23
24 ### Step 4: Create required files
25
26 In your plugin subfolder, create a file called `manifest.json` from this template:
27
28 {
29 "desc": "Concise description (keep it somewhere around 80 characters)",
30 "help": "Optional verbose help text to be displayed after installing. May\ninclude\nnewlines.",
31 "provides": [
32 "bundles/example/items.py",
33 "hooks/example.py"
34 ],
35 "version": 1
36 }
37
38 The `provides` section must contain a list of all files provided by your plugin.
39
40 You also have to create an `AUTHORS` file containing your name and email address.
41
42 Last but not least we require a `LICENSE` file with an OSI-approved Free Software license.
43
44 ### Step 5: Update the plugin index
45
46 Run the `update_index.py` script at the root of the plugins repo.
47
48 ### Step 6: Run tests
49
50 Run the `test.py` script at the root of the plugins repo. It will tell you if there is anything wrong with your plugin.
51
52 ### Step 7: Commit
53
54 Commit all changes to your branch
55
56 ### Step 8: Create pull request
57
58 Create a pull request on GitHub to request inclusion of your new plugin in the official repo. Once your branch is merged, your plugin will become available to be installed by `bw repo plugin install yourplugin` and appear on [plugins.bundlewrap.org](http://plugins.bundlewrap.org).
59
60 <br>
61
62 ## Updating an existing plugin
63
64 To release a new version of your plugin:
65
66 * Increase the version number in `manifest.json`
67 * Update the list of provided files in `manifest.json`
68 * If you're updating someone else's plugin, you should get their consent and add your name to `AUTHORS`
69
70 Then just follow the instructions above from step 5 onward.
6262 * `dpkg` (only used with [pkg_apt](../items/pkg_apt.md) items)
6363 * `echo`
6464 * `file`
65 * `find` (only used with [directory purging](../items/directory.md#purge))
65 * `find`
6666 * `grep`
6767 * `groupadd`
6868 * `groupmod`
7777 * `sha1sum`
7878 * `stat`
7979 * `systemctl` (only used with [svc_systemd](../items/svc_systemd.md) items)
80 * `tar` (only used with [git_deploy](../items/git_deploy.md) items)
8081 * `useradd`
8182 * `usermod`
8283
6565 },
6666 }
6767
68 All item names (except namespaces themselves) must be prefixed with the name of a namespace and a forward slash `/`. Note that BundleWrap will include defaults for the `apiVersion`, `Kind`, and `metadata/name` keys, but you can override them if you must.
68 All item names (except namespaces themselves) must be prefixed with the name of a namespace and a forward slash `/`. Note that BundleWrap will include defaults for the `Kind` and `metadata/name` keys, but you can override them if you must.
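For instance, a hypothetical namespaced item might be named like this:

    k8s_deployments = {
        "my-app/my-deployment": {  # namespace "my-app", resource "my-deployment"
            'manifest': {
                # ...
            },
        },
    }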
6969
7070 Alternatively, you can keep your resource definitions in manifest files:
7171
2929 ✓ node1 lock Y1KD removed</code></pre>
3030
3131 Expired locks are automatically and silently purged whenever BundleWrap has the opportunity. Be sure to check out `bw lock add --help` for how to customize expiration time, add a short comment explaining the reason for the lock, or lock only certain items. Using `bw apply` on a soft locked node is not an error and affected items will simply be skipped.
32
33 ## Locking non-UNIX nodes
34
35 Most of the time, BundleWrap assumes that your target system is a UNIX-like operating system. It then stores locks as files in the node's local file system.
36
37 BundleWrap also supports managing non-UNIX nodes, such as Kubernetes clusters, and you can write your own custom item types to manage hardware. In those situations, BundleWrap has no place to store lock files.
38
39 You can solve this by designating another regular UNIX node as a "locking node":
40
41 <pre><code class="nohighlight">nodes['my.k8s.cluster'] = {
42 'locking_node': 'my.openbsd.box',
43 'os': 'kubernetes',
44 'metadata': {
45 ...
46 },
47 }</code></pre>
48
49 `my.openbsd.box` is the name of another regular node, which must be managed by BundleWrap. You can now use all the usual locking mechanisms when working with `my.k8s.cluster` and its locks will be stored on `my.openbsd.box`. (They will, of course, not conflict with regular locks for `my.openbsd.box`.)
50
51 A locking node can host locks for as many other nodes as you wish.
0 # Migrating from BundleWrap 3.x to 4.x
1
2 As per [semver](http://semver.org), BundleWrap 4.0 breaks compatibility with repositories created for BundleWrap 3.x. This document provides a guide on how to upgrade your repositories to BundleWrap 4.x. Please read the entire document before proceeding.
3
4 <br>
5
6 ## metadata.py
7
8 Metadata processors have been split into defaults and reactors. See [metadata.py](../repo/metadata.py.md) for details.
9
10 Generally speaking, metadata processors that returned `DONE, DEFAULTS` can be turned into defaults.
11
12 @metadata_processor
13 def foo(metadata):
14 return {"bar": 47}
15
16 becomes
17
18 defaults = {
19 "bar": 47,
20 }
21
22 Metadata processors that return `OVERWRITE, RUN_ME_AGAIN` or otherwise depend on other metadata need to be turned into reactors:
23
24 @metadata_processor
25 def foo(metadata):
26 metadata["bar"] = metadata["baz"] + 5
27 return metadata, OVERWRITE, RUN_ME_AGAIN
28
29 becomes
30
31 @metadata_reactor
32 def foo(metadata):
33 return {
34 "bar": metadata.get("baz") + 5,
35 }
36
37 <br>
38
39 ## members_add and members_remove
40
41 These must be replaced by other mechanisms, such as the newly-available `groups` attribute on individual nodes (see the sketch below). Also note that you can now do `bw apply 'lambda:node.metadata["env"] == "prod"'`, so you may no longer need to create groups based on metadata.
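A minimal sketch of the node-side replacement (the group name is illustrative):

    nodes['node1'] = {
        'groups': {
            "prod",  # roughly replaces a members_add lambda matching this node
        },
    }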
42
43 <br>
44
45 ## Plugins
46
47 The plugin system has been removed since it saw barely any use. The most popular plugin, the `git_deploy` item, is now built into BundleWrap itself.
48
49 rm plugins.json
50 rm items/git_deploy.py
51
52 <br>
53
54 ## Command line argument parsing
55
56 Previously, `bw` used a comma-separated syntax to specify targets for certain actions such as `bw apply`. We now use a space-separated style:
57
58 bw apply node1,node2
59
60 becomes
61
62 bw apply node1 node2
63
64 This may appear trivial, but it can confuse people who are not used to providing multiple multi-value, space-separated arguments on the command line.
65
66 bw nodes -a all node1
67
68 becomes
69
70 bw nodes -a all -- node1
71
72 The `--` is necessary so we can tell when the argument list for `-a` ends. Here is another example:
73
74 bw nodes -a hostname,bundles node1,node2
75
76 becomes
77
78 bw nodes -a hostname bundles -- node1 node2
79
80 While a little more verbose, this style lets us use proper shell quoting for argument tokens.
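For example, the lambda selector mentioned above can now be passed as a single, properly quoted token:

    bw apply 'lambda:node.metadata["env"] == "prod"'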
81
82 <br>
83
84 ## Minor changes
85
86 For everything else, please consult the [changelog](https://github.com/bundlewrap/bundlewrap/blob/master/CHANGELOG.md#400).
0 # TOML nodes and groups
1
2 The primary way to define nodes is in [nodes.py](../repo/nodes.py.md). However, BundleWrap also provides a built-in alternative that you can use to define each node in a [TOML](https://github.com/toml-lang/toml) file. Doing this has pros and cons, which is why you can choose which way is best for you.
3
4 *Pros*
5
6 * One file per node
7 * Node files are machine-readable and -writeable
8 * Easier on the eyes for nodes with simple metadata
9
10 *Cons*
11
12 * Does not support [Fault objects](../api/#bundlewraputilsfault)
13 * Does not support [atomic()](../repo/groups.py.md#metadata)
14 * Does not support `None`
15 * Does not support sets or tuples
16 * More difficult to read for long, deeply nested metadata
17
18 <br>
19
20 ## Using TOML nodes
21
22 First, make sure your `nodes.py` doesn't overwrite your TOML nodes. Check whether it replaces the `nodes` dict:
23
24 nodes = { # bad
25 "my_node": {...},
26 }
27
28 TOML nodes will be added to the `nodes.py` context automatically, so change your `nodes.py` to add to them (or just leave the file empty):
29
30 nodes["my_node"] = { # good
31 ...
32 }
33
34 Now you are all set to create your first TOML node. Create a file called `nodes/nodenamegoeshere.toml`:
35
36 hostname = "tomlnode.example.com"
37 bundles = [
38 "bundle1",
39 "bundle2",
40 ]
41
42 [metadata]
43 foo = "bar"
44
45 [metadata.baz]
46 frob = 47
47
48 And that's it. This node will now be added to your other nodes. You may use subdirectories of `nodes/`, but the node name will always just be the filename minus the ".toml" extension.
49
50 <br>
51
52 ## Converting existing nodes
53
54 This is an easy one line operation:
55
56 bw debug -n nodenamegoeshere -c "node.toml_save()"
57
58 Don't forget to remove the original node though.
59
60 <br>
61
62 ## Editing TOML nodes from Python
63
64 BundleWrap uses [tomlkit](https://github.com/sdispater/tomlkit) internally and exposes a `TOMLDocument` instance as `node.toml` for you to modify:
65
66 $ bw debug -n nodenamegoeshere
67 >>> node.file_path
68 nodes/nodenamegoeshere.toml
69 >>> node.toml['bundles'].append("bundle3")
70 >>> node.toml_save()
71
72 For your convenience, `.toml_set()` is also provided to easily set nested dict values:
73
74 >>> node.toml_set("metadata/foo/bar/baz", 47)
75 >>> node.toml_save()
76
77 This should make it straightforward to change lots of nodes without the headaches of using `sed` or similar tools to edit Python code in `nodes.py`.
78
79 <br>
80
81 ## TOML groups
82
83 They work exactly the same way as nodes, but have their own `groups/` directory. `.toml`, `.toml_set()` and `.toml_save()` are also available on `Group` objects.
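For example, a group edit mirrors the node workflow shown above (the group name is illustrative):

    $ bw debug
    >>> repo.get_group("group1").toml_set("metadata/foo", 47)
    >>> repo.get_group("group1").toml_save()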
2727 <a href="/repo/libs">libs/</a>
2828 <a href="/guide/secrets">.secrets.cfg</a>
2929 <a href="/repo/groups.py">groups.py</a>
30 nodes/
31 <a href="/guide/toml">nodename.toml</a>
3032 <a href="/repo/nodes.py">nodes.py</a>
31 <a href="/repo/plugins">plugins.json</a>
3233 <a href="/repo/requirements.txt">requirements.txt</a>
3334 </div>
0 # Deploying from git
1
2 The `git_deploy` item lets you deploy the *contents* of a git repository to a node - without requiring the node to have access to that repository or exposing the `.git/` directory to the node.
3
4 directories = {
5 # git_deploy will not create this by itself
6 "/var/tmp/example": {},
7 }
8
9 git_deploy = {
10 "/var/tmp/example": {
11 'repo': "example",
12 'rev': "master",
13 'use_xattrs': True,
14 },
15 }
16
17 `git_deploy` items will only upload a tarball with the data from the git repo; no part of the git history is leaked to the node.
18
19 Requires git to be installed on the machine running BundleWrap.
20
21 <br>
22
23 # git_deploy_repos
24
25 Put this in a file called `git_deploy_repos` in your repository root:
26
27 example: /Users/jdoe/Projects/example
28
29 This file should also be added to your `.gitignore` if you are sharing that repo with a team. Each team member must provide a mapping of the repo name used in the bundle ("example" in this case) to a local filesystem path with a git repository. It is each user's responsibility to make sure the clone in that location is up to date.
30
31 <br>
32
33 # Attribute reference
34
35 See also: [The list of generic builtin item attributes](../repo/items.py.md#builtin-item-attributes)
36
37 <hr>
38
39 ## repo
40
41 The short name of a repo as it appears in `git_deploy_repos`.
42
43 Alternatively, it can point directly to a git URL:
44
45 git_deploy = {
46 "/var/tmp/example": {
47 'repo': "https://github.com/bundlewrap/bundlewrap.git",
48 [...]
49 },
50 }
51
52 Note however that this has a severe performance penalty, as a new clone of that repo has to be made every time the status of the item is checked.
53
54 <br>
55
56 ## rev
57
58 The `rev` attribute can contain anything `git rev-parse` can resolve into a commit hash (branch names, tags, first few characters of full commit hash). Note that you should probably use tags here. *Never* use HEAD (use a branch name like 'master' instead).
59
60 <br>
61
62 ## use_xattrs
63
64 BundleWrap needs to store the deployed commit hash on the node. The `use_xattrs` attribute controls how this is done. If set to `True`, the `attr` command on the node is used to store the hash as an extended file system attribute. Since `attr` might not be installed on the node, the default is to place a dotfile in the target directory instead (keep that in mind when deploying websites etc.).
88 Manage resources in Kubernetes clusters.
99
1010 k8s_namespaces = {
11 "my-app": {},
11 "my-app": {
12 'manifest': {
13 'apiVersion': "v1",
14 },
15 },
1216 "my-previous-app": {'delete': True},
1317 }
1418
2731 ## Resource types
2832
2933 <table>
30 <tr><th>Resource type</th><th>Bundle attribute</th><th>apiVersion</th></tr>
31 <tr><td>Cluster Role</td><td>k8s_clusterroles</td><td>rbac.authorization.k8s.io/v1</td></tr>
32 <tr><td>Cluster Role Binding</td><td>k8s_clusterrolebindings</td><td>rbac.authorization.k8s.io/v1</td></tr>
33 <tr><td>Config Map</td><td>k8s_configmaps</td><td>v1</td></tr>
34 <tr><td>Cron Job</td><td>k8s_cronjobs</td><td>batch/v1beta1</td></tr>
35 <tr><td>Custom Resource Definition</td><td>k8s_crd</td><td>apiextensions.k8s.io/v1</td></tr>
36 <tr><td>Daemon Set</td><td>k8s_daemonsets</td><td>apps/v1</td></tr>
37 <tr><td>Deployment</td><td>k8s_deployments</td><td>apps/v1</td></tr>
38 <tr><td>Ingress</td><td>k8s_ingresses</td><td>networking.k8s.io/v1beta1</td></tr>
39 <tr><td>Namespace</td><td>k8s_namespaces</td><td>v1</td></tr>
40 <tr><td>Network Policy</td><td>k8s_networkpolicies</td><td>networking.k8s.io/v1</td></tr>
41 <tr><td>Persistent Volume Claim</td><td>k8s_pvc</td><td>v1</td></tr>
42 <tr><td>Role</td><td>k8s_roles</td><td>rbac.authorization.k8s.io/v1</td></tr>
43 <tr><td>Role Binding</td><td>k8s_rolebindings</td><td>rbac.authorization.k8s.io/v1</td></tr>
44 <tr><td>Service</td><td>k8s_services</td><td>v1</td></tr>
45 <tr><td>Service Account</td><td>k8s_serviceaccounts</td><td>v1</td></tr>
46 <tr><td>Secret</td><td>k8s_secrets</td><td>v1</td></tr>
47 <tr><td>StatefulSet</td><td>k8s_statefulsets</td><td>apps/v1</td></tr>
48 <tr><td>(any)</td><td>k8s_raw</td><td>(any)</td></tr>
34 <tr><th>Resource type</th><th>Bundle attribute</th></tr>
35 <tr><td>Cluster Role</td><td>k8s_clusterroles</td></tr>
36 <tr><td>Cluster Role Binding</td><td>k8s_clusterrolebindings</td></tr>
37 <tr><td>Config Map</td><td>k8s_configmaps</td></tr>
38 <tr><td>Cron Job</td><td>k8s_cronjobs</td></tr>
39 <tr><td>Custom Resource Definition</td><td>k8s_crd</td></tr>
40 <tr><td>Daemon Set</td><td>k8s_daemonsets</td></tr>
41 <tr><td>Deployment</td><td>k8s_deployments</td></tr>
42 <tr><td>Ingress</td><td>k8s_ingresses</td></tr>
43 <tr><td>Namespace</td><td>k8s_namespaces</td></tr>
44 <tr><td>Network Policy</td><td>k8s_networkpolicies</td></tr>
45 <tr><td>Persistent Volume Claim</td><td>k8s_pvc</td></tr>
46 <tr><td>Role</td><td>k8s_roles</td></tr>
47 <tr><td>Role Binding</td><td>k8s_rolebindings</td></tr>
48 <tr><td>Service</td><td>k8s_services</td></tr>
49 <tr><td>Service Account</td><td>k8s_serviceaccounts</td></tr>
50 <tr><td>Secret</td><td>k8s_secrets</td></tr>
51 <tr><td>StatefulSet</td><td>k8s_statefulsets</td></tr>
52 <tr><td>(any)</td><td>k8s_raw</td></tr>
4953 </table>
5054
5155 You can define [Custom Resources](https://kubernetes.io/docs/concepts/api-extension/custom-resources/) like this:
5357 k8s_crd = {
5458 "custom-thing": {
5559 'manifest': {
60 'apiVersion': "apiextensions.k8s.io/v1beta1",
5661 'spec': {
5762 'names': {
5863 'kind': "CustomThing",
1313
1414 ## Contributing code
1515
16 <div class="alert alert-info">Before working on new features, try reaching out to one of the core authors first. We are very concerned with keeping BundleWrap lean and not introducing bloat. If your idea is not a good fit for all or most BundleWrap users, it can still be included <a href="../dev_plugins">as a plugin</a>.</div>
16 <div class="alert alert-info">Before working on new features, try reaching out to one of the core authors first. We are very concerned with keeping BundleWrap lean and not introducing bloat.</div>
1717
1818 Here are the steps:
1919
2222 3. Same goes for documentation.
2323 4. Set up a [virtualenv](http://virtualenv.readthedocs.org/en/latest/) and run `pip install -r requirements.txt`.
2424 5. Make sure you can connect to your localhost via `ssh` without using a password and that you are able to run `sudo`.
25 6. Run `py.test`.
25 6. Run `py.test tests/`.
2626 7. Review and sign the Copyright Assignment Agreement (CAA) by adding your name and email to the `AUTHORS` file. (This step can be skipped if your contribution is too small to be considered intellectual property, e.g. spelling fixes)
2727 8. Open a pull request on [GitHub](https://github.com/bundlewrap/bundlewrap).
2828 9. Feel great. Thank you.
3333
3434 ### Is BundleWrap secure?
3535
36 BundleWrap is more concerned with safety than security. Due to its design, it is possible for your coworkers to introduce malicious code into a BundleWrap repository that could compromise your machine. You should only use trusted repositories and plugins. We also recommend following commit logs to your repos.
36 BundleWrap is more concerned with safety than security. Due to its design, it is possible for your coworkers to introduce malicious code into a BundleWrap repository that could compromise your machine. You should only use trusted repositories and code. We also recommend following commit logs to your repos.
3737
3838 <br>
3939
2525 This section is a reference for all possible attributes you can define for a group:
2626
2727 groups = {
28 'group1': {
29 # THIS PART IS EXPLAINED HERE
30 'bundles': ["bundle1", "bundle2"],
31 'members': ["node1"],
32 'members_add': lambda node: node.os == 'debian',
33 'members_remove': lambda node: node.os == 'ubuntu',
34 'member_patterns': [r"^cluster1\."],
35 'metadata': {'foo': "bar"},
36 'os': 'linux',
37 'subgroups': ["group2", "group3"],
38 'subgroup_patterns': [r"^group.*pattern$"],
39 },
28 'group1': {
29 # THIS PART IS EXPLAINED HERE
30 'bundles': ["bundle1", "bundle2"],
31 'members': ["node1"],
32 'member_patterns': [r"^cluster1\."],
33 'metadata': {'foo': "bar"},
34 'os': 'linux',
35 'subgroups': ["group2", "group3"],
36 'subgroup_patterns': [r"^group.*pattern$"],
37 },
4038 }
4139
4240 Note that many attributes from [nodes.py](nodes.py.md) (e.g. `bundles`) may also be set at group level, but aren't explicitly documented here again.
5452 ## members
5553
5654 A tuple or list of node names that belong to this group.
57
58 <br>
59
60 ## members_add and members_remove
61
62 For these attributes you can provide a function that takes a node object as its only argument. The function must return a boolean. The function will be called once for every node in the repo. If `True`, this node will be added (`members_add`) to or removed (`members_remove`) from this group.
63
64 <div class="alert alert-warning">Inside your function you may query node attributes and groups, but you will not see groups or attributes added as a result of a different <code>members_add</code> / <code>members_remove</code> function. Only attributes and groups that have been set statically will be available. You can, however, remove a node with <code>members_remove</code> that you added with <code>members_add</code> (but not vice-versa).<br>You should also avoid using <code>node.metadata</code> here. Since metadata ultimately depends on group memberships, only metadata set in <code>nodes.py</code> will be returned here.</div>
6555
6656 <br>
6757
3838 <tr><td><a href="../../items/action">action</a></td><td><code>actions</code></td><td>Actions allow you to run commands on every <code>bw apply</code></td></tr>
3939 <tr><td><a href="../../items/directory">directory</a></td><td><code>directories</code></td><td>Manages permissions and ownership for directories</td></tr>
4040 <tr><td><a href="../../items/file">file</a></td><td><code>files</code></td><td>Manages contents, permissions, and ownership for files</td></tr>
41 <tr><td><a href="../../items/git_deploy">git_deploy</a></td><td><code>git_deploy</code></td><td>Deploys the contents of a git repository</td></tr>
4142 <tr><td><a href="../../items/group">group</a></td><td><code>groups</code></td><td>Manages groups by wrapping <code>groupadd</code>, <code>groupmod</code> and <code>groupdel</code></td></tr>
4243 <tr><td><a href="../../items/k8s">k8s_*</a></td><td><code>k8s_*</code></td><td>Manages resources in Kubernetes clusters by wrapping <code>kubectl</code></td></tr>
4344 <tr><td><a href="../../items/pkg_apt">pkg_apt</a></td><td><code>pkg_apt</code></td><td>Installs and removes packages with APT</td></tr>
112113
113114 * if you need all items of a certain type to depend on something or
114115 * if you need all items in a bundle to depend on something or
115 * if you need an item in a bundle you can't edit (e.g. because it's provided by a community-maintained [plugin](plugins.md)) to depend on something in your bundles
116 * if you need an item in a bundle you can't edit to depend on something in your bundles
116117
117118 <br>
118119
134135
135136 In this simplified example we save ourselves from duplicating the logic that gets the current MySQL version from metadata (which is probably overkill here, but you might encounter more complex situations).
136137
138 Tags also allow for optional dependencies, since items can depend on tags that don't exist. For example, if you need to do something after items from another bundle have completed, but that bundle might not always be present, you can depend on a tag given to the items of that other bundle, as sketched below.
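A hedged sketch of such an optional dependency (bundle attribute and tag name are illustrative):

    pkg_apt = {
        "foo": {
            'needs': {"tag:other_bundle_done"},  # harmless if no item carries this tag
        },
    }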
139
137140 <br>
138141
139142 ## triggers and triggered
2323 "bar": metadata.get("foo"),
2424 }
2525
26 While this looks simple enough, there are some important caveats. First and foremost: Metadata reactors must assume to be called many times. This is to give you an opportunity to react to metadata provided by other reactors. All reactors will be run again and again until none of them return any changed metadata. Anything you return from a reactor will overwrite existing metadata.
26 While this looks simple enough, there are some important caveats. First and foremost: Metadata reactors must assume they will be called many times. This is to give you an opportunity to react to metadata provided by other reactors. All reactors will be run again and again until none of them return any changed metadata. Anything you return from a reactor will overwrite defaults, while metadata from `groups.py` and `nodes.py` will still overwrite metadata from reactors. Collection types like sets and dicts will be merged.
2727
2828 The parameter `metadata` is not a dictionary but an instance of `Metastack`. You cannot modify the contents of this object. It provides `.get("some/path", "default")` to query a key path (equivalent to `metadata["some"]["path"]` in a dict) and accepts an optional default value. It will raise a `KeyError` when called for a non-existent path without a default.
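For example, a reactor can use the default to stay robust while other reactors are still filling in data (key names are illustrative):

    @metadata_reactor
    def frob(metadata):
        return {
            "frob_doubled": metadata.get("magic/frob", 0) * 2,  # default avoids a KeyError
        }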
2929
3030 While node and group metadata and metadata defaults will always be available to reactors, you should not rely on that for the simple reason that you may one day move some metadata from those static sources into another reactor, which may be run later. Thus you may need to wait for some iterations before that data shows up in `metadata`. Note that BundleWrap will catch any `KeyError`s raised in metadata reactors and only report them if they don't go away after all other relevant reactors are done.
3131
3232 To avoid deadlocks when accessing *other* nodes' metadata from within a metadata reactor, use `other_node.partial_metadata` instead of `other_node.metadata`. For the same reason, always use the `metadata` parameter to access the current node's metadata, never `node.metadata`.
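A hedged sketch of that pattern, assuming the usual `repo` name is available in `metadata.py` and that each client node sets a `backup` key in its own metadata:

    @metadata_reactor
    def backup_clients(metadata):
        return {
            "backup_clients": {
                other_node.name
                for other_node in repo.nodes
                if other_node.partial_metadata.get("backup", False)
            },
        }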
33
34 <div class="alert alert-danger">Be careful when returning <a href="../../guide/api#bundlewraputilsfault">Fault</a> objects from reactors. <strong>All</strong> Fault objects (including those returned from <code>repo.vault.*</code>) will be considered <strong>equal</strong> to one another when BundleWrap inspects the returned metadata to check if anything changed compared to what was returned in an earlier iteration.</div>
3533
3634
3735 ### DoNotRunAgain
5050 nodes['node-1'] = {
5151 'hostname': "node-1.example.com",
5252 }
53
54 Alternatively, consider using [TOML nodes](../guide/toml.md).
5355
5456 <br>
5557
136138
137139 <br>
138140
139 ### template_node
140
141 Copy all attributes and merge all metadata from this node. This is useful for temporary clones of single specific nodes, where you don't want to create a group to deduplicate all the node-level configuration.
142
143 Cannot be set at group level.
144
145 <br>
146
147141 ## OS compatibility overrides
148142
149143 ### cmd_wrapper_outer
+0
-39
docs/content/repo/plugins.md
0 # Plugins
1
2 The plugin system in BundleWrap is an easy way of integrating third-party code into your repository.
3
4 <div class="alert alert-warning">While plugins are subject to some superficial code review by BundleWrap developers before being accepted, we cannot make any guarantees as to the quality and trustworthiness of plugins. Always do your due diligence before running third-party code.</div>
5
6 <br>
7
8 ## Finding plugins
9
10 It's as easy as `bw repo plugin search <term>`. Or you can browse [plugins.bundlewrap.org](http://plugins.bundlewrap.org).
11
12 <br>
13
14 ## Installing plugins
15
16 You probably guessed it: `bw repo plugin install <plugin>`
17
18 Installing the first plugin in your repo will create a file called `plugins.json`. You should commit this file (and any files installed by the plugin of course) to version control.
19
20 <div class="alert alert-info">Avoid editing files provided by plugins at all costs. Local modifications will prevent future updates to the plugin.</div>
21
22 <br>
23
24 ## Updating plugins
25
26 You can update all installed plugins with this command: `bw repo plugin update`
27
28 <br>
29
30 ## Removing a plugin
31
32 `bw repo plugin remove <plugin>`
33
34 <br>
35
36 ## Writing your own
37
38 See the [guide on publishing your own plugins](../guide/dev_plugin.md).
1818 - Locking: guide/locks.md
1919 - Kubernetes: guide/kubernetes.md
2020 - Custom items: guide/dev_item.md
21 - Writing plugins: guide/dev_plugin.md
2221 - Python API: guide/api.md
2322 - OS compatibility: guide/os_compatibility.md
23 - TOML nodes and groups: guide/toml.md
2424 - Migrating to 2.0: guide/migrate_12.md
2525 - Migrating to 3.0: guide/migrate_23.md
26 - Migrating to 4.0: guide/migrate_34.md
2627 - Repository:
2728 - Overview: repo/layout.md
2829 - nodes.py: repo/nodes.py.md
3233 - bundles/.../metadata.py: repo/metadata.py.md
3334 - hooks/: repo/hooks.md
3435 - libs/: repo/libs.md
35 - Plugins: repo/plugins.md
3636 - Items:
3737 - action: items/action.md
3838 - directory: items/directory.md
3939 - file: items/file.md
40 - git_deploy: items/git_deploy.md
4041 - group: items/group.md
4142 - k8s_*: items/k8s.md
4243 - pkg_apt: items/pkg_apt.md
55 python_files=*.py
66 python_classes=Test
77 python_functions=test_*
8
9 [bdist_wheel]
10 universal = 1
0 from sys import version_info
1
20 from setuptools import find_packages, setup
31
42
5 dependencies = [
6 "cryptography",
7 "Jinja2",
8 "Mako",
9 "passlib",
10 "pyyaml",
11 "requests >= 1.0.0",
12 "six",
13 ]
14 if version_info < (3, 2, 0):
15 dependencies.append("futures")
16
173 setup(
184 name="bundlewrap",
19 version="3.10.0",
5 version="4.0.0",
206 description="Config management with Python",
217 long_description=(
228 "By allowing for easy and low-overhead config management, BundleWrap fills the gap between complex deployments using Chef or Puppet and old school system administration over SSH.\n"
4127 "Natural Language :: English",
4228 "Operating System :: POSIX :: Linux",
4329 "Programming Language :: Python",
44 "Programming Language :: Python :: 2.7",
45 "Programming Language :: Python :: 3.4",
46 "Programming Language :: Python :: 3.5",
4730 "Programming Language :: Python :: 3.6",
4831 "Programming Language :: Python :: 3.7",
4932 "Programming Language :: Python :: 3.8",
5033 "Topic :: System :: Installation/Setup",
5134 "Topic :: System :: Systems Administration",
5235 ],
53 install_requires=dependencies,
54 extras_require={ # used for wheels
55 ':python_version=="2.7"': ["futures"],
56 },
36 install_requires=[
37 "cryptography",
38 "Jinja2",
39 "Mako",
40 "passlib",
41 "pyyaml",
42 "requests >= 1.0.0",
43 "tomlkit",
44 ],
5745 zip_safe=False,
5846 )
+0
-57
tests/integration/bw_adhoc_nodes.py
0 from os.path import exists, join
1
2 from bundlewrap.utils.testing import host_os, make_repo, run
3
4
5 def test_apply(tmpdir):
6 make_repo(
7 tmpdir,
8 bundles={
9 "bundle1": {
10 'files': {
11 join(str(tmpdir), "test"): {
12 'content': "test",
13 },
14 },
15 },
16 },
17 groups={
18 "adhoc-localhost": {
19 'bundles': ["bundle1"],
20 'member_patterns': ["localhost"],
21 'os': host_os(),
22 },
23 },
24 )
25
26 assert not exists(join(str(tmpdir), "test"))
27 stdout, stderr, rcode = run("bw -A apply localhost", path=str(tmpdir))
28 assert rcode == 0
29 assert exists(join(str(tmpdir), "test"))
30
31
32 def test_apply_fail(tmpdir):
33 make_repo(
34 tmpdir,
35 bundles={
36 "bundle1": {
37 'files': {
38 join(str(tmpdir), "test"): {
39 'content': "test",
40 },
41 },
42 },
43 },
44 groups={
45 "adhoc-localhost": {
46 'bundles': ["bundle1"],
47 'member_patterns': ["localhost"],
48 'os': host_os(),
49 },
50 },
51 )
52
53 assert not exists(join(str(tmpdir), "test"))
54 stdout, stderr, rcode = run("bw apply localhost", path=str(tmpdir))
55 assert rcode == 1
56 assert not exists(join(str(tmpdir), "test"))
0 # -*- coding: utf-8 -*-
1 from __future__ import unicode_literals
2
30 from bundlewrap.utils.testing import host_os, make_repo, run
41
52
0 # -*- coding: utf-8 -*-
1 from __future__ import unicode_literals
2
30 from os.path import exists, join
41
52 from bundlewrap.utils.testing import host_os, make_repo, run
3633 },
3734 )
3835
39 run("bw apply -o bundle:test localhost", path=str(tmpdir))
36 run("bw apply -o bundle:test -- localhost", path=str(tmpdir))
4037 assert exists(join(str(tmpdir), "foo"))
4138 assert exists(join(str(tmpdir), "bar"))
4239 assert not exists(join(str(tmpdir), "baz"))
0 # -*- coding: utf-8 -*-
1 from __future__ import unicode_literals
2
30 from os.path import exists, join
41
52 from bundlewrap.utils.testing import host_os, make_repo, run
2421 },
2522 },
2623 )
27 result = run("bw apply --skip bundle:test localhost", path=str(tmpdir))
24 result = run("bw apply --skip bundle:test -- localhost", path=str(tmpdir))
2825 assert result[2] == 0
2926 assert not exists(join(str(tmpdir), "foo"))
3027
4441 nodes={
4542 "localhost": {
4643 'bundles': ["test"],
44 'groups': {"foo"},
4745 'os': host_os(),
4846 },
4947 },
5048 groups={
51 "foo": {'members': ["localhost"]},
49 "foo": {},
5250 },
5351 )
54 result = run("bw apply --skip group:foo localhost", path=str(tmpdir))
52 result = run("bw apply --skip group:foo -- localhost", path=str(tmpdir))
5553 assert result[2] == 0
5654 assert not exists(join(str(tmpdir), "foo"))
5755
7573 },
7674 },
7775 )
78 result = run("bw apply --skip file:{} localhost".format(join(str(tmpdir), "foo")), path=str(tmpdir))
76 result = run("bw apply --skip file:{} -- localhost".format(join(str(tmpdir), "foo")), path=str(tmpdir))
7977 assert result[2] == 0
8078 assert not exists(join(str(tmpdir), "foo"))
8179
9997 },
10098 },
10199 )
102 result = run("bw apply --skip node:localhost localhost", path=str(tmpdir))
100 result = run("bw apply --skip node:localhost -- localhost", path=str(tmpdir))
103101 assert result[2] == 0
104102 assert not exists(join(str(tmpdir), "foo"))
105103
124122 },
125123 },
126124 )
127 result = run("bw apply --skip tag:nope localhost", path=str(tmpdir))
125 result = run("bw apply --skip tag:nope -- localhost", path=str(tmpdir))
128126 assert result[2] == 0
129127 assert not exists(join(str(tmpdir), "foo"))
130128
148146 },
149147 },
150148 )
151 result = run("bw apply --skip file: localhost", path=str(tmpdir))
149 result = run("bw apply --skip file: -- localhost", path=str(tmpdir))
152150 assert result[2] == 0
153151 assert not exists(join(str(tmpdir), "foo"))
154152
178176 },
179177 },
180178 )
181 result = run("bw apply --skip tag:nope localhost", path=str(tmpdir))
179 result = run("bw apply --skip tag:nope -- localhost", path=str(tmpdir))
182180 assert result[2] == 0
183181 assert not exists(join(str(tmpdir), "foo"))
184182 assert not exists(join(str(tmpdir), "bar"))
0 # -*- coding: utf-8 -*-
1 from __future__ import unicode_literals
2
30 from os import mkdir
41 from os.path import exists, join
52
0 # -*- coding: utf-8 -*-
1 from __future__ import unicode_literals
2
30 from base64 import b64encode
41 from os.path import exists, join
52
215212 with open(join(str(tmpdir), "foo"), 'rb') as f:
216213 content = f.read()
217214 assert content == b"${node.name}"
215
216
217 def test_fault_content_unavailable_skipped(tmpdir):
218 make_repo(
219 tmpdir,
220 bundles={
221 "test": {},
222 },
223 nodes={
224 "localhost": {
225 'bundles': ["test"],
226 'os': host_os(),
227 },
228 },
229 )
230 with open(join(str(tmpdir), "bundles", "test", "items.py"), 'w') as f:
231 f.write("""
232 files = {
233 "/tmp/bw_test_faultunavailable": {
234 'content': repo.vault.password_for("fault", key="missing"),
235 },
236 }
237 """)
238 stdout, stderr, rcode = run("bw apply localhost", path=str(tmpdir))
239 assert rcode == 0
240 assert b"file:/tmp/bw_test_faultunavailable skipped (Fault unavailable)" in stdout
241 assert not exists("/tmp/bw_test_faultunavailable")
0 from os.path import exists, join
1
2 from bundlewrap.utils.testing import host_os, make_repo, run
3
4
5 def test_deploy_from_url(tmpdir):
6 make_repo(
7 tmpdir,
8 bundles={
9 "test": {
10 'git_deploy': {
11 join(str(tmpdir), "git_deployed_bw"): {
12 'repo': "https://github.com/bundlewrap/bundlewrap.git",
13 'rev': "master",
14 },
15 },
16 'directories': {
17 join(str(tmpdir), "git_deployed_bw"): {},
18 },
19 },
20 },
21 nodes={
22 "localhost": {
23 'bundles': ["test"],
24 'os': host_os(),
25 },
26 },
27 )
28
29 assert not exists(join(str(tmpdir), "git_deployed_bw", "LICENSE"))
30 stdout, stderr, rcode = run("bw apply localhost", path=str(tmpdir))
31 assert rcode == 0
32 assert exists(join(str(tmpdir), "git_deployed_bw", "LICENSE"))
33 assert not exists(join(str(tmpdir), "git_deployed_bw", ".git"))
34
35
36 def test_cannot_deploy_into_purged(tmpdir):
37 make_repo(
38 tmpdir,
39 bundles={
40 "test": {
41 'git_deploy': {
42 join(str(tmpdir), "git_deployed_bw"): {
43 'repo': "https://github.com/bundlewrap/bundlewrap.git",
44 'rev': "master",
45 },
46 },
47 'directories': {
48 join(str(tmpdir), "git_deployed_bw"): {
49 'purge': True,
50 },
51 },
52 },
53 },
54 nodes={
55 "localhost": {
56 'bundles': ["test"],
57 'os': host_os(),
58 },
59 },
60 )
61
62 stdout, stderr, rcode = run("bw apply localhost", path=str(tmpdir))
63 assert rcode == 1
64 assert b"cannot git_deploy into purged directory" in stderr
0 # -*- coding: utf-8 -*-
1 from __future__ import unicode_literals
2
30 from json import loads
41 from os import environ
52
0 # -*- coding: utf-8 -*-
1 from __future__ import unicode_literals
20 from os.path import exists, join
31
42 from bundlewrap.utils.testing import host_os, make_repo, run
0 # -*- coding: utf-8 -*-
1 from __future__ import unicode_literals
2
30 from os.path import exists, join
41
52 from bundlewrap.utils.testing import host_os, make_repo, run
0 # -*- coding: utf-8 -*-
1 from __future__ import unicode_literals
2
30 from os import mkdir, readlink, symlink
41 from os.path import join
52
88 "node2": {'metadata': {"key": "value2"}},
99 },
1010 )
11 stdout, stderr, rcode = run("bw diff -m node1,node2", path=str(tmpdir))
11 stdout, stderr, rcode = run("bw diff -m node1 node2", path=str(tmpdir))
1212 assert b"value1" in stdout
1313 assert b"value2" in stdout
1414 assert stderr == b""
3939 },
4040 },
4141 )
42 stdout, stderr, rcode = run("bw diff -i file:/tmp/test node1,node2", path=str(tmpdir))
42 stdout, stderr, rcode = run("bw diff -i file:/tmp/test -- node1 node2", path=str(tmpdir))
4343 assert b"one" in stdout
4444 assert b"two" in stdout
4545 assert stderr == b""
7777 },
7878 },
7979 )
80 stdout, stderr, rcode = run("bw diff node1,node2", path=str(tmpdir))
80 stdout, stderr, rcode = run("bw diff node1 node2", path=str(tmpdir))
8181 assert b"/tmp/foo" in stdout
8282 assert b"/tmp/bar" not in stdout
8383 assert stderr == b""
0 from json import loads
1 from os.path import join
0 from bundlewrap.utils.testing import make_repo, run
21
3 from bundlewrap.utils.testing import make_repo, run
2
3 def test_group_members(tmpdir):
4 make_repo(
5 tmpdir,
6 nodes={
7 "node1": {},
8 "node2": {},
9 "node3": {},
10 },
11 groups={
12 "group1": {},
13 "group2": {
14 'members': {"node2"},
15 },
16 "group3": {
17 'members': {"node2", "node3"},
18 },
19 },
20 )
21 stdout, stderr, rcode = run("BW_TABLE_STYLE=grep bw groups -i group1 group2 group3 -a nodes", path=str(tmpdir))
22 assert stdout == b"""group1\t
23 group2\tnode2
24 group3\tnode2,node3
25 """
26 assert stderr == b""
27 assert rcode == 0
428
529
630 def test_group_members_at_node(tmpdir):
1337 },
1438 groups={
1539 "group1": {},
16 "group2": {
17 'members': ["node2"],
18 },
19 "group3": {
20 'members': ["node3"],
21 },
40 "group2": {},
41 "group3": {},
2242 },
2343 )
24 stdout, stderr, rcode = run("BW_TABLE_STYLE=grep bw groups -i group1,group2,group3 nodes", path=str(tmpdir))
44 stdout, stderr, rcode = run("BW_TABLE_STYLE=grep bw groups -i group1 group2 group3 -a nodes", path=str(tmpdir))
2545 assert stdout == b"""group1\tnode1,node2
26 group2\tnode1,node2
27 group3\tnode3
46 group2\tnode1
47 group3\t
2848 """
2949 assert stderr == b""
3050 assert rcode == 0
31
32
33 def test_group_members_add(tmpdir):
34 make_repo(
35 tmpdir,
36 nodes={
37 "node1": {'os': 'centos'},
38 "node2": {'os': 'debian'},
39 "node3": {'os': 'ubuntu'},
40 },
41 )
42 with open(join(str(tmpdir), "groups.py"), 'w') as f:
43 f.write("""
44 groups = {
45 "group1": {
46 'members_add': lambda node: node.os == 'centos',
47 },
48 "group2": {
49 'members': ["node2"],
50 'members_add': lambda node: node.os != 'centos',
51 },
52 "group3": {
53 'members_add': lambda node: not node.in_group("group2"),
54 },
55 "group4": {
56 'members': ["node3"],
57 },
58 }
59 """)
60 stdout, stderr, rcode = run("BW_TABLE_STYLE=grep bw groups -i group1,group2,group3,group4 nodes", path=str(tmpdir))
61 assert stdout == b"""group1\tnode1
62 group2\tnode2,node3
63 group3\tnode1,node3
64 group4\tnode3
65 """
66 assert stderr == b""
67 assert rcode == 0
68
69
70 def test_group_members_remove(tmpdir):
71 make_repo(
72 tmpdir,
73 nodes={
74 "node1": {'os': 'centos'},
75 "node2": {'os': 'debian'},
76 "node3": {'os': 'ubuntu'},
77 "node4": {'os': 'ubuntu'},
78 },
79 )
80 with open(join(str(tmpdir), "groups.py"), 'w') as f:
81 f.write("""
82 groups = {
83 "group1": {
84 'members_add': lambda node: node.os == 'ubuntu',
85 },
86 "group2": {
87 'members_add': lambda node: node.os == 'ubuntu',
88 'members_remove': lambda node: node.name == "node3",
89 },
90 "group3": {
91 'members_add': lambda node: not node.in_group("group3"),
92 },
93 "group4": {
94 'subgroups': ["group3"],
95 'members_remove': lambda node: node.os == 'debian',
96 },
97 }
98 """)
99 stdout, stderr, rcode = run("BW_TABLE_STYLE=grep bw groups -i group1,group2,group3,group4 nodes", path=str(tmpdir))
100 assert stdout == b"""group1\tnode3,node4
101 group2\tnode4
102 group3\tnode1,node2,node3,node4
103 group4\tnode1,node3,node4
104 """
105 assert stderr == b""
106 assert rcode == 0
107
108
109 def test_group_members_partial_metadata(tmpdir):
110 make_repo(
111 tmpdir,
112 nodes={
113 "node1": {
114 'metadata': {'foo': 1},
115 },
116 "node2": {},
117 },
118 )
119 with open(join(str(tmpdir), "groups.py"), 'w') as f:
120 f.write("""
121 groups = {
122 "group1": {
123 'members_add': lambda node: node.metadata.get('foo') == 1,
124 },
125 "group2": {
126 'members': ["node2"],
127 'metadata': {'foo': 1},
128 },
129 }
130 """)
131 stdout, stderr, rcode = run("BW_TABLE_STYLE=grep bw groups -i group1,group2 nodes", path=str(tmpdir))
132 assert stdout == b"""group1\tnode1
133 group2\tnode2
134 """
135 assert stderr == b""
136 assert rcode == 0
137
138
139 def test_group_members_remove_based_on_metadata(tmpdir):
140 make_repo(
141 tmpdir,
142 nodes={
143 "node1": {
144 'metadata': {'remove': False},
145 },
146 "node2": {},
147 },
148 )
149 with open(join(str(tmpdir), "groups.py"), 'w') as f:
150 f.write("""
151 groups = {
152 "group1": {
153 'members_add': lambda node: not node.metadata.get('remove', False),
154 'members_remove': lambda node: node.metadata.get('remove', False),
155 },
156 "group2": {
157 'members': ["node2"],
158 'metadata': {'remove': True},
159 },
160 "group3": {
161 'subgroups': ["group1"],
162 'members_remove': lambda node: node.name.endswith("1") and node.metadata.get('redherring', True),
163 },
164 }
165 """)
166 stdout, stderr, rcode = run("BW_TABLE_STYLE=grep bw groups -i group1,group2,group3 nodes", path=str(tmpdir))
167 assert stdout == b"""group1\tnode1,node2
168 group2\tnode2
169 group3\tnode2
170 """
171 assert stderr == b""
172 assert rcode == 0
173
174 # make sure there is no metadata deadlock
175 stdout, stderr, rcode = run("bw metadata node1", path=str(tmpdir))
176 assert loads(stdout.decode('utf-8')) == {'remove': False}
177 assert stderr == b""
178 assert rcode == 0
179
180
181 def test_group_members_removed_from_supergroup(tmpdir):
182 make_repo(
183 tmpdir,
184 nodes={
185 'node_in_group': {
186 'hostname': "localhost",
187 },
188 'node_NOT_in_group': {
189 'hostname': "localhost",
190 'metadata': {
191 'remove_from_group': True,
192 },
193 },
194 },
195 )
196 with open(join(str(tmpdir), "groups.py"), 'w') as f:
197 f.write("""
198 groups = {
199 'super_group': {
200 'subgroups': ['intermediate_group'],
201 },
202 'intermediate_group': {
203 'members_remove': lambda node: node.metadata.get('remove_from_group', False),
204 'subgroups': ['inner_group'],
205 },
206 'inner_group': {
207 'member_patterns': (
208 r".*",
209 ),
210 },
211 }
212 """)
213 stdout, stderr, rcode = run("BW_TABLE_STYLE=grep bw groups -i inner_group,intermediate_group,intermediate_group nodes", path=str(tmpdir))
214 assert stdout == b"""inner_group\tnode_NOT_in_group,node_in_group
215 intermediate_group\tnode_in_group
216 intermediate_group\tnode_in_group
217 """
218 assert stderr == b""
219 assert rcode == 0
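The rewritten group tests above illustrate the 4.0 membership model: with the members_add/members_remove callables removed, membership is declared statically, either on the node via its 'groups' attribute, on the group via 'members', or by regex via 'member_patterns'. A minimal sketch combining all three (names are illustrative; the dicts would normally live in nodes.py and groups.py):

    nodes = {
        "web1": {'groups': {"webservers"}},   # node declares its groups
        "web2": {},
    }

    groups = {
        "webservers": {
            'member_patterns': (r"^web.*",),  # regex also matches web2
        },
        "all": {
            'members': {"web1", "web2"},      # explicit membership still works
        },
    }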
283283 make_repo(
284284 tmpdir,
285285 groups={
286 "group1": {'members': ["node1", "node2"]},
287 "group2": {'members': ["node3"]},
288 },
289 nodes={
290 "node1": {},
291 "node2": {},
292 "node3": {},
286 "group1": {},
287 "group2": {},
288 },
289 nodes={
290 "node1": {'groups': {"group1"}},
291 "node2": {'groups': {"group1"}},
292 "node3": {'groups': {"group2"}},
293293 },
294294 )
295295
302302 make_repo(
303303 tmpdir,
304304 groups={
305 "group1": {'members': ["node1", "node2"]},
306 "group2": {'members': ["node3"]},
307 },
308 nodes={
309 "node1": {},
310 "node2": {},
311 "node3": {},
305 "group1": {},
306 "group2": {},
307 },
308 nodes={
309 "node1": {'groups': {"group1"}},
310 "node2": {'groups': {"group1"}},
311 "node3": {'groups': {"group2"}},
312312 },
313313 )
314314
321321 make_repo(
322322 tmpdir,
323323 groups={
324 "group1": {'members': ["node1", "node2"]},
325 "group2": {'members': ["node3"]},
326 },
327 nodes={
328 "node1": {},
329 "node2": {},
330 "node3": {},
324 "group1": {},
325 "group2": {},
326 },
327 nodes={
328 "node1": {'groups': {"group1"}},
329 "node2": {'groups': {"group1"}},
330 "node3": {'groups': {"group2"}},
331331 },
332332 )
333333
0 # -*- coding: utf-8 -*-
1 from __future__ import unicode_literals
2
30 from bundlewrap.utils.testing import make_repo, run
41
52
0 # -*- coding: utf-8 -*-
1 from __future__ import unicode_literals
20 from re import search
31
42 from bundlewrap.utils.testing import host_os, make_repo, run
2725 },
2826 },
2927 )
30 run("rm -f /tmp/bw_test_lock_add")
31 stdout, stderr, rcode = run("BW_IDENTITY=jdoe bw lock add -c höhöhö -e 1m -i file:/tmp/bw_test_lock_add localhost", path=str(tmpdir))
28 run("sudo rm -f /tmp/bw_test_lock_add")
29 stdout, stderr, rcode = run("BW_IDENTITY=jdoe bw lock add -c höhöhö -e 1m -i file:/tmp/bw_test_lock_add -- localhost", path=str(tmpdir))
3230 assert rcode == 0
3331 lock_id = get_lock_id(stdout.decode('utf-8'))
3432 assert len(lock_id) == 4
35 stdout, stderr, rcode = run("bw apply localhost", path=str(tmpdir))
33 stdout, stderr, rcode = run("bw -d apply localhost", path=str(tmpdir))
3634 assert rcode == 0
3735 stdout, stderr, rcode = run("cat /tmp/bw_test_lock_add", path=str(tmpdir))
3836 assert rcode != 0
0 # -*- coding: utf-8 -*-
1 from __future__ import unicode_literals
2
30 from json import loads
41 from os.path import join
52
4542 tmpdir,
4643 nodes={
4744 "node1": {
45 'groups': {"group1"},
4846 'metadata': {
4947 "foo": {
5048 "bar": "baz",
5452 },
5553 groups={
5654 "group1": {
57 'members': ["node1"],
5855 'metadata': {
5956 "ding": 5,
6057 "foo": {
7774 assert rcode == 0
7875
7976
80 def test_template_node(tmpdir):
81 make_repo(
82 tmpdir,
83 nodes={
84 "node1": {
85 'template_node': "node2",
86 },
87 "node2": {
88 'metadata': {
89 "foo": 2,
90 },
91 },
92 },
93 groups={
94 "group1": {
95 'members': ["node1"],
96 'metadata': {
97 "foo": 3,
98 },
99 },
100 },
101 )
102 stdout, stderr, rcode = run("bw metadata node1", path=str(tmpdir))
103 assert loads(stdout.decode()) == {"foo": 2}
104 assert stderr == b""
105 assert rcode == 0
106
107
108 def test_template_node_override(tmpdir):
109 make_repo(
110 tmpdir,
111 nodes={
112 "node1": {
113 'metadata': {
114 "foo": 1,
115 },
116 'template_node': "node2",
117 },
118 "node2": {
119 'metadata': {
120 "foo": 2,
121 },
122 },
123 },
124 groups={
125 "group1": {
126 'members': ["node1"],
127 'metadata': {
128 "foo": 3,
129 },
130 },
131 },
132 )
133 stdout, stderr, rcode = run("bw metadata node1", path=str(tmpdir))
134 assert loads(stdout.decode()) == {"foo": 1}
135 assert stderr == b""
136 assert rcode == 0
137
138
13977 def test_metadatapy(tmpdir):
14078 make_repo(
14179 tmpdir,
14381 nodes={
14482 "node1": {
14583 'bundles': ["test"],
146 'metadata': {"foo": "bar"},
147 },
148 },
149 )
150 with open(join(str(tmpdir), "bundles", "test", "metadata.py"), 'w') as f:
151 f.write(
152 """@metadata_processor
153 def foo(metadata):
154 metadata["baz"] = node.name
155 return metadata, DONE
156 """)
157 stdout, stderr, rcode = run("bw metadata node1", path=str(tmpdir))
158 assert loads(stdout.decode()) == {
159 "baz": "node1",
160 "foo": "bar",
161 }
162 assert stderr == b""
163 assert rcode == 0
164
165
166 def test_metadatapy_defaults(tmpdir):
167 make_repo(
168 tmpdir,
169 bundles={"test": {}},
170 nodes={
171 "node1": {
172 'bundles': ["test"],
173 'metadata': {"foo": "bar"},
174 },
175 },
176 )
177 with open(join(str(tmpdir), "bundles", "test", "metadata.py"), 'w') as f:
178 f.write(
179 """@metadata_processor
84 'metadata': {
85 "foo": {
86 "bar": "shizzle",
87 },
88 },
89 },
90 },
91 )
92 with open(join(str(tmpdir), "bundles", "test", "metadata.py"), 'w') as f:
93 f.write(
94 """@metadata_reactor
18095 def foo(metadata):
18196 return {
182 "foo": "baz",
183 "baz": "foo",
184 }, DONE, DEFAULTS
185 """)
186 stdout, stderr, rcode = run("bw metadata node1", path=str(tmpdir))
187 assert loads(stdout.decode()) == {
188 "baz": "foo",
97 "baz": node.name,
98 "frob": metadata.get("foo/bar", "shnozzle") + "ay",
99 "gob": metadata.get("shlop", "mop"),
100 }
101 """)
102 stdout, stderr, rcode = run("bw metadata node1", path=str(tmpdir))
103 assert loads(stdout.decode()) == {
104 "baz": "node1",
105 "foo": {
106 "bar": "shizzle",
107 },
108 "frob": "shizzleay",
109 "gob": "mop",
110 }
111 assert stderr == b""
112 assert rcode == 0
113
114
115 def test_metadatapy_defaults(tmpdir):
116 make_repo(
117 tmpdir,
118 bundles={"test": {}},
119 nodes={
120 "node1": {
121 'bundles': ["test"],
122 'metadata': {"foo": "bar"},
123 },
124 },
125 )
126 with open(join(str(tmpdir), "bundles", "test", "metadata.py"), 'w') as f:
127 f.write(
128 """defaults = {
129 "baz": node.name,
130 "foo": "baz",
131 }
132 """)
133 stdout, stderr, rcode = run("bw metadata node1", path=str(tmpdir))
134 assert loads(stdout.decode()) == {
135 "baz": "node1",
189136 "foo": "bar",
190137 }
191138 assert stderr == b""
211158 """)
212159 with open(join(str(tmpdir), "bundles", "test", "metadata.py"), 'w') as f:
213160 f.write(
214 """@metadata_processor
161 """defaults = {
162 "foo": {
163 "bar": "frob",
164 "baz": "gobble",
165 },
166 }
167 """)
168 stdout, stderr, rcode = run("bw metadata node1", path=str(tmpdir))
169 assert loads(stdout.decode()) == {
170 "foo": {"bar": "baz"},
171 }
172 assert stderr == b""
173 assert rcode == 0
174
175
176 def test_metadatapy_update(tmpdir):
177 make_repo(
178 tmpdir,
179 bundles={"test": {}},
180 nodes={
181 "node1": {
182 'bundles': ["test"],
183 'metadata': {"foo": "bar"},
184 },
185 },
186 )
187 with open(join(str(tmpdir), "bundles", "test", "metadata.py"), 'w') as f:
188 f.write(
189 """@metadata_reactor
215190 def foo(metadata):
216191 return {
217 "foo": {
218 "bar": "frob",
219 "baz": "gobble",
220 },
221 }, DONE, DEFAULTS
222 """)
223 stdout, stderr, rcode = run("bw metadata node1", path=str(tmpdir))
224 assert loads(stdout.decode()) == {
225 "foo": {"bar": "baz"},
226 }
227 assert stderr == b""
228 assert rcode == 0
229
230
231 def test_metadatapy_update(tmpdir):
232 make_repo(
233 tmpdir,
234 bundles={"test": {}},
235 nodes={
236 "node1": {
237 'bundles': ["test"],
238 'metadata': {"foo": "bar"},
239 },
240 },
241 )
242 with open(join(str(tmpdir), "bundles", "test", "metadata.py"), 'w') as f:
243 f.write(
244 """@metadata_processor
245 def foo(metadata):
246 return {
247 "foo": "baz",
248 "baz": "foo",
249 }, DONE, OVERWRITE
250 """)
251 stdout, stderr, rcode = run("bw metadata node1", path=str(tmpdir))
252 assert loads(stdout.decode()) == {
253192 "baz": "foo",
254193 "foo": "baz",
255194 }
256 assert stderr == b""
257 assert rcode == 0
258
259
260 def test_metadatapy_invalid_number_of_elements(tmpdir):
261 make_repo(
262 tmpdir,
263 bundles={"test": {}},
264 nodes={
265 "node1": {
266 'bundles': ["test"],
267 'metadata': {"foo": "bar"},
268 },
269 },
270 )
271 with open(join(str(tmpdir), "bundles", "test", "metadata.py"), 'w') as f:
272 f.write(
273 """@metadata_processor
274 def foo(metadata):
275 return metadata
276 """)
277 stdout, stderr, rcode = run("bw metadata node1", path=str(tmpdir))
278 assert rcode != 0
279
280
281 def test_metadatapy_invalid_first_element_not_dict(tmpdir):
282 make_repo(
283 tmpdir,
284 bundles={"test": {}},
285 nodes={
286 "node1": {
287 'bundles': ["test"],
288 'metadata': {"foo": "bar"},
289 },
290 },
291 )
292 with open(join(str(tmpdir), "bundles", "test", "metadata.py"), 'w') as f:
293 f.write(
294 """@metadata_processor
295 def foo(metadata):
296 return DONE, metadata
297 """)
298 stdout, stderr, rcode = run("bw metadata node1", path=str(tmpdir))
299 assert rcode != 0
300
301
302 def test_metadatapy_invalid_defaults_plus_original_dict(tmpdir):
303 make_repo(
304 tmpdir,
305 bundles={"test": {}},
306 nodes={
307 "node1": {
308 'bundles': ["test"],
309 'metadata': {"foo": "bar"},
310 },
311 },
312 )
313 with open(join(str(tmpdir), "bundles", "test", "metadata.py"), 'w') as f:
314 f.write(
315 """@metadata_processor
316 def foo(metadata):
317 return metadata, DONE, DEFAULTS
318 """)
319 stdout, stderr, rcode = run("bw metadata node1", path=str(tmpdir))
320 assert rcode != 0
321
322
323 def test_metadatapy_invalid_overwrite_plus_original_dict(tmpdir):
324 make_repo(
325 tmpdir,
326 bundles={"test": {}},
327 nodes={
328 "node1": {
329 'bundles': ["test"],
330 'metadata': {"foo": "bar"},
331 },
332 },
333 )
334 with open(join(str(tmpdir), "bundles", "test", "metadata.py"), 'w') as f:
335 f.write(
336 """@metadata_processor
337 def foo(metadata):
338 return metadata, DONE, OVERWRITE
339 """)
340 stdout, stderr, rcode = run("bw metadata node1", path=str(tmpdir))
341 assert rcode != 0
342
343
344 def test_metadatapy_invalid_option(tmpdir):
345 make_repo(
346 tmpdir,
347 bundles={"test": {}},
348 nodes={
349 "node1": {
350 'bundles': ["test"],
351 'metadata': {"foo": "bar"},
352 },
353 },
354 )
355 with open(join(str(tmpdir), "bundles", "test", "metadata.py"), 'w') as f:
356 f.write(
357 """@metadata_processor
358 def foo(metadata):
359 return metadata, DONE, 1000
360 """)
361 stdout, stderr, rcode = run("bw metadata node1", path=str(tmpdir))
362 assert rcode != 0
363
364
365 def test_metadatapy_invalid_done_and_again(tmpdir):
366 make_repo(
367 tmpdir,
368 bundles={"test": {}},
369 nodes={
370 "node1": {
371 'bundles': ["test"],
372 'metadata': {"foo": "bar"},
373 },
374 },
375 )
376 with open(join(str(tmpdir), "bundles", "test", "metadata.py"), 'w') as f:
377 f.write(
378 """@metadata_processor
379 def foo(metadata):
380 return metadata, DONE, RUN_ME_AGAIN
381 """)
382 stdout, stderr, rcode = run("bw metadata node1", path=str(tmpdir))
383 assert rcode != 0
384
385
386 def test_metadatapy_invalid_no_done_or_again(tmpdir):
387 make_repo(
388 tmpdir,
389 bundles={"test": {}},
390 nodes={
391 "node1": {
392 'bundles': ["test"],
393 'metadata': {"foo": "bar"},
394 },
395 },
396 )
397 with open(join(str(tmpdir), "bundles", "test", "metadata.py"), 'w') as f:
398 f.write(
399 """@metadata_processor
400 def foo(metadata):
401 return {}, DEFAULTS
402 """)
403 stdout, stderr, rcode = run("bw metadata node1", path=str(tmpdir))
404 assert rcode != 0
405
406
407 def test_metadatapy_invalid_defaults_and_overwrite(tmpdir):
408 make_repo(
409 tmpdir,
410 bundles={"test": {}},
411 nodes={
412 "node1": {
413 'bundles': ["test"],
414 'metadata': {"foo": "bar"},
415 },
416 },
417 )
418 with open(join(str(tmpdir), "bundles", "test", "metadata.py"), 'w') as f:
419 f.write(
420 """@metadata_processor
421 def foo(metadata):
422 return {}, DEFAULTS, OVERWRITE, DONE
423 """)
424 stdout, stderr, rcode = run("bw metadata node1", path=str(tmpdir))
425 assert rcode != 0
195 """)
196 stdout, stderr, rcode = run("bw metadata node1", path=str(tmpdir))
197 assert loads(stdout.decode()) == {
198 "baz": "foo",
199 "foo": "bar",
200 }
201 assert stderr == b""
202 assert rcode == 0
426203
427204
428205 def test_table(tmpdir):
450227 },
451228 },
452229 },
230 groups={"all": {'member_patterns': {r".*"}}},
231 )
232 stdout, stderr, rcode = run("BW_TABLE_STYLE=grep bw metadata all -k foo_dict/bar foo_list foo_int foo_umlaut", path=str(tmpdir))
233 assert stdout.decode('utf-8') == """node\tfoo_dict/bar\tfoo_int\tfoo_list\tfoo_umlaut
234 node1\tbaz\t47\tbar, 1\tföö
235 node2\t<missing>\t-3\t\tfüü
236 """
237 assert stderr == b""
238 assert rcode == 0
239
240
241 def test_metadatapy_merge_order(tmpdir):
242 make_repo(
243 tmpdir,
244 bundles={"test": {}},
245 nodes={
246 "node1": {
247 'bundles': ["test"],
248 'groups': {"group1"},
249 'metadata': {
250 "four": "node",
251 },
252 },
253 },
453254 groups={
454 "all": {
455 'members': ["node1", "node2"],
456 },
457 },
458 )
459 stdout, stderr, rcode = run("BW_TABLE_STYLE=grep bw metadata --table all foo_dict bar, foo_list, foo_int, foo_umlaut", path=str(tmpdir))
460 assert stdout.decode('utf-8') == """node\tfoo_dict bar\tfoo_list\tfoo_int\tfoo_umlaut
461 node1\tbaz\tbar, 1\t47\tföö
462 node2\t<missing>\t\t-3\tfüü
463 """
464 assert stderr == b""
465 assert rcode == 0
466
467
468 def test_table_no_key(tmpdir):
469 make_repo(
470 tmpdir,
471 nodes={
472 "node1": {},
473 },
474 )
475 stdout, stderr, rcode = run("bw metadata --table node1", path=str(tmpdir))
476 assert rcode == 1
477
478
479 def test_metadatapy_proc_merge_order(tmpdir):
480 make_repo(
481 tmpdir,
482 bundles={"test": {}},
483 nodes={
484 "node1": {
485 'bundles': ["test"],
486 'metadata': {
487 "one": "node",
488 "two": "node",
489 "five": "node",
255 "group1": {
256 'metadata': {
257 "three": "group",
258 "four": "group",
490259 },
491260 },
492261 },
494263 with open(join(str(tmpdir), "bundles", "test", "metadata.py"), 'w') as f:
495264 f.write(
496265 """defaults = {
266 "one": "defaults",
497267 "two": "defaults",
498268 "three": "defaults",
499269 "four": "defaults",
502272 @metadata_reactor
503273 def foo_reactor(metadata):
504274 return {
275 "two": "reactor",
276 "three": "reactor",
505277 "four": "reactor",
506 "five": "reactor",
507 }
508 """)
509 stdout, stderr, rcode = run("bw metadata node1", path=str(tmpdir))
510 assert loads(stdout.decode()) == {
511 "one": "node",
512 "two": "node",
513 "three": "defaults",
514 "four": "reactor",
515 "five": "reactor",
516 }
517 assert stderr == b""
518 assert rcode == 0
519
520
521 def test_metadatapy_do_not_run_me_again(tmpdir):
522 make_repo(
523 tmpdir,
524 bundles={"test": {}},
525 nodes={
526 "node1": {
527 'bundles': ["test"],
528 },
529 },
530 )
531 with open(join(str(tmpdir), "bundles", "test", "metadata.py"), 'w') as f:
532 f.write(
533 """called = False
534 @metadata_reactor
278 }
279 """)
280 stdout, stderr, rcode = run("bw metadata node1", path=str(tmpdir))
281 assert loads(stdout.decode()) == {
282 "one": "defaults",
283 "two": "reactor",
284 "three": "group",
285 "four": "node",
286 }
287 assert stderr == b""
288 assert rcode == 0
289
290
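test_metadatapy_merge_order above pins down the 4.0 precedence: bundle defaults are the weakest layer, overridden by reactor return values, then group metadata, with the node's own metadata winning. A minimal metadata.py sketch under that model (key names are illustrative; node and @metadata_reactor are provided in the metadata.py namespace):

    defaults = {
        "port": 80,  # weakest layer: reactors, groups and nodes all override it
    }

    @metadata_reactor
    def url(metadata):
        # reactors return partial dicts; metadata.get() accepts "/"-separated paths
        return {
            "url": "http://{}:{}/".format(node.name, metadata.get("port")),
        }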
291 def test_metadatapy_static_reorder(tmpdir):
292 make_repo(
293 tmpdir,
294 bundles={"test": {}},
295 nodes={
296 "node1": {
297 'bundles': ["test"],
298 'metadata': {
299 "foo": "bar",
300 "frob": "flup",
301 },
302 },
303 },
304 )
305 with open(join(str(tmpdir), "bundles", "test", "metadata.py"), 'w') as f:
306 f.write(
307 """@metadata_reactor
535308 def foo_reactor(metadata):
536 global called
537 if not called:
538 called = True
539 raise DoNotRunAgain
540 else:
541 raise AssertionError
542 @metadata_reactor
543 def bar_reactor(metadata):
544 return {'called': called}
545 """)
546 stdout, stderr, rcode = run("bw metadata node1", path=str(tmpdir))
547 assert loads(stdout.decode()) == {
548 "called": True,
309 return {
310 "foo": "overwritten",
311 "baz": metadata.get("frob"),
312 }
313 """)
314 stdout, stderr, rcode = run("bw metadata node1", path=str(tmpdir))
315 assert loads(stdout.decode()) == {
316 "foo": "bar",
317 "frob": "flup",
318 "baz": "flup",
549319 }
550320 assert stderr == b""
551321 assert rcode == 0
620390 return {'foo_ran': True}
621391 else:
622392 return {'foo': metadata.get('bar'), 'foo_ran': True}
393
394
623395 @metadata_reactor
624396 def bar(metadata):
625397 foo_ran = metadata.get('foo_ran', False)
1919 def test_hostname(tmpdir):
2020 make_repo(
2121 tmpdir,
22 groups={"all": {'members': ["node1"]}},
22 groups={"all": {'member_patterns': {r".*"}}},
2323 nodes={"node1": {'hostname': "node1.example.com"}},
2424 )
25 stdout, stderr, rcode = run("BW_TABLE_STYLE=grep bw nodes all hostname | cut -f 2", path=str(tmpdir))
25 stdout, stderr, rcode = run("BW_TABLE_STYLE=grep bw nodes all -a hostname | cut -f 2", path=str(tmpdir))
2626 assert stdout == b"node1.example.com\n"
2727 assert stderr == b""
2828 assert rcode == 0
3535 "bundle1": {},
3636 "bundle2": {},
3737 },
38 groups={"all": {'members': ["node1", "node2"]}},
38 groups={"all": {'member_patterns': {r".*"}}},
3939 nodes={
4040 "node1": {'bundles': ["bundle1", "bundle2"]},
4141 "node2": {'bundles': ["bundle2"]},
4242 },
4343 )
44 stdout, stderr, rcode = run("BW_TABLE_STYLE=grep bw nodes all bundles | grep node1 | cut -f 2", path=str(tmpdir))
44 stdout, stderr, rcode = run("BW_TABLE_STYLE=grep bw nodes all -a bundles | grep node1 | cut -f 2", path=str(tmpdir))
4545 assert stdout.decode().strip().split("\n") == ["bundle1", "bundle2"]
4646 assert stderr == b""
4747 assert rcode == 0
7171 },
7272 },
7373 )
74 stdout, stderr, rcode = run("BW_TABLE_STYLE=grep bw nodes node1 bundles | cut -f 2", path=str(tmpdir))
74 stdout, stderr, rcode = run("BW_TABLE_STYLE=grep bw nodes node1 -a bundles | cut -f 2", path=str(tmpdir))
7575 assert stdout.decode().strip().split("\n") == ["bundle1", "bundle2", "bundle3"]
7676 assert stderr == b""
7777 assert rcode == 0
78
79
80 def test_template_node(tmpdir):
81 make_repo(
82 tmpdir,
83 nodes={
84 "node1": {'template_node': "node2"},
85 "node2": {'dummy': True},
86 },
87 )
88 stdout, stderr, rcode = run("BW_TABLE_STYLE=grep bw nodes node1 dummy | grep node1 | cut -f 2", path=str(tmpdir))
89 assert stdout.decode().strip() == "True"
90 assert stderr == b""
91 assert rcode == 0
92
93
94 def test_template_node_cascade(tmpdir):
95 make_repo(
96 tmpdir,
97 nodes={
98 "node1": {'template_node': "node2"},
99 "node2": {'template_node': "node1"},
100 },
101 )
102 stdout, stderr, rcode = run("BW_TABLE_STYLE=grep bw nodes node1 dummy", path=str(tmpdir))
103 assert rcode == 1
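Two changes are visible in this hunk: node attributes are now selected with -a (e.g. "bw nodes all -a hostname") rather than positionally, and the deleted tests reflect the removal of template_node in 4.0. A sketch in the style of these tests:

    from bundlewrap.utils.testing import make_repo, run

    def demo_nodes_attr(tmpdir):
        make_repo(tmpdir, nodes={"node1": {'hostname': "node1.example.com"}})
        stdout, stderr, rcode = run(
            "BW_TABLE_STYLE=grep bw nodes node1 -a hostname | cut -f 2",
            path=str(tmpdir),
        )
        assert stdout == b"node1.example.com\n"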
0 from os.path import join
1
20 from bundlewrap.utils.testing import make_repo, run
31
42
64 make_repo(
75 tmpdir,
86 nodes={
9 "node-foo": {},
7 "node-foo": {'groups': {"group-foo"}},
108 "node-bar": {},
119 "node-baz": {},
12 "node-pop": {},
10 "node-pop": {'groups': {"group-baz"}},
11 },
12 groups={
13 "group-foo": {
14 'member_patterns': [r".*-bar"],
15 },
16 "group-bar": {
17 'subgroups': ["group-foo"],
18 },
19 "group-baz": {},
20 "group-frob": {
21 'members': {"node-pop"},
22 },
23 "group-pop": {
24 'subgroup_patterns': [r"ba"],
25 },
1326 },
1427 )
15 with open(join(str(tmpdir), "groups.py"), 'w') as f:
16 f.write("""
17 groups = {
18 "group-foo": {
19 'members': ["node-foo"],
20 'member_patterns': [r".*-bar"],
21 },
22 "group-bar": {
23 'subgroups': ["group-foo"],
24 },
25 "group-baz": {
26 'members': ["node-pop"],
27 'members_add': lambda node: node.name == "node-pop",
28 },
29 "group-pop": {
30 'subgroup_patterns': [r"ba"],
31 },
32 }
33 """)
3428 stdout, stderr, rcode = run("bw plot groups-for-node node-foo", path=str(tmpdir))
3529 assert stdout == b"""digraph bundlewrap
3630 {
4337 "node-foo" [fontcolor="#303030",shape=box,style=rounded];
4438 "group-bar" -> "group-foo" [color="#6BB753",penwidth=2]
4539 "group-pop" -> "group-bar" [color="#6BB753",penwidth=2]
46 "group-foo" -> "node-foo" [color="#D18C57",penwidth=2]
4740 }
4841 """
4942 assert stderr == b""
5649 node [color="#303030"; fillcolor="#303030"; fontname=Helvetica]
5750 edge [arrowhead=vee]
5851 "group-baz" [fontcolor=white,style=filled];
52 "group-frob" [fontcolor=white,style=filled];
5953 "group-pop" [fontcolor=white,style=filled];
6054 "node-pop" [fontcolor="#303030",shape=box,style=rounded];
55 "group-frob" -> "node-pop" [color="#D18C57",penwidth=2]
6156 "group-pop" -> "group-baz" [color="#6BB753",penwidth=2]
62 "group-baz" -> "node-pop" [color="#D18C57",penwidth=2]
6357 }
6458 """
6559 assert stderr == b""
0 # -*- coding: utf-8 -*-
1 from __future__ import unicode_literals
2
30 from bundlewrap.utils.testing import make_repo, run
41
52
2623 )
2724
2825 stdout, stderr, rcode = run("bw stats", path=str(tmpdir))
29 assert stdout == """╭───────┬─────────────────────╮
30 │ count │ type │
31 ├───────┼─────────────────────┤
32 │ 1 │ nodes │
33 │ 0 │ groups │
34 │ 1 │ bundles │
35 │ 0 │ metadata defaults │
36 │ 0 │ metadata processors │
37 │ 0 │ metadata reactors │
38 │ 2 │ items │
39 ├───────┼─────────────────────┤
40 │ 2 │ file │
41 ╰───────┴─────────────────────╯
26 assert stdout == """╭───────┬───────────────────╮
27 │ count │ type │
28 ├───────┼───────────────────┤
29 │ 1 │ nodes │
30 │ 0 │ groups │
31 │ 1 │ bundles │
32 │ 0 │ metadata defaults │
33 │ 0 │ metadata reactors │
34 │ 2 │ items │
35 ├───────┼───────────────────┤
36 │ 2 │ file │
37 ╰───────┴───────────────────╯
4238 """.encode('utf-8')
114114 assert run("bw test -I", path=str(tmpdir))[2] == 1
115115
116116
117 def test_unknown_tag(tmpdir):
118 make_repo(
119 tmpdir,
120 nodes={
121 "node1": {
122 'bundles': ["bundle1"],
123 },
124 },
125 bundles={
126 "bundle1": {
127 "files": {
128 "/foo": {
129 'content': "none",
130 'needs': {
131 "tag:bar",
132 },
133 },
134 },
135 },
136 },
137 )
138 assert run("bw test -I", path=str(tmpdir))[2] == 0
139
140
117141 def test_circular_trigger_self(tmpdir):
118142 make_repo(
119143 tmpdir,
199223 def test_group_metadata_collision(tmpdir):
200224 make_repo(
201225 tmpdir,
202 nodes={"node1": {}},
226 nodes={
227 "node1": {
228 'groups': {
229 "group1",
230 "group3",
231 },
232 },
233 },
203234 groups={
204235 "group1": {
205 'members': ["node1"],
206236 'metadata': {
207237 'foo': {
208238 'baz': 1,
219249 },
220250 'subgroups': ["group3"],
221251 },
222 "group3": {
223 'members': ["node1"],
224 },
252 "group3": {},
225253 },
226254 )
227255 assert run("bw test -M", path=str(tmpdir))[2] == 1
230258 def test_group_metadata_collision_subgroups(tmpdir):
231259 make_repo(
232260 tmpdir,
233 nodes={"node1": {}},
261 nodes={
262 "node1": {
263 'groups': {
264 "group1",
265 "group3",
266 },
267 },
268 },
234269 groups={
235270 "group1": {
236 'members': ["node1"],
237271 'metadata': {
238272 'foo': {
239273 'baz': 1,
250284 },
251285 'subgroups': ["group1", "group3"],
252286 },
253 "group3": {
254 'members': ["node1"],
255 },
287 "group3": {},
256288 },
257289 )
258290 assert run("bw test -M", path=str(tmpdir))[2] == 0
261293 def test_group_metadata_collision_list(tmpdir):
262294 make_repo(
263295 tmpdir,
264 nodes={"node1": {}},
296 nodes={
297 "node1": {
298 'groups': {
299 "group1",
300 "group2",
301 },
302 },
303 },
265304 groups={
266305 "group1": {
267 'members': ["node1"],
268306 'metadata': {
269307 'foo': [1],
270308 },
271309 },
272310 "group2": {
273 'members': ["node1"],
274311 'metadata': {
275312 'foo': [2],
276313 },
283320 def test_group_metadata_collision_dict(tmpdir):
284321 make_repo(
285322 tmpdir,
286 nodes={"node1": {}},
323 nodes={
324 "node1": {
325 'groups': {
326 "group1",
327 "group2",
328 },
329 },
330 },
287331 groups={
288332 "group1": {
289 'members': ["node1"],
290333 'metadata': {
291334 'foo': {'bar': 1},
292335 },
293336 },
294337 "group2": {
295 'members': ["node1"],
296338 'metadata': {
297339 'foo': 2,
298340 },
305347 def test_group_metadata_collision_dict_ok(tmpdir):
306348 make_repo(
307349 tmpdir,
308 nodes={"node1": {}},
350 nodes={
351 "node1": {
352 'groups': {
353 "group1",
354 "group2",
355 },
356 },
357 },
309358 groups={
310359 "group1": {
311 'members': ["node1"],
312360 'metadata': {
313361 'foo': {'bar': 1},
314362 },
315363 },
316364 "group2": {
317 'members': ["node1"],
318365 'metadata': {
319366 'foo': {'baz': 2},
320367 },
327374 def test_group_metadata_collision_set(tmpdir):
328375 make_repo(
329376 tmpdir,
330 nodes={"node1": {}},
377 nodes={
378 "node1": {
379 'groups': {
380 "group1",
381 "group2",
382 },
383 },
384 },
331385 groups={
332386 "group1": {
333 'members': ["node1"],
334387 'metadata': {
335388 'foo': set([1]),
336389 },
337390 },
338391 "group2": {
339 'members': ["node1"],
340392 'metadata': {
341393 'foo': 2,
342394 },
349401 def test_group_metadata_collision_set_ok(tmpdir):
350402 make_repo(
351403 tmpdir,
352 nodes={"node1": {}},
404 nodes={
405 "node1": {
406 'groups': {
407 "group1",
408 "group2",
409 },
410 },
411 },
353412 groups={
354413 "group1": {
355 'members': ["node1"],
356414 'metadata': {
357415 'foo': set([1]),
358416 },
359417 },
360418 "group2": {
361 'members': ["node1"],
362419 'metadata': {
363420 'foo': set([2]),
364421 },
366423 },
367424 )
368425 assert run("bw test -M", path=str(tmpdir))[2] == 0
426
427
428 def test_defaults_metadata_collision(tmpdir):
429 make_repo(
430 tmpdir,
431 nodes={
432 "node1": {
433 'bundles': {"bundle1", "bundle2"},
434 },
435 },
436 bundles={
437 "bundle1": {},
438 "bundle2": {},
439 },
440 )
441 with open(join(str(tmpdir), "bundles", "bundle1", "metadata.py"), 'w') as f:
442 f.write(
443 """defaults = {
444 "foo": "bar",
445 }
446 """)
447 with open(join(str(tmpdir), "bundles", "bundle2", "metadata.py"), 'w') as f:
448 f.write(
449 """defaults = {
450 "foo": "baz",
451 }
452 """)
453 stdout, stderr, rcode = run("bw test -M", path=str(tmpdir))
454 assert rcode == 1
455 assert b"foo" in stderr
456
457
458 def test_defaults_metadata_collision_nested(tmpdir):
459 make_repo(
460 tmpdir,
461 nodes={
462 "node1": {
463 'bundles': {"bundle1", "bundle2"},
464 },
465 },
466 bundles={
467 "bundle1": {},
468 "bundle2": {},
469 },
470 )
471 with open(join(str(tmpdir), "bundles", "bundle1", "metadata.py"), 'w') as f:
472 f.write(
473 """defaults = {
474 "foo": {"bar": "baz"},
475 }
476 """)
477 with open(join(str(tmpdir), "bundles", "bundle2", "metadata.py"), 'w') as f:
478 f.write(
479 """defaults = {
480 "foo": {"bar": "frob"},
481 }
482 """)
483 stdout, stderr, rcode = run("bw test -M", path=str(tmpdir))
484 assert rcode == 1
485 assert b"foo/bar" in stderr
486
487
488 def test_defaults_metadata_collision_ok(tmpdir):
489 make_repo(
490 tmpdir,
491 nodes={
492 "node1": {
493 'bundles': {"bundle1", "bundle2"},
494 },
495 },
496 bundles={
497 "bundle1": {},
498 "bundle2": {},
499 },
500 )
501 with open(join(str(tmpdir), "bundles", "bundle1", "metadata.py"), 'w') as f:
502 f.write(
503 """defaults = {
504 "foo": {"bar"},
505 }
506 """)
507 with open(join(str(tmpdir), "bundles", "bundle2", "metadata.py"), 'w') as f:
508 f.write(
509 """defaults = {
510 "foo": {"baz"},
511 }
512 """)
513 assert run("bw test -M", path=str(tmpdir))[2] == 0
514
515
516 def test_reactor_metadata_collision(tmpdir):
517 make_repo(
518 tmpdir,
519 nodes={
520 "node1": {
521 'bundles': {"bundle1", "bundle2"},
522 },
523 },
524 bundles={
525 "bundle1": {},
526 "bundle2": {},
527 },
528 )
529 with open(join(str(tmpdir), "bundles", "bundle1", "metadata.py"), 'w') as f:
530 f.write(
531 """@metadata_reactor
532 def foo(metadata):
533 return {"foo": 1}
534 """)
535 with open(join(str(tmpdir), "bundles", "bundle2", "metadata.py"), 'w') as f:
536 f.write(
537 """@metadata_reactor
538 def foo(metadata):
539 return {"foo": 2}
540 """)
541 stdout, stderr, rcode = run("bw test -M", path=str(tmpdir))
542 assert rcode == 1
543 assert b"foo" in stderr
544
545
546 def test_reactor_metadata_collision_nested(tmpdir):
547 make_repo(
548 tmpdir,
549 nodes={
550 "node1": {
551 'bundles': {"bundle1", "bundle2"},
552 },
553 },
554 bundles={
555 "bundle1": {},
556 "bundle2": {},
557 },
558 )
559 with open(join(str(tmpdir), "bundles", "bundle1", "metadata.py"), 'w') as f:
560 f.write(
561 """@metadata_reactor
562 def foo(metadata):
563 return {"foo": {"bar": "1"}}
564 """)
565 with open(join(str(tmpdir), "bundles", "bundle2", "metadata.py"), 'w') as f:
566 f.write(
567 """@metadata_reactor
568 def foo(metadata):
569 return {"foo": {"bar": "2"}}
570 """)
571 stdout, stderr, rcode = run("bw test -M", path=str(tmpdir))
572 assert rcode == 1
573 assert b"foo/bar" in stderr
574
575
576 def test_reactor_metadata_collision_nested_mixed(tmpdir):
577 make_repo(
578 tmpdir,
579 nodes={
580 "node1": {
581 'bundles': {"bundle1", "bundle2"},
582 },
583 },
584 bundles={
585 "bundle1": {},
586 "bundle2": {},
587 },
588 )
589 with open(join(str(tmpdir), "bundles", "bundle1", "metadata.py"), 'w') as f:
590 f.write(
591 """@metadata_reactor
592 def foo(metadata):
593 return {"foo": {"bar": {True}}}
594 """)
595 with open(join(str(tmpdir), "bundles", "bundle2", "metadata.py"), 'w') as f:
596 f.write(
597 """@metadata_reactor
598 def foo(metadata):
599 return {"foo": {"bar": [False]}}
600 """)
601 stdout, stderr, rcode = run("bw test -M", path=str(tmpdir))
602 assert rcode == 1
603 assert b"foo/bar" in stderr
369604
370605
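Taken together, the collision tests show what bw test -M enforces: two bundles may not assign conflicting scalar values to the same key path via defaults or reactors, but mergeable values such as sets may coexist. A sketch of the passing case, mirroring test_defaults_metadata_collision_ok with illustrative key names:

    from os.path import join
    from bundlewrap.utils.testing import make_repo, run

    def demo_mergeable_defaults(tmpdir):
        make_repo(
            tmpdir,
            nodes={"node1": {'bundles': {"bundle1", "bundle2"}}},
            bundles={"bundle1": {}, "bundle2": {}},
        )
        # both bundles contribute a set for the same key, so the values merge
        for bundle, admin in (("bundle1", "alice"), ("bundle2", "bob")):
            with open(join(str(tmpdir), "bundles", bundle, "metadata.py"), 'w') as f:
                f.write("defaults = {{'admins': {{{!r}}}}}\n".format(admin))
        assert run("bw test -M", path=str(tmpdir))[2] == 0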
371606 def test_fault_missing(tmpdir):
391626 assert run("bw test -iI", path=str(tmpdir))[2] == 0
392627
393628
629 def test_fault_missing_content(tmpdir):
630 make_repo(
631 tmpdir,
632 nodes={
633 "node1": {
634 'bundles': ["bundle1"],
635 },
636 },
637 bundles={
638 "bundle1": {}
639 },
640 )
641 with open(join(str(tmpdir), "bundles", "bundle1", "items.py"), 'w') as f:
642 f.write("""
643 files = {
644 "/foo": {
645 'content': repo.vault.decrypt("bzzt", key="unavailable"),
646 },
647 }
648 """)
649 assert run("bw test -I", path=str(tmpdir))[2] == 1
650 assert run("bw test -iI", path=str(tmpdir))[2] == 0
651
652
394653 def test_metadata_determinism_ok(tmpdir):
395654 make_repo(
396655 tmpdir,
404663 },
405664 )
406665 with open(join(str(tmpdir), "bundles", "bundle1", "metadata.py"), 'w') as f:
407 f.write("""@metadata_processor
666 f.write("""@metadata_reactor
408667 def test(metadata):
409 metadata['test'] = 1
410 return metadata, DONE
668 return {'test': 1}
411669 """)
412670 assert run("bw test -m 3", path=str(tmpdir))[2] == 0
413671
426684 )
427685 with open(join(str(tmpdir), "bundles", "bundle1", "metadata.py"), 'w') as f:
428686 f.write("""from random import randint
429
430 @metadata_processor
687 n = randint(1, 99999)
688
689 @metadata_reactor
431690 def test(metadata):
432 metadata.setdefault('test', randint(1, 99999))
433 return metadata, DONE
691 return {'test': n}
434692 """)
435693 assert run("bw test -m 3", path=str(tmpdir))[2] == 1
436694
483741 make_repo(
484742 tmpdir,
485743 nodes={
486 "node1": {},
744 "node1": {'groups': {"group2"}},
487745 },
488746 groups={
489747 "group1": {'subgroups': ["missing-group"]},
490 "group2": {'members': ["node1"]},
748 "group2": {},
491749 },
492750 )
493751 assert run("bw test", path=str(tmpdir))[2] == 1
499757 make_repo(
500758 tmpdir,
501759 nodes={
502 "node1": {},
760 "node1": {'groups': {"group2"}},
503761 },
504762 groups={
505763 "group1": {},
506 "group2": {'members': ["node1"]},
764 "group2": {},
507765 },
508766 )
509767 assert run("bw test", path=str(tmpdir))[2] == 0
583841 },
584842 )
585843 assert run("bw test -I", path=str(tmpdir))[2] == 1
586
587
588 def test_secret_identifier_only_once(tmpdir):
589 make_repo(
590 tmpdir,
591 nodes={
592 "node1": {
593 'bundles': ["bundle1"],
594 },
595 },
596 bundles={
597 "bundle1": {
598 'files': {
599 "/test": {
600 'content': "${repo.vault.password_for('testing')}",
601 'content_type': 'mako',
602 },
603 },
604 },
605 },
606 )
607 assert run("bw test -s ''", path=str(tmpdir))[2] == 1
608 assert run("bw test -s 'test'", path=str(tmpdir))[2] == 0
609 assert run("bw test -s 'test,foo'", path=str(tmpdir))[2] == 0
610
611
612 def test_secret_identifier_twice(tmpdir):
613 make_repo(
614 tmpdir,
615 nodes={
616 "node1": {
617 'bundles': ["bundle1"],
618 },
619 "node2": {
620 'bundles': ["bundle1"],
621 },
622 },
623 bundles={
624 "bundle1": {
625 'files': {
626 "/test": {
627 'content': "${repo.vault.password_for('testing')}",
628 'content_type': 'mako',
629 },
630 },
631 },
632 },
633 )
634 assert run("bw test -s ''", path=str(tmpdir))[2] == 0
635 assert run("bw test -s 'test'", path=str(tmpdir))[2] == 0
636 assert run("bw test -s 'test,foo'", path=str(tmpdir))[2] == 0
637844
638845
639846 def test_reverse_dummy_dep(tmpdir):
0 # -*- coding: utf-8 -*-
1 from __future__ import unicode_literals
2
30 from os.path import join
41
52 from bundlewrap.utils.testing import host_os, make_repo, run
0 from os.path import join
1
2 from bundlewrap.utils.testing import make_repo, run
3
4
5 def test_metadatapy(tmpdir):
6 make_repo(
7 tmpdir,
8 )
9 with open(join(str(tmpdir), "libs", "libstest.py"), 'w') as f:
10 f.write(
11 """ivar = 47
12
13 def func():
14 return 48
15 """)
16 stdout, stderr, rcode = run("bw debug -c 'print(repo.libs.libstest.ivar)'", path=str(tmpdir))
17 assert stdout == b"47\n"
18 assert stderr == b""
19 assert rcode == 0
20
21 stdout, stderr, rcode = run("bw debug -c 'print(repo.libs.libstest.func())'", path=str(tmpdir))
22 assert stdout == b"48\n"
23 assert stderr == b""
24 assert rcode == 0
25
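The libs test above demonstrates that any module placed in libs/ becomes importable as repo.libs.<name>. A hypothetical items.py making use of that (repo is available in the items.py namespace, as the vault-based test elsewhere in this diff relies on):

    # bundles/test/items.py (hypothetical)
    files = {
        "/etc/app.conf": {
            'content': "value = {}\n".format(repo.libs.libstest.ivar),
        },
    }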
0 from base64 import b64decode
1 from os.path import join
2
3 from bundlewrap.utils.testing import make_repo, run
4
5
6 def test_b64encode_fault(tmpdir):
7 make_repo(tmpdir)
8
9 stdout, stderr, rcode = run("bw debug -c 'print(repo.vault.password_for(\"testing\").b64encode())'", path=str(tmpdir))
10 assert stdout == b"ZmFDVFQ3NmthZ3REdVpFNXdub2lEMUN4aEdLbWJnaVg=\n"
11 assert stderr == b""
12 assert rcode == 0
13
14
15 def test_encrypt(tmpdir):
16 make_repo(tmpdir)
17
18 stdout, stderr, rcode = run("bw debug -c 'print(repo.vault.encrypt(\"test\"))'", path=str(tmpdir))
19 assert stderr == b""
20 assert rcode == 0
21
22 stdout, stderr, rcode = run("bw debug -c 'print(repo.vault.decrypt(\"{}\"))'".format(stdout.decode('utf-8').strip()), path=str(tmpdir))
23 assert stdout == b"test\n"
24 assert stderr == b""
25 assert rcode == 0
26
27
28 def test_encrypt_different_key_autodetect(tmpdir):
29 make_repo(tmpdir)
30
31 stdout, stderr, rcode = run("bw debug -c 'print(repo.vault.encrypt(\"test\", key=\"generate\"))'", path=str(tmpdir))
32 assert stderr == b""
33 assert rcode == 0
34
35 stdout, stderr, rcode = run("bw debug -c 'print(repo.vault.decrypt(\"{}\"))'".format(stdout.decode('utf-8').strip()), path=str(tmpdir))
36 assert stdout == b"test\n"
37 assert stderr == b""
38 assert rcode == 0
39
40
41 def test_encrypt_file(tmpdir):
42 make_repo(tmpdir)
43
44 source_file = join(str(tmpdir), "data", "source")
45 with open(source_file, 'w') as f:
46 f.write("ohai")
47
48 stdout, stderr, rcode = run(
49 "bw debug -c 'repo.vault.encrypt_file(\"{}\", \"{}\")'".format(
50 source_file,
51 "encrypted",
52 ),
53 path=str(tmpdir),
54 )
55 assert stderr == b""
56 assert rcode == 0
57
58 stdout, stderr, rcode = run(
59 "bw debug -c 'print(repo.vault.decrypt_file(\"{}\"))'".format(
60 "encrypted",
61 ),
62 path=str(tmpdir),
63 )
64 assert stdout == b"ohai\n"
65 assert stderr == b""
66 assert rcode == 0
67
68
69 def test_encrypt_file_different_key_autodetect(tmpdir):
70 make_repo(tmpdir)
71
72 source_file = join(str(tmpdir), "data", "source")
73 with open(source_file, 'w') as f:
74 f.write("ohai")
75
76 stdout, stderr, rcode = run(
77 "bw debug -c 'repo.vault.encrypt_file(\"{}\", \"{}\", \"{}\")'".format(
78 source_file,
79 "encrypted",
80 "generate",
81 ),
82 path=str(tmpdir),
83 )
84 assert stderr == b""
85 assert rcode == 0
86
87 stdout, stderr, rcode = run(
88 "bw debug -c 'print(repo.vault.decrypt_file(\"{}\"))'".format(
89 "encrypted",
90 ),
91 path=str(tmpdir),
92 )
93 assert stdout == b"ohai\n"
94 assert stderr == b""
95 assert rcode == 0
96
97
98 def test_encrypt_file_base64(tmpdir):
99 make_repo(tmpdir)
100
101 source_file = join(str(tmpdir), "data", "source")
102 with open(source_file, 'wb') as f:
103 f.write("öhai".encode('latin-1'))
104
105 stdout, stderr, rcode = run(
106 "bw debug -c 'repo.vault.encrypt_file(\"{}\", \"{}\")'".format(
107 source_file,
108 "encrypted",
109 ),
110 path=str(tmpdir),
111 )
112 assert stderr == b""
113 assert rcode == 0
114
115 stdout, stderr, rcode = run(
116 "bw debug -c 'print(repo.vault.decrypt_file_as_base64(\"{}\"))'".format(
117 "encrypted",
118 ),
119 path=str(tmpdir),
120 )
121 assert b64decode(stdout.decode('utf-8')) == "öhai".encode('latin-1')
122 assert stderr == b""
123 assert rcode == 0
124
125
126 def test_format_password(tmpdir):
127 make_repo(tmpdir)
128
129 stdout, stderr, rcode = run("bw debug -c 'print(repo.vault.password_for(\"testing\").format_into(\"format: {}\"))'", path=str(tmpdir))
130 assert stdout == b"format: faCTT76kagtDuZE5wnoiD1CxhGKmbgiX\n"
131 assert stderr == b""
132 assert rcode == 0
133
134
135 def test_human_password(tmpdir):
136 make_repo(tmpdir)
137
138 stdout, stderr, rcode = run("bw debug -c 'print(repo.vault.human_password_for(\"hello world\"))'", path=str(tmpdir))
139 assert stdout == b"Xaint-Heep-Pier-Tikl-76\n"
140 assert stderr == b""
141 assert rcode == 0
142
143
144 def test_human_password_digits(tmpdir):
145 make_repo(tmpdir)
146
147 stdout, stderr, rcode = run("bw debug -c 'print(repo.vault.human_password_for(\"hello world\", digits=4))'", path=str(tmpdir))
148 assert stdout == b"Xaint-Heep-Pier-Tikl-7608\n"
149 assert stderr == b""
150 assert rcode == 0
151
152
153 def test_human_password_per_word(tmpdir):
154 make_repo(tmpdir)
155
156 stdout, stderr, rcode = run("bw debug -c 'print(repo.vault.human_password_for(\"hello world\", per_word=1))'", path=str(tmpdir))
157 assert stdout == b"X-D-F-H-42\n"
158 assert stderr == b""
159 assert rcode == 0
160
161
162 def test_human_password_words(tmpdir):
163 make_repo(tmpdir)
164
165 stdout, stderr, rcode = run("bw debug -c 'print(repo.vault.human_password_for(\"hello world\", words=2))'", path=str(tmpdir))
166 assert stdout == b"Xaint-Heep-13\n"
167 assert stderr == b""
168 assert rcode == 0
169
170
171 def test_random_bytes_as_base64(tmpdir):
172 make_repo(tmpdir)
173
174 stdout, stderr, rcode = run("bw debug -c 'print(repo.vault.random_bytes_as_base64_for(\"foo\"))'", path=str(tmpdir))
175 assert stdout == b"rt+Dgv0yA10DS3ux94mmtEg+isChTJvgkfklzmWkvyg=\n"
176 assert stderr == b""
177 assert rcode == 0
178
179
180 def test_random_bytes_as_base64_length(tmpdir):
181 make_repo(tmpdir)
182
183 stdout, stderr, rcode = run("bw debug -c 'print(repo.vault.random_bytes_as_base64_for(\"foo\", length=1))'", path=str(tmpdir))
184 assert stdout == b"rg==\n"
185 assert stderr == b""
186 assert rcode == 0
187
188
189 def test_faults_equality_decrypt(tmpdir):
190 make_repo(tmpdir)
191
192 stdout, stderr, rcode = run("bw debug -c 'print(repo.vault.encrypt(\"foo\"))'", path=str(tmpdir))
193 assert stderr == b""
194 assert rcode == 0
195 enc_foo = stdout.decode('utf-8').strip()
196
197 stdout, stderr, rcode = run(
198 "bw debug -c 'print(repo.vault.encrypt(\"bar\"))'", path=str(tmpdir),
199 )
200 assert stderr == b""
201 assert rcode == 0
202 enc_bar = stdout.decode('utf-8').strip()
203
204 stdout, stderr, rcode = run(
205 "bw debug -c 'print(repo.vault.decrypt(\"{}\") == repo.vault.decrypt(\"{}\"))'".format(
206 enc_foo, enc_foo,
207 ),
208 path=str(tmpdir),
209 )
210 assert stdout == b"True\n"
211 assert stderr == b""
212 assert rcode == 0
213
214 stdout, stderr, rcode = run(
215 "bw debug -c 'print(repo.vault.decrypt(\"{}\") == repo.vault.decrypt(\"{}\"))'".format(
216 enc_foo, enc_bar,
217 ),
218 path=str(tmpdir),
219 )
220 assert stdout == b"False\n"
221 assert stderr == b""
222 assert rcode == 0
223
224
225 def test_faults_equality_decrypt_file(tmpdir):
226 make_repo(tmpdir)
227
228 source_file = join(str(tmpdir), "data", "source")
229 with open(source_file, 'w') as f:
230 f.write("foo")
231 stdout, stderr, rcode = run(
232 "bw debug -c 'repo.vault.encrypt_file(\"{}\", \"{}\")'".format(
233 source_file,
234 "enc_foo",
235 ),
236 path=str(tmpdir),
237 )
238 assert stderr == b""
239 assert rcode == 0
240
241 source_file = join(str(tmpdir), "data", "source")
242 with open(source_file, 'w') as f:
243 f.write("bar")
244 stdout, stderr, rcode = run(
245 "bw debug -c 'repo.vault.encrypt_file(\"{}\", \"{}\")'".format(
246 source_file,
247 "enc_bar",
248 ),
249 path=str(tmpdir),
250 )
251 assert stderr == b""
252 assert rcode == 0
253
254 stdout, stderr, rcode = run(
255 "bw debug -c 'print(repo.vault.decrypt_file(\"{}\") == repo.vault.decrypt_file(\"{}\"))'".format(
256 "enc_foo", "enc_foo",
257 ),
258 path=str(tmpdir),
259 )
260 assert stdout == b"True\n"
261 assert stderr == b""
262 assert rcode == 0
263
264 stdout, stderr, rcode = run(
265 "bw debug -c 'print(repo.vault.decrypt_file(\"{}\") == repo.vault.decrypt_file(\"{}\"))'".format(
266 "enc_foo", "enc_bar",
267 ),
268 path=str(tmpdir),
269 )
270 assert stdout == b"False\n"
271 assert stderr == b""
272 assert rcode == 0
273
274
275 def test_faults_equality_decrypt_file_as_base64(tmpdir):
276 make_repo(tmpdir)
277
278 source_file = join(str(tmpdir), "data", "source")
279 with open(source_file, 'w') as f:
280 f.write("foo")
281 stdout, stderr, rcode = run(
282 "bw debug -c 'repo.vault.encrypt_file(\"{}\", \"{}\")'".format(
283 source_file,
284 "enc_foo",
285 ),
286 path=str(tmpdir),
287 )
288 assert stderr == b""
289 assert rcode == 0
290
291 source_file = join(str(tmpdir), "data", "source")
292 with open(source_file, 'w') as f:
293 f.write("bar")
294 stdout, stderr, rcode = run(
295 "bw debug -c 'repo.vault.encrypt_file(\"{}\", \"{}\")'".format(
296 source_file,
297 "enc_bar",
298 ),
299 path=str(tmpdir),
300 )
301 assert stderr == b""
302 assert rcode == 0
303
304 stdout, stderr, rcode = run(
305 "bw debug -c 'print(repo.vault.decrypt_file_as_base64(\"{}\") == repo.vault.decrypt_file_as_base64(\"{}\"))'".format(
306 "enc_foo", "enc_foo",
307 ),
308 path=str(tmpdir),
309 )
310 assert stdout == b"True\n"
311 assert stderr == b""
312 assert rcode == 0
313
314 stdout, stderr, rcode = run(
315 "bw debug -c 'print(repo.vault.decrypt_file_as_base64(\"{}\") == repo.vault.decrypt_file_as_base64(\"{}\"))'".format(
316 "enc_foo", "enc_bar",
317 ),
318 path=str(tmpdir),
319 )
320 assert stdout == b"False\n"
321 assert stderr == b""
322 assert rcode == 0
323
324
325 def test_faults_equality_decrypt_file_mixed(tmpdir):
326 make_repo(tmpdir)
327
328 source_file = join(str(tmpdir), "data", "source")
329 with open(source_file, 'w') as f:
330 f.write("foo")
331 stdout, stderr, rcode = run(
332 "bw debug -c 'repo.vault.encrypt_file(\"{}\", \"{}\")'".format(
333 source_file,
334 "enc_foo",
335 ),
336 path=str(tmpdir),
337 )
338 assert stderr == b""
339 assert rcode == 0
340
341 stdout, stderr, rcode = run(
342 "bw debug -c 'print(repo.vault.decrypt_file_as_base64(\"{}\") == repo.vault.decrypt_file(\"{}\"))'".format(
343 "enc_foo", "enc_foo",
344 ),
345 path=str(tmpdir),
346 )
347 assert stdout == b"False\n"
348 assert stderr == b""
349 assert rcode == 0
350
351
352 def test_faults_equality_human_password_for(tmpdir):
353 make_repo(tmpdir)
354
355 stdout, stderr, rcode = run(
356 "bw debug -c 'print(repo.vault.human_password_for(\"a\") == repo.vault.human_password_for(\"a\"))'",
357 path=str(tmpdir),
358 )
359 assert stdout == b"True\n"
360 assert stderr == b""
361 assert rcode == 0
362
363 stdout, stderr, rcode = run(
364 "bw debug -c 'print(repo.vault.human_password_for(\"a\") == repo.vault.human_password_for(\"b\"))'",
365 path=str(tmpdir),
366 )
367 assert stdout == b"False\n"
368 assert stderr == b""
369 assert rcode == 0
370
371
372 def test_faults_equality_password_for(tmpdir):
373 make_repo(tmpdir)
374
375 stdout, stderr, rcode = run(
376 "bw debug -c 'print(repo.vault.password_for(\"a\") == repo.vault.password_for(\"a\"))'",
377 path=str(tmpdir),
378 )
379 assert stdout == b"True\n"
380 assert stderr == b""
381 assert rcode == 0
382
383 stdout, stderr, rcode = run(
384 "bw debug -c 'print(repo.vault.password_for(\"a\") == repo.vault.password_for(\"b\"))'",
385 path=str(tmpdir),
386 )
387 assert stdout == b"False\n"
388 assert stderr == b""
389 assert rcode == 0
390
391
392 def test_faults_equality_password_for_mixed(tmpdir):
393 make_repo(tmpdir)
394
395 stdout, stderr, rcode = run(
396 "bw debug -c 'print(repo.vault.password_for(\"a\") == repo.vault.human_password_for(\"a\"))'",
397 path=str(tmpdir),
398 )
399 assert stdout == b"False\n"
400 assert stderr == b""
401 assert rcode == 0
402
403
404 def test_faults_equality_random_bytes_as_base64(tmpdir):
405 make_repo(tmpdir)
406
407 stdout, stderr, rcode = run(
408 "bw debug -c 'print(repo.vault.random_bytes_as_base64_for(\"a\") == repo.vault.random_bytes_as_base64_for(\"a\"))'",
409 path=str(tmpdir),
410 )
411 assert stdout == b"True\n"
412 assert stderr == b""
413 assert rcode == 0
414
415 stdout, stderr, rcode = run(
416 "bw debug -c 'print(repo.vault.random_bytes_as_base64_for(\"a\") == repo.vault.random_bytes_as_base64_for(\"b\"))'",
417 path=str(tmpdir),
418 )
419 assert stdout == b"False\n"
420 assert stderr == b""
421 assert rcode == 0
+0 -190 tests/integration/secrets.py
0 # -*- coding: utf-8 -*-
1 from __future__ import unicode_literals
2
3 from base64 import b64decode
4 from os.path import join
5
6 from bundlewrap.utils.testing import make_repo, run
7
8
9 def test_b64encode_fault(tmpdir):
10 make_repo(tmpdir)
11
12 stdout, stderr, rcode = run("bw debug -c 'print(repo.vault.password_for(\"testing\").b64encode())'", path=str(tmpdir))
13 assert stdout == b"ZmFDVFQ3NmthZ3REdVpFNXdub2lEMUN4aEdLbWJnaVg=\n"
14 assert stderr == b""
15 assert rcode == 0
16
17
18 def test_encrypt(tmpdir):
19 make_repo(tmpdir)
20
21 stdout, stderr, rcode = run("bw debug -c 'print(repo.vault.encrypt(\"test\"))'", path=str(tmpdir))
22 assert stderr == b""
23 assert rcode == 0
24
25 stdout, stderr, rcode = run("bw debug -c 'print(repo.vault.decrypt(\"{}\"))'".format(stdout.decode('utf-8').strip()), path=str(tmpdir))
26 assert stdout == b"test\n"
27 assert stderr == b""
28 assert rcode == 0
29
30
31 def test_encrypt_different_key_autodetect(tmpdir):
32 make_repo(tmpdir)
33
34 stdout, stderr, rcode = run("bw debug -c 'print(repo.vault.encrypt(\"test\", key=\"generate\"))'", path=str(tmpdir))
35 assert stderr == b""
36 assert rcode == 0
37
38 stdout, stderr, rcode = run("bw debug -c 'print(repo.vault.decrypt(\"{}\"))'".format(stdout.decode('utf-8').strip()), path=str(tmpdir))
39 assert stdout == b"test\n"
40 assert stderr == b""
41 assert rcode == 0
42
43
44 def test_encrypt_file(tmpdir):
45 make_repo(tmpdir)
46
47 source_file = join(str(tmpdir), "data", "source")
48 with open(source_file, 'w') as f:
49 f.write("ohai")
50
51 stdout, stderr, rcode = run(
52 "bw debug -c 'repo.vault.encrypt_file(\"{}\", \"{}\")'".format(
53 source_file,
54 "encrypted",
55 ),
56 path=str(tmpdir),
57 )
58 assert stderr == b""
59 assert rcode == 0
60
61 stdout, stderr, rcode = run(
62 "bw debug -c 'print(repo.vault.decrypt_file(\"{}\"))'".format(
63 "encrypted",
64 ),
65 path=str(tmpdir),
66 )
67 assert stdout == b"ohai\n"
68 assert stderr == b""
69 assert rcode == 0
70
71
72 def test_encrypt_file_different_key_autodetect(tmpdir):
73 make_repo(tmpdir)
74
75 source_file = join(str(tmpdir), "data", "source")
76 with open(source_file, 'w') as f:
77 f.write("ohai")
78
79 stdout, stderr, rcode = run(
80 "bw debug -c 'repo.vault.encrypt_file(\"{}\", \"{}\", \"{}\")'".format(
81 source_file,
82 "encrypted",
83 "generate",
84 ),
85 path=str(tmpdir),
86 )
87 assert stderr == b""
88 assert rcode == 0
89
90 stdout, stderr, rcode = run(
91 "bw debug -c 'print(repo.vault.decrypt_file(\"{}\"))'".format(
92 "encrypted",
93 ),
94 path=str(tmpdir),
95 )
96 assert stdout == b"ohai\n"
97 assert stderr == b""
98 assert rcode == 0
99
100
101 def test_encrypt_file_base64(tmpdir):
102 make_repo(tmpdir)
103
104 source_file = join(str(tmpdir), "data", "source")
105 with open(source_file, 'wb') as f:
106 f.write("öhai".encode('latin-1'))
107
108 stdout, stderr, rcode = run(
109 "bw debug -c 'repo.vault.encrypt_file(\"{}\", \"{}\")'".format(
110 source_file,
111 "encrypted",
112 ),
113 path=str(tmpdir),
114 )
115 assert stderr == b""
116 assert rcode == 0
117
118 stdout, stderr, rcode = run(
119 "bw debug -c 'print(repo.vault.decrypt_file_as_base64(\"{}\"))'".format(
120 "encrypted",
121 ),
122 path=str(tmpdir),
123 )
124 assert b64decode(stdout.decode('utf-8')) == "öhai".encode('latin-1')
125 assert stderr == b""
126 assert rcode == 0
127
128
129 def test_format_password(tmpdir):
130 make_repo(tmpdir)
131
132 stdout, stderr, rcode = run("bw debug -c 'print(repo.vault.password_for(\"testing\").format_into(\"format: {}\"))'", path=str(tmpdir))
133 assert stdout == b"format: faCTT76kagtDuZE5wnoiD1CxhGKmbgiX\n"
134 assert stderr == b""
135 assert rcode == 0
136
137
138 def test_human_password(tmpdir):
139 make_repo(tmpdir)
140
141 stdout, stderr, rcode = run("bw debug -c 'print(repo.vault.human_password_for(\"hello world\"))'", path=str(tmpdir))
142 assert stdout == b"Xaint-Heep-Pier-Tikl-76\n"
143 assert stderr == b""
144 assert rcode == 0
145
146
147 def test_human_password_digits(tmpdir):
148 make_repo(tmpdir)
149
150 stdout, stderr, rcode = run("bw debug -c 'print(repo.vault.human_password_for(\"hello world\", digits=4))'", path=str(tmpdir))
151 assert stdout == b"Xaint-Heep-Pier-Tikl-7608\n"
152 assert stderr == b""
153 assert rcode == 0
154
155
156 def test_human_password_per_word(tmpdir):
157 make_repo(tmpdir)
158
159 stdout, stderr, rcode = run("bw debug -c 'print(repo.vault.human_password_for(\"hello world\", per_word=1))'", path=str(tmpdir))
160 assert stdout == b"X-D-F-H-42\n"
161 assert stderr == b""
162 assert rcode == 0
163
164
165 def test_human_password_words(tmpdir):
166 make_repo(tmpdir)
167
168 stdout, stderr, rcode = run("bw debug -c 'print(repo.vault.human_password_for(\"hello world\", words=2))'", path=str(tmpdir))
169 assert stdout == b"Xaint-Heep-13\n"
170 assert stderr == b""
171 assert rcode == 0
172
173
174 def test_random_bytes_as_base64(tmpdir):
175 make_repo(tmpdir)
176
177 stdout, stderr, rcode = run("bw debug -c 'print(repo.vault.random_bytes_as_base64_for(\"foo\"))'", path=str(tmpdir))
178 assert stdout == b"rt+Dgv0yA10DS3ux94mmtEg+isChTJvgkfklzmWkvyg=\n"
179 assert stderr == b""
180 assert rcode == 0
181
182
183 def test_random_bytes_as_base64_length(tmpdir):
184 make_repo(tmpdir)
185
186 stdout, stderr, rcode = run("bw debug -c 'print(repo.vault.random_bytes_as_base64_for(\"foo\", length=1))'", path=str(tmpdir))
187 assert stdout == b"rg==\n"
188 assert stderr == b""
189 assert rcode == 0
0 from os.path import join
1
2 from bundlewrap.repo import Repository
3 from bundlewrap.utils import get_file_contents
4 from bundlewrap.utils.testing import make_repo
5
6
7 def test_toml_conversion(tmpdir):
8 make_repo(
9 tmpdir,
10 nodes={
11 'node1': {
12 'os': 'ubuntu',
13 'metadata': {
14 "foo": {
15 "bar": "baz",
16 },
17 },
18 },
19 },
20 )
21 repo = Repository(tmpdir)
22 node = repo.get_node("node1")
23 node.toml_save()
24
25 assert get_file_contents(join(tmpdir, "nodes", "node1.toml")) == \
26 b"""os = "ubuntu"
27
28 [metadata.foo]
29 bar = "baz"
30 """
0 from bundlewrap.utils import Fault
1
2
3 def test_basic_resolve():
4 def callback():
5 return 4 # Chosen by fair dice roll. Guaranteed to be random.
6
7 f = Fault('id', callback)
8 assert f.value == 4
9
10
11 def test_add_fault():
12 def callback_a():
13 return 'foo'
14 def callback_b():
15 return 'bar'
16
17 a = Fault('id foo', callback_a)
18 b = Fault('id bar', callback_b)
19 c = a + b
20 assert c.value == 'foobar'
21
22
23 def test_add_fault_nonstring():
24 def callback_a():
25 return 4
26 def callback_b():
27 return 8
28
29 a = Fault('id foo', callback_a)
30 b = Fault('id bar', callback_b)
31 c = a + b
32 assert c.value == 12
33
34
35 def test_add_plain_nonstring():
36 def callback():
37 return 4
38
39 a = Fault('id foo', callback)
40 b = a + 8
41 assert b.value == 12
42
43
44 def test_add_plain():
45 def callback_a():
46 return 'foo'
47
48 a = Fault('id foo', callback_a)
49 c = a + 'bar'
50 assert c.value == 'foobar'
51
52
53 def test_order():
54 def callback_a():
55 return 'foo'
56 def callback_b():
57 return 'bar'
58 def callback_c():
59 return '0first'
60
61 a = Fault('id foo', callback_a)
62 b = Fault('id bar', callback_b)
63 c = Fault('id 0first', callback_c)
64
65 lst = sorted([a, b, c])
66
67 assert lst[0].value == '0first'
68 assert lst[1].value == 'bar'
69 assert lst[2].value == 'foo'
70
71
72 def test_b64encode():
73 def callback():
74 return 'foo'
75
76 a = Fault('id foo', callback).b64encode()
77 assert a.value == 'Zm9v'
78
79
80 def test_format_into():
81 def callback():
82 return 'foo'
83
84 a = Fault('id foo', callback).format_into('This is my secret: "{}"')
85 assert a.value == 'This is my secret: "foo"'
86
87
88 # XXX Other methods are still missing. This mainly checks that
89 # _make_method_callback() is working.
90 def test_generic_method_lower():
91 def callback():
92 return 'FOO'
93
94 a = Fault('id FOO', callback)
95 assert a.lower().value == 'foo'
96
97
98 def test_equal_no_operators():
99 def callback_a():
100 return 'foo'
101 def callback_b():
102 return 'foo, but here you see the problem'
103
104 a = Fault('id foo', callback_a)
105 b = Fault('id foo', callback_b)
106 assert id(a) != id(b)
107 assert a == b
108
109
110 def test_not_equal_no_operators():
111 def callback_a():
112 return 'this interface is not foolproof'
113 def callback_b():
114 return 'this interface is not foolproof'
115
116 a = Fault('id foo', callback_a)
117 b = Fault('id bar', callback_b)
118 assert id(a) != id(b)
119 assert a != b
120
121
122 def test_equal_lower():
123 def callback_a():
124 return 'foo'
125 def callback_b():
126 return 'foo'
127
128 a = Fault('id foo', callback_a).lower()
129 b = Fault('id foo', callback_b).lower()
130 assert id(a) != id(b)
131 assert a == b
132
133
134 def test_not_equal_lower():
135 def callback_a():
136 return 'foo'
137 def callback_b():
138 return 'foo'
139
140 a = Fault('id foo', callback_a).lower()
141 b = Fault('id bar', callback_b).lower()
142 assert id(a) != id(b)
143 assert a != b
144
145
146 def test_equal_b64encode():
147 def callback_a():
148 return 'foo'
149 def callback_b():
150 return 'foo'
151
152 a = Fault('id foo', callback_a).b64encode()
153 b = Fault('id foo', callback_b).b64encode()
154 assert id(a) != id(b)
155 assert a == b
156
157
158 def test_not_equal_b64encode():
159 def callback_a():
160 return 'foo'
161 def callback_b():
162 return 'foo'
163
164 a = Fault('id foo', callback_a).b64encode()
165 b = Fault('id bar', callback_b).b64encode()
166 assert id(a) != id(b)
167 assert a != b
168
169
170 def test_equal_format_into():
171 def callback_a():
172 return 'foo'
173 def callback_b():
174 return 'foo'
175
176 a = Fault('id foo', callback_a).format_into('bar {}')
177 b = Fault('id foo', callback_b).format_into('bar {}')
178 assert id(a) != id(b)
179 assert a == b
180
181
182 def test_not_equal_format_into():
183 def callback_a():
184 return 'foo'
185 def callback_b():
186 return 'foo'
187
188 a = Fault('id foo', callback_a).format_into('bar {}')
189 b = Fault('id foo', callback_b).format_into('baz {}')
190 assert id(a) != id(b)
191 assert a != b
192
193
194 def test_nested_equal():
195 def callback_a():
196 return 'foo'
197 def callback_b():
198 return 'foo'
199
200 a = Fault('id foo', callback_a).lower().b64encode()
201 b = Fault('id foo', callback_b).lower().b64encode()
202 assert id(a) != id(b)
203 assert a == b
204
205
206 def test_nested_not_equal_because_of_id():
207 def callback_a():
208 return 'foo'
209 def callback_b():
210 return 'foo'
211
212 a = Fault('id foo', callback_a).lower().b64encode()
213 b = Fault('id bar', callback_b).lower().b64encode()
214 assert id(a) != id(b)
215 assert a != b
216
217
218 def test_nested_not_equal_because_of_operators():
219 def callback_a():
220 return 'foo'
221 def callback_b():
222 return 'foo'
223
224 a = Fault('id foo', callback_a).lower().b64encode()
225 b = Fault('id foo', callback_b).lower()
226 assert id(a) != id(b)
227 assert a != b
228
229
230 def test_can_be_used_in_set():
231 def callback_a():
232 return 'foo'
233 def callback_b():
234 return 'bar'
235
236 a = Fault('id foo', callback_a)
237 b = Fault('id bar', callback_b)
238 s = {a, a, b}
239 assert len(s) == 2
240 assert 'foo' in [i.value for i in s]
241 assert 'bar' in [i.value for i in s]
242
243
244 def test_kwargs_add_to_idlist():
245 def callback():
246 return 'foo'
247
248 a = Fault('id foo', callback, foo='bar', baz='bam', frob='glob')
249 b = Fault('id foo', callback, different='kwargs')
250 assert a != b
251
252
253 def test_eq_and_hash_do_not_resolve_fault():
254 def callback():
255 raise Exception('Fault resolved, this should not happen')
256
257 a = Fault('id foo', callback)
258 b = Fault('id foo', callback)
259 assert a == b
260
261 s = {a, b}
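The long run of equality tests above pins down the central design property of Fault: identity is derived from the id (plus kwargs and the chain of applied operators such as lower() or format_into()), never from the resolved value, so comparing or hashing a Fault does not trigger its callback. A toy re-implementation of that pattern follows; LazyValue and never() are invented names, and the real class additionally proxies string methods via _make_method_callback().

class LazyValue:
    # Toy stand-in for Fault: identity comes from the id plus the chain of
    # operators, so equality and hashing never resolve the callback.
    def __init__(self, ids, callback):
        self._ids = ids if isinstance(ids, tuple) else (ids,)
        self._callback = callback

    @property
    def value(self):
        return self._callback()  # resolved only on access

    def lower(self):
        # Operators extend the identity tuple and wrap the callback.
        return LazyValue(self._ids + ('lower',), lambda: self._callback().lower())

    def __eq__(self, other):
        if not isinstance(other, LazyValue):
            return NotImplemented
        return self._ids == other._ids

    def __hash__(self):
        return hash(self._ids)

def never():
    raise AssertionError("resolved too early")

a = LazyValue('id foo', lambda: 'FOO')
b = LazyValue('id foo', never)
assert a == b             # compared by identity, callback untouched
assert len({a, b}) == 1   # hashable without resolving
assert a.lower().value == 'foo'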
0 # -*- coding: utf-8 -*-
1 from __future__ import unicode_literals
2
3 from bundlewrap.utils import Fault
40 from bundlewrap.utils.dicts import merge_dict
5 from bundlewrap.metadata import atomic, blame_changed_paths, changes_metadata
1 from bundlewrap.metadata import atomic
62
73
84 def test_atomic_no_merge_base():
1713 {1: [5]},
1814 {1: atomic([6, 7])},
1915 ) == {1: [6, 7]}
20
21
22 def test_blame_and_merge():
23 dict1 = {
24 'key1': 11,
25 'key2': {
26 'key21': 121,
27 'key22': 122,
28 },
29 'key3': {
30 'key31': {
31 'key311': [1311],
32 },
33 },
34 }
35 dict2 = {
36 'key2': {
37 'key21': 221,
38 },
39 'key3': {
40 'key31': {
41 'key311': [2311],
42 'key312': 2312,
43 },
44 },
45 'key4': 24,
46 }
47 from pprint import pprint
48 blame = {}
49 merged = merge_dict(
50 {},
51 dict1,
52 )
53 blame_changed_paths(
54 {},
55 merged,
56 blame,
57 "dict1",
58 )
59 pprint(blame)
60 merged2 = merge_dict(
61 merged,
62 dict2,
63 )
64 blame_changed_paths(
65 merged,
66 merged2,
67 blame,
68 "dict2",
69 )
70 pprint(blame)
71
72 should = {
73 ('key1',): ("dict1",),
74 ('key2',): ("dict1", "dict2"),
75 ('key2', 'key21'): ("dict2",),
76 ('key2', 'key22'): ("dict1",),
77 ('key3',): ("dict1", "dict2"),
78 ('key3', 'key31',): ("dict1", "dict2"),
79 ('key3', 'key31', 'key311'): ("dict1", "dict2"),
80 ('key3', 'key31', 'key312'): ("dict2",),
81 ('key4',): ("dict2",),
82 }
83 pprint(should)
84 assert blame == should
85
86 assert merged2 == {
87 'key1': 11,
88 'key2': {
89 'key21': 221,
90 'key22': 122,
91 },
92 'key3': {
93 'key31': {
94 'key311': [1311, 2311],
95 'key312': 2312,
96 },
97 },
98 'key4': 24,
99 }
100
101
102 def test_changes_same():
103 assert not changes_metadata(
104 {
105 'foo': 1,
106 'bar': 2,
107 'baz': [3],
108 },
109 {
110 'baz': [3],
111 },
112 )
113
114
115 def test_changes_list():
116 assert changes_metadata(
117 {
118 'foo': 1,
119 'bar': 2,
120 'baz': [3],
121 },
122 {
123 'baz': [4],
124 },
125 )
126
127
128 def test_changes_nested_same():
129 assert not changes_metadata(
130 {
131 'foo': 1,
132 'bar': 2,
133 'baz': {
134 'frob': 4,
135 },
136 },
137 {
138 'baz': {
139 'frob': 4,
140 },
141 },
142 )
143
144
145 def test_changes_nested():
146 assert changes_metadata(
147 {
148 'foo': 1,
149 'bar': 2,
150 'baz': {
151 'frob': 4,
152 },
153 },
154 {
155 'baz': {
156 'frob': 5,
157 },
158 },
159 )
160
161
162 def test_changes_fault():
163 def callback1():
164 return 1
165
166 def callback2():
167 return 2
168
169 assert not changes_metadata(
170 {
171 'foo': Fault(callback1),
172 },
173 {
174 'foo': Fault(callback2),
175 },
176 )
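The removed tests above documented the merge semantics that the surviving test_atomic_* cases still rely on: nested dicts merge recursively, lists concatenate, and wrapping a value in atomic() forces outright replacement. A sketch of that observable contract, assuming a simple recursive merge (this is not BundleWrap's merge_dict, just an illustration):

class _Atomic(list):
    pass

def atomic(obj):
    # Mark a value so the merge replaces it instead of merging into it.
    return _Atomic(obj)

def merge(base, update):
    merged = dict(base)
    for key, value in update.items():
        if isinstance(value, _Atomic):
            merged[key] = list(value)                # outright replacement
        elif isinstance(value, dict) and isinstance(merged.get(key), dict):
            merged[key] = merge(merged[key], value)  # recursive merge
        elif isinstance(value, list) and isinstance(merged.get(key), list):
            merged[key] = merged[key] + value        # lists concatenate
        else:
            merged[key] = value
    return merged

assert merge({1: [5]}, {1: [6, 7]}) == {1: [5, 6, 7]}
assert merge({1: [5]}, {1: atomic([6, 7])}) == {1: [6, 7]}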
0 # -*- coding: utf-8 -*-
1 from __future__ import unicode_literals
2
3
40 from bundlewrap.metadata import atomic
51 from bundlewrap.utils.metastack import Metastack
62 from pytest import raises
178174 assert stack.get('something', None) == 456
179175
180176
181 def test_should_be_frozen():
177 def test_deepcopy():
182178 stack = Metastack()
183179 stack._set_layer('base', {'foo': {'bar': {1, 2, 3}}})
184180 foo = stack.get('foo', None)
185
186 with raises(AttributeError):
187 foo['bar'].add(4)
188
189 with raises(TypeError):
190 del foo['bar']
181 foo['bar'].add(4)
182 assert stack.get('foo/bar') == {1, 2, 3}
183 del foo['bar']
184 assert stack.get('foo/bar')
191185
192186
193187 def test_atomic_in_base():
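The rewrite of test_should_be_frozen into test_deepcopy above captures a behavior change: where get() used to return frozen objects that raised on mutation, it now returns deep copies, so callers may mutate the result freely without affecting the underlying layer. A toy stand-in illustrating that contract (TinyStack is hypothetical, not the real Metastack):

from copy import deepcopy

class TinyStack:
    def __init__(self):
        self._layers = {}

    def set_layer(self, name, data):
        self._layers[name] = data

    def get(self, path):
        value = self._layers['base']
        for part in path.split('/'):
            value = value[part]
        return deepcopy(value)  # hand out a copy, never the layer itself

stack = TinyStack()
stack.set_layer('base', {'foo': {'bar': {1, 2, 3}}})
copy_of_bar = stack.get('foo/bar')
copy_of_bar.add(4)                        # mutates only the copy
assert stack.get('foo/bar') == {1, 2, 3}  # the layer is untouched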
0 # -*- coding: utf-8 -*-
1 from __future__ import unicode_literals
2
30 from bundlewrap.items.pkg_openbsd import parse_pkg_name
41 from pytest import raises
52
00 from bundlewrap.metadata import atomic
1 from bundlewrap.utils.dicts import freeze_object, map_dict_keys, reduce_dict
1 from bundlewrap.utils.dicts import (
2 map_dict_keys,
3 reduce_dict,
4 validate_dict,
5 COLLECTION_OF_STRINGS,
6 TUPLE_OF_INTS,
7 )
8
29 from pytest import raises
3
4 from sys import version_info
510
611
712 def test_dictmap():
2429 ("key2", "key5", "key6"),
2530 ("key2", "key7"),
2631 ])
27
28
29 def test_freeze_object():
30 orig = {
31 'bool': True,
32 'int': 3,
33 'none': None,
34 'simple_list': [1, 2],
35 'simple_set': {3, 4},
36 'recursive_dict': {
37 'something': {
38 'else': 3,
39 },
40 'str': 'str',
41 },
42 'list_of_dicts': [
43 {
44 'name': 'yaml',
45 'attribute': 123,
46 'see': 'how lists of dicts are a bad idea anyway',
47 },
48 {
49 'name': 'yaml',
50 'attribute': 42,
51 'everything': ['got', 'the', 'same', 'name'],
52 },
53 ],
54 }
55
56 frozen = freeze_object(orig)
57
58 assert frozen['bool'] == True
59 assert frozen['int'] == 3
60 assert frozen['none'] == None
61 assert frozen['simple_list'][0] == 1
62 assert frozen['simple_list'][1] == 2
63 assert len(frozen['simple_list']) == 2
64 assert 4 in frozen['simple_set']
65 assert len(frozen['simple_set']) == 2
66 assert frozen['list_of_dicts'][0]['attribute'] == 123
67 assert frozen['recursive_dict']['something']['else'] == 3
68
69 # XXX Remove this if in bw 4.0 and always do the check
70 if version_info[0] >= 3:
71 with raises(TypeError):
72 frozen['bool'] = False
73
74 with raises(TypeError):
75 frozen['int'] = 10
76
77 with raises(TypeError):
78 frozen['none'] = None
79
80 with raises(TypeError):
81 frozen['list_of_dicts'][0]['attribute'] = 456
82
83 with raises(TypeError):
84 frozen['recursive_dict']['something']['else'] = 4
85
86 with raises(TypeError):
87 del frozen['int']
88
89 with raises(AttributeError):
90 frozen['simple_list'].append(5)
91
92 with raises(AttributeError):
93 frozen['simple_set'].add(5)
9432
9533
9634 def test_reduce_dict_two_lists():
13573 }],
13674 'd': 3,
13775 }
76
77
78 def test_validate_ok():
79 validate_dict(
80 {
81 'a': 5,
82 'b': "bee",
83 'c': None,
84 'd': ("t", "u", "p", "l", "e"),
85 'e': ["l", "i", "s", "t"],
86 'f': {"s", "e", "t"},
87 'g': (1, "2"),
88 'h': [1, "2"],
89 'i': {1, "2"},
90 'j': True,
91 'k': False,
92 'l': (1, 2, 3),
93 },
94 {
95 'a': int,
96 'b': str,
97 'c': type(None),
98 'd': COLLECTION_OF_STRINGS,
99 'e': COLLECTION_OF_STRINGS,
100 'f': COLLECTION_OF_STRINGS,
101 'g': tuple,
102 'h': list,
103 'i': set,
104 'j': bool,
105 'k': (int, bool),
106 'l': TUPLE_OF_INTS,
107 },
108 )
109
110
111 def test_validate_single_type_error():
112 with raises(ValueError):
113 validate_dict(
114 {
115 'a': 5,
116 },
117 {
118 'a': str,
119 },
120 )
121
122
123 def test_validate_multi_type_error():
124 with raises(ValueError):
125 validate_dict(
126 {
127 'a': 5,
128 },
129 {
130 'a': (str, list),
131 },
132 )
133
134
135 def test_validate_inner_type_error():
136 with raises(ValueError):
137 validate_dict(
138 {
139 'd': ("t", "u", "p", "l", "e", 47),
140 },
141 {
142 'd': COLLECTION_OF_STRINGS,
143 },
144 )
145
146
147 def test_validate_inner_type_error2():
148 with raises(ValueError):
149 validate_dict(
150 {
151 'l': (1, 2, "3"),
152 },
153 {
154 'l': TUPLE_OF_INTS,
155 },
156 )
157
158
159 def test_validate_missing_key():
160 with raises(ValueError):
161 validate_dict(
162 {
163 'a': 5,
164 },
165 {
166 'a': int,
167 'b': str,
168 },
169 required_keys=['a', 'b'],
170 )
171
172
173 def test_validate_required_key():
174 validate_dict(
175 {
176 'a': 5,
177 'b': "bee",
178 },
179 {
180 'a': int,
181 'b': str,
182 },
183 required_keys=['a', 'b'],
184 )
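Pulling together the calls shown in the tests above: validate_dict() takes the dict to check and a schema mapping each key to a type or a tuple of acceptable types, with sentinels like COLLECTION_OF_STRINGS and TUPLE_OF_INTS also validating element types, plus an optional required_keys list; any violation raises ValueError. A usage example built only from those demonstrated calls (node_attributes and its keys are invented sample data):

from bundlewrap.utils.dicts import (
    COLLECTION_OF_STRINGS,
    validate_dict,
)

node_attributes = {
    'hostname': "node1.example.com",
    'groups': ["webservers", "production"],
    'port': 443,
}
validate_dict(
    node_attributes,
    {
        'hostname': str,
        'groups': COLLECTION_OF_STRINGS,  # any collection of str
        'port': (int, bool),              # tuple = any of these types
    },
    required_keys=['hostname'],           # absent required keys raise ValueError
)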
0 # -*- coding: utf-8 -*-
1 from __future__ import unicode_literals
2
30 from bundlewrap.utils.table import ROW_SEPARATOR, render_table
41
52
0 # -*- coding: utf-8 -*-
1 from __future__ import unicode_literals
2
30 from datetime import timedelta
41
52 from bundlewrap.utils.text import (