New upstream release
Jonathan Carter
6 years ago
0 | # 3.3.0 | |
1 | ||
2 | 2018-03-09 | |
3 | ||
4 | * added experimental support for Kubernetes | |
5 | * some hooks can now raise an exception to skip nodes | |
6 | * fixed ED25519 public keys not being recognized as text files | |
7 | * fixed package names with hyphens for pkg_openbsd | |
8 | * fixed diff for user groups | |
9 | ||
10 | ||
0 | 11 | # 3.2.1 |
1 | 12 | |
2 | 13 | 2018-01-08 |
0 | 0 | # -*- coding: utf-8 -*- |
1 | 1 | from __future__ import unicode_literals |
2 | 2 | |
3 | VERSION = (3, 2, 1) | |
3 | VERSION = (3, 3, 0) | |
4 | 4 | VERSION_STRING = ".".join([str(v) for v in VERSION]) |
8 | 8 | from sys import exit |
9 | 9 | |
10 | 10 | from ..concurrency import WorkerPool |
11 | from ..exceptions import SkipNode | |
11 | 12 | from ..utils import SkipList |
12 | 13 | from ..utils.cmdline import get_target_nodes |
13 | 14 | from ..utils.table import ROW_SEPARATOR, render_table |
33 | 34 | io.stdout(_("{x} {node} skipped by --resume-file").format(node=bold(node.name), x=yellow("»"))) |
34 | 35 | return None |
35 | 36 | |
36 | node.repo.hooks.node_run_start( | |
37 | node.repo, | |
38 | node, | |
39 | command, | |
40 | ) | |
37 | try: | |
38 | node.repo.hooks.node_run_start( | |
39 | node.repo, | |
40 | node, | |
41 | command, | |
42 | ) | |
43 | except SkipNode as exc: | |
44 | io.stdout(_("{x} {node} skipped by hook ({reason})").format( | |
45 | node=bold(node.name), | |
46 | reason=str(exc) or _("no reason given"), | |
47 | x=yellow("»"), | |
48 | )) | |
49 | return None | |
41 | 50 | |
42 | 51 | result = node.run( |
43 | 52 | command, |
140 | 140 | pass |
141 | 141 | |
142 | 142 | |
143 | class SkipNode(UnicodeException): | |
144 | """ | |
145 | Can be raised by hooks to skip a node. | |
146 | """ | |
147 | pass | |
148 | ||
149 | ||
143 | 150 | class TemplateError(RepositoryError): |
144 | 151 | """ |
145 | 152 | Raised when an error occurs while rendering a template. |
12 | 12 | 'cmd_wrapper_inner': "export LANG=C; {}", |
13 | 13 | 'cmd_wrapper_outer': "sudo sh -c {}", |
14 | 14 | 'dummy': False, |
15 | 'kubectl_context': None, | |
15 | 16 | 'os': 'linux', |
16 | 17 | # Setting os_version to 0 by default will probably yield less |
17 | 18 | # surprises than setting it to max_int. Users will probably |
187 | 187 | def _template_content(self): |
188 | 188 | if self.attributes['source'] is not None: |
189 | 189 | filename = join(self.item_data_dir, self.attributes['source']) |
190 | if exists(filename): | |
191 | with open(filename, 'rb') as f: | |
192 | content = f.read() | |
193 | else: | |
190 | if not exists(filename): | |
194 | 191 | filename = join(self.item_dir, self.attributes['source']) |
195 | with open(filename, 'rb') as f: | |
196 | content = f.read() | |
197 | return force_text(content) | |
192 | with open(filename, 'rb') as f: | |
193 | return force_text(f.read()) | |
198 | 194 | else: |
199 | 195 | return force_text(self.attributes['content']) |
200 | 196 |
0 | # -*- coding: utf-8 -*- | |
1 | from __future__ import unicode_literals | |
2 | ||
3 | from abc import ABCMeta | |
4 | import json | |
5 | from os.path import exists, join | |
6 | import re | |
7 | ||
8 | from bundlewrap.exceptions import BundleError | |
9 | from bundlewrap.operations import run_local | |
10 | from bundlewrap.items import BUILTIN_ITEM_ATTRIBUTES, Item | |
11 | from bundlewrap.items.files import content_processor_jinja2, content_processor_mako | |
12 | from bundlewrap.utils.dicts import merge_dict, reduce_dict | |
13 | from bundlewrap.utils.ui import io | |
14 | from bundlewrap.utils.text import force_text, mark_for_translation as _ | |
15 | from six import add_metaclass | |
16 | import yaml | |
17 | ||
18 | ||
19 | NAME_REGEX = r"[a-z0-9-]+/[a-z0-9-]{1,253}" | |
20 | NAME_REGEX_COMPILED = re.compile(NAME_REGEX) | |
21 | ||
22 | ||
def log_error(run_result):
    """Log stdout/stderr of a failed local command at debug level."""
    if run_result.return_code == 0:
        return
    io.debug(run_result.stdout.decode('utf-8'))
    io.debug(run_result.stderr.decode('utf-8'))
27 | ||
28 | ||
@add_metaclass(ABCMeta)
class KubernetesItem(Item):
    """
    A generic Kubernetes item.

    Item names take the form "<namespace>/<resource name>". Subclasses
    set KIND, KUBECTL_RESOURCE_TYPE and KUBERNETES_APIVERSION to map
    onto a concrete resource type.
    """
    ITEM_ATTRIBUTES = {
        'delete': False,
        'encoding': "utf-8",  # required by content processors
        'manifest': None,
        'manifest_file': None,
        'manifest_processor': None,
        'context': None,
    }
    KIND = None
    KUBECTL_RESOURCE_TYPE = None
    KUBERNETES_APIVERSION = "v1"

    def __init__(self, *args, **kwargs):
        super(KubernetesItem, self).__init__(*args, **kwargs)
        # manifest files are looked up first in the bundle data dir,
        # then in the regular bundle dir (see _template_content)
        self.item_data_dir = join(self.bundle.bundle_data_dir, "manifests")
        self.item_dir = join(self.bundle.bundle_dir, "manifests")

    @property
    def _template_content(self):  # required by content processors
        """Return the raw manifest file content as text."""
        filename = join(self.item_data_dir, self.attributes['manifest_file'])
        if not exists(filename):
            # fall back to the non-data manifests directory
            filename = join(self.item_dir, self.attributes['manifest_file'])
        with open(filename, 'rb') as f:
            return force_text(f.read())

    def cdict(self):
        # a None cdict means the resource should not exist
        if self.attributes['delete']:
            return None
        else:
            return {'manifest': self.manifest}

    def fix(self, status):
        """Create/update or delete the resource via kubectl."""
        if status.must_be_deleted:
            result = run_local([
                "kubectl",
                "--context={}".format(self.node.kubectl_context),
                "--namespace={}".format(self.namespace),
                "delete",
                self.KUBECTL_RESOURCE_TYPE,
                self.resource_name,
            ])
            log_error(result)
        else:
            # pipe the manifest into `kubectl apply -f -`
            result = run_local([
                "kubectl",
                "--context={}".format(self.node.kubectl_context),
                "--namespace={}".format(self.namespace),
                "apply",
                "-f",
                "-",
            ], data_stdin=self.manifest.encode('utf-8'))
            log_error(result)

    def get_auto_deps(self, items, _secrets=True):
        """
        Depend on this item's namespace and (unless _secrets is False)
        on k8s_secret items in the same namespace.

        Raises BundleError if the namespace is marked for deletion
        while this item is not.
        """
        deps = []
        for item in items:
            if (
                item.ITEM_TYPE_NAME == 'k8s_namespace' and
                item.name == self.namespace
            ):
                if item.attributes['delete'] and not self.attributes['delete']:
                    raise BundleError(_(
                        "{item} (bundle '{bundle}' on {node}) "
                        "cannot exist in namespace marked for deletion"
                    ).format(
                        item=self.id,
                        bundle=self.bundle.name,
                        node=self.node.name,
                    ))
                deps.append(item.id)
            elif (
                _secrets and
                item.ITEM_TYPE_NAME == 'k8s_secret' and
                item.namespace == self.namespace
            ):
                deps.append(item.id)
        return deps

    @property
    def manifest(self):
        """
        Return the manifest as canonical JSON text, merging the
        user-supplied manifest into apiVersion/kind/metadata.name
        defaults.
        """
        if self.attributes['manifest_processor'] == 'jinja2':
            content_processor = content_processor_jinja2
        elif self.attributes['manifest_processor'] == 'mako':
            content_processor = content_processor_mako
        else:
            # no templating: pass the file content through unchanged
            def content_processor(item):
                return item._template_content.encode('utf-8')

        if self.attributes['manifest'] is not None or self.attributes['manifest_file'] is None:
            user_manifest = self.attributes['manifest'] or {}
        elif (
            self.attributes['manifest_file'].endswith(".yaml") or
            self.attributes['manifest_file'].endswith(".yml")
        ):
            # NOTE(review): yaml.load without an explicit Loader permits
            # arbitrary object construction; manifests come from the repo
            # itself, but yaml.safe_load would be the safer choice
            user_manifest = yaml.load(content_processor(self))
        elif self.attributes['manifest_file'].endswith(".json"):
            user_manifest = json.loads(content_processor(self))
        else:
            # previously fell through to a confusing UnboundLocalError
            raise BundleError(_(
                "{item} from bundle '{bundle}' has a manifest_file with an "
                "unrecognized extension (must be .yaml, .yml, or .json)"
            ).format(item=self.id, bundle=self.bundle.name))

        return json.dumps(merge_dict(
            {
                'apiVersion': self.KUBERNETES_APIVERSION,
                'kind': self.KIND,
                'metadata': {
                    'name': self.resource_name,
                },
            },
            user_manifest,
        ), indent=4, sort_keys=True)

    @property
    def namespace(self):
        # item names are "<namespace>/<resource name>"
        return self.name.split("/", 1)[0]

    def patch_attributes(self, attributes):
        if 'context' not in attributes:
            attributes['context'] = {}
        return attributes

    @property
    def resource_name(self):
        return self.name.split("/", 1)[1]

    def sdict(self):
        """Return the current state of the resource, or None if absent."""
        result = run_local([
            "kubectl",
            "--context={}".format(self.node.kubectl_context),
            "--namespace={}".format(self.namespace),
            "get",
            "-o",
            "json",
            self.KUBECTL_RESOURCE_TYPE,
            self.resource_name,
        ])
        if result.return_code == 0:
            full_json_response = json.loads(result.stdout)
            if full_json_response.get("status", {}).get("phase") == "Terminating":
                # this resource is currently being deleted, consider it gone
                return None
            # only compare the keys we manage ourselves
            return {'manifest': json.dumps(reduce_dict(
                full_json_response,
                json.loads(self.manifest),
            ), indent=4, sort_keys=True)}
        elif result.return_code == 1 and "NotFound" in result.stderr.decode('utf-8'):
            return None
        else:
            io.debug(result.stdout.decode('utf-8'))
            io.debug(result.stderr.decode('utf-8'))
            raise RuntimeError(_("error getting state of {}, check `bw --debug`".format(self.id)))

    @classmethod
    def validate_attributes(cls, bundle, item_id, attributes):
        if attributes.get('delete', False):
            for attr in attributes.keys():
                if attr not in ['delete'] + list(BUILTIN_ITEM_ATTRIBUTES.keys()):
                    raise BundleError(_(
                        "{item} from bundle '{bundle}' cannot have other "
                        "attributes besides 'delete'"
                    ).format(item=item_id, bundle=bundle.name))
        if attributes.get('manifest') and attributes.get('manifest_file'):
            raise BundleError(_(
                "{item} from bundle '{bundle}' cannot have both 'manifest' and 'manifest_file'"
            ).format(item=item_id, bundle=bundle.name))
        if attributes.get('manifest_processor') not in (None, 'jinja2', 'mako'):
            raise BundleError(_(
                "{item} from bundle '{bundle}' has invalid manifest_processor "
                "(must be 'jinja2' or 'mako')"
            ).format(item=item_id, bundle=bundle.name))

    @classmethod
    def validate_name(cls, bundle, name):
        if not NAME_REGEX_COMPILED.match(name):
            raise BundleError(_(
                "name for {item_type}:{name} (bundle '{bundle}') "
                "on {node} doesn't match {regex}"
            ).format(
                item_type=cls.ITEM_TYPE_NAME,
                name=name,
                bundle=bundle.name,
                node=bundle.node.name,
                regex=NAME_REGEX,  # fixed: was `refex`, causing a KeyError when this error fired
            ))
214 | ||
215 | ||
class KubernetesConfigMap(KubernetesItem):
    """ConfigMap resources."""
    BUNDLE_ATTRIBUTE_NAME = "k8s_configmaps"
    KIND = "ConfigMap"
    KUBECTL_RESOURCE_TYPE = "configmaps"
    KUBERNETES_APIVERSION = "v1"
    ITEM_TYPE_NAME = "k8s_configmap"
222 | ||
223 | ||
class KubernetesCronJob(KubernetesItem):
    """CronJob resources."""
    BUNDLE_ATTRIBUTE_NAME = "k8s_cronjobs"
    KIND = "CronJob"
    KUBECTL_RESOURCE_TYPE = "cronjobs"
    KUBERNETES_APIVERSION = "batch/v1beta1"
    ITEM_TYPE_NAME = "k8s_cronjob"
230 | ||
231 | ||
class KubernetesDaemonSet(KubernetesItem):
    """DaemonSet resources."""
    BUNDLE_ATTRIBUTE_NAME = "k8s_daemonsets"
    KIND = "DaemonSet"
    KUBECTL_RESOURCE_TYPE = "daemonsets"
    KUBERNETES_APIVERSION = "v1"
    ITEM_TYPE_NAME = "k8s_daemonset"

    def get_auto_deps(self, items):
        # additionally depend on PVCs and ConfigMaps in our namespace
        deps = super(KubernetesDaemonSet, self).get_auto_deps(items)
        deps.extend(
            item.id for item in items
            if item.ITEM_TYPE_NAME in ('k8s_pvc', 'k8s_configmap')
            and item.namespace == self.namespace
        )
        return deps
248 | ||
249 | ||
class KubernetesDeployment(KubernetesItem):
    """Deployment resources."""
    BUNDLE_ATTRIBUTE_NAME = "k8s_deployments"
    KIND = "Deployment"
    KUBECTL_RESOURCE_TYPE = "deployments"
    KUBERNETES_APIVERSION = "extensions/v1beta1"
    ITEM_TYPE_NAME = "k8s_deployment"

    def get_auto_deps(self, items):
        # additionally depend on PVCs and ConfigMaps in our namespace
        deps = super(KubernetesDeployment, self).get_auto_deps(items)
        deps.extend(
            item.id for item in items
            if item.ITEM_TYPE_NAME in ('k8s_pvc', 'k8s_configmap')
            and item.namespace == self.namespace
        )
        return deps
266 | ||
267 | ||
class KubernetesIngress(KubernetesItem):
    """Ingress resources."""
    BUNDLE_ATTRIBUTE_NAME = "k8s_ingresses"
    KIND = "Ingress"
    KUBECTL_RESOURCE_TYPE = "ingresses"
    KUBERNETES_APIVERSION = "extensions/v1beta1"
    ITEM_TYPE_NAME = "k8s_ingress"

    def get_auto_deps(self, items):
        # additionally depend on services in our namespace
        deps = super(KubernetesIngress, self).get_auto_deps(items)
        deps.extend(
            item.id for item in items
            if item.ITEM_TYPE_NAME == 'k8s_service'
            and item.namespace == self.namespace
        )
        return deps
284 | ||
285 | ||
class KubernetesNamespace(KubernetesItem):
    """
    Namespace resources. Unlike other k8s items, names are just the
    namespace name itself (no "<namespace>/<name>" prefix).
    """
    BUNDLE_ATTRIBUTE_NAME = "k8s_namespaces"
    KIND = "Namespace"
    KUBECTL_RESOURCE_TYPE = "namespaces"
    KUBERNETES_APIVERSION = "v1"
    ITEM_TYPE_NAME = "k8s_namespace"

    def get_auto_deps(self, items):
        # namespaces do not depend on any other items
        return []

    @property
    def namespace(self):
        # the item name IS the namespace
        return self.name

    @property
    def resource_name(self):
        return self.name

    @classmethod
    def validate_name(cls, bundle, name):
        # the "<namespace>/<name>" format check does not apply here
        pass
307 | ||
308 | ||
class KubernetesPersistentVolumeClain(KubernetesItem):
    """PersistentVolumeClaim resources."""
    # NOTE(review): class name misspells "Claim"; renaming would break
    # any external references to this class, so it is left as-is.
    BUNDLE_ATTRIBUTE_NAME = "k8s_pvc"
    KIND = "PersistentVolumeClaim"
    KUBECTL_RESOURCE_TYPE = "persistentvolumeclaims"
    KUBERNETES_APIVERSION = "v1"
    ITEM_TYPE_NAME = "k8s_pvc"
315 | ||
316 | ||
class KubernetesSecret(KubernetesItem):
    """Secret resources."""
    BUNDLE_ATTRIBUTE_NAME = "k8s_secrets"
    KIND = "Secret"
    KUBECTL_RESOURCE_TYPE = "secrets"
    KUBERNETES_APIVERSION = "v1"
    ITEM_TYPE_NAME = "k8s_secret"

    def get_auto_deps(self, items):
        # _secrets=False disables the automatic dependency on k8s_secret
        # items in the same namespace — presumably to avoid circular or
        # self-dependencies among secrets; confirm against KubernetesItem
        return super(KubernetesSecret, self).get_auto_deps(items, _secrets=False)
326 | ||
327 | ||
class KubernetesService(KubernetesItem):
    """Service resources."""
    BUNDLE_ATTRIBUTE_NAME = "k8s_services"
    KIND = "Service"
    KUBECTL_RESOURCE_TYPE = "services"
    KUBERNETES_APIVERSION = "v1"
    ITEM_TYPE_NAME = "k8s_service"
334 | ||
335 | ||
class KubernetesStatefulSet(KubernetesItem):
    """StatefulSet resources."""
    BUNDLE_ATTRIBUTE_NAME = "k8s_statefulsets"
    KIND = "StatefulSet"
    KUBECTL_RESOURCE_TYPE = "statefulsets"
    KUBERNETES_APIVERSION = "apps/v1"
    ITEM_TYPE_NAME = "k8s_statefulset"

    def get_auto_deps(self, items):
        # additionally depend on PVCs and ConfigMaps in our namespace
        deps = super(KubernetesStatefulSet, self).get_auto_deps(items)
        deps.extend(
            item.id for item in items
            if item.ITEM_TYPE_NAME in ('k8s_pvc', 'k8s_configmap')
            and item.namespace == self.namespace
        )
        return deps
8 | 8 | from bundlewrap.utils.text import mark_for_translation as _ |
9 | 9 | |
10 | 10 | |
11 | PKGSPEC_REGEX = re.compile(r"^([^-]+)-(\d[^-]+)(-(.+))?$") | |
11 | PKGSPEC_REGEX = re.compile(r"^(.+)-(\d.*)$") | |
12 | ||
13 | ||
def parse_pkg_name(pkgname, line):
    """
    Parse one OpenBSD package spec line ("<name>-<version>[-<flavor>]").

    Returns (True, version, flavor) if the line describes *pkgname*,
    (False, None, None) otherwise. flavor is "" when absent.

    Raises AssertionError on lines that do not match the expected
    format (kept as assert for consistency with the existing callers).
    """
    matches = PKGSPEC_REGEX.match(line)
    assert matches is not None, _("Unexpected OpenBSD package name: {line}").format(line=line)

    installed_package, installed_version_and_more = matches.groups()
    assert not installed_version_and_more.endswith("-"), \
        _("Unexpected OpenBSD package name (ends in dash): {line}").format(line=line)

    if installed_package != pkgname:
        return False, None, None

    # version runs up to the first dash; everything after it is the flavor
    installed_version, _sep, installed_flavor = installed_version_and_more.partition("-")
    return True, installed_version, installed_flavor
12 | 34 | |
13 | 35 | |
14 | 36 | def pkg_install(node, pkgname, flavor, version): |
42 | 64 | may_fail=True, |
43 | 65 | ) |
44 | 66 | for line in result.stdout.decode('utf-8').strip().splitlines(): |
45 | installed_package, installed_version, _, installed_flavor = \ | |
46 | PKGSPEC_REGEX.match(line).groups() | |
47 | if installed_package == pkgname: | |
48 | # If our regex didn't match a flavor, then this is | |
49 | # equivalent to using the "normal" flavor. | |
50 | if installed_flavor is None: | |
51 | installed_flavor = "" | |
67 | found, installed_version, installed_flavor = parse_pkg_name(pkgname, line) | |
68 | if found: | |
52 | 69 | return installed_version, installed_flavor |
70 | ||
53 | 71 | return False, None |
54 | 72 | |
55 | 73 |
152 | 152 | |
153 | 153 | def display_dicts(self, cdict, sdict, keys): |
154 | 154 | for attr_name, attr_display_name in _ATTRIBUTE_NAMES.items(): |
155 | if attr_name == attr_display_name: | |
156 | # Don't change anything; the `del`s below would | |
157 | # always remove the key entirely! | |
158 | continue | |
155 | 159 | try: |
156 | 160 | keys.remove(attr_name) |
157 | 161 | except ValueError: |
43 | 43 | self.interactive = interactive |
44 | 44 | |
45 | 45 | def __enter__(self): |
46 | if self.node.os == 'kubernetes': | |
47 | # no locking required | |
48 | return self | |
46 | 49 | with tempfile() as local_path: |
47 | 50 | if not self.ignore: |
48 | 51 | with io.job(_("{node} checking hard lock status").format(node=bold(self.node.name))): |
100 | 103 | return self |
101 | 104 | |
102 | 105 | def __exit__(self, type, value, traceback): |
106 | if self.node.os == 'kubernetes': | |
107 | # no locking required | |
108 | return | |
103 | 109 | with io.job(_("{node} removing hard lock").format(node=bold(self.node.name))): |
104 | 110 | result = self.node.run("rm -R {}".format(quote(HARD_LOCK_PATH)), may_fail=True) |
105 | 111 | |
145 | 151 | |
146 | 152 | |
147 | 153 | def softlock_add(node, lock_id, comment="", expiry="8h", item_selectors=None): |
154 | assert node.os != 'kubernetes' | |
148 | 155 | if "\n" in comment: |
149 | 156 | raise ValueError(_("Lock comments must not contain any newlines")) |
150 | 157 | if not item_selectors: |
175 | 182 | |
176 | 183 | |
177 | 184 | def softlock_list(node): |
185 | if node.os == 'kubernetes': | |
186 | return [] | |
178 | 187 | with io.job(_("{} checking soft locks").format(bold(node.name))): |
179 | 188 | cat = node.run("cat {}".format(SOFT_LOCK_FILE.format(id="*")), may_fail=True) |
180 | 189 | if cat.return_code != 0: |
203 | 212 | |
204 | 213 | |
205 | 214 | def softlock_remove(node, lock_id): |
215 | assert node.os != 'kubernetes' | |
206 | 216 | io.debug(_("removing soft lock {id} from node {node}").format( |
207 | 217 | id=lock_id, |
208 | 218 | node=node.name, |
18 | 18 | NodeLockedException, |
19 | 19 | NoSuchBundle, |
20 | 20 | RepositoryError, |
21 | SkipNode, | |
21 | 22 | ) |
22 | 23 | from .group import GROUP_ATTR_DEFAULTS |
23 | 24 | from .itemqueue import ItemQueue |
343 | 344 | OS_FAMILY_DEBIAN + \ |
344 | 345 | OS_FAMILY_REDHAT |
345 | 346 | |
346 | OS_KNOWN = OS_FAMILY_BSD + OS_FAMILY_LINUX | |
347 | ||
348 | def __init__(self, name, attributes=None, transport='ssh', transport_options=None): | |
347 | OS_KNOWN = OS_FAMILY_BSD + OS_FAMILY_LINUX + ('kubernetes',) | |
348 | ||
349 | def __init__(self, name, attributes=None): | |
349 | 350 | if attributes is None: |
350 | 351 | attributes = {} |
351 | if transport_options is None: | |
352 | transport_options = {} | |
353 | ||
354 | if transport == 'ssh': | |
355 | transport_options.setdefault( | |
356 | 'add_host_keys', | |
357 | environ.get('BW_ADD_HOST_KEYS', False) == "1", | |
358 | ) | |
359 | 352 | |
360 | 353 | if not validate_name(name): |
361 | 354 | raise RepositoryError(_("'{}' is not a valid node name").format(name)) |
362 | 355 | |
356 | self._add_host_keys = environ.get('BW_ADD_HOST_KEYS', False) == "1" | |
363 | 357 | self._bundles = attributes.get('bundles', []) |
364 | 358 | self._compiling_metadata = Lock() |
365 | 359 | self._dynamic_group_lock = Lock() |
370 | 364 | self._ssh_first_conn_lock = Lock() |
371 | 365 | self.hostname = attributes.get('hostname', name) |
372 | 366 | self.name = name |
373 | self.transport = transport | |
374 | self.transport_options = transport_options | |
375 | 367 | |
376 | 368 | for attr in GROUP_ATTR_DEFAULTS: |
377 | 369 | setattr(self, "_{}".format(attr), attributes.get(attr)) |
567 | 559 | )) |
568 | 560 | return None |
569 | 561 | |
562 | try: | |
563 | self.repo.hooks.node_apply_start( | |
564 | self.repo, | |
565 | self, | |
566 | interactive=interactive, | |
567 | ) | |
568 | except SkipNode as exc: | |
569 | io.stdout(_("{x} {node} skipped by hook ({reason})").format( | |
570 | node=bold(self.name), | |
571 | reason=str(exc) or _("no reason given"), | |
572 | x=yellow("»"), | |
573 | )) | |
574 | return None | |
575 | ||
570 | 576 | start = datetime.now() |
571 | ||
572 | 577 | io.stdout(_("{x} {node} {started} at {time}").format( |
573 | 578 | node=bold(self.name), |
574 | 579 | started=bold(_("started")), |
575 | 580 | time=start.strftime("%Y-%m-%d %H:%M:%S"), |
576 | 581 | x=blue("i"), |
577 | 582 | )) |
578 | self.repo.hooks.node_apply_start( | |
579 | self.repo, | |
580 | self, | |
581 | interactive=interactive, | |
582 | ) | |
583 | 583 | |
584 | 584 | try: |
585 | 585 | with NodeLock(self, interactive=interactive, ignore=force) as lock: |
631 | 631 | self.hostname, |
632 | 632 | remote_path, |
633 | 633 | local_path, |
634 | add_host_keys=self.transport_options['add_host_keys'], | |
634 | add_host_keys=self._add_host_keys, | |
635 | 635 | wrapper_inner=self.cmd_wrapper_inner, |
636 | 636 | wrapper_outer=self.cmd_wrapper_outer, |
637 | 637 | ) |
686 | 686 | return self.repo._metadata_for_node(self.name, partial=True) |
687 | 687 | |
688 | 688 | def run(self, command, data_stdin=None, may_fail=False, log_output=False): |
689 | assert self.os != 'kubernetes' | |
690 | ||
689 | 691 | if log_output: |
690 | 692 | def log_function(msg): |
691 | 693 | io.stdout("{x} {node} {msg}".format( |
696 | 698 | else: |
697 | 699 | log_function = None |
698 | 700 | |
699 | add_host_keys = self.transport_options['add_host_keys'] | |
700 | ||
701 | 701 | if not self._ssh_conn_established: |
702 | 702 | # Sometimes we're opening SSH connections to a node too fast |
703 | 703 | # for OpenSSH to establish the ControlMaster socket for the |
707 | 707 | # multiplexed connection. |
708 | 708 | if self._ssh_first_conn_lock.acquire(False): |
709 | 709 | try: |
710 | operations.run(self.hostname, "true", add_host_keys=add_host_keys) | |
710 | operations.run(self.hostname, "true", add_host_keys=self._add_host_keys) | |
711 | 711 | self._ssh_conn_established = True |
712 | 712 | finally: |
713 | 713 | self._ssh_first_conn_lock.release() |
720 | 720 | return operations.run( |
721 | 721 | self.hostname, |
722 | 722 | command, |
723 | add_host_keys=add_host_keys, | |
723 | add_host_keys=self._add_host_keys, | |
724 | 724 | data_stdin=data_stdin, |
725 | 725 | ignore_failure=may_fail, |
726 | 726 | log_function=log_function, |
729 | 729 | ) |
730 | 730 | |
731 | 731 | def upload(self, local_path, remote_path, mode=None, owner="", group="", may_fail=False): |
732 | assert self.os != 'kubernetes' | |
732 | 733 | return operations.upload( |
733 | 734 | self.hostname, |
734 | 735 | local_path, |
735 | 736 | remote_path, |
736 | add_host_keys=self.transport_options['add_host_keys'], | |
737 | add_host_keys=self._add_host_keys, | |
737 | 738 | group=group, |
738 | 739 | mode=mode, |
739 | 740 | owner=owner, |
253 | 253 | return merged |
254 | 254 | |
255 | 255 | |
def reduce_dict(full_dict, template_dict):
    """
    Recursively remove all keys from full_dict that are not present in
    template_dict. Also descends into lists, matching elements by
    index; elements of full_dict beyond the length of the template
    list are kept unchanged.

    Non-dict, non-list values are returned as-is, as is any value
    whose template has a different container type.

    Example:

        full_dict = {
            'a': [{
                'b': 1,
                'c': 2,  # this will be removed from the final result
            }],
            'd': 3,
        }
        template_dict = {
            'a': [{
                'b': None,
            }],
            'd': None,
            'e': None,
        }
        reduce_dict(full_dict, template_dict) == {
            'a': [{
                'b': 1,
            }],
            'd': 3,
        }
    """
    if isinstance(full_dict, list):
        if not isinstance(template_dict, list):
            return full_dict
        result = []
        for index, element in enumerate(full_dict):
            try:
                template_element = template_dict[index]
            except IndexError:
                # no template for this element: keep it unchanged
                template_element = element
            result.append(reduce_dict(element, template_element))
        return result
    elif isinstance(full_dict, dict):
        if not isinstance(template_dict, dict):
            return full_dict
        return {
            key: reduce_dict(value, template_dict[key])
            for key, value in full_dict.items()
            if key in template_dict
        }
    else:
        return full_dict
305 | ||
306 | ||
256 | 307 | def statedict_to_json(sdict, pretty=False): |
257 | 308 | """ |
258 | 309 | Returns a canonical JSON representation of the given statedict. |
106 | 106 | "text" in self.desc or |
107 | 107 | self.desc in ( |
108 | 108 | "empty", |
109 | "OpenSSH ED25519 public key", | |
109 | 110 | "OpenSSH RSA public key", |
110 | 111 | "OpenSSH DSA public key", |
111 | 112 | ) |
129 | 130 | result = self.node.run("sha1sum -- {}".format(quote(self.path))) |
130 | 131 | return force_text(result.stdout).strip().split()[0] |
131 | 132 | |
133 | @cached_property | |
134 | def sha256(self): | |
135 | if self.node.os == 'macos': | |
136 | result = self.node.run("shasum -a 256 -- {}".format(quote(self.path))) | |
137 | elif self.node.os in self.node.OS_FAMILY_BSD: | |
138 | result = self.node.run("sha256 -q -- {}".format(quote(self.path))) | |
139 | else: | |
140 | result = self.node.run("sha256sum -- {}".format(quote(self.path))) | |
141 | return force_text(result.stdout).strip().split()[0] | |
142 | ||
132 | 143 | @property |
133 | 144 | def size(self): |
134 | 145 | return self.stat['size'] |
0 | bundlewrap (3.3.0-1) unstable; urgency=medium | |
1 | ||
2 | * New upstream release | |
3 | * Update compat to level 11 | |
4 | * Move VCS repository to salsa.debian.net | |
5 | * Migrate to using python3 | |
6 | * Update copyright years | |
7 | ||
8 | -- Jonathan Carter <jcc@debian.org> Mon, 12 Mar 2018 09:22:10 +0200 | |
9 | ||
0 | 10 | bundlewrap (3.2.1-1) unstable; urgency=medium |
1 | 11 | |
2 | 12 | * New upstream release |
2 | 2 | Priority: optional |
3 | 3 | Maintainer: Jonathan Carter <jcc@debian.org> |
4 | 4 | Uploaders: Python Applications Packaging Team <python-apps-team@lists.alioth.debian.org> |
5 | Build-Depends: debhelper (>= 10), | |
5 | Build-Depends: debhelper (>= 11), | |
6 | 6 | dh-python, |
7 | python, | |
8 | python-setuptools | |
7 | python3-minimal, | |
8 | python3, | |
9 | python3-setuptools, | |
10 | python3-requests, | |
11 | python3-cryptography | |
9 | 12 | Standards-Version: 4.1.3 |
13 | X-Python3-Version: >= 3.4 | |
10 | 14 | Homepage: http://bundlewrap.org/ |
11 | 15 | Vcs-Git: https://salsa.debian.org/python-team/applications/bundlewrap.git |
12 | 16 | Vcs-Browser: https://salsa.debian.org/python-team/applications/bundlewrap |
13 | 17 | |
14 | 18 | Package: bundlewrap |
15 | 19 | Architecture: all |
16 | Depends: ${python:Depends}, | |
20 | Depends: ${python3:Depends}, | |
17 | 21 | ${misc:Depends}, |
18 | ${shlibs:Depends} | |
22 | ${shlibs:Depends}, | |
19 | 23 | Description: Decentralized configuration management system with Python |
20 | 24 | By allowing for easy and low-overhead config management, BundleWrap fills |
21 | 25 | the gap between complex deployments using Chef or Puppet and old school |
2 | 2 | Source: https://github.com/bundlewrap/bundlewrap |
3 | 3 | |
4 | 4 | Files: * |
5 | Copyright: 2016-2017 Torsten Rehn <torsten@rehn.email> | |
5 | Copyright: 2016-2018 Torsten Rehn <torsten@rehn.email> | |
6 | 6 | Comment: Copyrights are assigned to Torsten Rehn (see: CAA.md) |
7 | 7 | Additional author: Peter Hofmann <scm@uninformativ.de> |
8 | 8 | Additional author: Tim Buchwaldt <tim@buchwaldt.ws> |
11 | 11 | License: GPL-3 |
12 | 12 | |
13 | 13 | Files: debian/* |
14 | Copyright: 2016-2017 Jonathan Carter <jcarter@linux.com> | |
14 | Copyright: 2016-2018 Jonathan Carter <jcarter@linux.com> | |
15 | 15 | License: GPL-3 |
16 | 16 | |
17 | 17 | License: GPL-3 |
0 | 0 | @import url('https://fonts.googleapis.com/css?family=Maven+Pro'); |
1 | @import url('https://fonts.googleapis.com/css?family=Roboto:400,400i,700'); | |
1 | @import url('https://fonts.googleapis.com/css?family=Open+Sans:400,400i,700'); | |
2 | 2 | @import url('https://fonts.googleapis.com/css?family=Source+Code+Pro:400,700'); |
3 | 3 | body, h1, h2, h3, h4, h5, h6 { |
4 | 4 | background: white; |
5 | font-family: Roboto, Helvetica, sans-serif; | |
5 | font-family: "Open Sans", Helvetica, sans-serif; | |
6 | 6 | } |
7 | 7 | h1, h2, h3, h4, h5, h6 { |
8 | 8 | margin-top: 0; |
55 | 55 | |
56 | 56 | ### Step 8: Create pull request |
57 | 57 | |
58 | Create a pull request on GitHub to request inclusion of your new plugin in the official repo. Only then will your plugin become available to be installed by `bw repo plugin install yourplugin`. | |
58 | Create a pull request on GitHub to request inclusion of your new plugin in the official repo. Once your branch is merged, your plugin will become available to be installed by `bw repo plugin install yourplugin` and appear on [plugins.bundlewrap.org](http://plugins.bundlewrap.org). | |
59 | 59 | |
60 | 60 | <br> |
61 | 61 |
0 | # Kubernetes | |
1 | ||
2 | <div class="alert alert-warning">Support for Kubernetes is experimental at this time. Backwards-incompatible changes may happen at any time.</div> | |
3 | ||
4 | To manage a Kubernetes cluster with BundleWrap, you first need to set up a kubectl context that works with the cluster. If you're running on Google Kubernetes Engine for example, this can be accomplished with: | |
5 | ||
6 | gcloud auth login | |
7 | gcloud container clusters get-credentials your-cluster --zone your-zone --project your-project | |
8 | ||
9 | You also need to make sure context names are the same on your teammates' machines. | |
10 | ||
11 | <br> | |
12 | ||
13 | ## Setting up a node | |
14 | ||
15 | Each Kubernetes cluster you manage becomes a node. Here is an example `nodes.py`: | |
16 | ||
17 | nodes = { | |
18 | "my-cluster": { | |
19 | 'os': 'kubernetes', | |
20 | 'bundles': ["my-app"], | |
21 | 'kubectl_context': "my-context", | |
22 | }, | |
23 | } | |
24 | ||
25 | <br> | |
26 | ||
27 | ## Kubernetes bundles | |
28 | ||
29 | You can then proceed to write bundles as with regular nodes, but using the [k8s_ items](../items/k8s.md): | |
30 | ||
31 | k8s_namespaces = { | |
32 | "my-app": {}, | |
33 | } | |
34 | ||
35 | k8s_deployments = { | |
36 | "my-app/my-deployment": { | |
37 | 'manifest': { | |
38 | "spec": { | |
39 | "selector": { | |
40 | "matchLabels": { | |
41 | "app": "nginx", | |
42 | }, | |
43 | }, | |
44 | "replicas": 2, | |
45 | "template": { | |
46 | "metadata": { | |
47 | "labels": { | |
48 | "app": "nginx", | |
49 | }, | |
50 | }, | |
51 | "spec": { | |
52 | "containers": [ | |
53 | { | |
54 | "name": "nginx", | |
55 | "image": "nginx:latest", | |
56 | "ports": [ | |
57 | {"containerPort": 80}, | |
58 | ] | |
59 | }, | |
60 | ], | |
61 | }, | |
62 | }, | |
63 | }, | |
64 | }, | |
65 | }, | |
66 | } | |
67 | ||
68 | All item names (except namespaces themselves) must be prefixed with the name of a namespace and a forward slash `/`. Note that BundleWrap will include defaults for the `apiVersion`, `kind`, and `metadata/name` keys, but you can override them if you must. | |
69 | ||
70 | Alternatively, you can keep your resource definitions in manifest files: | |
71 | ||
72 | k8s_namespaces = { | |
73 | "my-app": {}, | |
74 | } | |
75 | ||
76 | k8s_deployments = { | |
77 | "my-app/my-deployment": { | |
78 | 'manifest_file': "my_deployment.yaml", | |
79 | }, | |
80 | } | |
81 | ||
82 | BundleWrap will then look for `my_deployment.yaml` in `bundles/<bundle>/manifests/`. You can also use [templating](../items/k8s.md#manifest_processor) in these files. |
0 | # Kubernetes items | |
1 | ||
2 | <div class="alert alert-warning">Support for Kubernetes is experimental at this time. Backwards-incompatible changes may happen at any time.</div> | |
3 | ||
4 | See also: [Guide to Kubernetes](../guide/kubernetes.md) | |
5 | ||
6 | <br> | |
7 | ||
8 | Manage resources in Kubernetes clusters. | |
9 | ||
10 | k8s_namespaces = { | |
11 | "my-app": {}, | |
12 | "my-previous-app": {'delete': True}, | |
13 | } | |
14 | ||
15 | k8s_deployments = { | |
16 | "my-app/my-deployment": { | |
17 | 'manifest': { | |
18 | ... | |
19 | }, | |
20 | }, | |
21 | } | |
22 | ||
23 | Note that all item names (except namespaces themselves) must be prefixed with the name of a namespace and a forward slash `/`. Resource items will automatically depend on their namespace if you defined it. | |
24 | ||
25 | <br> | |
26 | ||
27 | ## Resource types | |
28 | ||
29 | <table> | |
30 | <tr><th>Resource type</th><th>Bundle attribute</th><th>apiVersion</th></tr> | |
31 | <tr><td>Config Map</td><td>k8s_configmaps</td><td>v1</td></tr> | |
32 | <tr><td>Cron Job</td><td>k8s_cronjobs</td><td>batch/v1beta1</td></tr> | |
33 | <tr><td>Daemon Set</td><td>k8s_daemonsets</td><td>v1</td></tr> | |
34 | <tr><td>Deployment</td><td>k8s_deployments</td><td>extensions/v1beta1</td></tr> | |
35 | <tr><td>Ingress</td><td>k8s_ingresses</td><td>extensions/v1beta1</td></tr> | |
36 | <tr><td>Namespace</td><td>k8s_namespaces</td><td>v1</td></tr> | |
37 | <tr><td>Persistent Volume Claim</td><td>k8s_pvc</td><td>v1</td></tr> | |
38 | <tr><td>Service</td><td>k8s_services</td><td>v1</td></tr> | |
39 | <tr><td>Secret</td><td>k8s_secrets</td><td>v1</td></tr> | |
40 | <tr><td>StatefulSet</td><td>k8s_statefulsets</td><td>apps/v1</td></tr> | |
41 | </table> | |
42 | ||
43 | <br> | |
44 | ||
45 | # Attribute reference | |
46 | ||
47 | See also: [The list of generic builtin item attributes](../repo/items.py.md#builtin-item-attributes) | |
48 | ||
49 | <hr> | |
50 | ||
51 | ## context | |
52 | ||
53 | Only used with Mako and Jinja2 manifests (see `manifest_processor` below). The values of this dictionary will be available from within the template as variables named after the respective keys. | |
54 | ||
55 | <hr> | |
56 | ||
57 | ## delete | |
58 | ||
59 | Set this to `True` to have the resource removed. | |
60 | ||
61 | <hr> | |
62 | ||
63 | ## manifest | |
64 | ||
65 | The resource definition (as defined in the [Kubernetes API](https://kubernetes.io/docs/reference/)) formatted as a Python dictionary (will be converted to JSON and passed to `kubectl apply`). Mutually exclusive with `manifest_file`. | |
66 | ||
67 | <hr> | |
68 | ||
69 | ## manifest_file | |
70 | ||
71 | Filename of the resource definition relative to the `manifests` subdirectory of your bundle. Filenames must end in `.yaml`, `.yml`, or `.json` to indicate file format. Mutually exclusive with `manifest`. | |
72 | ||
73 | <hr> | |
74 | ||
75 | ## manifest_processor | |
76 | ||
77 | Set this to `jinja2` or `mako` if you want to use a template engine to process your `manifest_file`. Defaults to `None`. |
163 | 163 | |
164 | 164 | `interactive` `True` if this is an interactive apply run. |
165 | 165 | |
166 | To skip a node: | |
167 | ||
168 | ``` | |
169 | from bundlewrap.exceptions import SkipNode | |
170 | raise SkipNode("reason goes here") | |
171 | ``` | |
172 | ||
166 | 173 | --- |
167 | 174 | |
168 | 175 | **`node_apply_end(repo, node, duration=None, interactive=False, result=None, **kwargs)`** |
191 | 198 | |
192 | 199 | `command` The command that will be run on the node. |
193 | 200 | |
201 | To skip a node: | |
202 | ||
203 | ``` | |
204 | from bundlewrap.exceptions import SkipNode | |
205 | raise SkipNode("reason goes here") | |
206 | ``` | |
207 | ||
194 | 208 | --- |
195 | 209 | |
196 | 210 | **`node_run_end(repo, node, command, duration=None, return_code=None, stdout="", stderr="", **kwargs)`** |
39 | 39 | <tr><td><a href="../../items/directory">directory</a></td><td><code>directories</code></td><td>Manages permissions and ownership for directories</td></tr> |
40 | 40 | <tr><td><a href="../../items/file">file</a></td><td><code>files</code></td><td>Manages contents, permissions, and ownership for files</td></tr> |
41 | 41 | <tr><td><a href="../../items/group">group</a></td><td><code>groups</code></td><td>Manages groups by wrapping <code>groupadd</code>, <code>groupmod</code> and <code>groupdel</code></td></tr> |
42 | <tr><td><a href="../../items/k8s">k8s_*</a></td><td><code>k8s_*</code></td><td>Manages resources in Kubernetes clusters by wrapping <code>kubectl</code></td></tr> | |
42 | 43 | <tr><td><a href="../../items/pkg_apt">pkg_apt</a></td><td><code>pkg_apt</code></td><td>Installs and removes packages with APT</td></tr> |
43 | 44 | <tr><td><a href="../../items/pkg_dnf">pkg_dnf</a></td><td><code>pkg_dnf</code></td><td>Installs and removes packages with dnf</td></tr> |
44 | 45 | <tr><td><a href="../../items/pkg_opkg">pkg_opkg</a></td><td><code>pkg_opkg</code></td><td>Installs and removes packages with opkg</td></tr> |
16 | 16 | - File templates: guide/item_file_templates.md |
17 | 17 | - Handling secrets: guide/secrets.md |
18 | 18 | - Locking: guide/locks.md |
19 | - Kubernetes: guide/kubernetes.md | |
19 | 20 | - Custom items: guide/dev_item.md |
20 | 21 | - Writing plugins: guide/dev_plugin.md |
21 | 22 | - Python API: guide/api.md |
37 | 38 | - directory: items/directory.md |
38 | 39 | - file: items/file.md |
39 | 40 | - group: items/group.md |
41 | - k8s_*: items/k8s.md | |
40 | 42 | - pkg_apt: items/pkg_apt.md |
41 | 43 | - pkg_dnf: items/pkg_dnf.md |
42 | 44 | - pkg_openbsd: items/pkg_openbsd.md |
7 | 7 | "Jinja2", |
8 | 8 | "Mako", |
9 | 9 | "passlib", |
10 | "pyyaml", | |
10 | 11 | "requests >= 1.0.0", |
11 | 12 | "six", |
12 | 13 | ] |
15 | 16 | |
16 | 17 | setup( |
17 | 18 | name="bundlewrap", |
18 | version="3.2.1", | |
19 | version="3.3.0", | |
19 | 20 | description="Config management with Python", |
20 | 21 | long_description=( |
21 | 22 | "By allowing for easy and low-overhead config management, BundleWrap fills the gap between complex deployments using Chef or Puppet and old school system administration over SSH.\n" |
0 | # -*- coding: utf-8 -*- | |
1 | from __future__ import unicode_literals | |
2 | ||
3 | from bundlewrap.items.pkg_openbsd import parse_pkg_name | |
4 | from pytest import raises | |
5 | ||
6 | ||
def test_not_found():
    """A package line for an entirely different package must not match."""
    found, parsed_version, parsed_flavor = parse_pkg_name("rsync", "irssi-1.0.4p0-socks")
    assert found is False
10 | ||
11 | ||
def test_only_version():
    """A simple name followed by a plain version parses with an empty flavor."""
    found, parsed_version, parsed_flavor = parse_pkg_name("irssi", "irssi-1.0.4p0")
    assert found is True
    assert parsed_version == "1.0.4p0"
    assert parsed_flavor == ""
17 | ||
18 | ||
def test_version_and_flavor():
    """A simple name with both version and flavor yields both components."""
    found, parsed_version, parsed_flavor = parse_pkg_name("irssi", "irssi-1.0.4p0-socks")
    assert found is True
    assert parsed_version == "1.0.4p0"
    assert parsed_flavor == "socks"
24 | ||
25 | ||
def test_dashname_not_found():
    """A hyphenated package name must not match an unrelated query name."""
    found, parsed_version, parsed_flavor = parse_pkg_name("rsync", "cyrus-sasl-2.1.26p24-pgsql")
    assert found is False
29 | ||
30 | ||
def test_dashname_only_version():
    """A hyphenated name with just a version parses with an empty flavor."""
    found, parsed_version, parsed_flavor = parse_pkg_name("cyrus-sasl", "cyrus-sasl-2.1.26p24")
    assert found is True
    assert parsed_version == "2.1.26p24"
    assert parsed_flavor == ""
36 | ||
37 | ||
def test_dashname_version_and_flavor():
    """A hyphenated name with version and flavor yields all three parts."""
    found, parsed_version, parsed_flavor = parse_pkg_name("cyrus-sasl", "cyrus-sasl-2.1.26p24-pgsql")
    assert found is True
    assert parsed_version == "2.1.26p24"
    assert parsed_flavor == "pgsql"
43 | ||
44 | ||
def test_dashflavor_not_found():
    """A package with a hyphenated flavor must not match an unrelated name."""
    found, parsed_version, parsed_flavor = parse_pkg_name("rsync", "vim-8.0.0987p0-gtk2-lua")
    assert found is False
48 | ||
49 | ||
def test_dashflavor_version_and_flavor():
    """A hyphenated flavor is kept intact (dashes are part of the flavor)."""
    found, parsed_version, parsed_flavor = parse_pkg_name("vim", "vim-8.0.0987p0-gtk2-lua")
    assert found is True
    assert parsed_version == "8.0.0987p0"
    assert parsed_flavor == "gtk2-lua"
55 | ||
56 | ||
def test_dashall_not_found():
    """Dashes in both name and flavor must still not match an unrelated name."""
    found, parsed_version, parsed_flavor = parse_pkg_name("rsync", "graphical-vim-8.0.0987p0-gtk2-lua")
    assert found is False
60 | ||
61 | ||
def test_dashall_found():
    """Dashes in both the name and the flavor parse into all three parts.

    NOTE: this test was originally (mis)named ``test_dashall_not_found``,
    which collided with the test above it — the duplicate definition
    shadowed the first one, so pytest silently skipped it. Renamed so
    both tests are collected and the name matches the asserted outcome.
    """
    found, version, flavor = parse_pkg_name("graphical-vim", "graphical-vim-8.0.0987p0-gtk2-lua")
    assert found is True
    assert version == "8.0.0987p0"
    assert flavor == "gtk2-lua"
67 | ||
68 | ||
def test_illegal_version_ends_with_dash():
    """A trailing dash after the version must be rejected."""
    raises(AssertionError, parse_pkg_name, "dummy", "foo-1.0-")
72 | ||
73 | ||
def test_illegal_flavor_ends_with_dash():
    """A trailing dash after the flavor must be rejected."""
    raises(AssertionError, parse_pkg_name, "dummy", "foo-1.0-bar-")
77 | ||
78 | ||
def test_illegal_no_version():
    """A package string without any version component must be rejected."""
    raises(AssertionError, parse_pkg_name, "dummy", "foo-bar")
82 | ||
83 | ||
def test_illegal_no_name():
    """A package string starting with the version (no name) must be rejected."""
    raises(AssertionError, parse_pkg_name, "dummy", "1.0-flavor")
87 | ||
88 | ||
def test_illegal_only_version():
    """A bare version with no package name must be rejected."""
    raises(AssertionError, parse_pkg_name, "dummy", "1.0")
92 | ||
93 | ||
def test_illegal_empty_line():
    """An empty package line must be rejected."""
    raises(AssertionError, parse_pkg_name, "dummy", "")
0 | 0 | from bundlewrap.metadata import atomic |
1 | from bundlewrap.utils.dicts import map_dict_keys | |
1 | from bundlewrap.utils.dicts import map_dict_keys, reduce_dict | |
2 | 2 | |
3 | 3 | |
4 | 4 | def test_dictmap(): |
21 | 21 | ("key2", "key5", "key6"), |
22 | 22 | ("key2", "key7"), |
23 | 23 | ]) |
24 | ||
25 | ||
def test_reduce_dict_two_lists():
    """Two plain lists are not reduced; the full list is returned unchanged."""
    full = [1, 2, 3]
    template = [1, 2]
    assert reduce_dict(full, template) == [1, 2, 3]
31 | ||
32 | ||
def test_reduce_dict_list_and_dict():
    """A list against a dict template is returned unchanged."""
    full = [1, 2, 3]
    template = {'a': 4}
    assert reduce_dict(full, template) == [1, 2, 3]
38 | ||
39 | ||
def test_reduce_dict_simple():
    """Only keys present in the template survive; values come from the full dict."""
    full = {'a': 1, 'b': 2}
    template = {'a': 3}
    assert reduce_dict(full, template) == {'a': 1}
45 | ||
46 | ||
def test_reduce_dict_nested():
    """Dicts nested inside lists are reduced recursively; template keys
    missing from the full dict (here: 'e') are simply ignored."""
    source = {
        'a': [{'b': 1, 'c': 2}],
        'd': 3,
    }
    template = {
        'a': [{'b': None}],
        'd': None,
        'e': None,
    }
    expected = {
        'a': [{'b': 1}],
        'd': 3,
    }
    assert reduce_dict(source, template) == expected