New upstream release
Jonathan Carter
5 years ago
0 | # 3.5.2 | |
1 | ||
2 | 2018-12-11 | |
3 | ||
4 | * fixed IO activation/deactivation when using bw as a library | |
5 | * fixed `atomic()` being removed prematurely during metadata processing | |
6 | ||
7 | ||
0 | 8 | # 3.5.1 |
1 | 9 | |
2 | 10 | 2018-07-08 |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals

# Release version of BundleWrap as an int tuple; bump here for releases.
VERSION = (3, 5, 2)
# Dotted human-readable form, e.g. "3.5.2".
VERSION_STRING = ".".join(str(component) for component in VERSION)
167 | 167 | def sdict(self): |
168 | 168 | result = run_local(self._kubectl + ["get", "-o", "json", self.KIND, self.resource_name]) |
169 | 169 | if result.return_code == 0: |
170 | full_json_response = json.loads(result.stdout) | |
170 | full_json_response = json.loads(result.stdout.decode('utf-8')) | |
171 | 171 | if full_json_response.get("status", {}).get("phase") == "Terminating": |
172 | 172 | # this resource is currently being deleted, consider it gone |
173 | 173 | return None |
8 | 8 | from socket import gethostname |
9 | 9 | from time import time |
10 | 10 | |
11 | from .exceptions import NodeLockedException | |
11 | from .exceptions import NodeLockedException, RemoteException | |
12 | 12 | from .utils import cached_property, tempfile |
13 | 13 | from .utils.text import ( |
14 | 14 | blue, |
19 | 19 | parse_duration, |
20 | 20 | red, |
21 | 21 | wrap_question, |
22 | yellow, | |
22 | 23 | ) |
23 | 24 | from .utils.ui import io |
24 | 25 | |
27 | 28 | HARD_LOCK_FILE = HARD_LOCK_PATH + "/info" |
28 | 29 | SOFT_LOCK_PATH = "/tmp/bundlewrap.softlock.d" |
29 | 30 | SOFT_LOCK_FILE = "/tmp/bundlewrap.softlock.d/{id}" |
31 | ||
32 | ||
def get_hard_lock_info(node, local_path):
    """
    Download the hard lock info file from the given node into
    local_path and return its parsed JSON contents as a dict.

    Returns an empty dict (after printing a warning to stderr) when the
    file cannot be downloaded or does not contain valid JSON.
    """
    try:
        node.download(HARD_LOCK_FILE, local_path)
        with open(local_path, 'r') as fp:
            return json.load(fp)
    except (RemoteException, ValueError):
        # RemoteException: download failed (e.g. lock file missing);
        # ValueError: file was downloaded but is not valid JSON.
        io.stderr(_(
            "{x} {node} corrupted hard lock: "
            "unable to read or parse lock file contents "
            "(clear it with `bw run {node} 'rm -Rf {path}'`)"
        ).format(
            node=bold(node.name),
            path=HARD_LOCK_PATH,
            # BUG FIX: the template references {x}, but this value was
            # previously passed as `warning=`, so str.format() raised
            # KeyError('x') exactly when this warning should be shown.
            x=yellow("!"),
        ))
        return {}
30 | 49 | |
31 | 50 | |
32 | 51 | def identity(): |
51 | 70 | with io.job(_("{node} checking hard lock status").format(node=bold(self.node.name))): |
52 | 71 | result = self.node.run("mkdir " + quote(HARD_LOCK_PATH), may_fail=True) |
53 | 72 | if result.return_code != 0: |
54 | self.node.download(HARD_LOCK_FILE, local_path) | |
55 | with open(local_path, 'r') as f: | |
56 | try: | |
57 | info = json.loads(f.read()) | |
58 | except: | |
59 | io.stderr(_( | |
60 | "{warning} corrupted lock on {node}: " | |
61 | "unable to read or parse lock file contents " | |
62 | "(clear it with `bw run {node} 'rm -R {path}'`)" | |
63 | ).format( | |
64 | node=self.node.name, | |
65 | path=HARD_LOCK_FILE, | |
66 | warning=red(_("WARNING")), | |
67 | )) | |
68 | info = {} | |
73 | info = get_hard_lock_info(self.node, local_path) | |
69 | 74 | expired = False |
70 | 75 | try: |
71 | 76 | d = info['date'] |
266 | 266 | if isinstance(obj, METADATA_TYPES): |
267 | 267 | return obj |
268 | 268 | elif isinstance(obj, dict): |
269 | new_obj = {} | |
269 | if isinstance(obj, ATOMIC_TYPES[dict]): | |
270 | new_obj = atomic({}) | |
271 | else: | |
272 | new_obj = {} | |
270 | 273 | for key, value in obj.items(): |
271 | 274 | if not isinstance(key, METADATA_TYPES): |
272 | 275 | raise ValueError(_("illegal metadata key type: {}").format(repr(key))) |
273 | 276 | new_key = copy(key) |
274 | 277 | new_obj[new_key] = deepcopy_metadata(value) |
275 | 278 | elif isinstance(obj, (list, tuple)): |
276 | new_obj = [] | |
279 | if isinstance(obj, (ATOMIC_TYPES[list], ATOMIC_TYPES[tuple])): | |
280 | new_obj = atomic([]) | |
281 | else: | |
282 | new_obj = [] | |
277 | 283 | for member in obj: |
278 | 284 | new_obj.append(deepcopy_metadata(member)) |
279 | 285 | elif isinstance(obj, set): |
280 | new_obj = set() | |
286 | if isinstance(obj, ATOMIC_TYPES[set]): | |
287 | new_obj = atomic(set()) | |
288 | else: | |
289 | new_obj = set() | |
281 | 290 | for member in obj: |
282 | 291 | new_obj.add(deepcopy_metadata(member)) |
283 | 292 | else: |
66 | 66 | INITIAL_CONTENT = { |
67 | 67 | FILENAME_GROUPS: _(""" |
68 | 68 | groups = { |
69 | #'group1': { | |
69 | #'group-1': { | |
70 | 70 | # 'bundles': ( |
71 | # 'bundle1', | |
71 | # 'bundle-1', | |
72 | 72 | # ), |
73 | 73 | # 'members': ( |
74 | # 'node1', | |
74 | # 'node-1', | |
75 | 75 | # ), |
76 | 76 | # 'subgroups': ( |
77 | # 'group2', | |
77 | # 'group-2', | |
78 | 78 | # ), |
79 | 79 | #}, |
80 | 80 | 'all': { |
87 | 87 | |
88 | 88 | FILENAME_NODES: _(""" |
89 | 89 | nodes = { |
90 | 'node1': { | |
90 | 'node-1': { | |
91 | 91 | 'hostname': "localhost", |
92 | 92 | }, |
93 | 93 | } |
178 | 178 | self._spinner = spinner() |
179 | 179 | self._last_spinner_character = next(self._spinner) |
180 | 180 | self._last_spinner_update = 0 |
181 | self._signal_handler_thread = Thread( | |
182 | target=self._signal_handler_thread_body, | |
183 | ) | |
184 | # daemon mode is required because we need to keep the thread | |
185 | # around until the end of a soft shutdown to wait for a hard | |
186 | # shutdown signal, but don't have a feasible way of stopping | |
187 | # the thread once the soft shutdown has completed | |
188 | self._signal_handler_thread.daemon = True | |
181 | self._signal_handler_thread = None | |
189 | 182 | self._child_pids = [] |
190 | 183 | self._status_line_present = False |
191 | 184 | self._waiting_for_input = False |
200 | 193 | getpid(), |
201 | 194 | ), |
202 | 195 | ), 'a') |
196 | self._signal_handler_thread = Thread( | |
197 | target=self._signal_handler_thread_body, | |
198 | ) | |
199 | # daemon mode is required because we need to keep the thread | |
200 | # around until the end of a soft shutdown to wait for a hard | |
201 | # shutdown signal, but don't have a feasible way of stopping | |
202 | # the thread once the soft shutdown has completed | |
203 | self._signal_handler_thread.daemon = True | |
203 | 204 | self._signal_handler_thread.start() |
204 | 205 | signal(SIGINT, sigint_handler) |
205 | 206 | signal(SIGQUIT, sigquit_handler) |
0 | bundlewrap (3.5.2-1) unstable; urgency=medium | |
1 | ||
2 | * New upstream release | |
3 | * Update standards version to 4.2.1 | |
4 | ||
5 | -- Jonathan Carter <jcc@debian.org> Sun, 16 Dec 2018 14:24:01 +0200 | |
6 | ||
0 | 7 | bundlewrap (3.5.1-1) unstable; urgency=medium |
1 | 8 | |
2 | 9 | * New upstream release |
63 | 63 | |
64 | 64 | You can also encrypt entire files: |
65 | 65 | |
66 | <pre><code class="nohighlight">$ bw debug -c "repo.vault.encrypt_file('/my/secret.file', 'encrypted.file'))"</code></pre> | |
66 | <pre><code class="nohighlight">$ bw debug -c "repo.vault.encrypt_file('/my/secret.file', 'encrypted.file')"</code></pre> | |
67 | 67 | |
68 | 68 | <div class="alert alert-info">Encrypted files are always read and written relative to the <code>data/</code> subdirectory of your repo.</div> |
69 | 69 |
24 | 24 | <a href="/items/file/#source">data/</a> |
25 | 25 | <a href="/repo/hooks">hooks/</a> |
26 | 26 | <a href="/guide/dev_item">items/</a> |
27 | <a href="/repo/libs">libs/</a> | |
27 | 28 | <a href="/guide/secrets">.secrets.cfg</a> |
28 | 29 | <a href="/repo/groups.py">groups.py</a> |
29 | 30 | <a href="/repo/nodes.py">nodes.py</a> |
37 | 37 | Especially in larger installations, a single nodes.py can become inconvenient to work with. This example reads nodes from a `nodes/` directory. |
38 | 38 | |
39 | 39 | from glob import glob |
40 | from os.path import join | |
40 | 41 | |
41 | 42 | nodes = {} |
42 | for node in glob("nodes/*.py"): | |
43 | for node in glob(join(repo_path, "nodes", "*.py")): | |
43 | 44 | with open(node, 'r') as f: |
44 | 45 | exec(f.read()) |
45 | 46 |
16 | 16 | |
17 | 17 | setup( |
18 | 18 | name="bundlewrap", |
19 | version="3.5.1", | |
19 | version="3.5.2", | |
20 | 20 | description="Config management with Python", |
21 | 21 | long_description=( |
22 | 22 | "By allowing for easy and low-overhead config management, BundleWrap fills the gap between complex deployments using Chef or Puppet and old school system administration over SSH.\n" |
192 | 192 | assert rcode == 0 |
193 | 193 | |
194 | 194 | |
def test_metadatapy_defaults_atomic(tmpdir):
    # Regression test: node metadata wrapped in atomic() must replace
    # DEFAULTS from a metadata processor wholesale instead of being
    # merged with them (the processor's extra "baz" key must NOT appear
    # in the final metadata).
    make_repo(
        tmpdir,
        bundles={"test": {}},
    )
    # Node declares atomic({"bar": "baz"}) under the "foo" key.
    with open(join(str(tmpdir), "nodes.py"), 'w') as f:
        f.write(
"""
from bundlewrap.metadata import atomic

nodes = {
    "node1": {
        'bundles': ["test"],
        'metadata': {"foo": atomic({"bar": "baz"})},
    },
}
""")
    # Bundle processor offers conflicting DEFAULTS for the same key.
    with open(join(str(tmpdir), "bundles", "test", "metadata.py"), 'w') as f:
        f.write(
"""@metadata_processor
def foo(metadata):
    return {
        "foo": {
            "bar": "frob",
            "baz": "gobble",
        },
    }, DONE, DEFAULTS
""")
    stdout, stderr, rcode = run("bw metadata node1", path=str(tmpdir))
    # atomic() node metadata wins unchanged; no keys merged in from the
    # processor's defaults.
    assert loads(stdout.decode()) == {
        "foo": {"bar": "baz"},
    }
    assert stderr == b""
    assert rcode == 0
229 | ||
230 | ||
195 | 231 | def test_metadatapy_update(tmpdir): |
196 | 232 | make_repo( |
197 | 233 | tmpdir, |