diff --git a/.gitignore b/.gitignore
new file mode 100644
index 0000000..fee3ff2
--- /dev/null
+++ b/.gitignore
@@ -0,0 +1,3 @@
+/dist/
+/docs/build/
+/tests/.coveralls.yml
diff --git a/.travis.yml b/.travis.yml
new file mode 100644
index 0000000..3608788
--- /dev/null
+++ b/.travis.yml
@@ -0,0 +1,21 @@
+language: python
+python:
+- 2.7
+- 3.3
+- 3.4
+- 3.5
+- 3.6
+install:
+- pip install .
+before_script:
+- ssh-keygen -f ~/.ssh/id_rsa -N ""
+- cp ~/.ssh/id_rsa.pub ~/.ssh/authorized_keys
+- ssh -o StrictHostKeyChecking=no localhost id
+script:
+- py.test tests
+notifications:
+  irc:
+    channels:
+    - "irc.freenode.org#bundlewrap"
+    use_notice: true
+    skip_join: true
diff --git a/AUTHORS b/AUTHORS
new file mode 100644
index 0000000..fae7aa0
--- /dev/null
+++ b/AUTHORS
@@ -0,0 +1,7 @@
+# By adding your name to this file you agree to the Copyright Assignment
+# Agreement found in the CAA.md file in this repository.
+
+Torsten Rehn
+Peter Hofmann
+Tim Buchwaldt
+Rico Ullmann
diff --git a/CAA.md b/CAA.md
new file mode 100644
index 0000000..fc6cbe8
--- /dev/null
+++ b/CAA.md
@@ -0,0 +1,95 @@
+# BundleWrap Individual Contributor Copyright Assignment Agreement
+
+Thank you for your interest in contributing to the BundleWrap open-source project, currently owned and represented by [Torsten Rehn](mailto:torsten@rehn.email) ("We" or "Us").
+
+This contributor agreement ("Agreement") documents the rights granted by contributors to Us. To make this document effective, please sign it and send it to Us by email or electronic submission, following the instructions at [http://docs.bundlewrap.org/misc/contributing](http://docs.bundlewrap.org/misc/contributing). This is a legally binding document, so please read it carefully before agreeing to it. The Agreement may cover more than one software project managed by Us.
+
+## 1. Definitions
+
+"You" means the individual who Submits a Contribution to Us.
+
+"Contribution" means any work of authorship that is Submitted by You to Us in which You own or assert ownership of the Copyright. If You do not own the Copyright in the entire work of authorship, please follow the instructions in [http://docs.bundlewrap.org/misc/contributing](http://docs.bundlewrap.org/misc/contributing).
+
+"Copyright" means all rights protecting works of authorship owned or controlled by You, including copyright, moral and neighboring rights, as appropriate, for the full term of their existence including any extensions by You.
+
+"Material" means the work of authorship which is made available by Us to third parties. When this Agreement covers more than one software project, the Material means the work of authorship to which the Contribution was Submitted. After You Submit the Contribution, it may be included in the Material.
+
+"Submit" means any form of electronic, verbal, or written communication sent to Us or our representatives, including but not limited to electronic mailing lists, source code control systems, and issue tracking systems that are managed by, or on behalf of, Us for the purpose of discussing and improving the Material, but excluding communication that is conspicuously marked or otherwise designated in writing by You as "Not a Contribution."
+
+"Submission Date" means the date on which You Submit a Contribution to Us.
+
+"Effective Date" means the date You execute this Agreement or the date You first Submit a Contribution to Us, whichever is earlier.
+
+## 2. Grant of Rights
+
+### 2.1 Copyright Assignment
+
+1) At the time the Contribution is Submitted, You assign to Us all right, title, and interest worldwide in all Copyright covering the Contribution; provided that this transfer is conditioned upon compliance with Section 2.3.
+
+2) To the extent that any of the rights in Section 2.1.1 cannot be assigned by You to Us, You grant to Us a perpetual, worldwide, exclusive, royalty-free, transferable, irrevocable license under such non-assigned rights, with rights to sublicense through multiple tiers of sublicensees, to practice such non-assigned rights, including, but not limited to, the right to reproduce, modify, display, perform and distribute the Contribution; provided that this license is conditioned upon compliance with Section 2.3.
+
+3) To the extent that any of the rights in Section 2.1.1 can neither be assigned nor licensed by You to Us, You irrevocably waive and agree never to assert such rights against Us, any of our successors in interest, or any of our licensees, either direct or indirect; provided that this agreement not to assert is conditioned upon compliance with Section 2.3.
+
+4) Upon such transfer of rights to Us, to the maximum extent possible, We immediately grant to You a perpetual, worldwide, non-exclusive, royalty-free, transferable, irrevocable license under such rights covering the Contribution, with rights to sublicense through multiple tiers of sublicensees, to reproduce, modify, display, perform, and distribute the Contribution. The intention of the parties is that this license will be as broad as possible and to provide You with rights as similar as possible to the owner of the rights that You transferred. This license back is limited to the Contribution and does not provide any rights to the Material.
+
+### 2.2 Patent License
+
+For patent claims including, without limitation, method, process, and apparatus claims which You own, control or have the right to grant, now or in the future, You grant to Us a perpetual, worldwide, non-exclusive, transferable, royalty-free, irrevocable patent license, with the right to sublicense these rights to multiple tiers of sublicensees, to make, have made, use, sell, offer for sale, import and otherwise transfer the Contribution and the Contribution in combination with the Material (and portions of such combination). This license is granted only to the extent that the exercise of the licensed rights infringes such patent claims; and provided that this license is conditioned upon compliance with Section 2.3.
+
+### 2.3 Outbound License
+
+As a condition on the grant of rights in Sections 2.1 and 2.2, We agree to license the Contribution only under the terms of the license or licenses which We are using on the Submission Date for the Material (including any rights to adopt any future version of a license if permitted).
+
+### 2.4 Moral Rights
+
+If moral rights apply to the Contribution, to the maximum extent permitted by law, You waive and agree not to assert such moral rights against Us or our successors in interest, or any of our licensees, either direct or indirect.
+
+### 2.5 Our Rights
+
+You acknowledge that We are not obligated to use Your Contribution as part of the Material and may decide to include any Contribution We consider appropriate.
+
+### 2.6 Reservation of Rights
+
+Any rights not expressly assigned or licensed under this section are expressly reserved by You.
+
+## 3. Agreement
+
+You confirm that:
+
+1) You have the legal authority to enter into this Agreement.
+
+2) You own the Copyright and patent claims covering the Contribution which are required to grant the rights under Section 2.
+
+3) The grant of rights under Section 2 does not violate any grant of rights which You have made to third parties, including Your employer. If You are an employee, You have had Your employer approve this Agreement or sign the Entity version of this document. If You are less than eighteen years old, please have Your parents or guardian sign the Agreement.
+
+4) You have followed the instructions in [http://docs.bundlewrap.org/misc/contributing](http://docs.bundlewrap.org/misc/contributing), if You do not own the Copyright in the entire work of authorship Submitted.
+
+## 4. Disclaimer
+
+EXCEPT FOR THE EXPRESS WARRANTIES IN SECTION 3, THE CONTRIBUTION IS PROVIDED "AS IS". MORE PARTICULARLY, ALL EXPRESS OR IMPLIED WARRANTIES INCLUDING, WITHOUT LIMITATION, ANY IMPLIED WARRANTY OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT ARE EXPRESSLY DISCLAIMED BY YOU TO US AND BY US TO YOU. TO THE EXTENT THAT ANY SUCH WARRANTIES CANNOT BE DISCLAIMED, SUCH WARRANTY IS LIMITED IN DURATION TO THE MINIMUM PERIOD PERMITTED BY LAW.
+
+## 5. Consequential Damage Waiver
+
+TO THE MAXIMUM EXTENT PERMITTED BY APPLICABLE LAW, IN NO EVENT WILL YOU OR US BE LIABLE FOR ANY LOSS OF PROFITS, LOSS OF ANTICIPATED SAVINGS, LOSS OF DATA, INDIRECT, SPECIAL, INCIDENTAL, CONSEQUENTIAL AND EXEMPLARY DAMAGES ARISING OUT OF THIS AGREEMENT REGARDLESS OF THE LEGAL OR EQUITABLE THEORY (CONTRACT, TORT OR OTHERWISE) UPON WHICH THE CLAIM IS BASED.
+
+## 6. Miscellaneous
+
+### 6.1
+
+This Agreement will be governed by and construed in accordance with the laws of Germany excluding its conflicts of law provisions. Under certain circumstances, the governing law in this section might be superseded by the United Nations Convention on Contracts for the International Sale of Goods ("UN Convention") and the parties intend to avoid the application of the UN Convention to this Agreement and, thus, exclude the application of the UN Convention in its entirety to this Agreement.
+
+### 6.2
+
+This Agreement sets out the entire agreement between You and Us for Your Contributions to Us and overrides all other agreements or understandings.
+
+### 6.3
+
+If You or We assign the rights or obligations received through this Agreement to a third party, as a condition of the assignment, that third party must agree in writing to abide by all the rights and obligations in the Agreement.
+
+### 6.4
+
+The failure of either party to require performance by the other party of any provision of this Agreement in one situation shall not affect the right of a party to require such performance at any time in the future. A waiver of performance under a provision in one situation shall not be considered a waiver of the performance of the provision in the future or a waiver of the provision in its entirety.
+
+### 6.5
+
+If any provision of this Agreement is found void and unenforceable, such provision will be replaced to the extent possible with a provision that comes closest to the meaning of the original provision and which is enforceable. The terms and conditions set forth in this Agreement shall apply notwithstanding any failure of essential purpose of this Agreement or any limited remedy to the maximum extent possible under law.
diff --git a/CHANGELOG.md b/CHANGELOG.md
new file mode 100644
index 0000000..309cf49
--- /dev/null
+++ b/CHANGELOG.md
@@ -0,0 +1,536 @@
+# 2.12.2
+
+2016-12-23
+
+* added support for Python 3.6
+* changed diff line length limit from 128 to 1024 characters
+* fixed deadlock in Group.members_remove
+* fixed unknown subgroups not being detected properly
+
+
+# 2.12.1
+
+2016-12-20
+
+* fixed exception when changing owner of postgres databases
+* fixed postgres roles requiring a password even when deleted
+* fixed incorrect exit codes in some situations with `bw test`
+
+
+# 2.12.0
+
+2016-11-28
+
+* added `BW_DEBUG_LOG_DIR`
+* improved reporting of action failures
+* fixed `bw plot groups` and `bw plot groups-for-node`
+* fixed access to partial metadata in `Group.members_add` and `_remove`
+
+
+# 2.11.0
+
+2016-11-14
+
+* added `bw nodes --inline`
+* added `Group.members_add` and `.members_remove`
+* fixed symlinks not overwriting other path types
+* fixed `precedes` and `triggers` for bundle, tag and type items
+* fixed diffs for sets and tuples
+
+
+# 2.10.0
+
+2016-11-03
+
+* added pkg_dnf items
+* added rudimentary string operations on Faults
+* added Fault documentation
+* added `bw test --config-determinism` and `--metadata-determinism`
+* improved debugging facilities for metadata processor loops
+* improved handling and reporting of missing Faults
+
+
+# 2.9.1
+
+2016-10-18
+
+* fixed `bw verify` without `-S`
+* fixed asking for changes to directory items
+
+
+# 2.9.0
+
+2016-10-17
+
+* added directory purging
+* added `bw --adhoc-nodes`
+* improved handling of unknown nodes/groups
+* improvements to `bw nodes`
+
+
+# 2.8.0
+
+2016-09-12
+
+* added `BW_HARDLOCK_EXPIRY` env var
+* added `bw hash --group`
+* added `subgroup_patterns`
+* added `bw test --ignore-missing-faults`
+* added `node.cmd_wrapper_inner` and `_outer`
+* added `node.os_version`
+* fixed exception handling under Python 2
+* fixed partial metadata not being completed in some cases
+
+
+# 2.7.1
+
+2016-07-15
+
+* improved responsiveness to SIGINT during metadata generation
+* fixed SIGINT handling on Python 2.7
+
+
+# 2.7.0
+
+2016-07-15
+
+* `bw lock show` can now show entire groups
+* `bw` can now be invoked from any subdirectory of a repository
+* added `bw hash --metadata`
+* added `bw nodes --attrs`
+* added `repo.vault.format`
+* added graceful handling of SIGINT
+* added log level indicator to debug output
+* added `node.dummy` attribute
+* added `BW_SSH_ARGS` environment variable
+* `bash` is no longer required on nodes
+* `node.os` and `node.use_shadow_passwords` can now be set at the group level
+* sets are now allowed in metadata
+* optimized execution of metadata processors
+* fixed `bw apply --force` with unlocked nodes
+* fixed `bw test` not detecting merge of lists in unrelated groups' metadata
+* fixed installation of some pkg_openbsd
+* fixed piping into `bw apply -i`
+* fixed handling user names with non-ASCII characters
+* fixed skipped and failed items sometimes being handled incorrectly
+* fixed error with autoskipped triggered items
+* fixed skip reason for some soft locked items
+
+
+# 2.6.1
+
+2016-05-29
+
+* fixed accidentally changed default salt for user items
+
+
+# 2.6.0
+
+2016-05-29
+
+* added support for OpenBSD packages and services
+* added soft locking mechanism
+* added `enabled` option for `svc_systemd`
+* fixed running compound commands
+
+
+# 2.5.2
+
+2016-05-04
+
+* fixed compatibility with some exotic node shells
+* fixed quitting at question prompts
+* fixed creating files with content_type 'any'
+
+
+# 2.5.1
+
+2016-04-07
+
+* fixed false positive on metadata collision check
+
+
+# 2.5.0
+
+2016-04-04
+
+* improved performance and memory usage
+* added metadata conflict detection to `bw test`
+* added metadata type validation
+* added `BW_VAULT_DUMMY_MODE`
+* added q(uit) option to questions
+* output disabled by default when used as a library
+* fixed `bw hash -d`
+* fixed excessive numbers of open files
+* fixed partial metadata access from metadata processors
+
+
+# 2.4.0
+
+2016-03-20
+
+* added `bw plot group`
+* added `bw plot groups-for-node`
+* `bw` will now check requirements.txt in your repo before doing anything
+* improved output of `--help`
+* metadata processors now have access to partial node metadata while it is being compiled
+* fixed `bw test` when using more than the default number of node workers
+* fixed passing Faults to `postgres_role` and `users`
+* fixed detection of non-existent paths on CentOS and others
+
+
+# 2.3.1
+
+2016-03-15
+
+* fixed handling of 'generate' keys for `repo.vault`
+
+
+# 2.3.0
+
+2016-03-15
+
+* added `repo.vault` for handling secrets
+* circular dependencies are now detected by `bw test`
+* fixed handling of broken pipes in internal subprocesses
+* fixed previous input being read when asking a question
+* fixed reading non-ASCII templates on systems with ASCII locale
+* `bw apply` and `bw verify` now exit with return code 1 if there are errors
+
+
+# 2.2.0
+
+2016-03-02
+
+* added item tagging
+* added `bw apply --skip`
+* fixed newline warning on long diff files
+* fixed calling `bw` without arguments
+
+
+# 2.1.0
+
+2016-02-25
+
+* added `bw stats`
+* added `bw items --file-preview`
+* added hooks for `bw test`
+* reason for skipping an item is now displayed in regular output
+* fixed exception handling for invalid cdicts/sdicts
+* fixed handling of SSH errors
+* fixed broken diffs caused by partial file downloads
+* fixed interactive prompts sometimes not reading input correctly
+
+
+# 2.0.1
+
+2016-02-22
+
+* fixed display of failed actions
+* updated display of interactive lock override prompt
+* improved robustness of internal output subsystem
+
+
+# 2.0.0
+
+2016-02-22
+
+* added support for Python 3.3+
+* switched from Fabric/Paramiko to OpenSSH
+* removed SSH and sudo passwords **(BACKWARDS INCOMPATIBLE)**
+* metadata is now merged recursively **(BACKWARDS INCOMPATIBLE)**
+* file items: the source attribute now has a default **(BACKWARDS INCOMPATIBLE)**
+* file items: the default content_type is now text **(BACKWARDS INCOMPATIBLE)**
+* reworked command line options for `bw verify` **(BACKWARDS INCOMPATIBLE)**
+* `cascade_skip` now defaults to `False` if the item is triggered or uses `unless` **(BACKWARDS INCOMPATIBLE)**
+* `bw verify` and `bw apply` now show incorrect/fixed/failed attributes
+* `bw apply` now uses a status line to show current activity
+* generally improved output formatting
+
+
+# 1.6.0
+
+2016-02-22
+
+* added `bw migrate` **(will be removed in 2.0.0)**
+* added warnings for upgrading to 2.0.0 **(will be removed in 2.0.0)**
+
+
+# 1.5.1
+
+2015-06-11
+
+* cleaned up local lock files
+* fixed detection of some types of directories
+* fixed exception spam when trying to load internal attributes as libs
+
+
+# 1.5.0
+
+2015-05-10
+
+* added postgres_db and postgres_role items
+* added `bw verify --only-needs-fixing`
+* added `bw verify --summary`
+* added `Repository.nodes_in_group()`
+* added `verify_with` attribute for file items
+* libs now have access to `repo_path`
+* user items: fixed asking for password hash change
+* file items: fixed `bw items -w` with `content_type: 'any'`
+* improved various error messages
+
+
+# 1.4.0
+
+2015-03-02
+
+* added virtualenv support for pkg_pip
+* added reverse syntax for triggers and preceded_by
+* lots of fixes and internal improvements around preceded_by
+
+
+# 1.3.0
+
+2014-12-31
+
+* added pkg_pip items
+* added pkg_yum items
+* added pkg_zypper items
+* added preceded_by item attribute
+* fixed detection of non-existing files on CentOS/RHEL
+* fixed detection of special files on Arch Linux
+* fixed handling UTF-8 output of failed commands
+
+
+# 1.2.2
+
+2014-10-27
+
+* fixed item classes not being restored after repo serialization
+
+
+# 1.2.1
+
+2014-10-21
+
+* fixed a critical bug in bundle serialization
+
+
+# 1.2.0
+
+2014-10-19
+
+* added item generators
+* added `bw test --plugin-conflict-error`
+* added `bw debug -c`
+* improved unicode handling
+* fixed logging issues
+
+
+# 1.1.0
+
+2014-08-11
+
+* added metadata processors
+* added `bw metadata`
+* added `bw apply --profiling`
+* added Repository.nodes_in_all_groups()
+* added Repository.nodes_in_any_group()
+* added the data subdirectory
+* improved various error messages
+
+
+# 1.0.0
+
+2014-07-19
+
+* API will now remain stable until 2.0.0
+* added hooks for actions
+* added support for Jinja2 templates
+* fixed some CLI commands not terminating correctly
+
+
+# 0.14.0
+
+2014-07-13
+
+* files, directories and symlinks don't care about ownership and mode by
+  default **(BACKWARDS INCOMPATIBLE)**
+* Mako file templates can now use include
+
+
+# 0.13.0
+
+2014-06-19
+
+* added password-based SSH/sudo authentication
+* fixed symlink items not checking existing link targets
+* fixed exception when triggering skipped items
+* output is now prefixed with `node:bundle:item_type:item_name`
+* `bw repo debug` is now a top-level command **(BACKWARDS INCOMPATIBLE)**
+* `bw repo plot` is now a top-level command **(BACKWARDS INCOMPATIBLE)**
+* `bw repo test` is now a top-level command **(BACKWARDS INCOMPATIBLE)**
+
+
+# 0.12.0
+
+2014-05-11
+
+* added plugins
+* added group metadata
+* user and group attributes are now optional
+* user groups may no longer contain primary group **(BACKWARDS INCOMPATIBLE)**
+* improvements to logging and output
+* fixed a critical bug preventing per-node customization of bundles
+* fixed pkg_apt choking on interactive dpkg prompts
+* fixed hashing of plaintext user passwords without salt
+
+
+# 0.11.2
+
+2014-04-02
+
+* packaging fixes only
+
+
+# 0.11.1
+
+2014-04-02
+
+* packaging fixes only
+
+
+# 0.11.0
+
+2014-03-23
+
+* renamed builtin item attribute 'depends' to 'needs' **(BACKWARDS INCOMPATIBLE)**
+* removed PARALLEL_APPLY on custom items in favor of BLOCK_CONCURRENT **(BACKWARDS INCOMPATIBLE)**
+* added builtin item attribute 'needed_by'
+* added canned actions for services
+* added deletion of files, groups and users
+* simplified output of `bw apply`
+* `bw repo test` now also verifies dependencies
+* fixed `bw repo test` for files without a template
+* fixed triggered actions being run every time
+* various fixes and improvements around dependency handling
+
+
+# 0.10.0
+
+2014-03-08
+
+* removed the 'timing' attribute on actions **(BACKWARDS INCOMPATIBLE)**
+* actions are now first-class items
+* items can now trigger each other (most useful with actions)
+* added System V service item
+* added `bw repo test`
+* added negated bundle and group selectors to CLI
+* can now manage files while ignoring their content
+* more control over how actions are run in interactive mode
+* bundles can now be assigned to nodes directly
+* fixed creating symlinks in nonexistent unmanaged directories
+
+
+# 0.9.0
+
+2014-02-24
+
+* added 'unless' for actions
+* improved exception handling
+* fixed actions not triggering in noninteractive mode
+* fixed noninteractive installation of Debian packages
+* slightly more verbose output
+
+
+# 0.8.0
+
+2014-02-21
+
+* move from Alpha into Beta stage
+* added builtin item attribute 'unless'
+* added lightweight git/hg/bzr integration
+* added -f switch to `bw apply`
+* template context can now be customized
+* added Node.has_bundle, .in_group etc.
+* fixed a LineBuffer bug
+* prevented output of some extraneous whitespace
+
+
+# 0.7.0
+
+2014-02-16
+
+* added safety checks to prevent diffs of unwieldy files
+* added a "text" content type for files
+* added support for arbitrary encodings in managed files
+* added systemd and Upstart service items
+* added hooks
+* added action triggers (for service restarts after config changes)
+* lots of new documentation
+* better error messages when defining duplicate items
+* better dependencies between files, directories and symlinks
+* fixed a bug that prevented managing /etc/sudoers
+
+
+# 0.6.0
+
+2014-01-01
+
+* added actions
+* reworked group patterns **(BACKWARDS INCOMPATIBLE)**
+* reworked output verbosity **(BACKWARDS INCOMPATIBLE)**
+* added support for libs directory
+* fixed high CPU load while waiting for interactive response
+* various other minor fixes and improvements
+
+
+# 0.5.0
+
+2013-11-09
+
+* manage users and groups
+* manage symlinks
+* node locking
+* PARALLEL_APPLY setting for items
+* manage Arch Linux packages
+* plot item dependencies
+* encoding fixes for file handling
+
+
+# 0.4.0
+
+2013-08-25
+
+* manage directories
+* manage Debian packages
+* UI improvements
+
+
+# 0.3.0
+
+2013-08-04
+
+* basic file management
+* concurrency improvements
+* logging/output improvements
+* use Fabric for remote operations
+* lots of other small improvements
+
+
+# 0.2.0
+
+2013-07-12
+
+* bundle management
+* item APIs
+* new concurrency helpers
+
+
+# 0.1.0
+
+2013-06-16
+
+* initial release
+* node and group management
+* running commands on nodes
diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md
new file mode 100644
index 0000000..c83f4ed
--- /dev/null
+++ b/CONTRIBUTING.md
@@ -0,0 +1 @@
+Please see [the docs on contributing](http://docs.bundlewrap.org/misc/contributing).
diff --git a/LICENSE b/LICENSE
new file mode 100644
index 0000000..94a9ed0
--- /dev/null
+++ b/LICENSE
@@ -0,0 +1,674 @@
+                    GNU GENERAL PUBLIC LICENSE
+                       Version 3, 29 June 2007
+
+ Copyright (C) 2007 Free Software Foundation, Inc. <http://fsf.org/>
+ Everyone is permitted to copy and distribute verbatim copies
+ of this license document, but changing it is not allowed.
+
+                            Preamble
+
+  The GNU General Public License is a free, copyleft license for
+software and other kinds of works.
+
+  The licenses for most software and other practical works are designed
+to take away your freedom to share and change the works.  By contrast,
+the GNU General Public License is intended to guarantee your freedom to
+share and change all versions of a program--to make sure it remains free
+software for all its users.  We, the Free Software Foundation, use the
+GNU General Public License for most of our software; it applies also to
+any other work released this way by its authors.  You can apply it to
+your programs, too.
+
+  When we speak of free software, we are referring to freedom, not
+price.
Our General Public Licenses are designed to make sure that you +have the freedom to distribute copies of free software (and charge for +them if you wish), that you receive source code or can get it if you +want it, that you can change the software or use pieces of it in new +free programs, and that you know you can do these things. + + To protect your rights, we need to prevent others from denying you +these rights or asking you to surrender the rights. Therefore, you have +certain responsibilities if you distribute copies of the software, or if +you modify it: responsibilities to respect the freedom of others. + + For example, if you distribute copies of such a program, whether +gratis or for a fee, you must pass on to the recipients the same +freedoms that you received. You must make sure that they, too, receive +or can get the source code. And you must show them these terms so they +know their rights. + + Developers that use the GNU GPL protect your rights with two steps: +(1) assert copyright on the software, and (2) offer you this License +giving you legal permission to copy, distribute and/or modify it. + + For the developers' and authors' protection, the GPL clearly explains +that there is no warranty for this free software. For both users' and +authors' sake, the GPL requires that modified versions be marked as +changed, so that their problems will not be attributed erroneously to +authors of previous versions. + + Some devices are designed to deny users access to install or run +modified versions of the software inside them, although the manufacturer +can do so. This is fundamentally incompatible with the aim of +protecting users' freedom to change the software. The systematic +pattern of such abuse occurs in the area of products for individuals to +use, which is precisely where it is most unacceptable. Therefore, we +have designed this version of the GPL to prohibit the practice for those +products. If such problems arise substantially in other domains, we +stand ready to extend this provision to those domains in future versions +of the GPL, as needed to protect the freedom of users. + + Finally, every program is threatened constantly by software patents. +States should not allow patents to restrict development and use of +software on general-purpose computers, but in those that do, we wish to +avoid the special danger that patents applied to a free program could +make it effectively proprietary. To prevent this, the GPL assures that +patents cannot be used to render the program non-free. + + The precise terms and conditions for copying, distribution and +modification follow. + + TERMS AND CONDITIONS + + 0. Definitions. + + "This License" refers to version 3 of the GNU General Public License. + + "Copyright" also means copyright-like laws that apply to other kinds of +works, such as semiconductor masks. + + "The Program" refers to any copyrightable work licensed under this +License. Each licensee is addressed as "you". "Licensees" and +"recipients" may be individuals or organizations. + + To "modify" a work means to copy from or adapt all or part of the work +in a fashion requiring copyright permission, other than the making of an +exact copy. The resulting work is called a "modified version" of the +earlier work or a work "based on" the earlier work. + + A "covered work" means either the unmodified Program or a work based +on the Program. 
+ + To "propagate" a work means to do anything with it that, without +permission, would make you directly or secondarily liable for +infringement under applicable copyright law, except executing it on a +computer or modifying a private copy. Propagation includes copying, +distribution (with or without modification), making available to the +public, and in some countries other activities as well. + + To "convey" a work means any kind of propagation that enables other +parties to make or receive copies. Mere interaction with a user through +a computer network, with no transfer of a copy, is not conveying. + + An interactive user interface displays "Appropriate Legal Notices" +to the extent that it includes a convenient and prominently visible +feature that (1) displays an appropriate copyright notice, and (2) +tells the user that there is no warranty for the work (except to the +extent that warranties are provided), that licensees may convey the +work under this License, and how to view a copy of this License. If +the interface presents a list of user commands or options, such as a +menu, a prominent item in the list meets this criterion. + + 1. Source Code. + + The "source code" for a work means the preferred form of the work +for making modifications to it. "Object code" means any non-source +form of a work. + + A "Standard Interface" means an interface that either is an official +standard defined by a recognized standards body, or, in the case of +interfaces specified for a particular programming language, one that +is widely used among developers working in that language. + + The "System Libraries" of an executable work include anything, other +than the work as a whole, that (a) is included in the normal form of +packaging a Major Component, but which is not part of that Major +Component, and (b) serves only to enable use of the work with that +Major Component, or to implement a Standard Interface for which an +implementation is available to the public in source code form. A +"Major Component", in this context, means a major essential component +(kernel, window system, and so on) of the specific operating system +(if any) on which the executable work runs, or a compiler used to +produce the work, or an object code interpreter used to run it. + + The "Corresponding Source" for a work in object code form means all +the source code needed to generate, install, and (for an executable +work) run the object code and to modify the work, including scripts to +control those activities. However, it does not include the work's +System Libraries, or general-purpose tools or generally available free +programs which are used unmodified in performing those activities but +which are not part of the work. For example, Corresponding Source +includes interface definition files associated with source files for +the work, and the source code for shared libraries and dynamically +linked subprograms that the work is specifically designed to require, +such as by intimate data communication or control flow between those +subprograms and other parts of the work. + + The Corresponding Source need not include anything that users +can regenerate automatically from other parts of the Corresponding +Source. + + The Corresponding Source for a work in source code form is that +same work. + + 2. Basic Permissions. + + All rights granted under this License are granted for the term of +copyright on the Program, and are irrevocable provided the stated +conditions are met. 
This License explicitly affirms your unlimited +permission to run the unmodified Program. The output from running a +covered work is covered by this License only if the output, given its +content, constitutes a covered work. This License acknowledges your +rights of fair use or other equivalent, as provided by copyright law. + + You may make, run and propagate covered works that you do not +convey, without conditions so long as your license otherwise remains +in force. You may convey covered works to others for the sole purpose +of having them make modifications exclusively for you, or provide you +with facilities for running those works, provided that you comply with +the terms of this License in conveying all material for which you do +not control copyright. Those thus making or running the covered works +for you must do so exclusively on your behalf, under your direction +and control, on terms that prohibit them from making any copies of +your copyrighted material outside their relationship with you. + + Conveying under any other circumstances is permitted solely under +the conditions stated below. Sublicensing is not allowed; section 10 +makes it unnecessary. + + 3. Protecting Users' Legal Rights From Anti-Circumvention Law. + + No covered work shall be deemed part of an effective technological +measure under any applicable law fulfilling obligations under article +11 of the WIPO copyright treaty adopted on 20 December 1996, or +similar laws prohibiting or restricting circumvention of such +measures. + + When you convey a covered work, you waive any legal power to forbid +circumvention of technological measures to the extent such circumvention +is effected by exercising rights under this License with respect to +the covered work, and you disclaim any intention to limit operation or +modification of the work as a means of enforcing, against the work's +users, your or third parties' legal rights to forbid circumvention of +technological measures. + + 4. Conveying Verbatim Copies. + + You may convey verbatim copies of the Program's source code as you +receive it, in any medium, provided that you conspicuously and +appropriately publish on each copy an appropriate copyright notice; +keep intact all notices stating that this License and any +non-permissive terms added in accord with section 7 apply to the code; +keep intact all notices of the absence of any warranty; and give all +recipients a copy of this License along with the Program. + + You may charge any price or no price for each copy that you convey, +and you may offer support or warranty protection for a fee. + + 5. Conveying Modified Source Versions. + + You may convey a work based on the Program, or the modifications to +produce it from the Program, in the form of source code under the +terms of section 4, provided that you also meet all of these conditions: + + a) The work must carry prominent notices stating that you modified + it, and giving a relevant date. + + b) The work must carry prominent notices stating that it is + released under this License and any conditions added under section + 7. This requirement modifies the requirement in section 4 to + "keep intact all notices". + + c) You must license the entire work, as a whole, under this + License to anyone who comes into possession of a copy. This + License will therefore apply, along with any applicable section 7 + additional terms, to the whole of the work, and all its parts, + regardless of how they are packaged. 
This License gives no + permission to license the work in any other way, but it does not + invalidate such permission if you have separately received it. + + d) If the work has interactive user interfaces, each must display + Appropriate Legal Notices; however, if the Program has interactive + interfaces that do not display Appropriate Legal Notices, your + work need not make them do so. + + A compilation of a covered work with other separate and independent +works, which are not by their nature extensions of the covered work, +and which are not combined with it such as to form a larger program, +in or on a volume of a storage or distribution medium, is called an +"aggregate" if the compilation and its resulting copyright are not +used to limit the access or legal rights of the compilation's users +beyond what the individual works permit. Inclusion of a covered work +in an aggregate does not cause this License to apply to the other +parts of the aggregate. + + 6. Conveying Non-Source Forms. + + You may convey a covered work in object code form under the terms +of sections 4 and 5, provided that you also convey the +machine-readable Corresponding Source under the terms of this License, +in one of these ways: + + a) Convey the object code in, or embodied in, a physical product + (including a physical distribution medium), accompanied by the + Corresponding Source fixed on a durable physical medium + customarily used for software interchange. + + b) Convey the object code in, or embodied in, a physical product + (including a physical distribution medium), accompanied by a + written offer, valid for at least three years and valid for as + long as you offer spare parts or customer support for that product + model, to give anyone who possesses the object code either (1) a + copy of the Corresponding Source for all the software in the + product that is covered by this License, on a durable physical + medium customarily used for software interchange, for a price no + more than your reasonable cost of physically performing this + conveying of source, or (2) access to copy the + Corresponding Source from a network server at no charge. + + c) Convey individual copies of the object code with a copy of the + written offer to provide the Corresponding Source. This + alternative is allowed only occasionally and noncommercially, and + only if you received the object code with such an offer, in accord + with subsection 6b. + + d) Convey the object code by offering access from a designated + place (gratis or for a charge), and offer equivalent access to the + Corresponding Source in the same way through the same place at no + further charge. You need not require recipients to copy the + Corresponding Source along with the object code. If the place to + copy the object code is a network server, the Corresponding Source + may be on a different server (operated by you or a third party) + that supports equivalent copying facilities, provided you maintain + clear directions next to the object code saying where to find the + Corresponding Source. Regardless of what server hosts the + Corresponding Source, you remain obligated to ensure that it is + available for as long as needed to satisfy these requirements. + + e) Convey the object code using peer-to-peer transmission, provided + you inform other peers where the object code and Corresponding + Source of the work are being offered to the general public at no + charge under subsection 6d. 
+ + A separable portion of the object code, whose source code is excluded +from the Corresponding Source as a System Library, need not be +included in conveying the object code work. + + A "User Product" is either (1) a "consumer product", which means any +tangible personal property which is normally used for personal, family, +or household purposes, or (2) anything designed or sold for incorporation +into a dwelling. In determining whether a product is a consumer product, +doubtful cases shall be resolved in favor of coverage. For a particular +product received by a particular user, "normally used" refers to a +typical or common use of that class of product, regardless of the status +of the particular user or of the way in which the particular user +actually uses, or expects or is expected to use, the product. A product +is a consumer product regardless of whether the product has substantial +commercial, industrial or non-consumer uses, unless such uses represent +the only significant mode of use of the product. + + "Installation Information" for a User Product means any methods, +procedures, authorization keys, or other information required to install +and execute modified versions of a covered work in that User Product from +a modified version of its Corresponding Source. The information must +suffice to ensure that the continued functioning of the modified object +code is in no case prevented or interfered with solely because +modification has been made. + + If you convey an object code work under this section in, or with, or +specifically for use in, a User Product, and the conveying occurs as +part of a transaction in which the right of possession and use of the +User Product is transferred to the recipient in perpetuity or for a +fixed term (regardless of how the transaction is characterized), the +Corresponding Source conveyed under this section must be accompanied +by the Installation Information. But this requirement does not apply +if neither you nor any third party retains the ability to install +modified object code on the User Product (for example, the work has +been installed in ROM). + + The requirement to provide Installation Information does not include a +requirement to continue to provide support service, warranty, or updates +for a work that has been modified or installed by the recipient, or for +the User Product in which it has been modified or installed. Access to a +network may be denied when the modification itself materially and +adversely affects the operation of the network or violates the rules and +protocols for communication across the network. + + Corresponding Source conveyed, and Installation Information provided, +in accord with this section must be in a format that is publicly +documented (and with an implementation available to the public in +source code form), and must require no special password or key for +unpacking, reading or copying. + + 7. Additional Terms. + + "Additional permissions" are terms that supplement the terms of this +License by making exceptions from one or more of its conditions. +Additional permissions that are applicable to the entire Program shall +be treated as though they were included in this License, to the extent +that they are valid under applicable law. If additional permissions +apply only to part of the Program, that part may be used separately +under those permissions, but the entire Program remains governed by +this License without regard to the additional permissions. 
+ + When you convey a copy of a covered work, you may at your option +remove any additional permissions from that copy, or from any part of +it. (Additional permissions may be written to require their own +removal in certain cases when you modify the work.) You may place +additional permissions on material, added by you to a covered work, +for which you have or can give appropriate copyright permission. + + Notwithstanding any other provision of this License, for material you +add to a covered work, you may (if authorized by the copyright holders of +that material) supplement the terms of this License with terms: + + a) Disclaiming warranty or limiting liability differently from the + terms of sections 15 and 16 of this License; or + + b) Requiring preservation of specified reasonable legal notices or + author attributions in that material or in the Appropriate Legal + Notices displayed by works containing it; or + + c) Prohibiting misrepresentation of the origin of that material, or + requiring that modified versions of such material be marked in + reasonable ways as different from the original version; or + + d) Limiting the use for publicity purposes of names of licensors or + authors of the material; or + + e) Declining to grant rights under trademark law for use of some + trade names, trademarks, or service marks; or + + f) Requiring indemnification of licensors and authors of that + material by anyone who conveys the material (or modified versions of + it) with contractual assumptions of liability to the recipient, for + any liability that these contractual assumptions directly impose on + those licensors and authors. + + All other non-permissive additional terms are considered "further +restrictions" within the meaning of section 10. If the Program as you +received it, or any part of it, contains a notice stating that it is +governed by this License along with a term that is a further +restriction, you may remove that term. If a license document contains +a further restriction but permits relicensing or conveying under this +License, you may add to a covered work material governed by the terms +of that license document, provided that the further restriction does +not survive such relicensing or conveying. + + If you add terms to a covered work in accord with this section, you +must place, in the relevant source files, a statement of the +additional terms that apply to those files, or a notice indicating +where to find the applicable terms. + + Additional terms, permissive or non-permissive, may be stated in the +form of a separately written license, or stated as exceptions; +the above requirements apply either way. + + 8. Termination. + + You may not propagate or modify a covered work except as expressly +provided under this License. Any attempt otherwise to propagate or +modify it is void, and will automatically terminate your rights under +this License (including any patent licenses granted under the third +paragraph of section 11). + + However, if you cease all violation of this License, then your +license from a particular copyright holder is reinstated (a) +provisionally, unless and until the copyright holder explicitly and +finally terminates your license, and (b) permanently, if the copyright +holder fails to notify you of the violation by some reasonable means +prior to 60 days after the cessation. 
+ + Moreover, your license from a particular copyright holder is +reinstated permanently if the copyright holder notifies you of the +violation by some reasonable means, this is the first time you have +received notice of violation of this License (for any work) from that +copyright holder, and you cure the violation prior to 30 days after +your receipt of the notice. + + Termination of your rights under this section does not terminate the +licenses of parties who have received copies or rights from you under +this License. If your rights have been terminated and not permanently +reinstated, you do not qualify to receive new licenses for the same +material under section 10. + + 9. Acceptance Not Required for Having Copies. + + You are not required to accept this License in order to receive or +run a copy of the Program. Ancillary propagation of a covered work +occurring solely as a consequence of using peer-to-peer transmission +to receive a copy likewise does not require acceptance. However, +nothing other than this License grants you permission to propagate or +modify any covered work. These actions infringe copyright if you do +not accept this License. Therefore, by modifying or propagating a +covered work, you indicate your acceptance of this License to do so. + + 10. Automatic Licensing of Downstream Recipients. + + Each time you convey a covered work, the recipient automatically +receives a license from the original licensors, to run, modify and +propagate that work, subject to this License. You are not responsible +for enforcing compliance by third parties with this License. + + An "entity transaction" is a transaction transferring control of an +organization, or substantially all assets of one, or subdividing an +organization, or merging organizations. If propagation of a covered +work results from an entity transaction, each party to that +transaction who receives a copy of the work also receives whatever +licenses to the work the party's predecessor in interest had or could +give under the previous paragraph, plus a right to possession of the +Corresponding Source of the work from the predecessor in interest, if +the predecessor has it or can get it with reasonable efforts. + + You may not impose any further restrictions on the exercise of the +rights granted or affirmed under this License. For example, you may +not impose a license fee, royalty, or other charge for exercise of +rights granted under this License, and you may not initiate litigation +(including a cross-claim or counterclaim in a lawsuit) alleging that +any patent claim is infringed by making, using, selling, offering for +sale, or importing the Program or any portion of it. + + 11. Patents. + + A "contributor" is a copyright holder who authorizes use under this +License of the Program or a work on which the Program is based. The +work thus licensed is called the contributor's "contributor version". + + A contributor's "essential patent claims" are all patent claims +owned or controlled by the contributor, whether already acquired or +hereafter acquired, that would be infringed by some manner, permitted +by this License, of making, using, or selling its contributor version, +but do not include claims that would be infringed only as a +consequence of further modification of the contributor version. For +purposes of this definition, "control" includes the right to grant +patent sublicenses in a manner consistent with the requirements of +this License. 
+ + Each contributor grants you a non-exclusive, worldwide, royalty-free +patent license under the contributor's essential patent claims, to +make, use, sell, offer for sale, import and otherwise run, modify and +propagate the contents of its contributor version. + + In the following three paragraphs, a "patent license" is any express +agreement or commitment, however denominated, not to enforce a patent +(such as an express permission to practice a patent or covenant not to +sue for patent infringement). To "grant" such a patent license to a +party means to make such an agreement or commitment not to enforce a +patent against the party. + + If you convey a covered work, knowingly relying on a patent license, +and the Corresponding Source of the work is not available for anyone +to copy, free of charge and under the terms of this License, through a +publicly available network server or other readily accessible means, +then you must either (1) cause the Corresponding Source to be so +available, or (2) arrange to deprive yourself of the benefit of the +patent license for this particular work, or (3) arrange, in a manner +consistent with the requirements of this License, to extend the patent +license to downstream recipients. "Knowingly relying" means you have +actual knowledge that, but for the patent license, your conveying the +covered work in a country, or your recipient's use of the covered work +in a country, would infringe one or more identifiable patents in that +country that you have reason to believe are valid. + + If, pursuant to or in connection with a single transaction or +arrangement, you convey, or propagate by procuring conveyance of, a +covered work, and grant a patent license to some of the parties +receiving the covered work authorizing them to use, propagate, modify +or convey a specific copy of the covered work, then the patent license +you grant is automatically extended to all recipients of the covered +work and works based on it. + + A patent license is "discriminatory" if it does not include within +the scope of its coverage, prohibits the exercise of, or is +conditioned on the non-exercise of one or more of the rights that are +specifically granted under this License. You may not convey a covered +work if you are a party to an arrangement with a third party that is +in the business of distributing software, under which you make payment +to the third party based on the extent of your activity of conveying +the work, and under which the third party grants, to any of the +parties who would receive the covered work from you, a discriminatory +patent license (a) in connection with copies of the covered work +conveyed by you (or copies made from those copies), or (b) primarily +for and in connection with specific products or compilations that +contain the covered work, unless you entered into that arrangement, +or that patent license was granted, prior to 28 March 2007. + + Nothing in this License shall be construed as excluding or limiting +any implied license or other defenses to infringement that may +otherwise be available to you under applicable patent law. + + 12. No Surrender of Others' Freedom. + + If conditions are imposed on you (whether by court order, agreement or +otherwise) that contradict the conditions of this License, they do not +excuse you from the conditions of this License. 
If you cannot convey a +covered work so as to satisfy simultaneously your obligations under this +License and any other pertinent obligations, then as a consequence you may +not convey it at all. For example, if you agree to terms that obligate you +to collect a royalty for further conveying from those to whom you convey +the Program, the only way you could satisfy both those terms and this +License would be to refrain entirely from conveying the Program. + + 13. Use with the GNU Affero General Public License. + + Notwithstanding any other provision of this License, you have +permission to link or combine any covered work with a work licensed +under version 3 of the GNU Affero General Public License into a single +combined work, and to convey the resulting work. The terms of this +License will continue to apply to the part which is the covered work, +but the special requirements of the GNU Affero General Public License, +section 13, concerning interaction through a network will apply to the +combination as such. + + 14. Revised Versions of this License. + + The Free Software Foundation may publish revised and/or new versions of +the GNU General Public License from time to time. Such new versions will +be similar in spirit to the present version, but may differ in detail to +address new problems or concerns. + + Each version is given a distinguishing version number. If the +Program specifies that a certain numbered version of the GNU General +Public License "or any later version" applies to it, you have the +option of following the terms and conditions either of that numbered +version or of any later version published by the Free Software +Foundation. If the Program does not specify a version number of the +GNU General Public License, you may choose any version ever published +by the Free Software Foundation. + + If the Program specifies that a proxy can decide which future +versions of the GNU General Public License can be used, that proxy's +public statement of acceptance of a version permanently authorizes you +to choose that version for the Program. + + Later license versions may give you additional or different +permissions. However, no additional obligations are imposed on any +author or copyright holder as a result of your choosing to follow a +later version. + + 15. Disclaimer of Warranty. + + THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY +APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT +HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY +OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, +THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR +PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM +IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF +ALL NECESSARY SERVICING, REPAIR OR CORRECTION. + + 16. Limitation of Liability. + + IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING +WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS +THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY +GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE +USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF +DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD +PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS), +EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF +SUCH DAMAGES. + + 17. 
Interpretation of Sections 15 and 16.
+
+  If the disclaimer of warranty and limitation of liability provided
+above cannot be given local legal effect according to their terms,
+reviewing courts shall apply local law that most closely approximates
+an absolute waiver of all civil liability in connection with the
+Program, unless a warranty or assumption of liability accompanies a
+copy of the Program in return for a fee.
+
+                     END OF TERMS AND CONDITIONS
+
+            How to Apply These Terms to Your New Programs
+
+  If you develop a new program, and you want it to be of the greatest
+possible use to the public, the best way to achieve this is to make it
+free software which everyone can redistribute and change under these terms.
+
+  To do so, attach the following notices to the program.  It is safest
+to attach them to the start of each source file to most effectively
+state the exclusion of warranty; and each file should have at least
+the "copyright" line and a pointer to where the full notice is found.
+
+    <one line to give the program's name and a brief idea of what it does.>
+    Copyright (C) <year>  <name of author>
+
+    This program is free software: you can redistribute it and/or modify
+    it under the terms of the GNU General Public License as published by
+    the Free Software Foundation, either version 3 of the License, or
+    (at your option) any later version.
+
+    This program is distributed in the hope that it will be useful,
+    but WITHOUT ANY WARRANTY; without even the implied warranty of
+    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+    GNU General Public License for more details.
+
+    You should have received a copy of the GNU General Public License
+    along with this program.  If not, see <http://www.gnu.org/licenses/>.
+
+Also add information on how to contact you by electronic and paper mail.
+
+  If the program does terminal interaction, make it output a short
+notice like this when it starts in an interactive mode:
+
+    <program>  Copyright (C) <year>  <name of author>
+    This program comes with ABSOLUTELY NO WARRANTY; for details type `show w'.
+    This is free software, and you are welcome to redistribute it
+    under certain conditions; type `show c' for details.
+
+The hypothetical commands `show w' and `show c' should show the appropriate
+parts of the General Public License.  Of course, your program's commands
+might be different; for a GUI interface, you would use an "about box".
+
+  You should also get your employer (if you work as a programmer) or school,
+if any, to sign a "copyright disclaimer" for the program, if necessary.
+For more information on this, and how to apply and follow the GNU GPL, see
+<http://www.gnu.org/licenses/>.
+
+  The GNU General Public License does not permit incorporating your program
+into proprietary programs.  If your program is a subroutine library, you
+may consider it more useful to permit linking proprietary applications with
+the library.  If this is what you want to do, use the GNU Lesser General
+Public License instead of this License.  But first, please read
+<http://www.gnu.org/philosophy/why-not-lgpl.html>.
diff --git a/MANIFEST.in b/MANIFEST.in
new file mode 100644
index 0000000..b518c2f
--- /dev/null
+++ b/MANIFEST.in
@@ -0,0 +1 @@
+include AUTHORS CHANGELOG.md LICENSE README.md
diff --git a/README.md b/README.md
new file mode 100644
index 0000000..a6a597a
--- /dev/null
+++ b/README.md
@@ -0,0 +1,25 @@
+BundleWrap is a decentralized configuration management system that is designed to be powerful, easy to extend and extremely versatile.
+
+For more information, have a look at [bundlewrap.org](http://bundlewrap.org) and [docs.bundlewrap.org](http://docs.bundlewrap.org).
+
+------------------------------------------------------------------------
+
+Badges: Latest Version · Build status · Code health · Python compatibility
+
+------------------------------------------------------------------------
+
+BundleWrap is © 2013 - 2016 [Torsten Rehn](mailto:torsten@rehn.email)
diff --git a/assets/icon.psd b/assets/icon.psd
new file mode 100644
index 0000000..baded45
Binary files /dev/null and b/assets/icon.psd differ
diff --git a/bundlewrap/__init__.py b/bundlewrap/__init__.py
new file mode 100644
index 0000000..d4028da
--- /dev/null
+++ b/bundlewrap/__init__.py
@@ -0,0 +1,5 @@
+# -*- coding: utf-8 -*-
+from __future__ import unicode_literals
+
+VERSION = (2, 12, 2)
+VERSION_STRING = ".".join([str(v) for v in VERSION])
diff --git a/bundlewrap/bundle.py b/bundlewrap/bundle.py
new file mode 100644
index 0000000..faf6364
--- /dev/null
+++ b/bundlewrap/bundle.py
@@ -0,0 +1,99 @@
+# -*- coding: utf-8 -*-
+from __future__ import unicode_literals
+
+from os.path import exists, join
+
+from .exceptions import NoSuchBundle, RepositoryError
+from .utils import cached_property, get_all_attrs_from_file
+from .utils.text import mark_for_translation as _
+from .utils.text import validate_name
+from .utils.ui import io
+
+
+FILENAME_BUNDLE = "items.py"
+FILENAME_METADATA = "metadata.py"
+
+
+class Bundle(object):
+    """
+    A collection of config items, bound to a node.
+    """
+    def __init__(self, node, name):
+        self.name = name
+        self.node = node
+        self.repo = node.repo
+
+        if not validate_name(name):
+            raise RepositoryError(_("invalid bundle name: {}").format(name))
+
+        if name not in self.repo.bundle_names:
+            raise NoSuchBundle(_("bundle not found: {}").format(name))
+
+        self.bundle_dir = join(self.repo.bundles_dir, self.name)
+        self.bundle_data_dir = join(self.repo.data_dir, self.name)
+        self.bundle_file = join(self.bundle_dir, FILENAME_BUNDLE)
+        self.metadata_file = join(self.bundle_dir, FILENAME_METADATA)
+
+    @cached_property
+    def bundle_attrs(self):
+        if not exists(self.bundle_file):
+            return {}
+        else:
+            with io.job(_("  {node}  {bundle}  collecting items...").format(
+                node=self.node.name,
+                bundle=self.name,
+            )):
+                return get_all_attrs_from_file(
+                    self.bundle_file,
+                    base_env={
+                        'node': self.node,
+                        'repo': self.repo,
+                    },
+                )
+
+    @cached_property
+    def items(self):
+        for item_class in self.repo.item_classes:
+            for item_name, item_attrs in self.bundle_attrs.get(
+                item_class.BUNDLE_ATTRIBUTE_NAME,
+                {},
+            ).items():
+                yield self.make_item(
+                    item_class.BUNDLE_ATTRIBUTE_NAME,
+                    item_name,
+                    item_attrs,
+                )
+
+    def make_item(self, attribute_name, item_name, item_attrs):
+        for item_class in self.repo.item_classes:
+            if item_class.BUNDLE_ATTRIBUTE_NAME == attribute_name:
+                return item_class(self, item_name, item_attrs)
+        raise RuntimeError(
+            _("bundle '{bundle}' tried to generate item '{item}' from "
+              "unknown attribute '{attr}'").format(
+                attr=attribute_name,
+                bundle=self.name,
+                item=item_name,
+            )
+        )
+
+    @cached_property
+    def metadata_processors(self):
+        with io.job(_("  {node}  {bundle}  collecting metadata processors...").format(
+            node=self.node.name,
+            bundle=self.name,
+        )):
+            if not exists(self.metadata_file):
+                return []
+            result = []
+            for name, attr in get_all_attrs_from_file(
+                self.metadata_file,
+                base_env={
+                    'node': self.node,
+                    'repo': self.repo,
+                },
+            ).items():
+                if name.startswith("_") or not callable(attr):
+                    continue
+                result.append(attr)
+            return result
diff --git a/bundlewrap/cmdline/__init__.py b/bundlewrap/cmdline/__init__.py
new file mode 100644
index 0000000..09acc07
--- /dev/null
+++ b/bundlewrap/cmdline/__init__.py
@@ -0,0 +1,153 @@
+# -*- coding: utf-8 -*-
+from __future__ import unicode_literals
+
+from functools import wraps
+from os import environ, getcwd
+from os.path import dirname
+from sys import argv, exit, stderr, stdout
+from traceback import print_exc
+
+
+from ..exceptions import NoSuchRepository, MissingRepoDependency
+from ..repo import Repository
+from ..utils.text import force_text, mark_for_translation as _, red
+from ..utils.ui import io
+from .parser import build_parser_bw
+
+
+def suppress_broken_pipe_msg(f):
+    """
+    Oh boy.
+
+    CPython does funny things with SIGPIPE. By default, it is caught and
+    raised as a BrokenPipeError. When do we get a SIGPIPE? Most commonly
+    when piping into head:
+
+        bw nodes | head -n 1
+
+    head will exit after receiving the first line, causing the kernel to
+    send SIGPIPE to our process. Since in most cases, we can't just quit
+    early, we simply ignore BrokenPipeError in utils.ui.write_to_stream.
+
+    Unfortunately, Python will still print a message:
+
+        Exception ignored in: <_io.TextIOWrapper name='<stdout>'
+        mode='w' encoding='UTF-8'>
+        BrokenPipeError: [Errno 32] Broken pipe
+
+    See also http://bugs.python.org/issue11380. The crazy try/finally
+    construct below is taken from there and I quote:
+
+        This will:
+        - capture any exceptions *you've* raised as the context for the
+          errors raised in this handler
+        - expose any exceptions generated during this thing itself
+        - prevent the interpreter dying during shutdown in
+          flush_std_files by closing the files (you can't easily wipe
+          out the pending writes that have failed)
+
+    CAVEAT: There is a seemingly easier method floating around on the
+    net (http://stackoverflow.com/a/16865106) that restores the default
+    behavior for SIGPIPE (i.e. not turning it into a BrokenPipeError):
+
+        from signal import signal, SIGPIPE, SIG_DFL
+        signal(SIGPIPE, SIG_DFL)
+
+    This worked fine for a while but broke when using
+    multiprocessing.Manager() to share the list of jobs in utils.ui
+    between processes. When the main process terminated, it quit with
+    return code 141 (indicating a broken pipe), and the background
+    process used for the manager continued to hang around indefinitely.
+    Bonus fun: This was observed only on Ubuntu Trusty (14.04).
+    """
+    @wraps(f)
+    def wrapper(*args, **kwargs):
+        try:
+            return f(*args, **kwargs)
+        except SystemExit:
+            raise
+        except:
+            print_exc()
+            exit(1)
+        finally:
+            try:
+                stdout.flush()
+            finally:
+                try:
+                    stdout.close()
+                finally:
+                    try:
+                        stderr.flush()
+                    finally:
+                        stderr.close()
+    return wrapper
+
+
+@suppress_broken_pipe_msg
+def main(*args, **kwargs):
+    """
+    Entry point for the 'bw' command line utility.
+
+    The args and path parameters are used for integration tests.
+ """ + if not args: + args = argv[1:] + path = kwargs.get('path', getcwd()) + + text_args = [force_text(arg) for arg in args] + + parser_bw = build_parser_bw() + pargs = parser_bw.parse_args(args) + if not hasattr(pargs, 'func'): + parser_bw.print_help() + exit(2) + + io.debug_mode = pargs.debug + io.activate() + io.debug(_("invocation: {}").format(" ".join(argv))) + + if 'BWADDHOSTKEYS' in environ: # TODO remove in 3.0.0 + environ.setdefault('BW_ADD_HOST_KEYS', environ['BWADDHOSTKEYS']) + if 'BWCOLORS' in environ: # TODO remove in 3.0.0 + environ.setdefault('BW_COLORS', environ['BWCOLORS']) + if 'BWITEMWORKERS' in environ: # TODO remove in 3.0.0 + environ.setdefault('BW_ITEM_WORKERS', environ['BWITEMWORKERS']) + if 'BWNODEWORKERS' in environ: # TODO remove in 3.0.0 + environ.setdefault('BW_NODE_WORKERS', environ['BWNODEWORKERS']) + + environ.setdefault('BW_ADD_HOST_KEYS', "1" if pargs.add_ssh_host_keys else "0") + + if len(text_args) >= 1 and ( + text_args[0] == "--version" or + (len(text_args) >= 2 and text_args[0] == "repo" and text_args[1] == "create") or + text_args[0] == "zen" or + "-h" in text_args or + "--help" in text_args + ): + # 'bw repo create' is a special case that only takes a path + repo = path + else: + while True: + try: + repo = Repository(path) + break + except NoSuchRepository: + if path == dirname(path): + io.stderr(_( + "{x} The current working directory " + "is not a BundleWrap repository." + ).format(x=red("!!!"))) + exit(1) + else: + path = dirname(path) + except MissingRepoDependency as exc: + io.stderr(str(exc)) + exit(1) + + # convert all string args into text + text_pargs = {key: force_text(value) for key, value in vars(pargs).items()} + + try: + pargs.func(repo, text_pargs) + finally: + io.deactivate() diff --git a/bundlewrap/cmdline/apply.py b/bundlewrap/cmdline/apply.py new file mode 100644 index 0000000..364e6df --- /dev/null +++ b/bundlewrap/cmdline/apply.py @@ -0,0 +1,92 @@ +# -*- coding: utf-8 -*- +from __future__ import unicode_literals + +from datetime import datetime +from sys import exit + +from ..concurrency import WorkerPool +from ..utils.cmdline import get_target_nodes +from ..utils.text import bold +from ..utils.text import error_summary, mark_for_translation as _ +from ..utils.ui import io + + +def bw_apply(repo, args): + errors = [] + target_nodes = get_target_nodes(repo, args['target'], adhoc_nodes=args['adhoc_nodes']) + pending_nodes = target_nodes[:] + + repo.hooks.apply_start( + repo, + args['target'], + target_nodes, + interactive=args['interactive'], + ) + + start_time = datetime.now() + + def tasks_available(): + return bool(pending_nodes) + + def next_task(): + node = pending_nodes.pop() + return { + 'target': node.apply, + 'task_id': node.name, + 'kwargs': { + 'autoskip_selector': args['autoskip'], + 'force': args['force'], + 'interactive': args['interactive'], + 'workers': args['item_workers'], + 'profiling': args['profiling'], + }, + } + + def handle_result(task_id, return_value, duration): + if ( + return_value is not None and # node skipped because it had no items + args['profiling'] + ): + total_time = 0.0 + io.stdout(_(" {}").format(bold(task_id))) + io.stdout(_(" {} BEGIN PROFILING DATA " + "(most expensive items first)").format(bold(task_id))) + io.stdout(_(" {} seconds item").format(bold(task_id))) + for time_elapsed, item_id in return_value.profiling_info: + io.stdout(" {} {:10.3f} {}".format( + bold(task_id), + time_elapsed.total_seconds(), + item_id, + )) + total_time += time_elapsed.total_seconds() + io.stdout(_(" {} 
{:10.3f} (total)").format(bold(task_id), total_time)) + io.stdout(_(" {} END PROFILING DATA").format(bold(task_id))) + io.stdout(_(" {}").format(bold(task_id))) + + def handle_exception(task_id, exception, traceback): + msg = "{}: {}".format(task_id, exception) + io.stderr(traceback) + io.stderr(repr(exception)) + io.stderr(msg) + errors.append(msg) + + worker_pool = WorkerPool( + tasks_available, + next_task, + handle_result=handle_result, + handle_exception=handle_exception, + pool_id="apply", + workers=args['node_workers'], + ) + worker_pool.run() + + error_summary(errors) + + repo.hooks.apply_end( + repo, + args['target'], + target_nodes, + duration=datetime.now() - start_time, + ) + + exit(1 if errors else 0) diff --git a/bundlewrap/cmdline/debug.py b/bundlewrap/cmdline/debug.py new file mode 100644 index 0000000..2d5b811 --- /dev/null +++ b/bundlewrap/cmdline/debug.py @@ -0,0 +1,32 @@ +# -*- coding: utf-8 -*- +from __future__ import unicode_literals + +from code import interact + +from .. import VERSION_STRING +from ..utils.cmdline import get_node +from ..utils.text import mark_for_translation as _ +from ..utils.ui import io + + +DEBUG_BANNER = _("BundleWrap {version} interactive repository inspector\n" + "> You can access the current repository as 'repo'." + "").format(version=VERSION_STRING) + +DEBUG_BANNER_NODE = DEBUG_BANNER + "\n" + \ + _("> You can access the selected node as 'node'.") + + +def bw_debug(repo, args): + if args['node'] is None: + env = {'repo': repo} + banner = DEBUG_BANNER + else: + env = {'node': get_node(repo, args['node']), 'repo': repo} + banner = DEBUG_BANNER_NODE + + io.deactivate() + if args['command']: + exec(args['command'], env) + else: + interact(banner, local=env) diff --git a/bundlewrap/cmdline/groups.py b/bundlewrap/cmdline/groups.py new file mode 100644 index 0000000..ef0945d --- /dev/null +++ b/bundlewrap/cmdline/groups.py @@ -0,0 +1,13 @@ +# -*- coding: utf-8 -*- +from __future__ import unicode_literals + +from ..utils import names +from ..utils.ui import io + + +def bw_groups(repo, args): + for group in repo.groups: + line = group.name + if args['show_nodes']: + line += ": " + ", ".join(names(group.nodes)) + io.stdout(line) diff --git a/bundlewrap/cmdline/hash.py b/bundlewrap/cmdline/hash.py new file mode 100644 index 0000000..9051839 --- /dev/null +++ b/bundlewrap/cmdline/hash.py @@ -0,0 +1,82 @@ +# -*- coding: utf-8 -*- +from __future__ import unicode_literals + +from sys import exit + +from ..exceptions import NoSuchGroup, NoSuchNode +from ..utils.cmdline import get_item +from ..utils.text import mark_for_translation as _, red +from ..utils.ui import io + + +def bw_hash(repo, args): + if args['group_membership'] and args['metadata']: + io.stdout(_( + "{x} Cannot hash group membership and metadata at the same time").format(x=red("!!!") + )) + exit(1) + if args['group_membership'] and args['item']: + io.stdout(_("{x} Cannot hash group membership for an item").format(x=red("!!!"))) + exit(1) + if args['item'] and args['metadata']: + io.stdout(_("{x} Items don't have metadata").format(x=red("!!!"))) + exit(1) + + if args['node_or_group']: + try: + target = repo.get_node(args['node_or_group']) + target_type = 'node' + except NoSuchNode: + try: + target = repo.get_group(args['node_or_group']) + target_type = 'group' + except NoSuchGroup: + if args['adhoc_nodes']: + target = repo.create_node(args['node_or_group']) + target_type = 'node' + else: + io.stderr(_("{x} No such node or group: {node_or_group}").format( + 
node_or_group=args['node_or_group'], + x=red("!!!"), + )) + exit(1) + else: + if args['item']: + target = get_item(target, args['item']) + target_type = 'item' + else: + target = repo + target_type = 'repo' + + if target_type == 'node' and args['dict'] and args['metadata']: + io.stdout(_("{x} Cannot show a metadata dict for a single node").format(x=red("!!!"))) + exit(1) + if target_type == 'group' and args['item']: + io.stdout(_("{x} Cannot select item for group").format(x=red("!!!"))) + exit(1) + + if args['dict']: + if args['group_membership']: + if target_type in ('node', 'repo'): + for group in target.groups: + io.stdout(group.name) + else: + for node in target.nodes: + io.stdout(node.name) + elif args['metadata']: + for node in target.nodes: + io.stdout("{}\t{}".format(node.name, node.metadata_hash())) + else: + cdict = target.cached_cdict if args['item'] else target.cdict + if cdict is None: + io.stdout("REMOVE") + else: + for key, value in sorted(cdict.items()): + io.stdout("{}\t{}".format(key, value) if args['item'] else "{} {}".format(value, key)) + else: + if args['group_membership']: + io.stdout(target.group_membership_hash()) + elif args['metadata']: + io.stdout(target.metadata_hash()) + else: + io.stdout(target.hash()) diff --git a/bundlewrap/cmdline/items.py b/bundlewrap/cmdline/items.py new file mode 100644 index 0000000..8ac4d14 --- /dev/null +++ b/bundlewrap/cmdline/items.py @@ -0,0 +1,73 @@ +# -*- coding: utf-8 -*- +from __future__ import unicode_literals + +from os import makedirs +from os.path import dirname, exists, join +from sys import exit + +from ..utils.cmdline import get_node +from ..utils.text import force_text, mark_for_translation as _ +from ..utils.ui import io + + +def write_preview(file_item, base_path): + """ + Writes the content of the given file item to the given path. + """ + file_path = join(base_path, file_item.name.lstrip("/")) + dir_path = dirname(file_path) + if not exists(dir_path): + makedirs(dir_path) + with open(file_path, 'wb') as f: + f.write(file_item.content) + + +def bw_items(repo, args): + node = get_node(repo, args['node'], adhoc_nodes=args['adhoc_nodes']) + if args['file_preview']: + item = node.get_item("file:{}".format(args['file_preview'])) + if ( + item.attributes['content_type'] in ('any', 'base64', 'binary') or + item.attributes['delete'] is True + ): + io.stderr(_( + "cannot preview {node} (unsuitable content_type or deleted)" + ).format(node=node.name)) + exit(1) + else: + io.stdout(item.content.decode(item.attributes['encoding']), append_newline=False) + elif args['file_preview_path']: + if exists(args['file_preview_path']): + io.stderr(_( + "not writing to existing path: {path}" + ).format(path=args['file_preview_path'])) + exit(1) + for item in node.items: + if not item.id.startswith("file:"): + continue + if item.attributes['content_type'] == 'any': + io.stderr(_( + "skipping file with 'any' content {filename}..." + ).format(filename=item.name)) + continue + if item.attributes['content_type'] == 'binary': + io.stderr(_( + "skipping binary file {filename}..." + ).format(filename=item.name)) + continue + if item.attributes['delete']: + io.stderr(_( + "skipping file with 'delete' flag {filename}..." 
+ ).format(filename=item.name)) + continue + io.stdout(_("writing {path}...").format(path=join( + args['file_preview_path'], + item.name.lstrip("/"), + ))) + write_preview(item, args['file_preview_path']) + else: + for item in node.items: + if args['show_repr']: + io.stdout(force_text(repr(item))) + else: + io.stdout(force_text(str(item))) diff --git a/bundlewrap/cmdline/lock.py b/bundlewrap/cmdline/lock.py new file mode 100644 index 0000000..df2a9e4 --- /dev/null +++ b/bundlewrap/cmdline/lock.py @@ -0,0 +1,223 @@ +# -*- coding: utf-8 -*- +from __future__ import unicode_literals + +from ..concurrency import WorkerPool +from ..lock import softlock_add, softlock_list, softlock_remove +from ..utils.cmdline import get_target_nodes +from ..utils.text import blue, bold, cyan, error_summary, green, mark_for_translation as _, \ + randstr +from ..utils.time import format_timestamp +from ..utils.ui import io + + +def bw_lock_add(repo, args): + errors = [] + target_nodes = get_target_nodes(repo, args['target'], adhoc_nodes=args['adhoc_nodes']) + pending_nodes = target_nodes[:] + max_node_name_length = max([len(node.name) for node in target_nodes]) + lock_id = randstr(length=4).upper() + + def tasks_available(): + return bool(pending_nodes) + + def next_task(): + node = pending_nodes.pop() + return { + 'target': softlock_add, + 'task_id': node.name, + 'args': (node, lock_id), + 'kwargs': { + 'comment': args['comment'], + 'expiry': args['expiry'], + 'item_selectors': args['items'].split(","), + }, + } + + def handle_result(task_id, return_value, duration): + io.stdout(_("{x} {node} locked with ID {id} (expires in {exp})").format( + x=green("✓"), + node=bold(task_id.ljust(max_node_name_length)), + id=return_value, + exp=args['expiry'], + )) + + def handle_exception(task_id, exception, traceback): + msg = "{}: {}".format(task_id, exception) + io.stderr(traceback) + io.stderr(repr(exception)) + io.stderr(msg) + errors.append(msg) + + worker_pool = WorkerPool( + tasks_available, + next_task, + handle_exception=handle_exception, + handle_result=handle_result, + pool_id="lock", + workers=args['node_workers'], + ) + worker_pool.run() + + error_summary(errors) + + +def bw_lock_remove(repo, args): + errors = [] + target_nodes = get_target_nodes(repo, args['target'], adhoc_nodes=args['adhoc_nodes']) + pending_nodes = target_nodes[:] + max_node_name_length = max([len(node.name) for node in target_nodes]) + + def tasks_available(): + return bool(pending_nodes) + + def next_task(): + node = pending_nodes.pop() + return { + 'target': softlock_remove, + 'task_id': node.name, + 'args': (node, args['lock_id'].upper()), + } + + def handle_result(task_id, return_value, duration): + io.stdout(_("{x} {node} lock {id} removed").format( + x=green("✓"), + node=bold(task_id.ljust(max_node_name_length)), + id=args['lock_id'].upper(), + )) + + def handle_exception(task_id, exception, traceback): + msg = "{}: {}".format(task_id, exception) + io.stderr(traceback) + io.stderr(repr(exception)) + io.stderr(msg) + errors.append(msg) + + worker_pool = WorkerPool( + tasks_available, + next_task, + handle_exception=handle_exception, + handle_result=handle_result, + pool_id="lock_remove", + workers=args['node_workers'], + ) + worker_pool.run() + + error_summary(errors) + + +def bw_lock_show(repo, args): + errors = [] + target_nodes = get_target_nodes(repo, args['target'], adhoc_nodes=args['adhoc_nodes']) + pending_nodes = target_nodes[:] + max_node_name_length = max([len(node.name) for node in target_nodes]) + locks_on_node = {} + + def 
tasks_available(): + return bool(pending_nodes) + + def next_task(): + node = pending_nodes.pop() + return { + 'target': softlock_list, + 'task_id': node.name, + 'args': (node,), + } + + def handle_result(task_id, return_value, duration): + locks_on_node[task_id] = return_value + + def handle_exception(task_id, exception, traceback): + msg = "{}: {}".format(task_id, exception) + io.stderr(traceback) + io.stderr(repr(exception)) + io.stderr(msg) + errors.append(msg) + + worker_pool = WorkerPool( + tasks_available, + next_task, + handle_exception=handle_exception, + handle_result=handle_result, + pool_id="lock_show", + workers=args['node_workers'], + ) + worker_pool.run() + + if errors: + error_summary(errors) + return + + headers = ( + ('id', _("ID")), + ('formatted_date', _("Created")), + ('formatted_expiry', _("Expires")), + ('user', _("User")), + ('items', _("Items")), + ('comment', _("Comment")), + ) + + locked_nodes = 0 + for node_name, locks in locks_on_node.items(): + if locks: + locked_nodes += 1 + + previous_node_was_unlocked = False + for node_name, locks in sorted(locks_on_node.items()): + if not locks: + io.stdout(_("{x} {node} no soft locks present").format( + x=green("✓"), + node=bold(node_name.ljust(max_node_name_length)), + )) + previous_node_was_unlocked = True + + output_counter = 0 + for node_name, locks in sorted(locks_on_node.items()): + if locks: + # Unlocked nodes are printed without empty lines in + # between them. Locked nodes can produce lengthy output, + # though, so we add empty lines. + if ( + previous_node_was_unlocked or ( + output_counter > 0 and output_counter < locked_nodes + ) + ): + previous_node_was_unlocked = False + io.stdout('') + + for lock in locks: + lock['formatted_date'] = format_timestamp(lock['date']) + lock['formatted_expiry'] = format_timestamp(lock['expiry']) + + lengths = {} + headline = "{x} {node} ".format( + x=blue("i"), + node=bold(node_name.ljust(max_node_name_length)), + ) + + for column, title in headers: + lengths[column] = len(title) + for lock in locks: + if column == 'items': + length = max([len(selector) for selector in lock[column]]) + else: + length = len(lock[column]) + lengths[column] = max(lengths[column], length) + headline += bold(title.ljust(lengths[column] + 2)) + + io.stdout(headline.rstrip()) + for lock in locks: + for lineno, item_selectors in enumerate(lock['items']): + line = "{x} {node} ".format( + x=cyan("›"), + node=bold(node_name.ljust(max_node_name_length)), + ) + for column, title in headers: + if column == 'items': + line += lock[column][lineno].ljust(lengths[column] + 2) + elif lineno == 0: + line += lock[column].ljust(lengths[column] + 2) + else: + line += " " * (lengths[column] + 2) + io.stdout(line.rstrip()) + + output_counter += 1 diff --git a/bundlewrap/cmdline/metadata.py b/bundlewrap/cmdline/metadata.py new file mode 100644 index 0000000..fd5675a --- /dev/null +++ b/bundlewrap/cmdline/metadata.py @@ -0,0 +1,20 @@ +# -*- coding: utf-8 -*- +from __future__ import unicode_literals + +from json import dumps + +from ..metadata import MetadataJSONEncoder +from ..utils.cmdline import get_node +from ..utils.text import force_text +from ..utils.ui import io + + +def bw_metadata(repo, args): + node = get_node(repo, args['node'], adhoc_nodes=args['adhoc_nodes']) + for line in dumps( + node.metadata, + cls=MetadataJSONEncoder, + indent=4, + sort_keys=True, + ).splitlines(): + io.stdout(force_text(line)) diff --git a/bundlewrap/cmdline/nodes.py b/bundlewrap/cmdline/nodes.py new file mode 100644 index 
0000000..87e3970 --- /dev/null +++ b/bundlewrap/cmdline/nodes.py @@ -0,0 +1,70 @@ +# -*- coding: utf-8 -*- +from __future__ import unicode_literals + +from ..utils import names +from ..utils.cmdline import get_group, get_target_nodes +from ..utils.text import bold +from ..utils.ui import io +from ..group import GROUP_ATTR_DEFAULTS + + +ATTR_MAX_LENGTH = max([len(attr) for attr in GROUP_ATTR_DEFAULTS]) + + +def bw_nodes(repo, args): + if args['filter_group'] is not None: + nodes = get_group(repo, args['filter_group']).nodes + elif args['target'] is not None: + nodes = get_target_nodes(repo, args['target'], adhoc_nodes=args['adhoc_nodes']) + else: + nodes = repo.nodes + max_node_name_length = 0 if not nodes else max([len(name) for name in names(nodes)]) + for node in nodes: + if args['show_attrs']: + for attr in sorted(list(GROUP_ATTR_DEFAULTS) + ['hostname']): + io.stdout("{}\t{}\t{}".format( + node.name.ljust(max_node_name_length), + bold(attr.ljust(ATTR_MAX_LENGTH)), + getattr(node, attr), + )) + + if args['inline']: + io.stdout("{}\t{}\t{}".format( + node.name.ljust(max_node_name_length), + bold("group".ljust(ATTR_MAX_LENGTH)), + ", ".join([group.name for group in node.groups]), + )) + else: + for group in node.groups: + io.stdout("{}\t{}\t{}".format( + node.name.ljust(max_node_name_length), + bold("group".ljust(ATTR_MAX_LENGTH)), + group.name, + )) + + if args['inline']: + io.stdout("{}\t{}\t{}".format( + node.name.ljust(max_node_name_length), + bold("bundle".ljust(ATTR_MAX_LENGTH)), + ", ".join([bundle.name for bundle in node.bundles]), + )) + else: + for bundle in node.bundles: + io.stdout("{}\t{}\t{}".format( + node.name.ljust(max_node_name_length), + bold("bundle".ljust(ATTR_MAX_LENGTH)), + bundle.name, + )) + continue + line = "" + if args['show_hostnames']: + line += node.hostname + else: + line += node.name + if args['show_bundles']: + line += ": " + ", ".join(sorted(names(node.bundles))) + elif args['show_groups']: + line += ": " + ", ".join(sorted(names(node.groups))) + elif args['show_os']: + line += ": " + node.os + io.stdout(line) diff --git a/bundlewrap/cmdline/parser.py b/bundlewrap/cmdline/parser.py new file mode 100644 index 0000000..4506c0f --- /dev/null +++ b/bundlewrap/cmdline/parser.py @@ -0,0 +1,804 @@ +# -*- coding: utf-8 -*- +from __future__ import unicode_literals + +from argparse import ArgumentParser +from os import environ + +from .. 
import VERSION_STRING +from ..utils.text import mark_for_translation as _ +from .apply import bw_apply +from .debug import bw_debug +from .groups import bw_groups +from .hash import bw_hash +from .items import bw_items +from .lock import bw_lock_add, bw_lock_remove, bw_lock_show +from .metadata import bw_metadata +from .nodes import bw_nodes +from .plot import bw_plot_group, bw_plot_node, bw_plot_node_groups +from .repo import bw_repo_bundle_create, bw_repo_create, bw_repo_plugin_install, \ + bw_repo_plugin_list, bw_repo_plugin_search, bw_repo_plugin_remove, bw_repo_plugin_update +from .run import bw_run +from .stats import bw_stats +from .test import bw_test +from .verify import bw_verify +from .zen import bw_zen + + +def build_parser_bw(): + parser = ArgumentParser( + prog="bw", + description=_("BundleWrap - Config Management with Python"), + ) + parser.add_argument( + "-a", + "--add-host-keys", + action='store_true', + default=False, + dest='add_ssh_host_keys', + help=_("set StrictHostKeyChecking=no instead of yes for SSH"), + ) + parser.add_argument( + "-A", + "--adhoc-nodes", + action='store_true', + default=False, + dest='adhoc_nodes', + help=_( + "treat unknown node names as adhoc 'virtual' nodes that receive configuration only " + "through groups whose member_patterns match the node name given on the command line " + "(which also has to be a resolvable hostname)"), + ) + parser.add_argument( + "-d", + "--debug", + action='store_true', + default=False, + dest='debug', + help=_("print debugging info (implies -v)"), + ) + parser.add_argument( + "--version", + action='version', + version=VERSION_STRING, + ) + subparsers = parser.add_subparsers( + title=_("subcommands"), + help=_("use 'bw --help' for more info"), + ) + + # bw apply + help_apply = _("Applies the configuration defined in your repository to your nodes") + parser_apply = subparsers.add_parser("apply", description=help_apply, help=help_apply) + parser_apply.set_defaults(func=bw_apply) + parser_apply.add_argument( + 'target', + metavar=_("NODE1,NODE2,GROUP1,bundle:BUNDLE1..."), + type=str, + help=_("target nodes, groups and/or bundle selectors"), + ) + parser_apply.add_argument( + "-f", + "--force", + action='store_true', + default=False, + dest='force', + help=_("ignore existing hard node locks"), + ) + parser_apply.add_argument( + "-i", + "--interactive", + action='store_true', + default=False, + dest='interactive', + help=_("ask before applying each item"), + ) + bw_apply_p_default = int(environ.get("BW_NODE_WORKERS", "4")) + parser_apply.add_argument( + "-p", + "--parallel-nodes", + default=bw_apply_p_default, + dest='node_workers', + help=_("number of nodes to apply to simultaneously " + "(defaults to {})").format(bw_apply_p_default), + type=int, + ) + bw_apply_p_items_default = int(environ.get("BW_ITEM_WORKERS", "4")) + parser_apply.add_argument( + "-P", + "--parallel-items", + default=bw_apply_p_items_default, + dest='item_workers', + help=_("number of items to apply simultaneously on each node " + "(defaults to {})").format(bw_apply_p_items_default), + type=int, + ) + parser_apply.add_argument( + "--profiling", + action='store_true', + default=False, + dest='profiling', + help=_("print time elapsed for each item"), + ) + parser_apply.add_argument( + "-s", + "--skip", + default="", + dest='autoskip', + help=_( + "e.g. 
'file:/foo,tag:foo,bundle:bar,node:baz,group:frob' " + "to skip all instances of file:/foo " + "and items with tag 'foo', " + "or in bundle 'bar', " + "or on node 'baz', " + "or on a node in group 'frob'" + ), + metavar=_("SELECTOR"), + type=str, + ) + + # bw debug + help_debug = _("Start an interactive Python shell for this repository") + parser_debug = subparsers.add_parser("debug", description=help_debug, help=help_debug) + parser_debug.set_defaults(func=bw_debug) + parser_debug.add_argument( + "-c", + "--command", + default=None, + dest='command', + metavar=_("COMMAND"), + required=False, + type=str, + help=_("command to execute in lieu of REPL"), + ) + parser_debug.add_argument( + "-n", + "--node", + default=None, + dest='node', + metavar=_("NODE"), + required=False, + type=str, + help=_("name of node to inspect"), + ) + + # bw groups + help_groups = _("Lists groups in this repository (deprecated, use `bw nodes -a`)") + parser_groups = subparsers.add_parser("groups", description=help_groups, help=help_groups) + parser_groups.set_defaults(func=bw_groups) + parser_groups.add_argument( + "-n", + "--nodes", + action='store_true', + dest='show_nodes', + help=_("show nodes for each group"), + ) + + # bw hash + help_hash = _("Shows a SHA1 hash that summarizes the entire configuration for this repo, node, group, or item.") + parser_hash = subparsers.add_parser("hash", description=help_hash, help=help_hash) + parser_hash.set_defaults(func=bw_hash) + parser_hash.add_argument( + "-d", + "--dict", + action='store_true', + default=False, + dest='dict', + help=_("instead show the data this hash is derived from"), + ) + parser_hash.add_argument( + "-g", + "--group", + action='store_true', + default=False, + dest='group_membership', + help=_("hash group membership instead of configuration"), + ) + parser_hash.add_argument( + "-m", + "--metadata", + action='store_true', + default=False, + dest='metadata', + help=_("hash metadata instead of configuration (not available for items)"), + ) + parser_hash.add_argument( + 'node_or_group', + metavar=_("NODE|GROUP"), + type=str, + nargs='?', + help=_("show config hash for this node or group"), + ) + parser_hash.add_argument( + 'item', + metavar=_("ITEM"), + type=str, + nargs='?', + help=_("show config hash for this item on the given node"), + ) + + # bw items + help_items = _("List and preview items for a specific node") + parser_items = subparsers.add_parser("items", description=help_items, help=help_items) + parser_items.set_defaults(func=bw_items) + parser_items.add_argument( + 'node', + metavar=_("NODE"), + type=str, + help=_("list items for this node"), + ) + parser_items.add_argument( + "-f", + "--file-preview", + dest='file_preview', + help=_("print preview of given file"), + metavar=_("FILE"), + required=False, + type=str, + ) + parser_items.add_argument( + "-w", + "--write-file-previews", + default=None, + dest='file_preview_path', + metavar=_("DIRECTORY"), + required=False, + type=str, + help=_("create DIRECTORY and fill it with rendered file previews"), + ) + parser_items.add_argument( + "--repr", + action='store_true', + dest='show_repr', + help=_("show more verbose representation of each item"), + ) + + # bw lock + help_lock = _("Manage locks on nodes used to prevent collisions between BundleWrap users") + parser_lock = subparsers.add_parser("lock", description=help_lock, help=help_lock) + parser_lock_subparsers = parser_lock.add_subparsers() + + # bw lock add + help_lock_add = _("Add a new lock to one or more nodes") + parser_lock_add = 
parser_lock_subparsers.add_parser( + "add", + description=help_lock_add, + help=help_lock_add, + ) + parser_lock_add.set_defaults(func=bw_lock_add) + parser_lock_add.add_argument( + 'target', + metavar=_("NODE1,NODE2,GROUP1,bundle:BUNDLE1..."), + type=str, + help=_("target nodes, groups and/or bundle selectors"), + ) + parser_lock_add.add_argument( + "-c", + "--comment", + default="", + dest='comment', + help=_("brief description of the purpose of the lock"), + type=str, + ) + bw_lock_add_e_default = environ.get("BW_SOFTLOCK_EXPIRY", "8h") + parser_lock_add.add_argument( + "-e", + "--expires-in", + default=bw_lock_add_e_default, + dest='expiry', + help=_("how long before the lock is ignored and removed automatically " + "(defaults to \"{}\")").format(bw_lock_add_e_default), + type=str, + ) + parser_lock_add.add_argument( + "-i", + "--items", + default="*", + dest='items', + help=_("comma-separated list of item selectors the lock applies to " + "(defaults to \"*\" meaning all)"), + type=str, + ) + bw_lock_add_p_default = int(environ.get("BW_NODE_WORKERS", "4")) + parser_lock_add.add_argument( + "-p", + "--parallel-nodes", + default=bw_lock_add_p_default, + dest='node_workers', + help=_("number of nodes to lock simultaneously " + "(defaults to {})").format(bw_lock_add_p_default), + type=int, + ) + + # bw lock remove + help_lock_remove = _("Remove a lock from a node") + parser_lock_remove = parser_lock_subparsers.add_parser( + "remove", + description=help_lock_remove, + help=help_lock_remove, + ) + parser_lock_remove.set_defaults(func=bw_lock_remove) + parser_lock_remove.add_argument( + 'target', + metavar=_("NODE1,NODE2,GROUP1,bundle:BUNDLE1..."), + type=str, + help=_("target nodes, groups and/or bundle selectors"), + ) + parser_lock_remove.add_argument( + 'lock_id', + metavar=_("LOCK_ID"), + type=str, + help=_("ID of the lock to remove (obtained with `bw lock show`)"), + ) + bw_lock_remove_p_default = int(environ.get("BW_NODE_WORKERS", "4")) + parser_lock_remove.add_argument( + "-p", + "--parallel-nodes", + default=bw_lock_remove_p_default, + dest='node_workers', + help=_("number of nodes to remove lock from simultaneously " + "(defaults to {})").format(bw_lock_remove_p_default), + type=int, + ) + + # bw lock show + help_lock_show = _("Show details of locks present on a node") + parser_lock_show = parser_lock_subparsers.add_parser( + "show", + description=help_lock_show, + help=help_lock_show, + ) + parser_lock_show.set_defaults(func=bw_lock_show) + parser_lock_show.add_argument( + 'target', + metavar=_("NODE1,NODE2,GROUP1,bundle:BUNDLE1..."), + type=str, + help=_("target node"), + ) + bw_lock_show_p_default = int(environ.get("BW_NODE_WORKERS", "4")) + parser_lock_show.add_argument( + "-p", + "--parallel-nodes", + default=bw_lock_show_p_default, + dest='node_workers', + help=_("number of nodes to retrieve locks from simultaneously " + "(defaults to {})").format(bw_lock_show_p_default), + type=int, + ) + + # bw metadata + help_metadata = ("View a JSON representation of a node's metadata") + parser_metadata = subparsers.add_parser( + "metadata", + description=help_metadata, + help=help_metadata, + ) + parser_metadata.set_defaults(func=bw_metadata) + parser_metadata.add_argument( + 'node', + metavar=_("NODE"), + type=str, + help=_("node to print JSON-formatted metadata for"), + ) + + # bw nodes + help_nodes = _("List all nodes in this repository") + parser_nodes = subparsers.add_parser("nodes", description=help_nodes, help=help_nodes) + parser_nodes.set_defaults(func=bw_nodes) + 
parser_nodes.add_argument( + "-a", + "--attrs", + action='store_true', + dest='show_attrs', + help=_("show attributes for each node"), + ) + parser_nodes.add_argument( + "--bundles", + action='store_true', + dest='show_bundles', + help=_("show bundles for each node (deprecated, use --attrs)"), + ) + parser_nodes.add_argument( + "--hostnames", + action='store_true', + dest='show_hostnames', + help=_("show hostnames instead of node names (deprecated, use --attrs)"), + ) + parser_nodes.add_argument( + "-g", + "--filter-group", + default=None, + dest='filter_group', + metavar=_("GROUP"), + required=False, + type=str, + help=_("show only nodes in the given group (deprecated)"), + ) + parser_nodes.add_argument( + "--groups", + action='store_true', + dest='show_groups', + help=_("show group membership for each node (deprecated, use --attrs)"), + ) + parser_nodes.add_argument( + "-i", + "--inline", + action='store_true', + dest='inline', + help=_("show multiple values on the same line (use with --attrs)"), + ) + parser_nodes.add_argument( + "--os", + action='store_true', + dest='show_os', + help=_("show OS for each node (deprecated, use --attrs)"), + ) + parser_nodes.add_argument( + 'target', + default=None, + metavar=_("NODE1,NODE2,GROUP1,bundle:BUNDLE1..."), + nargs='?', + type=str, + help=_("filter according to nodes, groups and/or bundle selectors"), + ) + + # bw plot + help_plot = _("Generates DOT output that can be piped into `dot -Tsvg -ooutput.svg`. " + "The resulting output.svg can be viewed using most browsers.") + parser_plot = subparsers.add_parser("plot", description=help_plot, help=help_plot) + parser_plot_subparsers = parser_plot.add_subparsers() + + # bw plot group + help_plot_group = _("Plot subgroups and node members for the given group " + "or the entire repository") + parser_plot_subparsers_group = parser_plot_subparsers.add_parser( + "group", + description=help_plot_group, + help=help_plot_group, + ) + parser_plot_subparsers_group.set_defaults(func=bw_plot_group) + parser_plot_subparsers_group.add_argument( + 'group', + default=None, + metavar=_("GROUP"), + nargs='?', + type=str, + help=_("group to plot"), + ) + parser_plot_subparsers_group.add_argument( + "-N", "--no-nodes", + action='store_false', + dest='show_nodes', + help=_("do not include nodes in output"), + ) + + # bw plot node + help_plot_node = _("Plot items and their dependencies for the given node") + parser_plot_subparsers_node = parser_plot_subparsers.add_parser( + "node", + description=help_plot_node, + help=help_plot_node, + ) + parser_plot_subparsers_node.set_defaults(func=bw_plot_node) + parser_plot_subparsers_node.add_argument( + 'node', + metavar=_("NODE"), + type=str, + help=_("node to plot"), + ) + parser_plot_subparsers_node.add_argument( + "--no-cluster", + action='store_false', + dest='cluster', + help=_("do not cluster items by bundle"), + ) + parser_plot_subparsers_node.add_argument( + "--no-depends-auto", + action='store_false', + dest='depends_auto', + help=_("do not show auto-generated dependencies and items"), + ) + parser_plot_subparsers_node.add_argument( + "--no-depends-conc", + action='store_false', + dest='depends_concurrency', + help=_("do not show concurrency blocker dependencies"), + ) + parser_plot_subparsers_node.add_argument( + "--no-depends-regular", + action='store_false', + dest='depends_regular', + help=_("do not show regular user-defined dependencies"), + ) + parser_plot_subparsers_node.add_argument( + "--no-depends-reverse", + action='store_false', + dest='depends_reverse', + 
help=_("do not show reverse dependencies ('needed_by')"), + ) + parser_plot_subparsers_node.add_argument( + "--no-depends-static", + action='store_false', + dest='depends_static', + help=_("do not show static dependencies"), + ) + + # bw plot groups-for-node + help_plot_node_groups = _("Show where a specific node gets its groups from") + parser_plot_subparsers_node_groups = parser_plot_subparsers.add_parser( + "groups-for-node", + description=help_plot_node_groups, + help=help_plot_node_groups, + ) + parser_plot_subparsers_node_groups.set_defaults(func=bw_plot_node_groups) + parser_plot_subparsers_node_groups.add_argument( + 'node', + metavar=_("NODE"), + type=str, + help=_("node to plot"), + ) + + # bw repo + help_repo = _("Various subcommands to manipulate your repository") + parser_repo = subparsers.add_parser("repo", description=help_repo, help=help_repo) + parser_repo_subparsers = parser_repo.add_subparsers() + + # bw repo bundle + parser_repo_subparsers_bundle = parser_repo_subparsers.add_parser("bundle") + parser_repo_subparsers_bundle_subparsers = parser_repo_subparsers_bundle.add_subparsers() + + # bw repo bundle create + parser_repo_subparsers_bundle_create = \ + parser_repo_subparsers_bundle_subparsers.add_parser("create") + parser_repo_subparsers_bundle_create.set_defaults(func=bw_repo_bundle_create) + parser_repo_subparsers_bundle_create.add_argument( + 'bundle', + metavar=_("BUNDLE"), + type=str, + help=_("name of bundle to create"), + ) + + # bw repo create + parser_repo_subparsers_create = parser_repo_subparsers.add_parser("create") + parser_repo_subparsers_create.set_defaults(func=bw_repo_create) + + # bw repo plugin + parser_repo_subparsers_plugin = parser_repo_subparsers.add_parser("plugin") + parser_repo_subparsers_plugin_subparsers = parser_repo_subparsers_plugin.add_subparsers() + + # bw repo plugin install + parser_repo_subparsers_plugin_install = parser_repo_subparsers_plugin_subparsers.add_parser("install") + parser_repo_subparsers_plugin_install.set_defaults(func=bw_repo_plugin_install) + parser_repo_subparsers_plugin_install.add_argument( + 'plugin', + metavar=_("PLUGIN_NAME"), + type=str, + help=_("name of plugin to install"), + ) + parser_repo_subparsers_plugin_install.add_argument( + "-f", + "--force", + action='store_true', + dest='force', + help=_("overwrite existing files when installing"), + ) + + # bw repo plugin list + parser_repo_subparsers_plugin_list = parser_repo_subparsers_plugin_subparsers.add_parser("list") + parser_repo_subparsers_plugin_list.set_defaults(func=bw_repo_plugin_list) + + # bw repo plugin remove + parser_repo_subparsers_plugin_remove = parser_repo_subparsers_plugin_subparsers.add_parser("remove") + parser_repo_subparsers_plugin_remove.set_defaults(func=bw_repo_plugin_remove) + parser_repo_subparsers_plugin_remove.add_argument( + 'plugin', + metavar=_("PLUGIN_NAME"), + type=str, + help=_("name of plugin to remove"), + ) + parser_repo_subparsers_plugin_remove.add_argument( + "-f", + "--force", + action='store_true', + dest='force', + help=_("remove files even if locally modified"), + ) + + # bw repo plugin search + parser_repo_subparsers_plugin_search = parser_repo_subparsers_plugin_subparsers.add_parser("search") + parser_repo_subparsers_plugin_search.set_defaults(func=bw_repo_plugin_search) + parser_repo_subparsers_plugin_search.add_argument( + 'term', + metavar=_("SEARCH_STRING"), + nargs='?', + type=str, + help=_("look for this string in plugin names and descriptions"), + ) + + # bw repo plugin update + 
parser_repo_subparsers_plugin_update = parser_repo_subparsers_plugin_subparsers.add_parser("update") + parser_repo_subparsers_plugin_update.set_defaults(func=bw_repo_plugin_update) + parser_repo_subparsers_plugin_update.add_argument( + 'plugin', + default=None, + metavar=_("PLUGIN_NAME"), + nargs='?', + type=str, + help=_("name of plugin to update"), + ) + parser_repo_subparsers_plugin_update.add_argument( + "-c", + "--check-only", + action='store_true', + dest='check_only', + help=_("only show what would be updated"), + ) + parser_repo_subparsers_plugin_update.add_argument( + "-f", + "--force", + action='store_true', + dest='force', + help=_("overwrite local modifications when updating"), + ) + + # bw run + help_run = _("Run a one-off command on a number of nodes") + parser_run = subparsers.add_parser("run", description=help_run, help=help_run) + parser_run.set_defaults(func=bw_run) + parser_run.add_argument( + 'target', + metavar=_("NODE1,NODE2,GROUP1,bundle:BUNDLE1..."), + type=str, + help=_("target nodes, groups and/or bundle selectors"), + ) + parser_run.add_argument( + 'command', + metavar=_("COMMAND"), + type=str, + help=_("command to run"), + ) + parser_run.add_argument( + "-f", + "--may-fail", + action='store_true', + dest='may_fail', + help=_("ignore non-zero exit codes"), + ) + parser_run.add_argument( + "--force", + action='store_true', + dest='ignore_locks', + help=_("ignore soft locks on target nodes"), + ) + bw_run_p_default = int(environ.get("BW_NODE_WORKERS", "1")) + parser_run.add_argument( + "-p", + "--parallel-nodes", + default=bw_run_p_default, + dest='node_workers', + help=_("number of nodes to run command on simultaneously " + "(defaults to {})").format(bw_run_p_default), + type=int, + ) + + # bw stats + help_stats = _("Show some statistics about your repository") + parser_stats = subparsers.add_parser("stats", description=help_stats, help=help_stats) + parser_stats.set_defaults(func=bw_stats) + + # bw test + help_test = _("Test your repository for consistency " + "(you can use this with a CI tool like Jenkins)") + parser_test = subparsers.add_parser("test", description=help_test, help=help_test) + parser_test.set_defaults(func=bw_test) + parser_test.add_argument( + 'target', + default=None, + metavar=_("NODE1,NODE2,GROUP1,bundle:BUNDLE1..."), + nargs='?', + type=str, + help=_("target nodes, groups and/or bundle selectors"), + ) + parser_test.add_argument( + "-c", + "--plugin-conflict-error", + action='store_true', + dest='plugin_conflict_error', + help=_("check for local modifications to files installed by plugins"), + ) + parser_test.add_argument( + "-d", + "--config-determinism", + default=0, + dest='determinism_config', + help=_("verify determinism of configuration by running `bw hash` N times " + "and checking for consistent results (with N > 1)"), + metavar="N", + type=int, + ) + parser_test.add_argument( + "-i", + "--ignore-missing-faults", + action='store_true', + dest='ignore_missing_faults', + help=_("do not fail when encountering a missing Fault"), + ) + parser_test.add_argument( + "-m", + "--metadata-determinism", + default=0, + dest='determinism_metadata', + help=_("verify determinism of metadata by running `bw hash -m` N times " + "and checking for consistent results (with N > 1)"), + metavar="N", + type=int, + ) + bw_test_p_default = int(environ.get("BW_NODE_WORKERS", "1")) + parser_test.add_argument( + "-p", + "--parallel-nodes", + default=bw_test_p_default, + dest='node_workers', + help=_("number of nodes to test simultaneously " + "(defaults to 
{})").format(bw_test_p_default), + type=int, + ) + bw_test_p_items_default = int(environ.get("BW_ITEM_WORKERS", "4")) + parser_test.add_argument( + "-P", + "--parallel-items", + default=bw_test_p_items_default, + dest='item_workers', + help=_("number of items to test simultaneously for each node " + "(defaults to {})").format(bw_test_p_items_default), + type=int, + ) + + # bw verify + help_verify = _("Inspect the health or 'correctness' of a node without changing it") + parser_verify = subparsers.add_parser("verify", description=help_verify, help=help_verify) + parser_verify.set_defaults(func=bw_verify) + parser_verify.add_argument( + 'target', + metavar=_("NODE1,NODE2,GROUP1,bundle:BUNDLE1..."), + type=str, + help=_("target nodes, groups and/or bundle selectors"), + ) + parser_verify.add_argument( + "-a", + "--show-all", + action='store_true', + dest='show_all', + help=_("show correct items as well as incorrect ones"), + ) + bw_verify_p_default = int(environ.get("BW_NODE_WORKERS", "4")) + parser_verify.add_argument( + "-p", + "--parallel-nodes", + default=bw_verify_p_default, + dest='node_workers', + help=_("number of nodes to verify simultaneously " + "(defaults to {})").format(bw_verify_p_default), + type=int, + ) + bw_verify_p_items_default = int(environ.get("BW_ITEM_WORKERS", "4")) + parser_verify.add_argument( + "-P", + "--parallel-items", + default=bw_verify_p_items_default, + dest='item_workers', + help=_("number of items to verify simultaneously on each node " + "(defaults to {})").format(bw_verify_p_items_default), + type=int, + ) + parser_verify.add_argument( + "-S", + "--no-summary", + action='store_false', + dest='summary', + help=_("don't show stats summary"), + ) + + # bw zen + parser_zen = subparsers.add_parser("zen") + parser_zen.set_defaults(func=bw_zen) + return parser diff --git a/bundlewrap/cmdline/plot.py b/bundlewrap/cmdline/plot.py new file mode 100644 index 0000000..d64422d --- /dev/null +++ b/bundlewrap/cmdline/plot.py @@ -0,0 +1,131 @@ +# -*- coding: utf-8 -*- +from __future__ import unicode_literals + +import re + +from ..deps import prepare_dependencies +from ..utils import graph_for_items, names +from ..utils.cmdline import get_group, get_node +from ..utils.ui import io + + +def bw_plot_group(repo, args): + group = get_group(repo, args['group']) if args['group'] else None + + if args['show_nodes']: + nodes = group.nodes if group else repo.nodes + else: + nodes = [] + + if group: + groups = [group] + groups.extend(group.subgroups) + else: + groups = repo.groups + + for line in plot_group(groups, nodes, args['show_nodes']): + io.stdout(line) + + +def plot_group(groups, nodes, show_nodes): + yield "digraph bundlewrap" + yield "{" + + # Print subgraphs *below* each other + yield "rankdir = LR" + + # Global attributes + yield ("node [color=\"#303030\"; " + "fillcolor=\"#303030\"; " + "fontname=Helvetica]") + yield "edge [arrowhead=vee]" + + for group in groups: + yield "\"{}\" [fontcolor=white,style=filled];".format(group.name) + + for node in nodes: + yield "\"{}\" [fontcolor=\"#303030\",shape=box,style=rounded];".format(node.name) + + for group in groups: + for subgroup in group.immediate_subgroup_names: + yield "\"{}\" -> \"{}\" [color=\"#6BB753\",penwidth=2]".format(group.name, subgroup) + + if show_nodes: + for group in groups: + for node in group._nodes_from_members: + yield "\"{}\" -> \"{}\" [color=\"#D18C57\",penwidth=2]".format( + group.name, node.name) + + for node in group._nodes_from_patterns: + yield "\"{}\" -> \"{}\" 
[color=\"#714D99\",penwidth=2]".format( + group.name, node.name) + + for node in nodes: + if group in node._groups_dynamic: + yield "\"{}\" -> \"{}\" [color=\"#FF0000\",penwidth=2]".format( + group.name, node.name) + + yield "}" + + +def bw_plot_node(repo, args): + node = get_node(repo, args['node'], adhoc_nodes=args['adhoc_nodes']) + for line in graph_for_items( + node.name, + prepare_dependencies(node.items), + cluster=args['cluster'], + concurrency=args['depends_concurrency'], + static=args['depends_static'], + regular=args['depends_regular'], + reverse=args['depends_reverse'], + auto=args['depends_auto'], + ): + io.stdout(line) + + +def bw_plot_node_groups(repo, args): + node = get_node(repo, args['node'], adhoc_nodes=args['adhoc_nodes']) + for line in plot_node_groups(node): + io.stdout(line) + + +def plot_node_groups(node): + yield "digraph bundlewrap" + yield "{" + + # Print subgraphs *below* each other + yield "rankdir = LR" + + # Global attributes + yield ("node [color=\"#303030\"; " + "fillcolor=\"#303030\"; " + "fontname=Helvetica]") + yield "edge [arrowhead=vee]" + + for group in node.groups: + yield "\"{}\" [fontcolor=white,style=filled];".format(group.name) + + yield "\"{}\" [fontcolor=\"#303030\",shape=box,style=rounded];".format(node.name) + + for group in node.groups: + for subgroup in group.immediate_subgroup_names: + if subgroup in names(node.groups): + yield "\"{}\" -> \"{}\" [color=\"#6BB753\",penwidth=2]".format(group.name, subgroup) + for pattern in group.immediate_subgroup_patterns: + compiled_pattern = re.compile(pattern) + for group2 in node.groups: + if compiled_pattern.search(group2.name) is not None and group2 != group: + yield "\"{}\" -> \"{}\" [color=\"#6BB753\",penwidth=2]".format(group.name, group2.name) + + for group in node.groups: + if node in group._nodes_from_members: + yield "\"{}\" -> \"{}\" [color=\"#D18C57\",penwidth=2]".format( + group.name, node.name) + elif node in group._nodes_from_patterns: + yield "\"{}\" -> \"{}\" [color=\"#714D99\",penwidth=2]".format( + group.name, node.name) + elif group in node._groups_dynamic: + yield "\"{}\" -> \"{}\" [color=\"#FF0000\",penwidth=2]".format( + group.name, node.name) + + yield "}" diff --git a/bundlewrap/cmdline/repo.py b/bundlewrap/cmdline/repo.py new file mode 100644 index 0000000..a4e4406 --- /dev/null +++ b/bundlewrap/cmdline/repo.py @@ -0,0 +1,95 @@ +# -*- coding: utf-8 -*- +from __future__ import unicode_literals + +from sys import exit + +from ..exceptions import NoSuchPlugin, PluginLocalConflict +from ..plugins import PluginManager +from ..repo import Repository +from ..utils.text import blue, bold, mark_for_translation as _, red +from ..utils.ui import io + + +def bw_repo_bundle_create(repo, args): + repo.create_bundle(args['bundle']) + + +def bw_repo_create(path, args): + Repository.create(path) + + +def bw_repo_plugin_install(repo, args): + pm = PluginManager(repo.path) + try: + manifest = pm.install(args['plugin'], force=args['force']) + io.stdout(_("{x} Installed '{plugin}' (v{version})").format( + x=blue("i"), + plugin=args['plugin'], + version=manifest['version'], + )) + if 'help' in manifest: + io.stdout("") + for line in manifest['help'].split("\n"): + io.stdout(line) + except NoSuchPlugin: + io.stderr(_("{x} No such plugin: {plugin}").format(x=red("!!!"), plugin=args['plugin'])) + exit(1) + except PluginLocalConflict as e: + io.stderr(_("{x} Plugin installation failed: {reason}").format( + reason=e.message, + x=red("!!!"), + )) + exit(1) + + +def bw_repo_plugin_list(repo, args): + pm = 
PluginManager(repo.path) + for plugin, version in pm.list(): + io.stdout(_("{plugin} (v{version})").format(plugin=plugin, version=version)) + + +def bw_repo_plugin_remove(repo, args): + pm = PluginManager(repo.path) + try: + pm.remove(args['plugin'], force=args['force']) + except NoSuchPlugin: + io.stdout(_("{x} Plugin '{plugin}' is not installed").format( + x=red("!!!"), + plugin=args['plugin'], + )) + exit(1) + + +def bw_repo_plugin_search(repo, args): + pm = PluginManager(repo.path) + for plugin, desc in pm.search(args['term']): + io.stdout(_("{plugin} {desc}").format(desc=desc, plugin=bold(plugin))) + + +def bw_repo_plugin_update(repo, args): + pm = PluginManager(repo.path) + if args['plugin']: + old_version, new_version = pm.update( + args['plugin'], + check_only=args['check_only'], + force=args['force'], + ) + if old_version != new_version: + io.stdout(_("{plugin} {old_version} → {new_version}").format( + new_version=new_version, + old_version=old_version, + plugin=bold(args['plugin']), + )) + else: + for plugin, version in pm.list(): + old_version, new_version = pm.update( + plugin, + check_only=args['check_only'], + force=args['force'], + ) + if old_version != new_version: + io.stdout(_("{plugin} {old_version} → {new_version}").format( + new_version=new_version, + old_version=old_version, + plugin=bold(plugin), + )) diff --git a/bundlewrap/cmdline/run.py b/bundlewrap/cmdline/run.py new file mode 100644 index 0000000..96deefb --- /dev/null +++ b/bundlewrap/cmdline/run.py @@ -0,0 +1,127 @@ +# -*- coding: utf-8 -*- +from __future__ import unicode_literals + +from datetime import datetime + +from ..concurrency import WorkerPool +from ..exceptions import NodeLockedException +from ..utils.cmdline import get_target_nodes +from ..utils.text import mark_for_translation as _ +from ..utils.text import bold, error_summary, green, red, yellow +from ..utils.ui import io + + +def run_on_node(node, command, may_fail, ignore_locks, log_output): + if node.dummy: + io.stdout(_("{x} {node} is a dummy node").format(node=bold(node.name), x=yellow("!"))) + return + + node.repo.hooks.node_run_start( + node.repo, + node, + command, + ) + + start = datetime.now() + result = node.run( + command, + may_fail=may_fail, + log_output=log_output, + ) + end = datetime.now() + duration = end - start + + node.repo.hooks.node_run_end( + node.repo, + node, + command, + duration=duration, + return_code=result.return_code, + stdout=result.stdout, + stderr=result.stderr, + ) + + if result.return_code == 0: + io.stdout("{x} {node} {msg}".format( + msg=_("completed successfully after {time}s").format( + time=duration.total_seconds(), + ), + node=bold(node.name), + x=green("✓"), + )) + else: + io.stderr("{x} {node} {msg}".format( + msg=_("failed after {time}s (return code {rcode})").format( + rcode=result.return_code, + time=duration.total_seconds(), + ), + node=bold(node.name), + x=red("✘"), + )) + + +def bw_run(repo, args): + errors = [] + target_nodes = get_target_nodes(repo, args['target'], adhoc_nodes=args['adhoc_nodes']) + pending_nodes = target_nodes[:] + + repo.hooks.run_start( + repo, + args['target'], + target_nodes, + args['command'], + ) + start_time = datetime.now() + + def tasks_available(): + return bool(pending_nodes) + + def next_task(): + node = pending_nodes.pop() + return { + 'target': run_on_node, + 'task_id': node.name, + 'args': ( + node, + args['command'], + args['may_fail'], + args['ignore_locks'], + True, + ), + } + + def handle_exception(task_id, exception, traceback): + if isinstance(exception, 
NodeLockedException): + msg = _( + "{node_bold} locked by {user} " + "(see `bw lock show {node}` for details)" + ).format( + node_bold=bold(task_id), + node=task_id, + user=exception.args[0]['user'], + ) + else: + msg = "{} {}".format(bold(task_id), exception) + io.stderr(traceback) + io.stderr(repr(exception)) + io.stderr("{} {}".format(red("!"), msg)) + errors.append(msg) + + worker_pool = WorkerPool( + tasks_available, + next_task, + handle_exception=handle_exception, + pool_id="run", + workers=args['node_workers'], + ) + worker_pool.run() + + error_summary(errors) + + repo.hooks.run_end( + repo, + args['target'], + target_nodes, + args['command'], + duration=datetime.now() - start_time, + ) diff --git a/bundlewrap/cmdline/stats.py b/bundlewrap/cmdline/stats.py new file mode 100644 index 0000000..8d13e80 --- /dev/null +++ b/bundlewrap/cmdline/stats.py @@ -0,0 +1,20 @@ +# -*- coding: utf-8 -*- +from __future__ import unicode_literals + +from operator import itemgetter + +from ..utils.text import mark_for_translation as _ +from ..utils.ui import io + + +def bw_stats(repo, args): + io.stdout(_("{} nodes").format(len(repo.nodes))) + io.stdout(_("{} groups").format(len(repo.groups))) + io.stdout(_("{} items").format(sum([len(list(node.items)) for node in repo.nodes]))) + items = {} + for node in repo.nodes: + for item in node.items: + items.setdefault(item.ITEM_TYPE_NAME, 0) + items[item.ITEM_TYPE_NAME] += 1 + for item_type, count in sorted(items.items(), key=itemgetter(1), reverse=True): + io.stdout(" {} {}".format(count, item_type)) diff --git a/bundlewrap/cmdline/test.py b/bundlewrap/cmdline/test.py new file mode 100644 index 0000000..ae7e770 --- /dev/null +++ b/bundlewrap/cmdline/test.py @@ -0,0 +1,136 @@ +# -*- coding: utf-8 -*- +from __future__ import unicode_literals + +from copy import copy +from sys import exit + +from ..concurrency import WorkerPool +from ..plugins import PluginManager +from ..repo import Repository +from ..utils.cmdline import get_target_nodes +from ..utils.text import bold, green, mark_for_translation as _, red +from ..utils.ui import io + + +def bw_test(repo, args): + if args['target']: + pending_nodes = get_target_nodes(repo, args['target'], adhoc_nodes=args['adhoc_nodes']) + else: + pending_nodes = copy(list(repo.nodes)) + + def tasks_available(): + return bool(pending_nodes) + + def next_task(): + node = pending_nodes.pop() + return { + 'target': node.test, + 'task_id': node.name, + 'kwargs': { + 'ignore_missing_faults': args['ignore_missing_faults'], + 'workers': args['item_workers'], + }, + } + + worker_pool = WorkerPool( + tasks_available, + next_task, + pool_id="test", + workers=args['node_workers'], + ) + worker_pool.run() + + checked_groups = [] + for group in repo.groups: + if group in checked_groups: + continue + with io.job(_(" {group} checking for subgroup loops...").format(group=group.name)): + checked_groups.extend(group.subgroups) # the subgroups property has the check built in + io.stdout(_("{x} {group} has no subgroup loops").format( + x=green("✓"), + group=bold(group.name), + )) + + # check for plugin inconsistencies + if args['plugin_conflict_error']: + pm = PluginManager(repo.path) + for plugin, version in pm.list(): + local_changes = pm.local_modifications(plugin) + if local_changes: + io.stderr(_("{x} Plugin '{plugin}' has local modifications:").format( + plugin=plugin, + x=red("✘"), + )) + for path, actual_checksum, should_checksum in local_changes: + io.stderr(_("\t{path} ({actual_checksum}) should be {should_checksum}").format( + 
actual_checksum=actual_checksum, + path=path, + should_checksum=should_checksum, + )) + exit(1) + else: + io.stdout(_("{x} Plugin '{plugin}' has no local modifications.").format( + plugin=plugin, + x=green("✓"), + )) + + # generate metadata a couple of times for every node and see if + # anything changes between iterations + if args['determinism_metadata'] > 1: + hashes = {} + for i in range(args['determinism_metadata']): + repo = Repository(repo.path) + if args['target']: + nodes = get_target_nodes(repo, args['target'], adhoc_nodes=args['adhoc_nodes']) + else: + nodes = repo.nodes + for node in nodes: + with io.job(_(" {node} generating metadata ({i}/{n})... ").format( + i=i + 1, + n=args['determinism_metadata'], + node=node.name, + )): + result = node.metadata_hash() + hashes.setdefault(node.name, result) + if hashes[node.name] != result: + io.stderr(_( + "{x} Metadata for node {node} changed when generated repeatedly " + "(use `bw hash -d {node}` to debug)" + ).format(node=node.name, x=red("✘"))) + exit(1) + io.stdout(_("{x} Metadata remained the same after being generated {n} times").format( + n=args['determinism_metadata'], + x=green("✓"), + )) + + # generate configuration a couple of times for every node and see if + # anything changes between iterations + if args['determinism_config'] > 1: + hashes = {} + for i in range(args['determinism_config']): + repo = Repository(repo.path) + if args['target']: + nodes = get_target_nodes(repo, args['target'], adhoc_nodes=args['adhoc_nodes']) + else: + nodes = repo.nodes + for node in nodes: + with io.job(_(" {node} generating configuration ({i}/{n})...").format( + i=i + 1, + n=args['determinism_config'], + node=node.name, + )): + result = node.hash() + hashes.setdefault(node.name, result) + if hashes[node.name] != result: + io.stderr(_( + "{x} Configuration for node {node} changed when generated repeatedly " + "(use `bw hash -d {node}` to debug)" + ).format(node=node.name, x=red("✘"))) + exit(1) + io.stdout(_("{x} Configuration remained the same after being generated {n} times").format( + n=args['determinism_config'], + x=green("✓"), + )) + + if not args['target']: + repo.hooks.test(repo) diff --git a/bundlewrap/cmdline/verify.py b/bundlewrap/cmdline/verify.py new file mode 100644 index 0000000..5b70239 --- /dev/null +++ b/bundlewrap/cmdline/verify.py @@ -0,0 +1,113 @@ +# -*- coding: utf-8 -*- +from __future__ import unicode_literals + +from sys import exit + +from ..concurrency import WorkerPool +from ..utils.cmdline import get_target_nodes +from ..utils.text import error_summary, mark_for_translation as _ +from ..utils.ui import io + + +def stats_summary(node_stats): + for node in node_stats.keys(): + node_stats[node]['total'] = node_stats[node]['good'] + node_stats[node]['bad'] + try: + node_stats[node]['health'] = \ + (node_stats[node]['good'] / float(node_stats[node]['total'])) * 100.0 + except ZeroDivisionError: + node_stats[node]['health'] = 0 + + total_items = 0 + total_good = 0 + + node_ranking = [] + + for node_name, stats in node_stats.items(): + total_items += stats['total'] + total_good += stats['good'] + node_ranking.append(( + stats['health'], + node_name, + stats['good'], + stats['total'], + )) + + node_ranking = sorted(node_ranking) + node_ranking.reverse() + + try: + overall_health = (total_good / float(total_items)) * 100.0 + except ZeroDivisionError: + overall_health = 0 + + if len(node_ranking) == 1: + io.stdout(_("node health: {health:.1f}% ({good}/{total} OK)").format( + good=node_ranking[0][2], + 
health=node_ranking[0][0], + total=node_ranking[0][3], + )) + else: + io.stdout(_("node health:")) + for health, node_name, good, total in node_ranking: + io.stdout(_(" {health}% {node_name} ({good}/{total} OK)").format( + good=good, + health="{:.1f}".format(health).rjust(5, " "), + node_name=node_name, + total=total, + )) + io.stdout(_("overall: {health:.1f}% ({good}/{total} OK)").format( + good=total_good, + health=overall_health, + total=total_items, + )) + + +def bw_verify(repo, args): + errors = [] + node_stats = {} + pending_nodes = get_target_nodes(repo, args['target'], adhoc_nodes=args['adhoc_nodes']) + + def tasks_available(): + return bool(pending_nodes) + + def next_task(): + node = pending_nodes.pop() + return { + 'target': node.verify, + 'task_id': node.name, + 'kwargs': { + 'show_all': args['show_all'], + 'workers': args['item_workers'], + }, + } + + def handle_result(task_id, return_value, duration): + node_stats[task_id] = return_value + + def handle_exception(task_id, exception, traceback): + msg = "{}: {}".format( + task_id, + exception, + ) + io.stderr(traceback) + io.stderr(repr(exception)) + io.stderr(msg) + errors.append(msg) + + worker_pool = WorkerPool( + tasks_available, + next_task, + handle_result=handle_result, + handle_exception=handle_exception, + pool_id="verify", + workers=args['node_workers'], + ) + worker_pool.run() + + if args['summary']: + stats_summary(node_stats) + + error_summary(errors) + + exit(1 if errors else 0) diff --git a/bundlewrap/cmdline/zen.py b/bundlewrap/cmdline/zen.py new file mode 100644 index 0000000..c13dcb1 --- /dev/null +++ b/bundlewrap/cmdline/zen.py @@ -0,0 +1,41 @@ +# -*- coding: utf-8 -*- +from __future__ import unicode_literals + +from ..utils.text import mark_for_translation as _ +from ..utils.ui import io + +ZEN = _(""" + , + @@ + @@@@ + @@@@@ + @@@@@ + @@@@@ + @@@@@ + @@@@@ + @@@@@ '@@@@@@, .@@@@@@+ +@@@@@@. + @@@@@@, `@@@@@@@ +@@@@@@, `@@@@@@# + @@@@@@@@+ :@@@@@@' `@@@@@@@ ;@@@@@@: + @@@@@@@@@@@` #@@@@@@. :@@@@@@' @@@@@@@` + @@@@@ ;@@@@@@; .@@@@@@# #@@@@@@` ,@@@@@@+ + @@@@@ `@@@@@@#'@@@@@@: .@@@@@@+ +@@@@@@. + @@@@@ +@@@@@@@@@ +@@@@@@, `@@@@@@# + @@@@@ ,@@@@@@+ `@@@@@@@@@` ;@@@@@@: + @@@@@ @@@@@@@` :@@@@@@'@@@@@@' @@@@@@@` + @@@@@ ;@@@@@@#@@@@@@` `@@@@@@@@@@@@@+ + @@@@@@@@@@@@@@@@@@@@@@@@@@@@@# +@@@@@@@@. + @@@@@@@@@@@@@@@@@@@@@@@@@@@, .@@@# + + + The Zen of BundleWrap + ───────────────────── + + BundleWrap is a tool, not a solution. + BundleWrap will not write your configuration for you. + BundleWrap is Python all the way down. + BundleWrap will adapt rather than grow. + BundleWrap is the single point of truth. +""") + +def bw_zen(repo, args): + io.stdout(ZEN) diff --git a/bundlewrap/concurrency.py b/bundlewrap/concurrency.py new file mode 100644 index 0000000..a1db286 --- /dev/null +++ b/bundlewrap/concurrency.py @@ -0,0 +1,181 @@ +# -*- coding: utf-8 -*- +from __future__ import unicode_literals + +from concurrent.futures import ThreadPoolExecutor, wait, FIRST_COMPLETED +from datetime import datetime +from random import randint +from sys import exit +from traceback import format_tb + +from .utils.text import mark_for_translation as _ +from .utils.ui import io, QUIT_EVENT + +JOIN_TIMEOUT = 5 # seconds + + +class WorkerPool(object): + """ + Manages a bunch of worker threads. 
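+
+    A rough usage sketch (the callbacks and names here are invented
+    for illustration, not part of this module):
+
+        pool = WorkerPool(
+            tasks_available=lambda: bool(queue),
+            next_task=lambda: {'target': work, 'task_id': queue.pop()},
+            workers=4,
+        )
+        results = pool.run()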
+ """ + def __init__( + self, + tasks_available, + next_task, + handle_result=None, + handle_exception=None, + pool_id=None, + workers=4, + ): + if workers < 1: + raise ValueError(_("at least one worker is required")) + + self.tasks_available = tasks_available + self.next_task = next_task + self.handle_result = handle_result + self.handle_exception = handle_exception + + self.number_of_workers = workers + self.idle_workers = set(range(self.number_of_workers)) + + self.pool_id = "unnamed_pool_{}".format(randint(1, 99999)) if pool_id is None else pool_id + self.pending_futures = {} + + def _get_result(self): + """ + Blocks until a result from a worker is received. + """ + io.debug(_("worker pool {pool} waiting for next task to complete").format( + pool=self.pool_id, + )) + while True: + # we must use a timeout here to allow Python <3.3 to call + # its SIGINT handler + # see also http://stackoverflow.com/q/25676835 + completed, pending = wait( + self.pending_futures.keys(), + return_when=FIRST_COMPLETED, + timeout=0.1, + ) + if completed: + break + future = completed.pop() + + start_time = self.pending_futures[future]['start_time'] + task_id = self.pending_futures[future]['task_id'] + worker_id = self.pending_futures[future]['worker_id'] + + del self.pending_futures[future] + self.idle_workers.add(worker_id) + + exception = future.exception() + if exception: + io.debug(_( + "exception raised while executing task {task} on worker #{worker} " + "of worker pool {pool}" + ).format( + pool=self.pool_id, + task=task_id, + worker=worker_id, + )) + if not hasattr(exception, '__traceback__'): # Python 2 + exception.__traceback__ = future.exception_info()[1] + exception.__task_id = task_id + raise exception + else: + io.debug(_( + "worker pool {pool} delivering result of {task} on worker #{worker}" + ).format( + pool=self.pool_id, + task=task_id, + worker=worker_id, + )) + return (task_id, future.result(), datetime.now() - start_time) + + def start_task(self, target=None, task_id=None, args=None, kwargs=None): + """ + target any callable (includes bound methods) + task_id something to remember this worker by + args list of positional arguments passed to target + kwargs dictionary of keyword arguments passed to target + """ + if args is None: + args = [] + else: + args = list(args) + if kwargs is None: + kwargs = {} + + task_id = "unnamed_task_{}".format(randint(1, 99999)) if task_id is None else task_id + worker_id = self.idle_workers.pop() + + io.debug(_("worker pool {pool} is starting task {task} on worker #{worker}").format( + pool=self.pool_id, + task=task_id, + worker=worker_id, + )) + self.pending_futures[self.executor.submit(target, *args, **kwargs)] = { + 'start_time': datetime.now(), + 'task_id': task_id, + 'worker_id': worker_id, + } + + def run(self): + io.debug(_("spinning up worker pool {pool}").format(pool=self.pool_id)) + processed_results = [] + exit_code = None + self.executor = ThreadPoolExecutor(max_workers=self.number_of_workers) + try: + while ( + (self.tasks_available() and not QUIT_EVENT.is_set()) or + self.workers_are_running + ): + while ( + self.tasks_available() and + self.workers_are_available and + not QUIT_EVENT.is_set() + ): + task = self.next_task() + if task is not None: + self.start_task(**task) + + if self.workers_are_running: + try: + result = self._get_result() + except SystemExit as exc: + if exit_code is None: + # Don't overwrite exit code if it has already been set. 
+                            # This may be a worker exiting with 0 only because
+                            # a previous worker raised SystemExit with 1.
+                            # We must preserve that original exit code.
+                            exit_code = exc.code
+                        # just make sure QUIT_EVENT is set and continue
+                        # waiting for pending results
+                        QUIT_EVENT.set()
+                    except Exception as exc:
+                        traceback = "".join(format_tb(exc.__traceback__))
+                        if self.handle_exception is None:
+                            raise exc
+                        else:
+                            processed_results.append(
+                                self.handle_exception(exc.__task_id, exc, traceback)
+                            )
+                    else:
+                        if self.handle_result is not None:
+                            processed_results.append(self.handle_result(*result))
+            if QUIT_EVENT.is_set():
+                # we have reaped all our workers, let's stop this thread
+                # before it does anything else
+                exit(0 if exit_code is None else exit_code)
+            return processed_results
+        finally:
+            io.debug(_("shutting down worker pool {pool}").format(pool=self.pool_id))
+            self.executor.shutdown()
+            io.debug(_("worker pool {pool} has been shut down").format(pool=self.pool_id))
+
+    @property
+    def workers_are_available(self):
+        return bool(self.idle_workers)
+
+    @property
+    def workers_are_running(self):
+        return bool(self.pending_futures)
diff --git a/bundlewrap/deps.py b/bundlewrap/deps.py
new file mode 100644
index 0000000..b065bc2
--- /dev/null
+++ b/bundlewrap/deps.py
@@ -0,0 +1,597 @@
+# -*- coding: utf-8 -*-
+from __future__ import unicode_literals
+
+from .exceptions import BundleError, NoSuchItem
+from .items import Item
+from .items.actions import Action
+from .utils.text import mark_for_translation as _
+from .utils.ui import io
+
+
+class DummyItem(object):
+    bundle = None
+    triggered = False
+
+    def __init__(self, *args, **kwargs):
+        self.needed_by = []
+        self.needs = []
+        self.preceded_by = []
+        self.precedes = []
+        self.tags = []
+        self.triggered_by = []
+        self.triggers = []
+        self._deps = []
+        self._precedes_items = []
+
+    def __lt__(self, other):
+        return self.id < other.id
+
+    def _precedes_incorrect_item(self):
+        return False
+
+    def apply(self, *args, **kwargs):
+        return (Item.STATUS_OK, [])
+
+    def test(self):
+        pass
+
+
+class BundleItem(DummyItem):
+    """
+    Represents a dependency on all items in a certain bundle.
+    """
+    ITEM_TYPE_NAME = 'bundle'
+
+    def __init__(self, bundle):
+        self.bundle = bundle
+        super(BundleItem, self).__init__()
+
+    def __repr__(self):
+        return "<BundleItem: {}>".format(self.bundle.name)
+
+    @property
+    def id(self):
+        return "bundle:{}".format(self.bundle.name)
+
+
+class TagItem(DummyItem):
+    """
+    This item depends on all items with the given tag.
+    """
+    ITEM_TYPE_NAME = 'tag'
+
+    def __init__(self, tag_name):
+        self.tag_name = tag_name
+        super(TagItem, self).__init__()
+
+    def __repr__(self):
+        return "<TagItem: {}>".format(self.tag_name)
+
+    @property
+    def id(self):
+        return "tag:{}".format(self.tag_name)
+
+
+class TypeItem(DummyItem):
+    """
+    Represents a dependency on all items of a certain type.
+    """
+    ITEM_TYPE_NAME = 'type'
+
+    def __init__(self, item_type):
+        self.item_type = item_type
+        super(TypeItem, self).__init__()
+
+    def __repr__(self):
+        return "<TypeItem: {}>".format(self.item_type)
+
+    @property
+    def id(self):
+        return "{}:".format(self.item_type)
+
+
+def find_item(item_id, items):
+    """
+    Returns the first item with the given ID within the given list of
+    items.
+    """
+    try:
+        item = list(filter(lambda item: item.id == item_id, items))[0]
+    except IndexError:
+        raise NoSuchItem(_("item not found: {}").format(item_id))
+    return item
+
+
+def _find_items_of_types(item_types, items, include_dummy=False):
+    """
+    Returns a subset of items with any of the given types.
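+
+    A hypothetical call (the type name is invented for illustration):
+    _find_items_of_types(['pkg_apt'], items) returns all items whose
+    ID starts with "pkg_apt:".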
+ """ + return list(filter( + lambda item: + item.id.split(":", 1)[0] in item_types and ( + include_dummy or not isinstance(item, DummyItem) + ), + items, + )) + + +def _flatten_dependencies(items): + """ + This will cause all dependencies - direct AND inherited - to be + listed in item._flattened_deps. + """ + for item in items: + item._flattened_deps = list(set( + item._deps + _get_deps_for_item(item, items) + )) + return items + + +def _get_deps_for_item(item, items, deps_found=None): + """ + Recursively retrieves and returns a list of all inherited + dependencies of the given item. + + Note: This can handle loops, but won't detect them. + """ + if deps_found is None: + deps_found = [] + deps = [] + for dep in item._deps: + if dep not in deps_found: + deps.append(dep) + deps_found.append(dep) + deps += _get_deps_for_item( + find_item(dep, items), + items, + deps_found, + ) + return deps + + +def _has_trigger_path(items, item, target_item_id): + """ + Returns True if the given item directly or indirectly (trough + other items) triggers the item with the given target item id. + """ + if target_item_id in item.triggers: + return True + for triggered_id in item.triggers: + try: + triggered_item = find_item(triggered_id, items) + except NoSuchItem: + # the triggered item may already have been skipped by + # `bw apply -s` + continue + if _has_trigger_path(items, triggered_item, target_item_id): + return True + return False + + +def _inject_bundle_items(items): + """ + Adds virtual items that depend on every item in a bundle. + """ + bundle_items = {} + for item in items: + if item.bundle is None: + continue + if item.bundle.name not in bundle_items: + bundle_items[item.bundle.name] = BundleItem(item.bundle) + bundle_items[item.bundle.name]._deps.append(item.id) + return list(bundle_items.values()) + items + + +def _inject_canned_actions(items): + """ + Looks for canned actions like "svc_upstart:mysql:reload" in item + triggers and adds them to the list of items. + """ + added_actions = {} + for item in items: + for triggered_item_id in item.triggers: + if triggered_item_id in added_actions: + # action has already been triggered + continue + + try: + type_name, item_name, action_name = triggered_item_id.split(":") + except ValueError: + # not a canned action + continue + + target_item_id = "{}:{}".format(type_name, item_name) + + try: + target_item = find_item(target_item_id, items) + except NoSuchItem: + raise BundleError(_( + "{item} in bundle '{bundle}' triggers unknown item '{target_item}'" + ).format( + bundle=item.bundle.name, + item=item.id, + target_item=target_item_id, + )) + + try: + action_attrs = target_item.get_canned_actions()[action_name] + except KeyError: + raise BundleError(_( + "{item} in bundle '{bundle}' triggers unknown " + "canned action '{action}' on {target_item}" + ).format( + action=action_name, + bundle=item.bundle.name, + item=item.id, + target_item=target_item_id, + )) + + action_attrs.update({'triggered': True}) + action = Action( + item.bundle, + triggered_item_id, + action_attrs, + skip_name_validation=True, + ) + action._prepare_deps(items) + added_actions[triggered_item_id] = action + + return items + list(added_actions.values()) + + +def _inject_concurrency_blockers(items): + """ + Looks for items with BLOCK_CONCURRENT set to True and inserts + dependencies to force a sequential apply. 
+ """ + # find every item type that cannot be applied in parallel + item_types = set() + for item in items: + item._concurrency_deps = [] + if ( + not isinstance(item, DummyItem) and + item.BLOCK_CONCURRENT + ): + item_types.add(item.__class__) + + # daisy-chain all items of the blocking type and all items of the + # blocked types while respecting existing dependencies between them + for item_type in item_types: + blocked_types = item_type.BLOCK_CONCURRENT + [item_type.ITEM_TYPE_NAME] + type_items = _find_items_of_types( + blocked_types, + items, + ) + processed_items = [] + for item in type_items: + # disregard deps to items of other types + item.__deps = list(filter( + lambda dep: dep.split(":", 1)[0] in blocked_types, + item._flattened_deps, + )) + previous_item = None + while len(processed_items) < len(type_items): + # find the first item without same-type deps we haven't + # processed yet + try: + item = list(filter( + lambda item: not item.__deps and item not in processed_items, + type_items, + ))[0] + except IndexError: + # this can happen if the flattened deps of all items of + # this type already contain a dependency on another + # item of this type + break + if previous_item is not None: # unless we're at the first item + # add dep to previous item -- unless it's already in there + if previous_item.id not in item._deps: + item._deps.append(previous_item.id) + item._concurrency_deps.append(previous_item.id) + item._flattened_deps.append(previous_item.id) + previous_item = item + processed_items.append(item) + for other_item in type_items: + try: + other_item.__deps.remove(item.id) + except ValueError: + pass + return items + + +def _inject_tag_items(items): + """ + Takes a list of items and adds tag items depending on each type of + item in the list. Returns the appended list. + """ + tag_items = {} + items = list(items) + for item in items: + for tag in item.tags: + if tag not in tag_items: + tag_items[tag] = TagItem(tag) + tag_items[tag]._deps.append(item.id) + + return list(tag_items.values()) + items + + +def _inject_type_items(items): + """ + Takes a list of items and adds dummy items depending on each type of + item in the list. Returns the appended list. + """ + # first, find all types of items and add dummy deps + type_items = {} + items = list(items) + for item in items: + # create dummy items that depend on each item of their type + item_type = item.id.split(":")[0] + if item_type not in type_items: + type_items[item_type] = TypeItem(item_type) + type_items[item_type]._deps.append(item.id) + + # create DummyItem for every type + for dep in item._deps: + item_type = dep.split(":")[0] + if item_type not in type_items: + type_items[item_type] = TypeItem(item_type) + return list(type_items.values()) + items + + +def _inject_reverse_dependencies(items): + """ + Looks for 'needed_by' deps and creates standard dependencies + accordingly. 
+ """ + def add_dep(item, dep): + if dep not in item._deps: + item._deps.append(dep) + item._reverse_deps.append(dep) + + for item in items: + item._reverse_deps = [] + + for item in items: + for depending_item_id in item.needed_by: + # bundle items + if depending_item_id.startswith("bundle:"): + depending_bundle_name = depending_item_id.split(":")[1] + for depending_item in items: + if depending_item.bundle.name == depending_bundle_name: + add_dep(depending_item, item.id) + + # tag items + if depending_item_id.startswith("tag:"): + tag_name = depending_item_id.split(":")[1] + for depending_item in items: + if tag_name in depending_item.tags: + add_dep(depending_item, item.id) + + # type items + if depending_item_id.endswith(":"): + target_type = depending_item_id[:-1] + for depending_item in _find_items_of_types([target_type], items): + add_dep(depending_item, item.id) + + # single items + else: + depending_item = find_item(depending_item_id, items) + add_dep(depending_item, item.id) + return items + + +def _inject_reverse_triggers(items): + """ + Looks for 'triggered_by' and 'precedes' attributes and turns them + into standard triggers (defined on the opposing end). + """ + for item in items: + for triggering_item_id in item.triggered_by: + triggering_item = find_item(triggering_item_id, items) + if triggering_item.id.startswith("bundle:"): # bundle items + bundle_name = triggering_item.id.split(":")[1] + for actual_triggering_item in items: + if triggering_item.bundle.name == bundle_name: + actual_triggering_item.triggers.append(item.id) + elif triggering_item.id.startswith("tag:"): # tag items + tag_name = triggering_item.id.split(":")[1] + for actual_triggering_item in items: + if tag_name in triggering_item.tags: + actual_triggering_item.triggers.append(item.id) + elif triggering_item.id.endswith(":"): # type items + target_type = triggering_item.id[:-1] + for actual_triggering_item in _find_items_of_types([target_type], items): + actual_triggering_item.triggers.append(item.id) + else: + triggering_item.triggers.append(item.id) + for preceded_item_id in item.precedes: + preceded_item = find_item(preceded_item_id, items) + if preceded_item.id.startswith("bundle:"): # bundle items + bundle_name = preceded_item.id.split(":")[1] + for actual_preceded_item in items: + if actual_preceded_item.bundle.name == bundle_name: + actual_preceded_item.preceded_by.append(item.id) + elif preceded_item.id.startswith("tag:"): # tag items + tag_name = preceded_item.id.split(":")[1] + for actual_preceded_item in items: + if tag_name in actual_preceded_item.tags: + actual_preceded_item.preceded_by.append(item.id) + elif preceded_item.id.endswith(":"): # type items + target_type = preceded_item.id[:-1] + for actual_preceded_item in _find_items_of_types([target_type], items): + actual_preceded_item.preceded_by.append(item.id) + else: + preceded_item.preceded_by.append(item.id) + return items + + +def _inject_trigger_dependencies(items): + """ + Injects dependencies from all triggered items to their triggering + items. 
+ """ + for item in items: + for triggered_item_id in item.triggers: + try: + triggered_item = find_item(triggered_item_id, items) + except NoSuchItem: + raise BundleError(_( + "unable to find definition of '{item1}' triggered " + "by '{item2}' in bundle '{bundle}'" + ).format( + bundle=item.bundle.name, + item1=triggered_item_id, + item2=item.id, + )) + if not triggered_item.triggered: + raise BundleError(_( + "'{item1}' in bundle '{bundle1}' triggered " + "by '{item2}' in bundle '{bundle2}', " + "but missing 'triggered' attribute" + ).format( + item1=triggered_item.id, + bundle1=triggered_item.bundle.name, + item2=item.id, + bundle2=item.bundle.name, + )) + triggered_item._deps.append(item.id) + return items + + +def _inject_preceded_by_dependencies(items): + """ + Injects dependencies from all triggering items to their + preceded_by items and attaches triggering items to preceding items. + """ + for item in items: + if item.preceded_by and item.triggered: + raise BundleError(_( + "triggered item '{item}' in bundle '{bundle}' must not use " + "'preceded_by' (use chained triggers instead)".format( + bundle=item.bundle.name, + item=item.id, + ), + )) + for triggered_item_id in item.preceded_by: + try: + triggered_item = find_item(triggered_item_id, items) + except NoSuchItem: + raise BundleError(_( + "unable to find definition of '{item1}' preceding " + "'{item2}' in bundle '{bundle}'" + ).format( + bundle=item.bundle.name, + item1=triggered_item_id, + item2=item.id, + )) + if not triggered_item.triggered: + raise BundleError(_( + "'{item1}' in bundle '{bundle1}' precedes " + "'{item2}' in bundle '{bundle2}', " + "but missing 'triggered' attribute" + ).format( + item1=triggered_item.id, + bundle1=triggered_item.bundle.name, + item2=item.id, + bundle2=item.bundle.name if item.bundle else "N/A", + )) + triggered_item._precedes_items.append(item) + item._deps.append(triggered_item.id) + return items + + +def prepare_dependencies(items): + """ + Performs all dependency preprocessing on a list of items. + """ + items = list(items) + + for item in items: + item._check_bundle_collisions(items) + item._prepare_deps(items) + + items = _inject_bundle_items(items) + items = _inject_tag_items(items) + items = _inject_type_items(items) + items = _inject_canned_actions(items) + items = _inject_reverse_triggers(items) + items = _inject_reverse_dependencies(items) + items = _inject_trigger_dependencies(items) + items = _inject_preceded_by_dependencies(items) + items = _flatten_dependencies(items) + items = _inject_concurrency_blockers(items) + + for item in items: + if not isinstance(item, DummyItem): + item._check_redundant_dependencies() + + return items + + +def remove_dep_from_items(items, dep): + """ + Removes the given item id (dep) from the temporary list of + dependencies of all items in the given list. + """ + for item in items: + try: + item._deps.remove(dep) + except ValueError: + pass + return items + + +def remove_item_dependents(items, dep_item, skipped=False): + """ + Removes the items depending on the given item from the list of items. 
+ """ + removed_items = [] + for item in items: + if dep_item.id in item._deps: + if _has_trigger_path(items, dep_item, item.id): + # triggered items cannot be removed here since they + # may yet be triggered by another item and will be + # skipped anyway if they aren't + item._deps.remove(dep_item.id) + elif skipped and isinstance(item, DummyItem) and \ + dep_item.triggered and not dep_item.has_been_triggered: + # don't skip dummy items because of untriggered members + # see issue #151; separate elif for clarity + item._deps.remove(dep_item.id) + else: + removed_items.append(item) + + for item in removed_items: + items.remove(item) + + if removed_items: + io.debug( + "skipped these items because they depend on {item}, which was " + "skipped previously: {skipped}".format( + item=dep_item.id, + skipped=", ".join([item.id for item in removed_items]), + ) + ) + + all_recursively_removed_items = [] + for removed_item in removed_items: + items, recursively_removed_items = \ + remove_item_dependents(items, removed_item, skipped=skipped) + all_recursively_removed_items += recursively_removed_items + + return (items, removed_items + all_recursively_removed_items) + + +def split_items_without_deps(items): + """ + Takes a list of items and extracts the ones that don't have any + dependencies. The extracted deps are returned as a list. + """ + items = list(items) # make sure we're not returning a generator + removed_items = [] + for item in items: + if not item._deps: + removed_items.append(item) + for item in removed_items: + items.remove(item) + return (items, removed_items) diff --git a/bundlewrap/exceptions.py b/bundlewrap/exceptions.py new file mode 100644 index 0000000..0a86e4d --- /dev/null +++ b/bundlewrap/exceptions.py @@ -0,0 +1,148 @@ +# -*- coding: utf-8 -*- +from __future__ import unicode_literals + +from sys import version_info + + +class UnicodeException(Exception): + def __init__(self, msg=""): + if version_info >= (3, 0): + super(UnicodeException, self).__init__(msg) + else: + super(UnicodeException, self).__init__(msg.encode('utf-8')) + + +class ActionFailure(UnicodeException): + """ + Raised when an action failes to meet the expected rcode/output. + """ + pass + + +class DontCache(Exception): + """ + Used in the cached_property decorator to temporily prevent caching + the returned result + """ + def __init__(self, obj): + self.obj = obj + + +class FaultUnavailable(UnicodeException): + """ + Raised when a Fault object cannot be resolved. + """ + pass + + +class NoSuchBundle(UnicodeException): + """ + Raised when a bundle of unknown name is requested. + """ + pass + + +class NoSuchGroup(UnicodeException): + """ + Raised when a group of unknown name is requested. + """ + pass + + +class NoSuchItem(UnicodeException): + """ + Raised when an item of unknown name is requested. + """ + pass + + +class NoSuchNode(UnicodeException): + """ + Raised when a node of unknown name is requested. + """ + pass + + +class NoSuchPlugin(UnicodeException): + """ + Raised when a plugin of unknown name is requested. + """ + pass + + +class RemoteException(UnicodeException): + """ + Raised when a shell command on a node fails. + """ + pass + + +class RepositoryError(UnicodeException): + """ + Indicates that somethings is wrong with the current repository. + """ + pass + + +class BundleError(RepositoryError): + """ + Indicates an error in a bundle. + """ + pass + + +class ItemDependencyError(RepositoryError): + """ + Indicates a problem with item dependencies (e.g. loops). 
+ """ + pass + + +class NoSuchRepository(RepositoryError): + """ + Raised when trying to get a Repository object from a directory that + is not in fact a repository. + """ + pass + + +class MissingRepoDependency(RepositoryError): + """ + Raised when a dependency from requirements.txt is missing. + """ + pass + + +class PluginError(RepositoryError): + """ + Indicates an error related to a plugin. + """ + pass + + +class PluginLocalConflict(PluginError): + """ + Raised when a plugin tries to overwrite locally-modified files. + """ + pass + + +class TemplateError(RepositoryError): + """ + Raised when an error occurs while rendering a template. + """ + pass + + +class UsageException(UnicodeException): + """ + Raised when command line options don't make sense. + """ + pass + + +class NodeLockedException(Exception): + """ + Raised when a node is already locked during an 'apply' run. + """ + pass diff --git a/bundlewrap/group.py b/bundlewrap/group.py new file mode 100644 index 0000000..b19c57d --- /dev/null +++ b/bundlewrap/group.py @@ -0,0 +1,199 @@ +# -*- coding: utf-8 -*- +from __future__ import unicode_literals + +import re + +from .exceptions import NoSuchGroup, NoSuchNode, RepositoryError +from .utils import cached_property, names +from .utils.statedict import hash_statedict +from .utils.text import mark_for_translation as _, validate_name + + +GROUP_ATTR_DEFAULTS = { + 'cmd_wrapper_inner': "export LANG=C; {}", + 'cmd_wrapper_outer': "sudo sh -c {}", + 'dummy': False, + 'os': 'linux', + # Setting os_version to 0 by default will probably yield less + # surprises than setting it to max_int. Users will probably + # start at a certain version and then gradually update their + # systems, adding conditions like this: + # + # if node.os_version >= (2,): + # new_behavior() + # else: + # old_behavior() + # + # If we set os_version to max_int, nodes without an explicit + # os_version would automatically adopt the new_behavior() as + # soon as it appears in the repo - which is probably not what + # people want. + 'os_version': (0,), + 'use_shadow_passwords': True, +} + + +def _build_error_chain(loop_node, last_node, nodes_in_between): + """ + Used to illustrate subgroup loop paths in error messages. + + loop_node: name of node that loops back to itself + last_node: name of last node pointing back to loop_node, + causing the loop + nodes_in_between: names of nodes traversed during loop detection, + does include loop_node if not a direct loop, + but not last_node + """ + error_chain = [] + for visited in nodes_in_between: + if (loop_node in error_chain) != (loop_node == visited): + error_chain.append(visited) + error_chain.append(last_node) + error_chain.append(loop_node) + return error_chain + + +class Group(object): + """ + A group of nodes. 
+ """ + def __init__(self, group_name, infodict=None): + if infodict is None: + infodict = {} + + if not validate_name(group_name): + raise RepositoryError(_("'{}' is not a valid group name.").format(group_name)) + + self.name = group_name + self.bundle_names = infodict.get('bundles', []) + self.immediate_subgroup_names = infodict.get('subgroups', []) + self.immediate_subgroup_patterns = infodict.get('subgroup_patterns', []) + self.members_add = infodict.get('members_add', None) + self.members_remove = infodict.get('members_remove', None) + self.metadata = infodict.get('metadata', {}) + self.node_patterns = infodict.get('member_patterns', []) + self.static_member_names = infodict.get('members', []) + + for attr in GROUP_ATTR_DEFAULTS: + # defaults are applied in node.py + setattr(self, attr, infodict.get(attr)) + + def __lt__(self, other): + return self.name < other.name + + def __repr__(self): + return "".format(self.name) + + def __str__(self): + return self.name + + @cached_property + def cdict(self): + group_dict = {} + for node in self.nodes: + group_dict[node.name] = node.hash() + return group_dict + + def group_membership_hash(self): + return hash_statedict(sorted(names(self.nodes))) + + def hash(self): + return hash_statedict(self.cdict) + + def metadata_hash(self): + group_dict = {} + for node in self.nodes: + group_dict[node.name] = node.metadata_hash() + return hash_statedict(group_dict) + + @cached_property + def nodes(self): + for node in self.repo.nodes: + if node.in_group(self.name): + yield node + + @cached_property + def _static_nodes(self): + result = set() + result.update(self._nodes_from_members) + result.update(self._nodes_from_patterns) + return result + + @property + def _nodes_from_members(self): + for node_name in self.static_member_names: + try: + yield self.repo.get_node(node_name) + except NoSuchNode: + raise RepositoryError(_( + "Group '{group}' has '{node}' listed as a member in groups.py, " + "but no such node could be found." + ).format( + group=self.name, + node=node_name, + )) + + @property + def _nodes_from_patterns(self): + for pattern in self.node_patterns: + compiled_pattern = re.compile(pattern) + for node in self.repo.nodes: + if not compiled_pattern.search(node.name) is None: + yield node + + def _check_subgroup_names(self, visited_names): + """ + Recursively finds subgroups and checks for loops. + """ + names_from_patterns = [] + for pattern in self.immediate_subgroup_patterns: + compiled_pattern = re.compile(pattern) + for group in self.repo.groups: + if compiled_pattern.search(group.name) is not None and group != self: + names_from_patterns.append(group.name) + + for name in list(self.immediate_subgroup_names) + names_from_patterns: + if name not in visited_names: + try: + group = self.repo.get_group(name) + except NoSuchGroup: + raise RepositoryError(_( + "Group '{group}' has '{subgroup}' listed as a subgroup in groups.py, " + "but no such group could be found." + ).format( + group=self.name, + subgroup=name, + )) + for group_name in group._check_subgroup_names( + visited_names + [self.name], + ): + yield group_name + else: + error_chain = _build_error_chain( + name, + self.name, + visited_names, + ) + raise RepositoryError(_( + "Group '{group}' can't be a subgroup of itself. 
" + "({chain})" + ).format( + group=name, + chain=" -> ".join(error_chain), + )) + if self.name not in visited_names: + yield self.name + + @cached_property + def parent_groups(self): + for group in self.repo.groups: + if self in group.subgroups: + yield group + + @cached_property + def subgroups(self): + """ + Iterator over all subgroups as group objects. + """ + for group_name in self._check_subgroup_names([self.name]): + yield self.repo.get_group(group_name) diff --git a/bundlewrap/itemqueue.py b/bundlewrap/itemqueue.py new file mode 100644 index 0000000..f5f6fec --- /dev/null +++ b/bundlewrap/itemqueue.py @@ -0,0 +1,153 @@ +from .deps import ( + DummyItem, + find_item, + prepare_dependencies, + remove_item_dependents, + remove_dep_from_items, + split_items_without_deps, +) +from .exceptions import NoSuchItem +from .utils.text import mark_for_translation as _ +from .utils.ui import io + + +class BaseQueue(object): + def __init__(self, items): + self.items_with_deps = prepare_dependencies(items) + self.items_without_deps = [] + self._split() + self.pending_items = [] + + def _split(self): + self.items_with_deps, self.items_without_deps = \ + split_items_without_deps(self.all_items) + + @property + def all_items(self): + return self.items_with_deps + self.items_without_deps + + +class ItemQueue(BaseQueue): + def item_failed(self, item): + """ + Called when an item could not be fixed. Yields all items that + have been skipped as a result by cascading. + """ + for skipped_item in self.item_skipped(item, _skipped=False): + yield skipped_item + + def item_fixed(self, item): + """ + Called when an item has successfully been fixed. + """ + self.item_ok(item) + self._fire_triggers_for_item(item) + + def item_ok(self, item): + """ + Called when an item didn't need to be fixed. + """ + self.pending_items.remove(item) + # if an item is applied successfully, all dependencies on it can + # be removed from the remaining items + self.items_with_deps = remove_dep_from_items( + self.items_with_deps, + item.id, + ) + self._split() + + def item_skipped(self, item, _skipped=True): + """ + Called when an item has been skipped. Yields all items that have + been skipped as a result by cascading. + """ + self.pending_items.remove(item) + if item.cascade_skip: + # if an item fails or is skipped, all items that depend on + # it shall be removed from the queue + self.items_with_deps, skipped_items = remove_item_dependents( + self.items_with_deps, + item, + skipped=_skipped, + ) + # since we removed them from further processing, we + # fake the status of the removed items so they still + # show up in the result statistics + for skipped_item in skipped_items: + if not isinstance(skipped_item, DummyItem): + yield skipped_item + else: + self.items_with_deps = remove_dep_from_items( + self.items_with_deps, + item.id, + ) + self._split() + + def pop(self, interactive=False): + """ + Gets the next item available for processing and moves it into + self.pending_items. Will raise IndexError if no item is + available. Otherwise, it will return the item and a list of + items that have been skipped while looking for the item. 
+ """ + skipped_items = [] + + if not self.items_without_deps: + raise IndexError + + while self.items_without_deps: + item = self.items_without_deps.pop() + + if item._precedes_items: + if item._precedes_incorrect_item(interactive=interactive): + item.has_been_triggered = True + else: + # we do not have to cascade here at all because + # all chained preceding items will be skipped by + # this same mechanism + io.debug( + _("skipping {node}:{bundle}:{item} because its precede trigger " + "did not fire").format( + bundle=item.bundle.name, + item=item.id, + node=item.node.name, + ), + ) + self.items_with_deps = remove_dep_from_items(self.items_with_deps, item.id) + self._split() + skipped_items.append(item) + item = None + continue + break + assert item is not None + self.pending_items.append(item) + return (item, skipped_items) + + def _fire_triggers_for_item(self, item): + for triggered_item_id in item.triggers: + try: + triggered_item = find_item( + triggered_item_id, + self.all_items, + ) + triggered_item.has_been_triggered = True + except NoSuchItem: + io.debug(_( + "{item} tried to trigger {triggered_item}, " + "but it wasn't available. It must have been skipped previously." + ).format( + item=item.id, + triggered_item=triggered_item_id, + )) + + +class ItemTestQueue(BaseQueue): + """ + A simpler variation of ItemQueue that is used by `bw test` to check + for circular dependencies. + """ + def pop(self): + item = self.items_without_deps.pop() + self.items_with_deps = remove_dep_from_items(self.items_with_deps, item.id) + self._split() + return item diff --git a/bundlewrap/items/__init__.py b/bundlewrap/items/__init__.py new file mode 100644 index 0000000..b67a54d --- /dev/null +++ b/bundlewrap/items/__init__.py @@ -0,0 +1,650 @@ +# -*- coding: utf-8 -*- +""" +Note that modules in this package have to use absolute imports because +Repository.item_classes loads them as files. +""" +from __future__ import unicode_literals +from copy import copy +from datetime import datetime +from os.path import join + +from bundlewrap.exceptions import BundleError, FaultUnavailable +from bundlewrap.utils import cached_property +from bundlewrap.utils.statedict import diff_keys, diff_value, hash_statedict, validate_statedict +from bundlewrap.utils.text import force_text, mark_for_translation as _ +from bundlewrap.utils.text import blue, bold, wrap_question +from bundlewrap.utils.ui import io + +BUILTIN_ITEM_ATTRIBUTES = { + 'cascade_skip': None, + 'needed_by': [], + 'needs': [], + 'preceded_by': [], + 'precedes': [], + 'error_on_missing_fault': False, + 'tags': [], + 'triggered': False, + 'triggered_by': [], + 'triggers': [], + 'unless': "", +} + + +class ItemStatus(object): + """ + Holds information on a particular Item such as whether it needs + fixing and what's broken. + """ + + def __init__(self, cdict, sdict): + self.cdict = cdict + self.sdict = sdict + self.keys_to_fix = [] + self.must_be_deleted = (self.sdict is not None and self.cdict is None) + self.must_be_created = (self.cdict is not None and self.sdict is None) + if not self.must_be_deleted and not self.must_be_created: + self.keys_to_fix = diff_keys(cdict, sdict) + + def __repr__(self): + return "".format(self.correct) + + @property + def correct(self): + return not self.must_be_deleted and not self.must_be_created and not bool(self.keys_to_fix) + + +class Item(object): + """ + A single piece of configuration (e.g. a file, a package, a service). 
+ """ + BINARY_ATTRIBUTES = [] + BLOCK_CONCURRENT = [] + BUNDLE_ATTRIBUTE_NAME = None + ITEM_ATTRIBUTES = {} + ITEM_TYPE_NAME = None + REQUIRED_ATTRIBUTES = [] + STATUS_OK = 1 + STATUS_FIXED = 2 + STATUS_FAILED = 3 + STATUS_SKIPPED = 4 + STATUS_ACTION_SUCCEEDED = 5 + + def __init__( + self, + bundle, + name, + attributes, + has_been_triggered=False, + skip_validation=False, + skip_name_validation=False, + ): + self.attributes = {} + self.bundle = bundle + self.has_been_triggered = has_been_triggered + self.item_dir = join(bundle.bundle_dir, self.BUNDLE_ATTRIBUTE_NAME) + self.item_data_dir = join(bundle.bundle_data_dir, self.BUNDLE_ATTRIBUTE_NAME) + self.name = name + self.node = bundle.node + self._faults_missing_for_attributes = set() + self._precedes_items = [] + + if not skip_validation: + if not skip_name_validation: + self._validate_name(bundle, name) + self.validate_name(bundle, name) + self._validate_attribute_names(bundle, self.id, attributes) + self._validate_required_attributes(bundle, self.id, attributes) + self.validate_attributes(bundle, self.id, attributes) + + try: + attributes = self.patch_attributes(attributes) + except FaultUnavailable: + self._faults_missing_for_attributes.add(_("unknown")) + + for attribute_name, attribute_default in \ + BUILTIN_ITEM_ATTRIBUTES.items(): + setattr(self, attribute_name, force_text(attributes.get( + attribute_name, + copy(attribute_default), + ))) + + for attribute_name, attribute_default in \ + self.ITEM_ATTRIBUTES.items(): + if attribute_name not in BUILTIN_ITEM_ATTRIBUTES: + try: + self.attributes[attribute_name] = force_text(attributes.get( + attribute_name, + attribute_default, + )) + except FaultUnavailable: + self._faults_missing_for_attributes.add(attribute_name) + + if self.cascade_skip is None: + self.cascade_skip = not (self.unless or self.triggered) + + if self.id in self.triggers: + raise BundleError(_( + "item {item} in bundle '{bundle}' can't trigger itself" + ).format( + bundle=self.bundle.name, + item=self.id, + )) + + def __lt__(self, other): + return self.id < other.id + + def __str__(self): + return self.id + + def __repr__(self): + return "".format(self.id) + + def _check_bundle_collisions(self, items): + for item in items: + if item == self: + continue + if item.id == self.id: + raise BundleError(_( + "duplicate definition of {item} in bundles '{bundle1}' and '{bundle2}'" + ).format( + item=item.id, + bundle1=item.bundle.name, + bundle2=self.bundle.name, + )) + + def _check_redundant_dependencies(self): + """ + Alerts the user if they have defined a redundant dependency + (such as settings 'needs' on a triggered item pointing to the + triggering item). 
+ """ + for dep in self._deps: + if self._deps.count(dep) > 1: + raise BundleError(_( + "redundant dependency of {item1} in bundle '{bundle}' on {item2}" + ).format( + bundle=self.bundle.name, + item1=self.id, + item2=dep, + )) + + @cached_property + def cached_cdict(self): + if self._faults_missing_for_attributes: + self._raise_for_faults() + + cdict = self.cdict() + try: + validate_statedict(cdict) + except ValueError as e: + raise ValueError(_( + "{item} from bundle '{bundle}' returned invalid cdict: {msg}" + ).format( + bundle=self.bundle.name, + item=self.id, + msg=repr(e), + )) + return cdict + + @cached_property + def cached_sdict(self): + status = self.sdict() + try: + validate_statedict(status) + except ValueError as e: + raise ValueError(_( + "{item} from bundle '{bundle}' returned invalid status: {msg}" + ).format( + bundle=self.bundle.name, + item=self.id, + msg=repr(e), + )) + return status + + @cached_property + def cached_status(self): + return self.get_status() + + @cached_property + def cached_unless_result(self): + if self.unless and not self.cached_status.correct: + unless_result = self.node.run(self.unless, may_fail=True) + return unless_result.return_code == 0 + else: + return False + + def _precedes_incorrect_item(self, interactive=False): + """ + Returns True if this item precedes another and the triggering + item is in need of fixing. + """ + for item in self._precedes_items: + if item._precedes_incorrect_item(): + return True + if self.cached_unless_result: + # triggering item failed unless, so there is nothing to do + return False + if self.ITEM_TYPE_NAME == 'action': + if self.attributes['interactive'] != interactive or \ + self.attributes['interactive'] is None: + return False + else: + return True + return not self.cached_status.correct + + def _prepare_deps(self, items): + # merge automatic and user-defined deps + self._deps = list(self.needs) + list(self.get_auto_deps(items)) + + def _raise_for_faults(self): + raise FaultUnavailable(_( + "{item} on {node} is missing faults " + "for these attributes: {attrs} " + "(most of the time this means you're missing " + "a required key in your .secrets.cfg)" + ).format( + attrs=", ".join(sorted(self._faults_missing_for_attributes)), + item=self.id, + node=self.node.name, + )) + + def _skip_with_soft_locks(self, mine, others): + """ + Returns True/False depending on whether the item should be + skipped based on the given set of locks. 
+ """ + for lock in mine: + for selector in lock['items']: + if self.covered_by_autoskip_selector(selector): + io.debug(_("{item} on {node} whitelisted by lock {lock}").format( + item=self.id, + lock=lock['id'], + node=self.node.name, + )) + return False + for lock in others: + for selector in lock['items']: + if self.covered_by_autoskip_selector(selector): + io.debug(_("{item} on {node} blacklisted by lock {lock}").format( + item=self.id, + lock=lock['id'], + node=self.node.name, + )) + return True + return False + + def _test(self): + if self._faults_missing_for_attributes: + self._raise_for_faults() + return self.test() + + @classmethod + def _validate_attribute_names(cls, bundle, item_id, attributes): + invalid_attributes = set(attributes.keys()).difference( + set(cls.ITEM_ATTRIBUTES.keys()).union( + set(BUILTIN_ITEM_ATTRIBUTES.keys()) + ), + ) + if invalid_attributes: + raise BundleError( + _("invalid attribute(s) for '{item}' in bundle '{bundle}': {attrs}").format( + item=item_id, + bundle=bundle.name, + attrs=", ".join(invalid_attributes), + ) + ) + + @classmethod + def _validate_name(cls, bundle, name): + if ":" in name: + raise BundleError(_( + "invalid name for {type} in bundle '{bundle}': {name} (must not contain colon)" + ).format( + bundle=bundle.name, + name=name, + type=cls.ITEM_TYPE_NAME, + )) + + def _validate_required_attributes(cls, bundle, item_id, attributes): + missing = [] + for attrname in cls.REQUIRED_ATTRIBUTES: + if attrname not in attributes: + missing.append(attrname) + if missing: + raise BundleError(_( + "{item} in bundle '{bundle}' missing required attribute(s): {attrs}" + ).format( + item=item_id, + bundle=bundle.name, + attrs=", ".join(missing), + )) + + def apply( + self, + autoskip_selector="", + my_soft_locks=(), + other_peoples_soft_locks=(), + interactive=False, + interactive_default=True, + ): + self.node.repo.hooks.item_apply_start( + self.node.repo, + self.node, + self, + ) + keys_to_fix = None + status_code = None + status_before = None + status_after = None + start_time = datetime.now() + + if self.covered_by_autoskip_selector(autoskip_selector): + io.debug(_( + "autoskip matches {item} on {node}" + ).format(item=self.id, node=self.node.name)) + status_code = self.STATUS_SKIPPED + keys_to_fix = [_("cmdline")] + + if self._skip_with_soft_locks(my_soft_locks, other_peoples_soft_locks): + status_code = self.STATUS_SKIPPED + keys_to_fix = [_("soft locked")] + + if self.triggered and not self.has_been_triggered and status_code is None: + io.debug(_( + "skipping {item} on {node} because it wasn't triggered" + ).format(item=self.id, node=self.node.name)) + status_code = self.STATUS_SKIPPED + keys_to_fix = [_("not triggered")] + + if status_code is None and self.cached_unless_result and status_code is None: + io.debug(_( + "'unless' for {item} on {node} succeeded, not fixing" + ).format(item=self.id, node=self.node.name)) + status_code = self.STATUS_SKIPPED + keys_to_fix = ["unless"] + + if self._faults_missing_for_attributes and status_code is None: + if self.error_on_missing_fault: + self._raise_for_faults() + else: + io.debug(_( + "skipping {item} on {node} because it is missing faults " + "for these attributes: {attrs} " + "(most of the time this means you're missing " + "a required key in your .secrets.cfg)" + ).format( + attrs=", ".join(sorted(self._faults_missing_for_attributes)), + item=self.id, + node=self.node.name, + )) + status_code = self.STATUS_SKIPPED + keys_to_fix = [_("Fault unavailable")] + + if status_code is None: + try: + 
status_before = self.cached_status + except FaultUnavailable: + if self.error_on_missing_fault: + self._raise_for_faults() + else: + io.debug(_( + "skipping {item} on {node} because it is missing Faults " + "(most of the time this means you're missing " + "a required key in your .secrets.cfg)" + ).format( + item=self.id, + node=self.node.name, + )) + status_code = self.STATUS_SKIPPED + keys_to_fix = [_("Fault unavailable")] + else: + if status_before.correct: + status_code = self.STATUS_OK + + if status_code is None: + keys_to_fix = self.display_keys( + copy(self.cached_cdict), + copy(status_before.sdict), + status_before.keys_to_fix[:], + ) + if not interactive: + with io.job(_(" {node} {bundle} {item} fixing...").format( + bundle=self.bundle.name, + item=self.id, + node=self.node.name, + )): + self.fix(status_before) + else: + if status_before.must_be_created: + question_text = _("Doesn't exist. Will be created.") + elif status_before.must_be_deleted: + question_text = _("Found on node. Will be removed.") + else: + cdict, sdict = self.display_dicts( + copy(self.cached_cdict), + copy(status_before.sdict), + keys_to_fix, + ) + question_text = self.ask(cdict, sdict, keys_to_fix) + question = wrap_question( + self.id, + question_text, + _("Fix {}?").format(bold(self.id)), + prefix="{x} {node} ".format( + node=bold(self.node.name), + x=blue("?"), + ), + ) + answer = io.ask( + question, + interactive_default, + epilogue="{x} {node}".format( + node=bold(self.node.name), + x=blue("?"), + ), + ) + if answer: + with io.job(_(" {node} {bundle} {item} fixing...").format( + bundle=self.bundle.name, + item=self.id, + node=self.node.name, + )): + self.fix(status_before) + else: + status_code = self.STATUS_SKIPPED + keys_to_fix = [_("interactive")] + + if status_code is None: + status_after = self.get_status(cached=False) + status_code = self.STATUS_FIXED if status_after.correct else self.STATUS_FAILED + + if status_code == self.STATUS_SKIPPED: + # can't use else for this because status_before is None + changes = keys_to_fix + elif status_before.must_be_created: + changes = True + elif status_before.must_be_deleted: + changes = False + elif status_code == self.STATUS_FAILED: + changes = self.display_keys( + self.cached_cdict.copy(), + status_after.sdict.copy(), + status_after.keys_to_fix[:], + ) + else: + changes = keys_to_fix + + self.node.repo.hooks.item_apply_end( + self.node.repo, + self.node, + self, + duration=datetime.now() - start_time, + status_code=status_code, + status_before=status_before, + status_after=status_after, + ) + return (status_code, changes) + + def ask(self, status_should, status_actual, relevant_keys): + """ + Returns a string asking the user if this item should be + implemented. + """ + result = [] + for key in relevant_keys: + result.append(diff_value(key, status_actual[key], status_should[key])) + return "\n\n".join(result) + + def cdict(self): + """ + Return a statedict that describes the target state of this item + as configured in the repo. An empty dict means that the item + should not exist. + + MAY be overridden by subclasses. + """ + return self.attributes + + def covered_by_autoskip_selector(self, autoskip_selector): + """ + True if this item should be skipped based on the given selector + string (e.g. "tag:foo,bundle:bar"). 
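+
+        Selector examples (values invented for illustration): "*"
+        matches everything, "file:/etc/motd" a single item,
+        "bundle:nginx" a whole bundle, "file:" a whole item type and
+        "tag:web" a tag.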
+ """ + components = [c.strip() for c in autoskip_selector.split(",")] + if ( + "*" in components or + self.id in components or + "bundle:{}".format(self.bundle.name) in components or + "{}:".format(self.ITEM_TYPE_NAME) in components + ): + return True + for tag in self.tags: + if "tag:{}".format(tag) in components: + return True + return False + + def fix(self, status): + """ + This is supposed to actually implement stuff on the target node. + + MUST be overridden by subclasses. + """ + raise NotImplementedError() + + def get_auto_deps(self, items): + """ + Return a list of item IDs this item should have dependencies on. + + Be very careful when using this. There are few circumstances + where this is really necessary. Only use this if you really need + to examine the actual list of items in order to figure out your + dependencies. + + MAY be overridden by subclasses. + """ + return [] + + def get_canned_actions(self): + """ + Return a dictionary of action definitions (mapping action names + to dicts of action attributes, as in bundles). + + MAY be overridden by subclasses. + """ + return {} + + def get_status(self, cached=True): + """ + Returns an ItemStatus instance describing the current status of + the item on the actual node. + """ + with io.job(_(" {node} {bundle} {item} checking...").format( + bundle=self.bundle.name, + item=self.id, + node=self.node.name, + )): + if not cached: + del self._cache['cached_sdict'] + return ItemStatus(self.cached_cdict, self.cached_sdict) + + def hash(self): + return hash_statedict(self.cached_cdict) + + @property + def id(self): + if self.ITEM_TYPE_NAME == 'action' and ":" in self.name: + # canned actions don't have an "action:" prefix + return self.name + return "{}:{}".format(self.ITEM_TYPE_NAME, self.name) + + def display_dicts(self, cdict, sdict, keys): + """ + Given cdict and sdict as implemented above, modify them to + better suit interactive presentation. The keys parameter is the + return value of display_keys (see below) and provided for + reference only. + + MAY be overridden by subclasses. + """ + return (cdict, sdict) + + def display_keys(self, cdict, sdict, keys): + """ + Given a list of keys whose values differ between cdict and + sdict, modify them to better suit presentation to the user. + + MAY be overridden by subclasses. + """ + return keys + + def patch_attributes(self, attributes): + """ + Allows an item to preprocess the attributes it is initialized + with. Returns the modified attributes dictionary. + + MAY be overridden by subclasses. + """ + return attributes + + def sdict(self): + """ + Return a statedict that describes the actual state of this item + on the node. An empty dict means that the item does not exist + on the node. + + For the item to validate as correct, the values for all keys in + self.cdict() have to match this statedict. + + MUST be overridden by subclasses. + """ + raise NotImplementedError() + + def test(self): + """ + Used by `bw repo test`. Should do as much as possible to detect + what would become a runtime error during a `bw apply`. Files + will attempt to render their templates for example. + + SHOULD be overridden by subclasses + """ + pass + + @classmethod + def validate_attributes(cls, bundle, item_id, attributes): + """ + Raises BundleError if something is amiss with the user-specified + attributes. + + SHOULD be overridden by subclasses. + """ + pass + + @classmethod + def validate_name(cls, bundle, name): + """ + Raise BundleError if the given name is not valid (e.g. 
contains + invalid characters for this kind of item. + + MAY be overridden by subclasses. + """ + pass diff --git a/bundlewrap/items/actions.py b/bundlewrap/items/actions.py new file mode 100644 index 0000000..fce5eae --- /dev/null +++ b/bundlewrap/items/actions.py @@ -0,0 +1,156 @@ +# -*- coding: utf-8 -*- +from __future__ import unicode_literals + +from datetime import datetime + +from bundlewrap.exceptions import ActionFailure, BundleError +from bundlewrap.items import Item +from bundlewrap.utils.ui import io +from bundlewrap.utils.text import mark_for_translation as _ +from bundlewrap.utils.text import blue, bold, wrap_question + + +class Action(Item): + """ + A command that is run on a node. + """ + BUNDLE_ATTRIBUTE_NAME = 'actions' + ITEM_ATTRIBUTES = { + 'command': None, + 'expected_stderr': None, + 'expected_stdout': None, + 'expected_return_code': 0, + 'interactive': None, + } + ITEM_TYPE_NAME = 'action' + REQUIRED_ATTRIBUTES = ['command'] + + def _get_result( + self, + autoskip_selector="", + my_soft_locks=(), + other_peoples_soft_locks=(), + interactive=False, + interactive_default=True, + ): + + if self.covered_by_autoskip_selector(autoskip_selector): + io.debug(_( + "autoskip matches {item} on {node}" + ).format(item=self.id, node=self.node.name)) + return (self.STATUS_SKIPPED, [_("cmdline")]) + + if self._skip_with_soft_locks(my_soft_locks, other_peoples_soft_locks): + return (self.STATUS_SKIPPED, [_("soft locked")]) + + if interactive is False and self.attributes['interactive'] is True: + return (self.STATUS_SKIPPED, [_("interactive only")]) + + if self.triggered and not self.has_been_triggered: + io.debug(_("skipping {} because it wasn't triggered").format(self.id)) + return (self.STATUS_SKIPPED, [_("no trigger")]) + + if self.unless: + with io.job(_(" {node} {bundle} {item} checking 'unless' condition...").format( + bundle=self.bundle.name, + item=self.id, + node=self.node.name, + )): + unless_result = self.bundle.node.run( + self.unless, + may_fail=True, + ) + if unless_result.return_code == 0: + io.debug(_("{node}:{bundle}:action:{name}: failed 'unless', not running").format( + bundle=self.bundle.name, + name=self.name, + node=self.bundle.node.name, + )) + return (self.STATUS_SKIPPED, ["unless"]) + + if ( + interactive and + self.attributes['interactive'] is not False and + not io.ask( + wrap_question( + self.id, + self.attributes['command'], + _("Run action {}?").format( + bold(self.name), + ), + prefix="{x} {node} ".format( + node=bold(self.node.name), + x=blue("?"), + ), + ), + interactive_default, + epilogue="{x} {node}".format( + node=bold(self.node.name), + x=blue("?"), + ), + ) + ): + return (self.STATUS_SKIPPED, [_("interactive")]) + try: + self.run() + return (self.STATUS_ACTION_SUCCEEDED, None) + except ActionFailure as exc: + return (self.STATUS_FAILED, [str(exc)]) + + def apply(self, *args, **kwargs): + return self.get_result(*args, **kwargs) + + def cdict(self): + raise AttributeError(_("actions don't have cdicts")) + + def get_result(self, *args, **kwargs): + self.node.repo.hooks.action_run_start( + self.node.repo, + self.node, + self, + ) + start_time = datetime.now() + + status_code = self._get_result(*args, **kwargs) + + self.node.repo.hooks.action_run_end( + self.node.repo, + self.node, + self, + duration=datetime.now() - start_time, + status=status_code[0], + ) + + return status_code + + def run(self): + with io.job(_(" {node} {bundle} {item} running...").format( + bundle=self.bundle.name, + item=self.id, + node=self.node.name, + )): + result = 
self.bundle.node.run( + self.attributes['command'], + may_fail=True, + ) + + if self.attributes['expected_return_code'] is not None and \ + not result.return_code == self.attributes['expected_return_code']: + raise ActionFailure(_("wrong return code: {}").format(result.return_code)) + + if self.attributes['expected_stderr'] is not None and \ + result.stderr_text != self.attributes['expected_stderr']: + raise ActionFailure(_("wrong stderr")) + + if self.attributes['expected_stdout'] is not None and \ + result.stdout_text != self.attributes['expected_stdout']: + raise ActionFailure(_("wrong stdout")) + + return result + + @classmethod + def validate_attributes(cls, bundle, item_id, attributes): + if attributes.get('interactive', None) not in (True, False, None): + raise BundleError(_( + "invalid interactive setting for action '{item}' in bundle '{bundle}'" + ).format(item=item_id, bundle=bundle.name)) diff --git a/bundlewrap/items/directories.py b/bundlewrap/items/directories.py new file mode 100644 index 0000000..20d8d8e --- /dev/null +++ b/bundlewrap/items/directories.py @@ -0,0 +1,253 @@ +# -*- coding: utf-8 -*- +from __future__ import unicode_literals + +from collections import defaultdict +from os.path import normpath +from pipes import quote + +from bundlewrap.exceptions import BundleError +from bundlewrap.items import Item +from bundlewrap.utils.remote import PathInfo +from bundlewrap.utils.text import mark_for_translation as _ +from bundlewrap.utils.text import is_subdirectory +from bundlewrap.utils.ui import io + + +UNMANAGED_PATH_DESC = _("unmanaged subpaths") + + +def validator_mode(item_id, value): + value = str(value) + if not value.isdigit(): + raise BundleError( + _("mode for {item} should be written as digits, got: '{value}'" + "").format(item=item_id, value=value) + ) + for digit in value: + if int(digit) > 7 or int(digit) < 0: + raise BundleError(_( + "invalid mode for {item}: '{value}'" + ).format(item=item_id, value=value)) + if not len(value) == 3 and not len(value) == 4: + raise BundleError(_( + "mode for {item} should be three or four digits long, was: '{value}'" + ).format(item=item_id, value=value)) + +ATTRIBUTE_VALIDATORS = defaultdict(lambda: lambda id, value: None) +ATTRIBUTE_VALIDATORS.update({ + 'mode': validator_mode, +}) + + +class Directory(Item): + """ + A directory. 
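+
+    A hypothetical bundle snippet (paths and names made up, attribute
+    names as in ITEM_ATTRIBUTES below):
+
+        directories = {
+            "/var/lib/myapp": {
+                'owner': "myapp",
+                'group': "myapp",
+                'mode': "0750",
+                'purge': True,  # remove unmanaged paths below this dir
+            },
+        }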
+ """ + BUNDLE_ATTRIBUTE_NAME = "directories" + ITEM_ATTRIBUTES = { + 'group': None, + 'mode': None, + 'owner': None, + 'purge': False, + } + ITEM_TYPE_NAME = "directory" + + def __repr__(self): + return "".format( + quote(self.name), + ) + + def cdict(self): + cdict = { + 'paths_to_purge': [], + 'type': 'directory', + } + for optional_attr in ('group', 'mode', 'owner'): + if self.attributes[optional_attr] is not None: + cdict[optional_attr] = self.attributes[optional_attr] + return cdict + + def display_dicts(self, cdict, sdict, keys): + if UNMANAGED_PATH_DESC in keys: + cdict[UNMANAGED_PATH_DESC] = cdict['paths_to_purge'] + sdict[UNMANAGED_PATH_DESC] = sdict['paths_to_purge'] + del cdict['paths_to_purge'] + del sdict['paths_to_purge'] + return (cdict, sdict) + + def display_keys(self, cdict, sdict, keys): + try: + keys.remove('paths_to_purge') + except ValueError: + pass + else: + keys.append(UNMANAGED_PATH_DESC) + return keys + + def fix(self, status): + if status.must_be_created or 'type' in status.keys_to_fix: + # fixing the type fixes everything + self._fix_type(status) + return + + for path in status.sdict.get('paths_to_purge', []): + self.node.run("rm -rf -- {}".format(quote(path))) + + for fix_type in ('mode', 'owner', 'group'): + if fix_type in status.keys_to_fix: + if fix_type == 'group' and 'owner' in status.keys_to_fix: + # owner and group are fixed with a single chown + continue + getattr(self, "_fix_" + fix_type)(status) + + def _fix_mode(self, status): + if self.node.os in self.node.OS_FAMILY_BSD: + chmod_command = "chmod {} {}" + else: + chmod_command = "chmod {} -- {}" + self.node.run(chmod_command.format( + self.attributes['mode'], + quote(self.name), + )) + + def _fix_owner(self, status): + group = self.attributes['group'] or "" + if group: + group = ":" + quote(group) + if self.node.os in self.node.OS_FAMILY_BSD: + command = "chown {}{} {}" + else: + command = "chown {}{} -- {}" + self.node.run(command.format( + quote(self.attributes['owner'] or ""), + group, + quote(self.name), + )) + _fix_group = _fix_owner + + def _fix_type(self, status): + self.node.run("rm -rf -- {}".format(quote(self.name))) + self.node.run("mkdir -p -- {}".format(quote(self.name))) + if self.attributes['mode']: + self._fix_mode(status) + if self.attributes['owner'] or self.attributes['group']: + self._fix_owner(status) + + def _get_paths_to_purge(self): + result = self.node.run("find {} -maxdepth 1 -print0".format(quote(self.name))) + for line in result.stdout.split(b"\0"): + line = line.decode('utf-8') + found = False + for item_type in ('directory', 'file', 'symlink'): + if found: + break + for item in self.node.items: + if ( + item.id == "{}:{}".format(item_type, line) or + item.id.startswith("{}:{}/".format(item_type, line)) + ): + found = True + break + if not found: + # this file or directory is not managed + io.debug(( + "found unmanaged path below {dirpath} on {node}, " + "marking for removal: {path}" + ).format( + dirpath=self.name, + node=self.node.name, + path=line, + )) + yield line + + + + def get_auto_deps(self, items): + deps = [] + for item in items: + if item == self: + continue + if (( + item.ITEM_TYPE_NAME == "file" and + is_subdirectory(item.name, self.name) + ) or ( + item.ITEM_TYPE_NAME in ("file", "symlink") and + item.name == self.name + )): + raise BundleError(_( + "{item1} (from bundle '{bundle1}') blocking path to " + "{item2} (from bundle '{bundle2}')" + ).format( + item1=item.id, + bundle1=item.bundle.name, + item2=self.id, + bundle2=self.bundle.name, + )) + elif 
item.ITEM_TYPE_NAME == "user" and item.name == self.attributes['owner']: + if item.attributes['delete']: + raise BundleError(_( + "{item1} (from bundle '{bundle1}') depends on item " + "{item2} (from bundle '{bundle2}') which is set to be deleted" + ).format( + item1=self.id, + bundle1=self.bundle.name, + item2=item.id, + bundle2=item.bundle.name, + )) + else: + deps.append(item.id) + elif item.ITEM_TYPE_NAME == "group" and item.name == self.attributes['group']: + if item.attributes['delete']: + raise BundleError(_( + "{item1} (from bundle '{bundle1}') depends on item " + "{item2} (from bundle '{bundle2}') which is set to be deleted" + ).format( + item1=self.id, + bundle1=self.bundle.name, + item2=item.id, + bundle2=item.bundle.name, + )) + else: + deps.append(item.id) + elif item.ITEM_TYPE_NAME in ("directory", "symlink"): + if is_subdirectory(item.name, self.name): + deps.append(item.id) + return deps + + def sdict(self): + path_info = PathInfo(self.node, self.name) + if not path_info.exists: + return None + else: + paths_to_purge = [] + if self.attributes['purge']: + paths_to_purge = list(self._get_paths_to_purge()) + return { + 'type': path_info.path_type, + 'mode': path_info.mode, + 'owner': path_info.owner, + 'group': path_info.group, + 'paths_to_purge': paths_to_purge, + } + + def patch_attributes(self, attributes): + if 'mode' in attributes and attributes['mode'] is not None: + attributes['mode'] = str(attributes['mode']).zfill(4) + return attributes + + @classmethod + def validate_attributes(cls, bundle, item_id, attributes): + for key, value in attributes.items(): + ATTRIBUTE_VALIDATORS[key](item_id, value) + + @classmethod + def validate_name(cls, bundle, name): + if normpath(name) != name: + raise BundleError(_( + "'{path}' is an invalid directory path, " + "should be '{normpath}' (bundle '{bundle}')" + ).format( + bundle=bundle.name, + normpath=normpath(name), + path=name, + )) diff --git a/bundlewrap/items/files.py b/bundlewrap/items/files.py new file mode 100644 index 0000000..cf248a8 --- /dev/null +++ b/bundlewrap/items/files.py @@ -0,0 +1,486 @@ +# -*- coding: utf-8 -*- +from __future__ import unicode_literals + +from base64 import b64decode +from collections import defaultdict +from contextlib import contextmanager +from datetime import datetime +from os.path import basename, dirname, exists, join, normpath +from pipes import quote +from subprocess import call +from sys import exc_info +from traceback import format_exception + +from bundlewrap.exceptions import BundleError, FaultUnavailable, TemplateError +from bundlewrap.items import BUILTIN_ITEM_ATTRIBUTES, Item +from bundlewrap.items.directories import validator_mode +from bundlewrap.utils import cached_property, hash_local_file, sha1, tempfile +from bundlewrap.utils.remote import PathInfo +from bundlewrap.utils.text import force_text, mark_for_translation as _ +from bundlewrap.utils.text import is_subdirectory +from bundlewrap.utils.ui import io + + +DIFF_MAX_FILE_SIZE = 1024 * 1024 * 5 # bytes + + +def content_processor_base64(item): + # .encode() is required for pypy3 only + return b64decode(item._template_content.encode()) + + +def content_processor_jinja2(item): + try: + from jinja2 import Environment, FileSystemLoader + except ImportError: + raise TemplateError(_( + "Unable to load Jinja2 (required to render {item}). " + "You probably have to install it using `pip install Jinja2`." 
).format(item=item.id))
+
+    loader = FileSystemLoader(searchpath=[item.item_data_dir, item.item_dir])
+    env = Environment(loader=loader)
+
+    template = env.from_string(item._template_content)
+
+    io.debug("{node}:{bundle}:{item}: rendering with Jinja2...".format(
+        bundle=item.bundle.name,
+        item=item.id,
+        node=item.node.name,
+    ))
+    start = datetime.now()
+    try:
+        content = template.render(
+            item=item,
+            bundle=item.bundle,
+            node=item.node,
+            repo=item.node.repo,
+            **item.attributes['context']
+        )
+    except FaultUnavailable:
+        raise
+    except Exception as e:
+        io.debug("".join(format_exception(*exc_info())))
+        raise TemplateError(_(
+            "Error while rendering template for {node}:{bundle}:{item}: {error}"
+        ).format(
+            bundle=item.bundle.name,
+            error=e,
+            item=item.id,
+            node=item.node.name,
+        ))
+    duration = datetime.now() - start
+    io.debug("{node}:{bundle}:{item}: rendered in {time}s".format(
+        bundle=item.bundle.name,
+        item=item.id,
+        node=item.node.name,
+        time=duration.total_seconds(),
+    ))
+    return content.encode(item.attributes['encoding'])
+
+
+def content_processor_mako(item):
+    from mako.lookup import TemplateLookup
+    from mako.template import Template
+    template = Template(
+        item._template_content.encode('utf-8'),
+        input_encoding='utf-8',
+        lookup=TemplateLookup(directories=[item.item_data_dir, item.item_dir]),
+        output_encoding=item.attributes['encoding'],
+    )
+    io.debug("{node}:{bundle}:{item}: rendering with Mako...".format(
+        bundle=item.bundle.name,
+        item=item.id,
+        node=item.node.name,
+    ))
+    start = datetime.now()
+    try:
+        content = template.render(
+            item=item,
+            bundle=item.bundle,
+            node=item.node,
+            repo=item.node.repo,
+            **item.attributes['context']
+        )
+    except FaultUnavailable:
+        raise
+    except Exception as e:
+        io.debug("".join(format_exception(*exc_info())))
+        if isinstance(e, NameError) and str(e) == "Undefined":
+            # Mako isn't very verbose here. Try to give a more useful
+            # error message - even though we can't pinpoint the exact
+            # location of the error. :/
+            e = _("Undefined variable (look for '${...}')")
+        raise TemplateError(_(
+            "Error while rendering template for {node}:{bundle}:{item}: {error}"
+        ).format(
+            bundle=item.bundle.name,
+            error=e,
+            item=item.id,
+            node=item.node.name,
+        ))
+    duration = datetime.now() - start
+    io.debug("{node}:{bundle}:{item}: rendered in {time}s".format(
+        bundle=item.bundle.name,
+        item=item.id,
+        node=item.node.name,
+        time=duration.total_seconds(),
+    ))
+    return content
+
+
+def content_processor_text(item):
+    return item._template_content.encode(item.attributes['encoding'])
+
+
+CONTENT_PROCESSORS = {
+    'any': lambda item: b"",
+    'base64': content_processor_base64,
+    'binary': None,
+    'jinja2': content_processor_jinja2,
+    'mako': content_processor_mako,
+    'text': content_processor_text,
+}
+
+
+def get_remote_file_contents(node, path):
+    """
+    Returns the contents of the given path as a string.
+    """
+    with tempfile() as tmp_file:
+        node.download(path, tmp_file)
+        with open(tmp_file, 'rb') as f:
+            content = f.read()
+        return content
+
+
+def validator_content_type(item_id, value):
+    if value not in CONTENT_PROCESSORS:
+        raise BundleError(_(
+            "invalid content_type for {item}: '{value}'"
+        ).format(item=item_id, value=value))
+
+
+ATTRIBUTE_VALIDATORS = defaultdict(lambda: lambda id, value: None)
+ATTRIBUTE_VALIDATORS.update({
+    'content_type': validator_content_type,
+    'mode': validator_mode,
+})
+
+
+class File(Item):
+    """
+    A file.
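+
+    A hypothetical bundle snippet (paths and values made up; 'source'
+    defaults to the file's basename, see patch_attributes below):
+
+        files = {
+            "/etc/motd": {
+                'content': "hello\n",
+                'mode': "0644",
+            },
+            "/etc/myapp.conf": {
+                'source': "myapp.conf",     # template in the bundle
+                'content_type': 'jinja2',   # rendered via Jinja2
+                'context': {'port': 8080},  # made-up template vars
+            },
+        }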
+ """ + BUNDLE_ATTRIBUTE_NAME = "files" + ITEM_ATTRIBUTES = { + 'content': None, + 'content_type': 'text', + 'context': None, + 'delete': False, + 'encoding': "utf-8", + 'group': None, + 'mode': None, + 'owner': None, + 'source': None, + 'verify_with': None, + } + ITEM_TYPE_NAME = "file" + + def __repr__(self): + return "".format(quote(self.name)) + + @property + def _template_content(self): + if self.attributes['source'] is not None: + filename = join(self.item_data_dir, self.attributes['source']) + if exists(filename): + with open(filename, 'rb') as f: + content = f.read() + else: + filename = join(self.item_dir, self.attributes['source']) + with open(filename, 'rb') as f: + content = f.read() + return force_text(content) + else: + return force_text(self.attributes['content']) + + @cached_property + def content(self): + return CONTENT_PROCESSORS[self.attributes['content_type']](self) + + @cached_property + def content_hash(self): + if self.attributes['content_type'] == 'binary': + return hash_local_file(self.template) + else: + return sha1(self.content) + + @cached_property + def template(self): + data_template = join(self.item_data_dir, self.attributes['source']) + if exists(data_template): + return data_template + return join(self.item_dir, self.attributes['source']) + + def cdict(self): + if self.attributes['delete']: + return None + cdict = {'type': 'file'} + if self.attributes['content_type'] != 'any': + cdict['content_hash'] = self.content_hash + for optional_attr in ('group', 'mode', 'owner'): + if self.attributes[optional_attr] is not None: + cdict[optional_attr] = self.attributes[optional_attr] + return cdict + + def fix(self, status): + if status.must_be_created or status.must_be_deleted or 'type' in status.keys_to_fix: + self._fix_type(status) + else: + for fix_type in ('content_hash', 'mode', 'owner', 'group'): + if fix_type in status.keys_to_fix: + if fix_type == 'group' and \ + 'owner' in status.keys_to_fix: + # owner and group are fixed with a single chown + continue + if fix_type in ('mode', 'owner', 'group') and \ + 'content' in status.keys_to_fix: + # fixing content implies settings mode and owner/group + continue + getattr(self, "_fix_" + fix_type)(status) + + def _fix_content_hash(self, status): + with self._write_local_file() as local_path: + self.node.upload( + local_path, + self.name, + mode=self.attributes['mode'], + owner=self.attributes['owner'] or "", + group=self.attributes['group'] or "", + ) + + def _fix_mode(self, status): + if self.node.os in self.node.OS_FAMILY_BSD: + command = "chmod {} {}" + else: + command = "chmod {} -- {}" + self.node.run(command.format( + self.attributes['mode'], + quote(self.name), + )) + + def _fix_owner(self, status): + group = self.attributes['group'] or "" + if group: + group = ":" + quote(group) + if self.node.os in self.node.OS_FAMILY_BSD: + command = "chown {}{} {}" + else: + command = "chown {}{} -- {}" + self.node.run(command.format( + quote(self.attributes['owner'] or ""), + group, + quote(self.name), + )) + _fix_group = _fix_owner + + def _fix_type(self, status): + if status.sdict: + self.node.run("rm -rf -- {}".format(quote(self.name))) + if not status.must_be_deleted: + self.node.run("mkdir -p -- {}".format(quote(dirname(self.name)))) + self._fix_content_hash(status) + + def get_auto_deps(self, items): + deps = [] + for item in items: + if item.ITEM_TYPE_NAME == "file" and is_subdirectory(item.name, self.name): + raise BundleError(_( + "{item1} (from bundle '{bundle1}') blocking path to " + "{item2} (from bundle 
'{bundle2}')" + ).format( + item1=item.id, + bundle1=item.bundle.name, + item2=self.id, + bundle2=self.bundle.name, + )) + elif item.ITEM_TYPE_NAME == "user" and item.name == self.attributes['owner']: + if item.attributes['delete']: + raise BundleError(_( + "{item1} (from bundle '{bundle1}') depends on item " + "{item2} (from bundle '{bundle2}') which is set to be deleted" + ).format( + item1=self.id, + bundle1=self.bundle.name, + item2=item.id, + bundle2=item.bundle.name, + )) + else: + deps.append(item.id) + elif item.ITEM_TYPE_NAME == "group" and item.name == self.attributes['group']: + if item.attributes['delete']: + raise BundleError(_( + "{item1} (from bundle '{bundle1}') depends on item " + "{item2} (from bundle '{bundle2}') which is set to be deleted" + ).format( + item1=self.id, + bundle1=self.bundle.name, + item2=item.id, + bundle2=item.bundle.name, + )) + else: + deps.append(item.id) + elif item.ITEM_TYPE_NAME in ("directory", "symlink"): + if is_subdirectory(item.name, self.name): + deps.append(item.id) + return deps + + def sdict(self): + path_info = PathInfo(self.node, self.name) + if not path_info.exists: + return None + else: + return { + 'type': path_info.path_type, + 'content_hash': path_info.sha1 if path_info.path_type == 'file' else None, + 'mode': path_info.mode, + 'owner': path_info.owner, + 'group': path_info.group, + 'size': path_info.size, + } + + def display_dicts(self, cdict, sdict, keys): + if 'content' in keys: + del cdict['content_hash'] + del sdict['content_hash'] + cdict['content'] = self.content + sdict['content'] = get_remote_file_contents(self.node, self.name) + return (cdict, sdict) + + def display_keys(self, cdict, sdict, keys): + if ( + 'content_hash' in keys and + self.attributes['content_type'] not in ('base64', 'binary') and + sdict['size'] < DIFF_MAX_FILE_SIZE and + len(self.content) < DIFF_MAX_FILE_SIZE + ): + keys.remove('content_hash') + keys.append('content') + return keys + + def patch_attributes(self, attributes): + if ( + 'content' not in attributes and + 'source' not in attributes and + attributes.get('content_type', 'text') != 'any' and + attributes.get('delete', False) is False + ): + attributes['source'] = basename(self.name) + if 'context' not in attributes: + attributes['context'] = {} + if 'mode' in attributes and attributes['mode'] is not None: + attributes['mode'] = str(attributes['mode']).zfill(4) + return attributes + + def test(self): + if self.attributes['source'] and not exists(self.template): + raise BundleError(_( + "{item} from bundle '{bundle}' refers to missing " + "file '{path}' in its 'source' attribute" + ).format( + bundle=self.bundle.name, + item=self.id, + path=self.template, + )) + + if not self.attributes['delete'] and not self.attributes['content_type'] == 'any': + with self._write_local_file(): + pass + + @classmethod + def validate_attributes(cls, bundle, item_id, attributes): + if attributes.get('delete', False): + for attr in attributes.keys(): + if attr not in ['delete'] + list(BUILTIN_ITEM_ATTRIBUTES.keys()): + raise BundleError(_( + "{item} from bundle '{bundle}' cannot have other " + "attributes besides 'delete'" + ).format(item=item_id, bundle=bundle.name)) + if 'content' in attributes and 'source' in attributes: + raise BundleError(_( + "{item} from bundle '{bundle}' cannot have both 'content' and 'source'" + ).format(item=item_id, bundle=bundle.name)) + + if 'content' in attributes and attributes.get('content_type') == 'binary': + raise BundleError(_( + "{item} from bundle '{bundle}' cannot have binary 
inline content " + "(use content_type 'base64' instead)" + ).format(item=item_id, bundle=bundle.name)) + + if 'encoding' in attributes and attributes.get('content_type') in ( + 'any', + 'base64', + 'binary', + ): + raise BundleError(_( + "content_type of {item} from bundle '{bundle}' cannot provide different encoding " + "(remove the 'encoding' attribute)" + ).format(item=item_id, bundle=bundle.name)) + + if ( + attributes.get('content_type', None) == "any" and ( + 'content' in attributes or + 'encoding' in attributes or + 'source' in attributes + ) + ): + raise BundleError(_( + "{item} from bundle '{bundle}' with content_type 'any' " + "must not define 'content', 'encoding' and/or 'source'" + ).format(item=item_id, bundle=bundle.name)) + + for key, value in attributes.items(): + ATTRIBUTE_VALIDATORS[key](item_id, value) + + @classmethod + def validate_name(cls, bundle, name): + if normpath(name) == "/": + raise BundleError(_("'/' cannot be a file")) + if normpath(name) != name: + raise BundleError(_( + "'{path}' is an invalid file path, should be '{normpath}' (bundle '{bundle}')" + ).format( + bundle=bundle.name, + normpath=normpath(name), + path=name, + )) + + @contextmanager + def _write_local_file(self): + """ + Makes the file contents available at the returned temporary path + and performs local verification if necessary or requested. + + The calling method is responsible for cleaning up the file at + the returned path (only if not a binary). + """ + with tempfile() as tmp_file: + if self.attributes['content_type'] == 'binary': + local_path = self.template + else: + local_path = tmp_file + with open(local_path, 'wb') as f: + f.write(self.content) + + if self.attributes['verify_with']: + cmd = self.attributes['verify_with'].format(quote(local_path)) + io.debug("calling local verify command for {i}: {c}".format(c=cmd, i=self.id)) + if call(cmd, shell=True) == 0: + io.debug("{i} passed local validation".format(i=self.id)) + else: + raise BundleError(_( + "{i} failed local validation using: {c}" + ).format(c=cmd, i=self.id)) + + yield local_path diff --git a/bundlewrap/items/groups.py b/bundlewrap/items/groups.py new file mode 100644 index 0000000..e0349f7 --- /dev/null +++ b/bundlewrap/items/groups.py @@ -0,0 +1,120 @@ +# -*- coding: utf-8 -*- +from __future__ import unicode_literals + +from bundlewrap.exceptions import BundleError +from bundlewrap.items import BUILTIN_ITEM_ATTRIBUTES, Item +from bundlewrap.items.users import _USERNAME_VALID_CHARACTERS +from bundlewrap.utils.text import mark_for_translation as _ + + +def _parse_group_line(line): + """ + Parses a line from /etc/group and returns the information as a + dictionary. + """ + result = dict(zip( + ('groupname', 'password', 'gid', 'members'), + line.strip().split(":"), + )) + result['gid'] = result['gid'] + del result['password'] # nothing useful here + return result + + +class Group(Item): + """ + A group. 
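+
+    A hypothetical bundle snippet (names and gid made up):
+
+        groups = {
+            "myapp": {'gid': 1200},
+            "obsolete": {'delete': True},
+        }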
+ """ + BUNDLE_ATTRIBUTE_NAME = "groups" + ITEM_ATTRIBUTES = { + 'delete': False, + 'gid': None, + } + ITEM_TYPE_NAME = "group" + REQUIRED_ATTRIBUTES = [] + + def __repr__(self): + return "".format(self.name) + + def cdict(self): + if self.attributes['delete']: + return None + cdict = {} + if self.attributes.get('gid') is not None: + cdict['gid'] = self.attributes['gid'] + return cdict + + def fix(self, status): + if status.must_be_created: + if self.attributes['gid'] is None: + command = "groupadd {}".format(self.name) + else: + command = "groupadd -g {gid} {groupname}".format( + gid=self.attributes['gid'], + groupname=self.name, + ) + self.node.run(command, may_fail=True) + elif status.must_be_deleted: + self.node.run("groupdel {}".format(self.name), may_fail=True) + else: + self.node.run( + "groupmod -g {gid} {groupname}".format( + gid=self.attributes['gid'], + groupname=self.name, + ), + may_fail=True, + ) + + def sdict(self): + # verify content of /etc/group + grep_result = self.node.run( + "grep -e '^{}:' /etc/group".format(self.name), + may_fail=True, + ) + if grep_result.return_code != 0: + return None + else: + return _parse_group_line(grep_result.stdout_text) + + def patch_attributes(self, attributes): + if isinstance(attributes.get('gid'), int): + attributes['gid'] = str(attributes['gid']) + return attributes + + @classmethod + def validate_attributes(cls, bundle, item_id, attributes): + if attributes.get('delete', False): + for attr in attributes.keys(): + if attr not in ['delete'] + list(BUILTIN_ITEM_ATTRIBUTES.keys()): + raise BundleError(_( + "{item} from bundle '{bundle}' cannot have other " + "attributes besides 'delete'" + ).format(item=item_id, bundle=bundle.name)) + + @classmethod + def validate_name(cls, bundle, name): + for char in name: + if char not in _USERNAME_VALID_CHARACTERS: + raise BundleError(_( + "Invalid character in group name '{name}': {char} (bundle '{bundle}')" + ).format( + char=char, + bundle=bundle.name, + name=name, + )) + + if name.endswith("_") or name.endswith("-"): + raise BundleError(_( + "Group name '{name}' must not end in dash or underscore (bundle '{bundle}')" + ).format( + bundle=bundle.name, + name=name, + )) + + if len(name) > 30: + raise BundleError(_( + "Group name '{name}' is longer than 30 characters (bundle '{bundle}')" + ).format( + bundle=bundle.name, + name=name, + )) diff --git a/bundlewrap/items/pkg_apt.py b/bundlewrap/items/pkg_apt.py new file mode 100644 index 0000000..f1ed2c3 --- /dev/null +++ b/bundlewrap/items/pkg_apt.py @@ -0,0 +1,69 @@ +# -*- coding: utf-8 -*- +from __future__ import unicode_literals + +from pipes import quote + +from bundlewrap.exceptions import BundleError +from bundlewrap.items import Item +from bundlewrap.utils.text import mark_for_translation as _ + + +def pkg_install(node, pkgname): + return node.run("DEBIAN_FRONTEND=noninteractive " + "apt-get -qy -o Dpkg::Options::=--force-confold --no-install-recommends " + "install {}".format(quote(pkgname))) + + +def pkg_installed(node, pkgname): + result = node.run( + "dpkg -s {} | grep '^Status: '".format(quote(pkgname)), + may_fail=True, + ) + if result.return_code != 0 or " installed" not in result.stdout_text: + return False + else: + return True + + +def pkg_remove(node, pkgname): + return node.run("DEBIAN_FRONTEND=noninteractive " + "apt-get -qy purge {}".format(quote(pkgname))) + + +class AptPkg(Item): + """ + A package installed by apt. 
+ """ + BLOCK_CONCURRENT = ["pkg_apt"] + BUNDLE_ATTRIBUTE_NAME = "pkg_apt" + ITEM_ATTRIBUTES = { + 'installed': True, + } + ITEM_TYPE_NAME = "pkg_apt" + + def __repr__(self): + return "".format( + self.name, + self.attributes['installed'], + ) + + def fix(self, status): + if self.attributes['installed'] is False: + pkg_remove(self.node, self.name) + else: + pkg_install(self.node, self.name) + + def sdict(self): + return { + 'installed': pkg_installed(self.node, self.name), + } + + @classmethod + def validate_attributes(cls, bundle, item_id, attributes): + if not isinstance(attributes.get('installed', True), bool): + raise BundleError(_( + "expected boolean for 'installed' on {item} in bundle '{bundle}'" + ).format( + bundle=bundle.name, + item=item_id, + )) diff --git a/bundlewrap/items/pkg_dnf.py b/bundlewrap/items/pkg_dnf.py new file mode 100644 index 0000000..af0196c --- /dev/null +++ b/bundlewrap/items/pkg_dnf.py @@ -0,0 +1,66 @@ +# -*- coding: utf-8 -*- +from __future__ import unicode_literals + +from pipes import quote + +from bundlewrap.exceptions import BundleError +from bundlewrap.items import Item +from bundlewrap.utils.text import mark_for_translation as _ + + +def pkg_install(node, pkgname): + return node.run("dnf -d0 -e0 -y install {}".format(quote(pkgname))) + + +def pkg_installed(node, pkgname): + result = node.run( + "dnf -d0 -e0 list installed {}".format(quote(pkgname)), + may_fail=True, + ) + if result.return_code != 0: + return False + else: + return True + + +def pkg_remove(node, pkgname): + return node.run("dnf -d0 -e0 -y remove {}".format(quote(pkgname))) + + +class DnfPkg(Item): + """ + A package installed by dnf. + """ + BLOCK_CONCURRENT = ["pkg_dnf", "pkg_yum"] + BUNDLE_ATTRIBUTE_NAME = "pkg_dnf" + ITEM_ATTRIBUTES = { + 'installed': True, + } + ITEM_TYPE_NAME = "pkg_dnf" + + def __repr__(self): + return "".format( + self.name, + self.attributes['installed'], + ) + + def fix(self, status): + if self.attributes['installed'] is False: + pkg_remove(self.node, self.name) + else: + pkg_install(self.node, self.name) + + def sdict(self): + return { + 'installed': pkg_installed(self.node, self.name), + } + + @classmethod + def validate_attributes(cls, bundle, item_id, attributes): + if not isinstance(attributes.get('installed', True), bool): + raise BundleError(_( + "expected boolean for 'installed' on {item} in bundle '{bundle}'" + ).format( + bundle=bundle.name, + item=item_id, + )) diff --git a/bundlewrap/items/pkg_openbsd.py b/bundlewrap/items/pkg_openbsd.py new file mode 100644 index 0000000..059af9d --- /dev/null +++ b/bundlewrap/items/pkg_openbsd.py @@ -0,0 +1,81 @@ +# -*- coding: utf-8 -*- +from __future__ import unicode_literals + +from pipes import quote +import re + +from bundlewrap.exceptions import BundleError +from bundlewrap.items import Item +from bundlewrap.utils.text import mark_for_translation as _ + + +PKGSPEC_REGEX = re.compile(r"^(.+)-(\d.+)$") + + +def pkg_install(node, pkgname, version): + full_name = "{}-{}".format(pkgname, version) if version else pkgname + return node.run("pkg_add -r -I {}".format(full_name)) + + +def pkg_installed(node, pkgname): + result = node.run( + "pkg_info | cut -f 1 -d ' '", + may_fail=True, + ) + for line in result.stdout.decode('utf-8').strip().split("\n"): + installed_package, installed_version = PKGSPEC_REGEX.match(line).groups() + if installed_package == pkgname: + return installed_version + return False + + +def pkg_remove(node, pkgname): + return node.run("pkg_delete -I -D dependencies {}".format(quote(pkgname))) + 
+ +class OpenBSDPkg(Item): + """ + A package installed by pkg_add/pkg_delete. + """ + BLOCK_CONCURRENT = ["pkg_openbsd"] + BUNDLE_ATTRIBUTE_NAME = "pkg_openbsd" + ITEM_ATTRIBUTES = { + 'installed': True, + 'version': None, + } + ITEM_TYPE_NAME = "pkg_openbsd" + + def __repr__(self): + return "".format( + self.name, + self.attributes['installed'], + ) + + def cdict(self): + cdict = self.attributes.copy() + if cdict['version'] is None or not cdict['installed']: + del cdict['version'] + return cdict + + def fix(self, status): + if self.attributes['installed'] is False: + pkg_remove(self.node, self.name) + else: + pkg_install(self.node, self.name, self.attributes['version']) + + def sdict(self): + version = pkg_installed(self.node, self.name) + return { + 'installed': bool(version), + 'version': version if version else _("none"), + } + + @classmethod + def validate_attributes(cls, bundle, item_id, attributes): + if not isinstance(attributes.get('installed', True), bool): + raise BundleError(_( + "expected boolean for 'installed' on {item} in bundle '{bundle}'" + ).format( + bundle=bundle.name, + item=item_id, + )) diff --git a/bundlewrap/items/pkg_pacman.py b/bundlewrap/items/pkg_pacman.py new file mode 100644 index 0000000..dc2477a --- /dev/null +++ b/bundlewrap/items/pkg_pacman.py @@ -0,0 +1,85 @@ +# -*- coding: utf-8 -*- +from __future__ import unicode_literals + +from os.path import basename, join +from pipes import quote + +from bundlewrap.exceptions import BundleError +from bundlewrap.items import Item +from bundlewrap.utils.text import mark_for_translation as _ + + +def pkg_install(node, pkgname, operation='S'): + return node.run("pacman --noconfirm -{} {}".format(operation, + quote(pkgname))) + + +def pkg_install_tarball(node, local_file): + remote_file = "/tmp/{}".format(basename(local_file)) + node.upload(local_file, remote_file) + pkg_install(node, remote_file, operation='U') + node.run("rm -- {}".format(quote(remote_file))) + + +def pkg_installed(node, pkgname): + result = node.run( + "pacman -Q {}".format(quote(pkgname)), + may_fail=True, + ) + if result.return_code != 0: + return False + else: + return True + + +def pkg_remove(node, pkgname): + return node.run("pacman --noconfirm -Rs {}".format(quote(pkgname))) + + +class PacmanPkg(Item): + """ + A package installed by pacman. 
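+
+    A hypothetical bundle snippet (names made up; 'tarball' is a path
+    relative to the item directory, see fix() below):
+
+        pkg_pacman = {
+            "base-devel": {},
+            "myapp": {'tarball': "myapp-1.0-1-x86_64.pkg.tar.xz"},
+        }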
+ """ + BLOCK_CONCURRENT = ["pkg_pacman"] + BUNDLE_ATTRIBUTE_NAME = "pkg_pacman" + ITEM_ATTRIBUTES = { + 'installed': True, + 'tarball': None, + } + ITEM_TYPE_NAME = "pkg_pacman" + + def __repr__(self): + return "".format( + self.name, + self.attributes['installed'], + self.attributes['tarball'], + ) + + def cdict(self): + # TODO/FIXME: this is bad because it ignores tarball + return {'installed': self.attributes['installed']} + + def fix(self, status): + if self.attributes['installed'] is False: + pkg_remove(self.node, self.name) + else: + if self.attributes['tarball']: + pkg_install_tarball(self.node, join(self.item_dir, + self.attributes['tarball'])) + else: + pkg_install(self.node, self.name) + + def sdict(self): + return { + 'installed': pkg_installed(self.node, self.name), + } + + @classmethod + def validate_attributes(cls, bundle, item_id, attributes): + if not isinstance(attributes.get('installed', True), bool): + raise BundleError(_( + "expected boolean for 'installed' on {item} in bundle '{bundle}'" + ).format( + bundle=bundle.name, + item=item_id, + )) diff --git a/bundlewrap/items/pkg_pip.py b/bundlewrap/items/pkg_pip.py new file mode 100644 index 0000000..bd0d1e4 --- /dev/null +++ b/bundlewrap/items/pkg_pip.py @@ -0,0 +1,114 @@ +# -*- coding: utf-8 -*- +from __future__ import unicode_literals + +from os.path import join, split +from pipes import quote + +from bundlewrap.exceptions import BundleError +from bundlewrap.items import Item +from bundlewrap.utils.text import mark_for_translation as _ + + +def pkg_install(node, pkgname, version=None): + if version: + pkgname = "{}=={}".format(pkgname, version) + pip_path, pkgname = split_path(pkgname) + return node.run("{} install -U {}".format(quote(pip_path), quote(pkgname))) + + +def pkg_installed(node, pkgname): + pip_path, pkgname = split_path(pkgname) + result = node.run( + "{} freeze | grep -i '^{}=='".format(quote(pip_path), pkgname), + may_fail=True, + ) + if result.return_code != 0: + return False + else: + return result.stdout_text.split("=")[-1].strip() + + +def pkg_remove(node, pkgname): + pip_path, pkgname = split_path(pkgname) + return node.run("{} uninstall -y {}".format(quote(pip_path), quote(pkgname))) + + +class PipPkg(Item): + """ + A package installed by pip. 
+ """ + BLOCK_CONCURRENT = ["pkg_pip"] + BUNDLE_ATTRIBUTE_NAME = "pkg_pip" + ITEM_ATTRIBUTES = { + 'installed': True, + 'version': None, + } + ITEM_TYPE_NAME = "pkg_pip" + + def __repr__(self): + return "".format( + self.name, + self.attributes['installed'], + ) + + def cdict(self): + cdict = {'installed': self.attributes['installed']} + if self.attributes.get('version') is not None: + cdict['version'] = self.attributes['version'] + return cdict + + def get_auto_deps(self, items): + for item in items: + if item == self: + continue + if ( + item.ITEM_TYPE_NAME == self.ITEM_TYPE_NAME and + item.name.lower() == self.name.lower() + ): + raise BundleError(_( + "{item1} (from bundle '{bundle1}') has name collision with " + "{item2} (from bundle '{bundle2}')" + ).format( + item1=item.id, + bundle1=item.bundle.name, + item2=self.id, + bundle2=self.bundle.name, + )) + return [] + + def fix(self, status): + if self.attributes['installed'] is False: + pkg_remove(self.node, self.name) + else: + pkg_install(self.node, self.name, version=self.attributes['version']) + + def sdict(self): + install_status = pkg_installed(self.node, self.name) + return { + 'installed': bool(install_status), + 'version': None if install_status is False else install_status, + } + + @classmethod + def validate_attributes(cls, bundle, item_id, attributes): + if not isinstance(attributes.get('installed', True), bool): + raise BundleError(_( + "expected boolean for 'installed' on {item} in bundle '{bundle}'" + ).format( + bundle=bundle.name, + item=item_id, + )) + + if 'version' in attributes and attributes.get('installed', True) is False: + raise BundleError(_( + "cannot set version for uninstalled package on {item} in bundle '{bundle}'" + ).format( + bundle=bundle.name, + item=item_id, + )) + + +def split_path(pkgname): + virtualenv, pkgname = split(pkgname) + pip_path = join(virtualenv, "bin", "pip") if virtualenv else "pip" + return pip_path, pkgname diff --git a/bundlewrap/items/pkg_yum.py b/bundlewrap/items/pkg_yum.py new file mode 100644 index 0000000..6a274a2 --- /dev/null +++ b/bundlewrap/items/pkg_yum.py @@ -0,0 +1,66 @@ +# -*- coding: utf-8 -*- +from __future__ import unicode_literals + +from pipes import quote + +from bundlewrap.exceptions import BundleError +from bundlewrap.items import Item +from bundlewrap.utils.text import mark_for_translation as _ + + +def pkg_install(node, pkgname): + return node.run("yum -d0 -e0 -y install {}".format(quote(pkgname))) + + +def pkg_installed(node, pkgname): + result = node.run( + "yum -d0 -e0 list installed {}".format(quote(pkgname)), + may_fail=True, + ) + if result.return_code != 0: + return False + else: + return True + + +def pkg_remove(node, pkgname): + return node.run("yum -d0 -e0 -y remove {}".format(quote(pkgname))) + + +class YumPkg(Item): + """ + A package installed by yum. 
+ """ + BLOCK_CONCURRENT = ["pkg_yum", "pkg_dnf"] + BUNDLE_ATTRIBUTE_NAME = "pkg_yum" + ITEM_ATTRIBUTES = { + 'installed': True, + } + ITEM_TYPE_NAME = "pkg_yum" + + def __repr__(self): + return "".format( + self.name, + self.attributes['installed'], + ) + + def fix(self, status): + if self.attributes['installed'] is False: + pkg_remove(self.node, self.name) + else: + pkg_install(self.node, self.name) + + def sdict(self): + return { + 'installed': pkg_installed(self.node, self.name), + } + + @classmethod + def validate_attributes(cls, bundle, item_id, attributes): + if not isinstance(attributes.get('installed', True), bool): + raise BundleError(_( + "expected boolean for 'installed' on {item} in bundle '{bundle}'" + ).format( + bundle=bundle.name, + item=item_id, + )) diff --git a/bundlewrap/items/pkg_zypper.py b/bundlewrap/items/pkg_zypper.py new file mode 100644 index 0000000..1a289ff --- /dev/null +++ b/bundlewrap/items/pkg_zypper.py @@ -0,0 +1,72 @@ +# -*- coding: utf-8 -*- +from __future__ import unicode_literals + +from pipes import quote + +from bundlewrap.exceptions import BundleError +from bundlewrap.items import Item +from bundlewrap.utils.text import mark_for_translation as _ + + +ZYPPER_OPTS = "--non-interactive " + \ + "--non-interactive-include-reboot-patches " + \ + "--quiet" + + +def pkg_install(node, pkgname): + return node.run("zypper {} install {}".format(ZYPPER_OPTS, quote(pkgname))) + + +def pkg_installed(node, pkgname): + result = node.run( + "zypper search --match-exact --installed-only " + "--type package {}".format(quote(pkgname)), + may_fail=True, + ) + if result.return_code != 0: + return False + else: + return True + + +def pkg_remove(node, pkgname): + return node.run("zypper {} remove {}".format(ZYPPER_OPTS, quote(pkgname))) + + +class ZypperPkg(Item): + """ + A package installed by zypper. 
+ """ + BLOCK_CONCURRENT = ["pkg_zypper"] + BUNDLE_ATTRIBUTE_NAME = "pkg_zypper" + ITEM_ATTRIBUTES = { + 'installed': True, + } + ITEM_TYPE_NAME = "pkg_zypper" + + def __repr__(self): + return "".format( + self.name, + self.attributes['installed'], + ) + + def fix(self, status): + if self.attributes['installed'] is False: + pkg_remove(self.node, self.name) + else: + pkg_install(self.node, self.name) + + def sdict(self): + return { + 'installed': pkg_installed(self.node, self.name), + } + + @classmethod + def validate_attributes(cls, bundle, item_id, attributes): + if not isinstance(attributes.get('installed', True), bool): + raise BundleError(_( + "expected boolean for 'installed' on {item} in bundle '{bundle}'" + ).format( + bundle=bundle.name, + item=item_id, + )) diff --git a/bundlewrap/items/postgres_dbs.py b/bundlewrap/items/postgres_dbs.py new file mode 100644 index 0000000..636b7cf --- /dev/null +++ b/bundlewrap/items/postgres_dbs.py @@ -0,0 +1,95 @@ +# -*- coding: utf-8 -*- +from __future__ import unicode_literals + +from pipes import quote + +from bundlewrap.exceptions import BundleError +from bundlewrap.items import Item +from bundlewrap.utils.text import force_text, mark_for_translation as _ + + +def create_db(node, name, owner): + return node.run("sudo -u postgres createdb -wO {owner} {name}".format( + name=name, + owner=owner, + )) + + +def drop_db(node, name): + return node.run("sudo -u postgres dropdb -w {}".format(quote(name))) + + +def get_databases(node): + output = node.run("echo '\\l' | sudo -u postgres psql -Anqt -F '|' | grep '|'").stdout + result = {} + for line in force_text(output).strip().split("\n"): + db, owner = line.strip().split("|", 2)[:2] + result[db] = { + 'owner': owner, + } + return result + + +def set_owner(node, name, owner): + return node.run( + "echo 'ALTER DATABASE {name} OWNER TO {owner}' | " + "sudo -u postgres psql -nqw".format( + name=name, + owner=owner, + ), + ) + + +class PostgresDB(Item): + """ + A postgres database. 
+ """ + BUNDLE_ATTRIBUTE_NAME = "postgres_dbs" + ITEM_ATTRIBUTES = { + 'delete': False, + 'owner': "postgres", + } + ITEM_TYPE_NAME = "postgres_db" + + def __repr__(self): + return "".format(self.name) + + def cdict(self): + if self.attributes['delete']: + return None + else: + return {'owner': self.attributes['owner']} + + def fix(self, status): + if status.must_be_deleted: + drop_db(self.node, self.name) + elif status.must_be_created: + create_db(self.node, self.name, self.attributes['owner']) + elif 'owner' in status.keys_to_fix: + set_owner(self.node, self.name, self.attributes['owner']) + else: + raise AssertionError("this shouldn't happen") + + def get_auto_deps(self, items): + deps = [] + for item in items: + if item.ITEM_TYPE_NAME == "postgres_role" and item.name == self.attributes['owner']: + deps.append(item.id) + return deps + + def sdict(self): + databases = get_databases(self.node) + if self.name not in databases: + return None + else: + return {'owner': databases[self.name]['owner']} + + @classmethod + def validate_attributes(cls, bundle, item_id, attributes): + if not isinstance(attributes.get('delete', True), bool): + raise BundleError(_( + "expected boolean for 'delete' on {item} in bundle '{bundle}'" + ).format( + bundle=bundle.name, + item=item_id, + )) diff --git a/bundlewrap/items/postgres_roles.py b/bundlewrap/items/postgres_roles.py new file mode 100644 index 0000000..bb77e00 --- /dev/null +++ b/bundlewrap/items/postgres_roles.py @@ -0,0 +1,123 @@ +# -*- coding: utf-8 -*- +from __future__ import unicode_literals + +from passlib.apps import postgres_context + +from bundlewrap.exceptions import BundleError +from bundlewrap.items import Item +from bundlewrap.utils.text import force_text, mark_for_translation as _ + + +AUTHID_COLUMNS = { + "rolcanlogin": 'can_login', + "rolsuper": 'superuser', + "rolpassword": 'password_hash', +} + + +def delete_role(node, role): + node.run("sudo -u postgres dropuser -w {}".format(role)) + + +def fix_role(node, role, attrs, create=False): + password = " PASSWORD '{}'".format(attrs['password_hash']) + node.run( + "echo \"{operation} ROLE {role} WITH LOGIN {superuser}SUPERUSER{password}\" " + "| sudo -u postgres psql -nqw".format( + operation="CREATE" if create else "ALTER", + password="" if attrs['password_hash'] is None else password, + role=role, + superuser="" if attrs['superuser'] is True else "NO", + ) + ) + + +def get_role(node, role): + result = node.run("echo \"SELECT rolcanlogin, rolsuper, rolpassword from pg_authid " + "WHERE rolname='{}'\" " + "| sudo -u postgres psql -Anqwx -F '|'".format(role)) + + role_attrs = {} + for line in force_text(result.stdout).strip().split("\n"): + try: + key, value = line.split("|") + except ValueError: + pass + else: + role_attrs[AUTHID_COLUMNS[key]] = value + + for bool_attr in ('can_login', 'superuser'): + if bool_attr in role_attrs: + role_attrs[bool_attr] = role_attrs[bool_attr] == "t" + + return role_attrs if role_attrs else None + + +class PostgresRole(Item): + """ + A postgres role. 
+ """ + BUNDLE_ATTRIBUTE_NAME = "postgres_roles" + ITEM_ATTRIBUTES = { + 'can_login': True, + 'delete': False, + 'password': None, + 'password_hash': None, + 'superuser': False, + } + ITEM_TYPE_NAME = "postgres_role" + + def __repr__(self): + return "".format(self.name) + + def cdict(self): + if self.attributes['delete']: + return None + cdict = self.attributes.copy() + del cdict['delete'] + del cdict['password'] + return cdict + + def fix(self, status): + if status.must_be_deleted: + delete_role(self.node, self.name) + elif status.must_be_created: + fix_role(self.node, self.name, self.attributes, create=True) + else: + fix_role(self.node, self.name, self.attributes) + + def sdict(self): + return get_role(self.node, self.name) + + def patch_attributes(self, attributes): + if 'password' in attributes: + attributes['password_hash'] = postgres_context.encrypt( + force_text(attributes['password']), + user=self.name, + ) + return attributes + + @classmethod + def validate_attributes(cls, bundle, item_id, attributes): + if not attributes.get('delete', False): + if attributes.get('password') is None and attributes.get('password_hash') is None: + raise BundleError(_( + "expected either 'password' or 'password_hash' on {item} in bundle '{bundle}'" + ).format( + bundle=bundle.name, + item=item_id, + )) + if attributes.get('password') is not None and attributes.get('password_hash') is not None: + raise BundleError(_( + "can't define both 'password' and 'password_hash' on {item} in bundle '{bundle}'" + ).format( + bundle=bundle.name, + item=item_id, + )) + if not isinstance(attributes.get('delete', True), bool): + raise BundleError(_( + "expected boolean for 'delete' on {item} in bundle '{bundle}'" + ).format( + bundle=bundle.name, + item=item_id, + )) diff --git a/bundlewrap/items/svc_openbsd.py b/bundlewrap/items/svc_openbsd.py new file mode 100644 index 0000000..9b4351e --- /dev/null +++ b/bundlewrap/items/svc_openbsd.py @@ -0,0 +1,96 @@ +# -*- coding: utf-8 -*- +from __future__ import unicode_literals + +from pipes import quote + +from bundlewrap.exceptions import BundleError +from bundlewrap.items import Item +from bundlewrap.utils.text import mark_for_translation as _ + + +def svc_start(node, svcname): + return node.run("/etc/rc.d/{} start".format(quote(svcname))) + + +def svc_running(node, svcname): + result = node.run("/etc/rc.d/{} check".format(quote(svcname)), may_fail=True) + return "ok" in result.stdout_text + + +def svc_stop(node, svcname): + return node.run("/etc/rc.d/{} stop".format(quote(svcname))) + + +def svc_enable(node, svcname): + return node.run("rcctl set {} status on".format(quote(svcname))) + + +def svc_enabled(node, svcname): + result = node.run( + "rcctl ls on | grep '^{}$'".format(svcname), + may_fail=True, + ) + return result.return_code == 0 + + +def svc_disable(node, svcname): + return node.run("rcctl set {} status off".format(quote(svcname))) + + +class SvcOpenBSD(Item): + """ + A service managed by OpenBSD rc.d. 
+ """ + BUNDLE_ATTRIBUTE_NAME = "svc_openbsd" + ITEM_ATTRIBUTES = { + 'running': True, + 'enabled': True + } + ITEM_TYPE_NAME = "svc_openbsd" + + def __repr__(self): + return "".format( + self.name, + self.attributes['running'], + self.attributes['enabled'], + ) + + def fix(self, status): + if 'enabled' in status.keys_to_fix: + if self.attributes['enabled'] is False: + svc_disable(self.node, self.name) + else: + svc_enable(self.node, self.name) + + if self.attributes['running'] is False: + svc_stop(self.node, self.name) + else: + svc_start(self.node, self.name) + + def get_canned_actions(self): + return { + 'restart': { + 'command': "/etc/rc.d/{} restart".format(self.name), + 'needs': [self.id], + }, + 'stopstart': { + 'command': "/etc/rc.d/{0} stop && /etc/rc.d/{0} start".format(self.name), + 'needs': [self.id], + }, + } + + def sdict(self): + return { + 'enabled': svc_enabled(self.node, self.name), + 'running': svc_running(self.node, self.name), + } + + @classmethod + def validate_attributes(cls, bundle, item_id, attributes): + if not isinstance(attributes.get('running', True), bool): + raise BundleError(_( + "expected boolean for 'running' on {item} in bundle '{bundle}'" + ).format( + bundle=bundle.name, + item=item_id, + )) diff --git a/bundlewrap/items/svc_systemd.py b/bundlewrap/items/svc_systemd.py new file mode 100644 index 0000000..c627997 --- /dev/null +++ b/bundlewrap/items/svc_systemd.py @@ -0,0 +1,110 @@ +# -*- coding: utf-8 -*- +from __future__ import unicode_literals + +from pipes import quote + +from bundlewrap.exceptions import BundleError +from bundlewrap.items import Item +from bundlewrap.utils.text import mark_for_translation as _ + + +def svc_start(node, svcname): + return node.run("systemctl start -- {}".format(quote(svcname))) + + +def svc_running(node, svcname): + result = node.run( + "systemctl status -- {}".format(quote(svcname)), + may_fail=True, + ) + return result.return_code == 0 + + +def svc_stop(node, svcname): + return node.run("systemctl stop -- {}".format(quote(svcname))) + + +def svc_enable(node, svcname): + return node.run("systemctl enable -- {}".format(quote(svcname))) + + +def svc_enabled(node, svcname): + result = node.run( + "systemctl is-enabled -- {}".format(quote(svcname)), + may_fail=True, + ) + return result.return_code == 0 + + +def svc_disable(node, svcname): + return node.run("systemctl disable -- {}".format(quote(svcname))) + + +class SvcSystemd(Item): + """ + A service managed by systemd. + """ + BUNDLE_ATTRIBUTE_NAME = "svc_systemd" + ITEM_ATTRIBUTES = { + 'enabled': None, + 'running': True, + } + ITEM_TYPE_NAME = "svc_systemd" + + def __repr__(self): + return "".format( + self.name, + self.attributes['enabled'], + self.attributes['running'], + ) + + # Note for bw 3.0: We're planning to make "True" the default value + # for "enabled". Once that's done, we can remove this custom cdict. 
+ def cdict(self): + cdict = self.attributes.copy() + if 'enabled' in cdict and cdict['enabled'] is None: + del cdict['enabled'] + return cdict + + def fix(self, status): + if 'enabled' in status.keys_to_fix: + if self.attributes['enabled'] is False: + svc_disable(self.node, self.name) + else: + svc_enable(self.node, self.name) + + if 'running' in status.keys_to_fix: + if self.attributes['running'] is False: + svc_stop(self.node, self.name) + else: + svc_start(self.node, self.name) + + def get_canned_actions(self): + return { + 'reload': { + 'command': "systemctl reload -- {}".format(self.name), + 'needs': [self.id], + }, + 'restart': { + 'command': "systemctl restart -- {}".format(self.name), + 'needs': [self.id], + }, + } + + def sdict(self): + return { + 'enabled': svc_enabled(self.node, self.name), + 'running': svc_running(self.node, self.name), + } + + @classmethod + def validate_attributes(cls, bundle, item_id, attributes): + for attribute in ('enabled', 'running'): + if not isinstance(attributes.get(attribute, True), bool): + raise BundleError(_( + "expected boolean for '{attribute}' on {item} in bundle '{bundle}'" + ).format( + attribute=attribute, + bundle=bundle.name, + item=item_id, + )) diff --git a/bundlewrap/items/svc_systemv.py b/bundlewrap/items/svc_systemv.py new file mode 100644 index 0000000..55f2b00 --- /dev/null +++ b/bundlewrap/items/svc_systemv.py @@ -0,0 +1,72 @@ +# -*- coding: utf-8 -*- +from __future__ import unicode_literals + +from pipes import quote + +from bundlewrap.exceptions import BundleError +from bundlewrap.items import Item +from bundlewrap.utils.text import mark_for_translation as _ + + +def svc_start(node, svcname): + return node.run("/etc/init.d/{} start".format(quote(svcname))) + + +def svc_running(node, svcname): + result = node.run( + "/etc/init.d/{} status".format(quote(svcname)), + may_fail=True, + ) + return result.return_code == 0 + + +def svc_stop(node, svcname): + return node.run("/etc/init.d/{} stop".format(quote(svcname))) + + +class SvcSystemV(Item): + """ + A service managed by traditional System V init scripts. 
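+
+    A hypothetical bundle snippet (the service name is made up):
+
+        svc_systemv = {
+            "apache2": {
+                "running": True,
+            },
+        }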
+ """ + BUNDLE_ATTRIBUTE_NAME = "svc_systemv" + ITEM_ATTRIBUTES = { + 'running': True, + } + ITEM_TYPE_NAME = "svc_systemv" + + def __repr__(self): + return "".format( + self.name, + self.attributes['running'], + ) + + def fix(self, status): + if self.attributes['running'] is False: + svc_stop(self.node, self.name) + else: + svc_start(self.node, self.name) + + def get_canned_actions(self): + return { + 'reload': { + 'command': "/etc/init.d/{} reload".format(self.name), + 'needs': [self.id], + }, + 'restart': { + 'command': "/etc/init.d/{} restart".format(self.name), + 'needs': [self.id], + }, + } + + def sdict(self): + return {'running': svc_running(self.node, self.name)} + + @classmethod + def validate_attributes(cls, bundle, item_id, attributes): + if not isinstance(attributes.get('running', True), bool): + raise BundleError(_( + "expected boolean for 'running' on {item} in bundle '{bundle}'" + ).format( + bundle=bundle.name, + item=item_id, + )) diff --git a/bundlewrap/items/svc_upstart.py b/bundlewrap/items/svc_upstart.py new file mode 100644 index 0000000..cac13c9 --- /dev/null +++ b/bundlewrap/items/svc_upstart.py @@ -0,0 +1,73 @@ +# -*- coding: utf-8 -*- +from __future__ import unicode_literals + +from pipes import quote + +from bundlewrap.exceptions import BundleError +from bundlewrap.items import Item +from bundlewrap.utils.text import mark_for_translation as _ + + +def svc_start(node, svcname): + return node.run("initctl start --no-wait -- {}".format(quote(svcname))) + + +def svc_running(node, svcname): + result = node.run("initctl status -- {}".format(quote(svcname))) + return " start/" in result.stdout_text + + +def svc_stop(node, svcname): + return node.run("initctl stop --no-wait -- {}".format(quote(svcname))) + + +class SvcUpstart(Item): + """ + A service managed by Upstart. 
+ """ + BUNDLE_ATTRIBUTE_NAME = "svc_upstart" + ITEM_ATTRIBUTES = { + 'running': True, + } + ITEM_TYPE_NAME = "svc_upstart" + + def __repr__(self): + return "".format( + self.name, + self.attributes['running'], + ) + + def fix(self, status): + if self.attributes['running'] is False: + svc_stop(self.node, self.name) + else: + svc_start(self.node, self.name) + + def get_canned_actions(self): + return { + 'reload': { + 'command': "reload {}".format(self.name), + 'needs': [self.id], + }, + 'restart': { + 'command': "restart {}".format(self.name), + 'needs': [self.id], + }, + 'stopstart': { + 'command': "stop {0} && start {0}".format(self.name), + 'needs': [self.id], + }, + } + + def sdict(self): + return {'running': svc_running(self.node, self.name)} + + @classmethod + def validate_attributes(cls, bundle, item_id, attributes): + if not isinstance(attributes.get('running', True), bool): + raise BundleError(_( + "expected boolean for 'running' on {item} in bundle '{bundle}'" + ).format( + bundle=bundle.name, + item=item_id, + )) diff --git a/bundlewrap/items/symlinks.py b/bundlewrap/items/symlinks.py new file mode 100644 index 0000000..932f6d9 --- /dev/null +++ b/bundlewrap/items/symlinks.py @@ -0,0 +1,164 @@ +# -*- coding: utf-8 -*- +from __future__ import unicode_literals + +from collections import defaultdict +from os.path import dirname, normpath +from pipes import quote + +from bundlewrap.exceptions import BundleError +from bundlewrap.items import Item +from bundlewrap.utils.remote import PathInfo +from bundlewrap.utils.text import mark_for_translation as _ +from bundlewrap.utils.text import is_subdirectory + + +ATTRIBUTE_VALIDATORS = defaultdict(lambda: lambda id, value: None) + + +class Symlink(Item): + """ + A symbolic link. + """ + BUNDLE_ATTRIBUTE_NAME = "symlinks" + ITEM_ATTRIBUTES = { + 'group': None, + 'owner': None, + 'target': None, + } + ITEM_TYPE_NAME = "symlink" + REQUIRED_ATTRIBUTES = ['target'] + + def __repr__(self): + return "".format( + quote(self.name), + self.attributes['target'], + ) + + def cdict(self): + cdict = { + 'target': self.attributes['target'], + 'type': 'symlink', + } + for optional_attr in ('group', 'owner'): + if self.attributes[optional_attr] is not None: + cdict[optional_attr] = self.attributes[optional_attr] + return cdict + + def fix(self, status): + if status.must_be_created or 'type' in status.keys_to_fix: + # fixing the type fixes everything + self._fix_type(status) + return + + for fix_type in ('owner', 'group', 'target'): + if fix_type in status.keys_to_fix: + if fix_type == 'group' and 'owner' in status.keys_to_fix: + # owner and group are fixed with a single chown + continue + getattr(self, "_fix_" + fix_type)(status) + + def _fix_owner(self, status): + group = self.attributes['group'] or "" + if group: + group = ":" + quote(group) + self.node.run("chown -h {}{} -- {}".format( + quote(self.attributes['owner'] or ""), + group, + quote(self.name), + )) + _fix_group = _fix_owner + + def _fix_target(self, status): + self.node.run("ln -sf -- {} {}".format( + quote(self.attributes['target']), + quote(self.name), + )) + + def _fix_type(self, status): + self.node.run("rm -rf -- {}".format(quote(self.name))) + self.node.run("mkdir -p -- {}".format(quote(dirname(self.name)))) + self.node.run("ln -s -- {} {}".format( + quote(self.attributes['target']), + quote(self.name), + )) + if self.attributes['owner'] or self.attributes['group']: + self._fix_owner(status) + + def get_auto_deps(self, items): + deps = [] + for item in items: + if item == self: + continue 
+ if item.ITEM_TYPE_NAME == "file" and ( + is_subdirectory(item.name, self.name) or + item.name == self.name + ): + raise BundleError(_( + "{item1} (from bundle '{bundle1}') blocking path to " + "{item2} (from bundle '{bundle2}')" + ).format( + item1=item.id, + bundle1=item.bundle.name, + item2=self.id, + bundle2=self.bundle.name, + )) + elif item.ITEM_TYPE_NAME == "user" and item.name == self.attributes['owner']: + if item.attributes['delete']: + raise BundleError(_( + "{item1} (from bundle '{bundle1}') depends on item " + "{item2} (from bundle '{bundle2}') which is set to be deleted" + ).format( + item1=self.id, + bundle1=self.bundle.name, + item2=item.id, + bundle2=item.bundle.name, + )) + else: + deps.append(item.id) + elif item.ITEM_TYPE_NAME == "group" and item.name == self.attributes['group']: + if item.attributes['delete']: + raise BundleError(_( + "{item1} (from bundle '{bundle1}') depends on item " + "{item2} (from bundle '{bundle2}') which is set to be deleted" + ).format( + item1=self.id, + bundle1=self.bundle.name, + item2=item.id, + bundle2=item.bundle.name, + )) + else: + deps.append(item.id) + elif item.ITEM_TYPE_NAME in ("directory", "symlink"): + if is_subdirectory(item.name, self.name): + deps.append(item.id) + return deps + + def sdict(self): + path_info = PathInfo(self.node, self.name) + if not path_info.exists: + return None + else: + return { + 'target': path_info.symlink_target if path_info.path_type == 'symlink' else "", + 'type': path_info.path_type, + 'owner': path_info.owner, + 'group': path_info.group, + } + + @classmethod + def validate_attributes(cls, bundle, item_id, attributes): + for key, value in attributes.items(): + ATTRIBUTE_VALIDATORS[key](item_id, value) + + @classmethod + def validate_name(cls, bundle, name): + if normpath(name) == "/": + raise BundleError(_("'/' cannot be a file")) + if normpath(name) != name: + raise BundleError(_( + "'{path}' is an invalid symlink path, should be '{normpath}' (bundle '{bundle}')" + ).format( + path=name, + normpath=normpath(name), + bundle=bundle.name, + )) diff --git a/bundlewrap/items/users.py b/bundlewrap/items/users.py new file mode 100644 index 0000000..383dc6c --- /dev/null +++ b/bundlewrap/items/users.py @@ -0,0 +1,298 @@ +# -*- coding: utf-8 -*- +from __future__ import unicode_literals + +from logging import ERROR, getLogger +from pipes import quote +from string import ascii_lowercase, digits + +from passlib.hash import bcrypt, md5_crypt, sha256_crypt, sha512_crypt + +from bundlewrap.exceptions import BundleError +from bundlewrap.items import BUILTIN_ITEM_ATTRIBUTES, Item +from bundlewrap.utils.text import force_text, mark_for_translation as _ + + +getLogger('passlib').setLevel(ERROR) + +_ATTRIBUTE_NAMES = { + 'full_name': _("full name"), + 'gid': _("GID"), + 'groups': _("groups"), + 'home': _("home dir"), + 'password_hash': _("password hash"), + 'shell': _("shell"), + 'uid': _("UID"), +} + +_ATTRIBUTE_OPTIONS = { + 'full_name': "-c", + 'gid': "-g", + 'groups': "-G", + 'home': "-d", + 'password_hash': "-p", + 'shell': "-s", + 'uid': "-u", +} + +# a random static salt if users don't provide one +_DEFAULT_SALT = "uJzJlYdG" + +# bcrypt needs special salts. 
+# 22 characters long, ending in ".", "O", "e", "u"
+# see https://bitbucket.org/ecollins/passlib/issues/25
+_DEFAULT_BCRYPT_SALT = "oo2ahgheen9Tei0IeJohTO"
+
+HASH_METHODS = {
+    'md5': md5_crypt,
+    'sha256': sha256_crypt,
+    'sha512': sha512_crypt,
+    'bcrypt': bcrypt,
+}
+
+_USERNAME_VALID_CHARACTERS = ascii_lowercase + digits + "-_"
+
+
+def _group_name_for_gid(node, gid):
+    """
+    Returns the group name that matches the gid.
+    """
+    group_output = node.run("grep -e ':{}:[^:]*$' /etc/group".format(gid), may_fail=True)
+    if group_output.return_code != 0:
+        return None
+    else:
+        return group_output.stdout_text.split(":")[0]
+
+
+def _groups_for_user(node, username):
+    """
+    Returns the list of group names for the given username on the given
+    node.
+    """
+    groups = node.run("id -Gn {}".format(username)).stdout_text.strip().split(" ")
+    primary_group = node.run("id -gn {}".format(username)).stdout_text.strip()
+    groups.remove(primary_group)
+    return groups
+
+
+def _parse_passwd_line(line, entries):
+    """
+    Parses a line from /etc/passwd and returns the information as a
+    dictionary.
+    """
+    result = dict(zip(
+        entries,
+        line.strip().split(":"),
+    ))
+    result['full_name'] = result['gecos'].split(",")[0]
+    return result
+
+
+class User(Item):
+    """
+    A user account.
+    """
+    BUNDLE_ATTRIBUTE_NAME = "users"
+    ITEM_ATTRIBUTES = {
+        'delete': False,
+        'full_name': None,
+        'gid': None,
+        'groups': None,
+        'hash_method': 'sha512',
+        'home': None,
+        'password': None,
+        'password_hash': None,
+        'salt': None,
+        'shell': None,
+        'uid': None,
+        'use_shadow': None,
+    }
+    ITEM_TYPE_NAME = "user"
+
+    def __repr__(self):
+        return "<User name:{}>".format(self.name)
+
+    def cdict(self):
+        if self.attributes['delete']:
+            return None
+        cdict = self.attributes.copy()
+        del cdict['delete']
+        del cdict['hash_method']
+        del cdict['password']
+        del cdict['salt']
+        del cdict['use_shadow']
+        for key in list(cdict.keys()):
+            if cdict[key] is None:
+                del cdict[key]
+        if 'groups' in cdict:
+            cdict['groups'] = set(cdict['groups'])
+        return cdict
+
+    def fix(self, status):
+        if status.must_be_deleted:
+            self.node.run("userdel {}".format(self.name), may_fail=True)
+        else:
+            command = "useradd " if status.must_be_created else "usermod "
+            for attr, option in sorted(_ATTRIBUTE_OPTIONS.items()):
+                if (attr in status.keys_to_fix or status.must_be_created) and \
+                        self.attributes[attr] is not None:
+                    if attr == 'groups':
+                        value = ",".join(self.attributes[attr])
+                    else:
+                        value = str(self.attributes[attr])
+                    command += "{} {} ".format(option, quote(value))
+            command += self.name
+            self.node.run(command, may_fail=True)
+
+    def get_auto_deps(self, items):
+        deps = []
+        for item in items:
+            if item.ITEM_TYPE_NAME == "group":
+                if item.attributes['delete']:
+                    raise BundleError(_(
+                        "{item1} (from bundle '{bundle1}') depends on item "
+                        "{item2} (from bundle '{bundle2}') which is set to be deleted"
+                    ).format(
+                        item1=self.id,
+                        bundle1=self.bundle.name,
+                        item2=item.id,
+                        bundle2=item.bundle.name,
+                    ))
+                else:
+                    deps.append(item.id)
+        return deps
+
+    def sdict(self):
+        # verify content of /etc/passwd
+        if self.node.os in self.node.OS_FAMILY_BSD:
+            password_command = "grep -ae '^{}:' /etc/master.passwd"
+        else:
+            password_command = "grep -ae '^{}:' /etc/passwd"
+        passwd_grep_result = self.node.run(
+            password_command.format(self.name),
+            may_fail=True,
+        )
+        if passwd_grep_result.return_code != 0:
+            return None
+
+        if self.node.os in self.node.OS_FAMILY_BSD:
+            entries = (
+                'username',
+                'passwd_hash',
+                'uid',
+                'gid',
+                'class',
+                'change',
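+                # class, change and expire exist only in master.passwd,
+                # not in /etc/passwd (see passwd(5) on BSD)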
'expire', + 'gecos', + 'home', + 'shell', + ) + else: + entries = ('username', 'passwd_hash', 'uid', 'gid', 'gecos', 'home', 'shell') + + sdict = _parse_passwd_line(passwd_grep_result.stdout_text, entries) + + if self.attributes['gid'] is not None and not self.attributes['gid'].isdigit(): + sdict['gid'] = _group_name_for_gid(self.node, sdict['gid']) + + if self.attributes['password_hash'] is not None: + if self.attributes['use_shadow'] and self.node.os not in self.node.OS_FAMILY_BSD: + # verify content of /etc/shadow unless we are on OpenBSD + shadow_grep_result = self.node.run( + "grep -e '^{}:' /etc/shadow".format(self.name), + may_fail=True, + ) + if shadow_grep_result.return_code != 0: + sdict['password_hash'] = None + else: + sdict['password_hash'] = shadow_grep_result.stdout_text.split(":")[1] + else: + sdict['password_hash'] = sdict['passwd_hash'] + del sdict['passwd_hash'] + + # verify content of /etc/group + sdict['groups'] = set(_groups_for_user(self.node, self.name)) + + return sdict + + def patch_attributes(self, attributes): + if attributes.get('password', None) is not None: + # defaults aren't set yet + hash_method = HASH_METHODS[attributes.get( + 'hash_method', + self.ITEM_ATTRIBUTES['hash_method'], + )] + salt = attributes.get('salt', None) + if self.node.os in self.node.OS_FAMILY_BSD: + attributes['password_hash'] = bcrypt.encrypt( + force_text(attributes['password']), + rounds=8, # default rounds for OpenBSD accounts + salt=_DEFAULT_BCRYPT_SALT if salt is None else salt, + ) + else: + attributes['password_hash'] = hash_method.encrypt( + force_text(attributes['password']), + rounds=5000, # default from glibc + salt=_DEFAULT_SALT if salt is None else salt, + ) + + if 'use_shadow' not in attributes: + attributes['use_shadow'] = self.node.use_shadow_passwords + + for attr in ('gid', 'uid'): + if isinstance(attributes.get(attr), int): + attributes[attr] = str(attributes[attr]) + + return attributes + + @classmethod + def validate_attributes(cls, bundle, item_id, attributes): + if attributes.get('delete', False): + for attr in attributes.keys(): + if attr not in ['delete'] + list(BUILTIN_ITEM_ATTRIBUTES.keys()): + raise BundleError(_( + "{item} from bundle '{bundle}' cannot have other " + "attributes besides 'delete'" + ).format(item=item_id, bundle=bundle.name)) + + if 'hash_method' in attributes and \ + attributes['hash_method'] not in HASH_METHODS: + raise BundleError( + _("Invalid hash method for {item} in bundle '{bundle}': '{method}'").format( + bundle=bundle.name, + item=item_id, + method=attributes['hash_method'], + ) + ) + + if 'password_hash' in attributes and ( + 'password' in attributes or + 'salt' in attributes + ): + raise BundleError(_( + "{item} in bundle '{bundle}': 'password_hash' " + "cannot be used with 'password' or 'salt'" + ).format(bundle=bundle.name, item=item_id)) + + if 'salt' in attributes and 'password' not in attributes: + raise BundleError( + _("{}: salt given without a password").format(item_id) + ) + + @classmethod + def validate_name(cls, bundle, name): + for char in name: + if char not in _USERNAME_VALID_CHARACTERS: + raise BundleError(_( + "Invalid character in username '{user}': {char} (bundle '{bundle}')" + ).format(bundle=bundle.name, char=char, user=name)) + + if name.endswith("_") or name.endswith("-"): + raise BundleError(_( + "Username '{user}' must not end in dash or underscore (bundle '{bundle}')" + ).format(bundle=bundle.name, user=name)) + + if len(name) > 30: + raise BundleError(_( + "Username '{user}' is longer than 30 characters 
(bundle '{bundle}')" + ).format(bundle=bundle.name, user=name)) diff --git a/bundlewrap/lock.py b/bundlewrap/lock.py new file mode 100644 index 0000000..43f47da --- /dev/null +++ b/bundlewrap/lock.py @@ -0,0 +1,198 @@ +from datetime import datetime +from getpass import getuser +import json +from os import environ +from pipes import quote +from socket import gethostname +from time import time + +from .exceptions import NodeLockedException +from .utils import cached_property, tempfile +from .utils.text import blue, bold, mark_for_translation as _, red, wrap_question +from .utils.time import format_duration, format_timestamp, parse_duration +from .utils.ui import io + + +HARD_LOCK_PATH = "/tmp/bundlewrap.lock" +HARD_LOCK_FILE = HARD_LOCK_PATH + "/info" +SOFT_LOCK_PATH = "/tmp/bundlewrap.softlock.d" +SOFT_LOCK_FILE = "/tmp/bundlewrap.softlock.d/{id}" + + +def identity(): + return environ.get('BW_IDENTITY', "{}@{}".format( + getuser(), + gethostname(), + )) + + +class NodeLock(object): + def __init__(self, node, interactive=False, ignore=False): + self.node = node + self.ignore = ignore + self.interactive = interactive + + def __enter__(self): + with tempfile() as local_path: + if not self.ignore: + with io.job(_(" {node} checking hard lock status...").format(node=self.node.name)): + result = self.node.run("mkdir " + quote(HARD_LOCK_PATH), may_fail=True) + if result.return_code != 0: + self.node.download(HARD_LOCK_FILE, local_path, ignore_failure=True) + with open(local_path, 'r') as f: + try: + info = json.loads(f.read()) + except: + io.stderr(_( + "{warning} corrupted lock on {node}: " + "unable to read or parse lock file contents " + "(clear it with `bw run {node} 'rm -R {path}'`)" + ).format( + node=self.node.name, + path=HARD_LOCK_FILE, + warning=red(_("WARNING")), + )) + info = {} + expired = False + try: + d = info['date'] + except KeyError: + info['date'] = _("") + info['duration'] = _("") + else: + duration = datetime.now() - datetime.fromtimestamp(d) + info['date'] = format_timestamp(d) + info['duration'] = format_duration(duration) + if duration > parse_duration(environ.get('BW_HARDLOCK_EXPIRY', "8h")): + expired = True + io.debug("ignoring expired hard lock on {}".format(self.node.name)) + if 'user' not in info: + info['user'] = _("") + if expired or self.ignore or (self.interactive and io.ask( + self._warning_message_hard(info), + False, + epilogue=blue("?") + " " + bold(self.node.name), + )): + pass + else: + raise NodeLockedException(info) + + with io.job(_(" {node} uploading lock file...").format(node=self.node.name)): + if self.ignore: + self.node.run("mkdir -p " + quote(HARD_LOCK_PATH)) + with open(local_path, 'w') as f: + f.write(json.dumps({ + 'date': time(), + 'user': identity(), + })) + self.node.upload(local_path, HARD_LOCK_FILE) + + return self + + def __exit__(self, type, value, traceback): + with io.job(_(" {node} removing hard lock...").format(node=self.node.name)): + result = self.node.run("rm -R {}".format(quote(HARD_LOCK_PATH)), may_fail=True) + + if result.return_code != 0: + io.stderr(_("{x} {node} could not release hard lock").format( + node=bold(self.node.name), + x=red("!"), + )) + + def _warning_message_hard(self, info): + return wrap_question( + red(_("NODE LOCKED")), + _( + "Looks like somebody is currently using BundleWrap on this node.\n" + "You should let them finish or override the lock if it has gone stale.\n" + "\n" + "locked by {user}\n" + " since {date} ({duration} ago)" + ).format( + user=bold(info['user']), + date=info['date'], + 
duration=info['duration'], + ), + bold(_("Override lock?")), + prefix="{x} {node} ".format(node=bold(self.node.name), x=blue("?")), + ) + + @cached_property + def soft_locks(self): + return softlock_list(self.node) + + @cached_property + def my_soft_locks(self): + for lock in self.soft_locks: + if lock['user'] == identity(): + yield lock + + @cached_property + def other_peoples_soft_locks(self): + for lock in self.soft_locks: + if lock['user'] != identity(): + yield lock + + +def softlock_add(node, lock_id, comment="", expiry="8h", item_selectors=None): + if "\n" in comment: + raise ValueError(_("Lock comments must not contain any newlines")) + if not item_selectors: + item_selectors = ["*"] + + expiry_timedelta = parse_duration(expiry) + now = time() + expiry_timestamp = now + expiry_timedelta.days * 86400 + expiry_timedelta.seconds + + content = json.dumps({ + 'comment': comment, + 'date': now, + 'expiry': expiry_timestamp, + 'id': lock_id, + 'items': item_selectors, + 'user': identity(), + }, indent=None, sort_keys=True) + + with tempfile() as local_path: + with open(local_path, 'w') as f: + f.write(content + "\n") + node.run("mkdir -p " + quote(SOFT_LOCK_PATH)) + node.upload(local_path, SOFT_LOCK_FILE.format(id=lock_id), mode='0644') + + return lock_id + + +def softlock_list(node): + with io.job(_(" {} checking soft locks...").format(node.name)): + cat = node.run("cat {}".format(SOFT_LOCK_FILE.format(id="*")), may_fail=True) + if cat.return_code != 0: + return [] + result = [] + for line in cat.stdout.decode('utf-8').strip().split("\n"): + try: + result.append(json.loads(line.strip())) + except json.decoder.JSONDecodeError: + io.stderr(_( + "{x} {node} unable to parse soft lock file contents, ignoring: {line}" + ).format( + x=red("!"), + node=bold(node.name), + line=line.strip(), + )) + for lock in result[:]: + if lock['expiry'] < time(): + io.debug(_("removing expired soft lock {id} from node {node}").format( + id=lock['id'], + node=node.name, + )) + softlock_remove(node, lock['id']) + result.remove(lock) + return result + + +def softlock_remove(node, lock_id): + io.debug(_("removing soft lock {id} from node {node}").format( + id=lock_id, + node=node.name, + )) + node.run("rm {}".format(SOFT_LOCK_FILE.format(id=lock_id))) diff --git a/bundlewrap/metadata.py b/bundlewrap/metadata.py new file mode 100644 index 0000000..10005cc --- /dev/null +++ b/bundlewrap/metadata.py @@ -0,0 +1,268 @@ +from copy import copy +from hashlib import sha1 +from json import dumps, JSONEncoder + +from .exceptions import RepositoryError +from .utils import ATOMIC_TYPES, Fault, merge_dict +from .utils.text import force_text, mark_for_translation as _ + + +try: + text_type = unicode + byte_type = str +except NameError: + text_type = str + byte_type = bytes + +METADATA_TYPES = ( + bool, + byte_type, + Fault, + int, + text_type, + type(None), +) + + +def atomic(obj): + try: + cls = ATOMIC_TYPES[type(obj)] + except KeyError: + raise ValueError("atomic() can only be applied to dicts, lists, sets, or tuples " + "(not: {})".format(repr(obj))) + else: + return cls(obj) + + +def check_for_unsolvable_metadata_key_conflicts(node): + """ + Finds metadata keys defined by two groups that are not part of a + shared subgroup hierarchy. + """ + # First, we build a list of subgroup chains. + # + # A chain is simply a list of groups starting with a parent group + # that has no parent groups itself and then descends depth-first + # into its subgroups until a subgroup is reached that the node is + # not a member of. 
+    # Every possible path on every subgroup tree is a separate chain.
+    #
+    #        group4
+    #       /      \
+    #   group2    group3
+    #       \      /
+    #        group1
+    #
+    # This example has two chains, even though both start and end at the
+    # same groups:
+    #
+    #   group1 -> group2 -> group4
+    #   group1 -> group3 -> group4
+
+    # find all groups whose subgroups this node is *not* a member of
+    lowest_subgroups = set()
+    for group in node.groups:
+        in_subgroup = False
+        for subgroup in group.subgroups:
+            if subgroup in node.groups:
+                in_subgroup = True
+                break
+        if not in_subgroup:
+            lowest_subgroups.add(group)
+
+    chains = []
+    incomplete_chains = [[group] for group in lowest_subgroups]
+
+    while incomplete_chains:
+        for chain in incomplete_chains[:]:
+            highest_group = chain[-1]
+            if list(highest_group.parent_groups):
+                chain_so_far = chain[:]
+                # continue this chain with the first parent group
+                chain.append(list(highest_group.parent_groups)[0])
+                # further parent groups form new chains
+                for further_parents in list(highest_group.parent_groups)[1:]:
+                    new_chain = chain_so_far[:]
+                    new_chain.append(further_parents)
+                    incomplete_chains.append(new_chain)
+            else:
+                # chain has ended
+                chains.append(chain)
+                incomplete_chains.remove(chain)
+
+    # chains now look like this (parents right of children):
+    # [
+    #     [group1],
+    #     [group2, group3, group5],
+    #     [group2, group4, group5],
+    #     [group2, group4, group6, group7],
+    # ]
+
+    # let's merge metadata for each chain
+    chain_metadata = []
+    for chain in chains:
+        metadata = {}
+        for group in chain:
+            metadata = merge_dict(metadata, group.metadata)
+        chain_metadata.append(metadata)
+
+    # create a "key path map" for each chain's metadata
+    chain_metadata_keys = [list(dictionary_key_map(metadata)) for metadata in chain_metadata]
+
+    # compare all metadata keys with other chains and find matches
+    for index1, keymap1 in enumerate(chain_metadata_keys):
+        for keypath in keymap1:
+            for index2, keymap2 in enumerate(chain_metadata_keys):
+                if index1 == index2:
+                    # same keymap, don't compare
+                    continue
+                else:
+                    if keypath in keymap2:
+                        if (
+                            type(value_at_key_path(chain_metadata[index1], keypath)) ==
+                            type(value_at_key_path(chain_metadata[index2], keypath)) and
+                            type(value_at_key_path(chain_metadata[index2], keypath)) in
+                            (set, dict)
+                        ):
+                            continue
+                        # We now know that there is a conflict between the first
+                        # and second chain we're looking at right now.
+                        # That is however not a problem if the conflict is caused
+                        # by a group that is present in both chains.
+                        # So all that's left is to figure out which two single groups
+                        # within those chains are at fault so we can report them
+                        # to the user if necessary.
+                        find_groups_causing_metadata_conflict(
+                            node.name,
+                            chains[index1],
+                            chains[index2],
+                            keypath,
+                        )
+
+
+def deepcopy_metadata(obj):
+    """
+    Our own version of copy.deepcopy that doesn't pickle and ensures
+    a limited range of types is used in metadata.
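+
+    For example (a sketch of the intended behavior):
+
+        deepcopy_metadata({'port': 80, 'tags': {'web'}})
+        -> {'port': 80, 'tags': {'web'}}
+
+        deepcopy_metadata({'when': datetime.now()})
+        -> ValueError (datetime is not an allowed metadata type)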
+ """ + if isinstance(obj, dict): + new_obj = {} + for key, value in obj.items(): + if not isinstance(key, METADATA_TYPES): + raise ValueError(_("illegal metadata key type: {}").format(repr(key))) + new_key = copy(key) + new_obj[new_key] = deepcopy_metadata(value) + elif isinstance(obj, (list, tuple)): + new_obj = [] + for member in obj: + new_obj.append(deepcopy_metadata(member)) + elif isinstance(obj, set): + new_obj = set() + for member in obj: + new_obj.add(deepcopy_metadata(member)) + elif isinstance(obj, METADATA_TYPES): + return obj + else: + raise ValueError(_("illegal metadata value type: {}").format(repr(obj))) + return new_obj + + +def dictionary_key_map(mapdict): + """ + For the dict + + { + "key1": 1, + "key2": { + "key3": 3, + "key4": ["foo"], + }, + } + + the key map would look like this: + + [ + ("key1",), + ("key2",), + ("key2", "key3"), + ("key2", "key4"), + ] + + """ + for key, value in mapdict.items(): + if isinstance(value, dict): + for child_keys in dictionary_key_map(value): + yield (key,) + child_keys + yield (key,) + + +def find_groups_causing_metadata_conflict(node_name, chain1, chain2, keypath): + """ + Given two chains (lists of groups), find one group in each chain + that has conflicting metadata with the other for the given key path. + """ + chain1_metadata = [list(dictionary_key_map(group.metadata)) for group in chain1] + chain2_metadata = [list(dictionary_key_map(group.metadata)) for group in chain2] + + bad_keypath = None + + for index1, keymap1 in enumerate(chain1_metadata): + for index2, keymap2 in enumerate(chain2_metadata): + if chain1[index1] == chain2[index2]: + # same group, ignore + continue + if ( + keypath in keymap1 and + keypath in keymap2 and + chain1[index1] not in chain2[index2].subgroups and + chain2[index2] not in chain1[index1].subgroups + ): + bad_keypath = keypath + bad_group1 = chain1[index1] + bad_group2 = chain2[index2] + + if bad_keypath is not None: + raise RepositoryError(_( + "Conflicting metadata keys between groups '{group1}' and '{group2}' on node '{node}':\n\n" + " metadata['{keypath}']\n\n" + "You must either connect both groups through subgroups or have them not define " + "conflicting metadata keys. Otherwise there is no way for BundleWrap to determine " + "which group's metadata should win when they are merged." + ).format( + keypath="']['".join(bad_keypath), + group1=bad_group1.name, + group2=bad_group2.name, + node=node_name, + )) + + +class MetadataJSONEncoder(JSONEncoder): + def default(self, obj): + if isinstance(obj, Fault): + return obj.value + if isinstance(obj, set): + return sorted(obj) + if isinstance(obj, bytes): + return force_text(obj) + else: + raise ValueError(_("illegal metadata value type: {}").format(repr(obj))) + + +def hash_metadata(sdict): + """ + Returns a canonical SHA1 hash to describe this dict. + """ + return sha1(dumps( + sdict, + cls=MetadataJSONEncoder, + indent=None, + sort_keys=True, + ).encode('utf-8')).hexdigest() + + +def value_at_key_path(dict_obj, path): + if not path: + return dict_obj + else: + return value_at_key_path(dict_obj[path[0]], path[1:]) diff --git a/bundlewrap/node.py b/bundlewrap/node.py new file mode 100644 index 0000000..a9a816d --- /dev/null +++ b/bundlewrap/node.py @@ -0,0 +1,928 @@ +# -*- coding: utf-8 -*- +from __future__ import unicode_literals + +from datetime import datetime, timedelta +from os import environ +from sys import exit +from threading import Lock + +from . 
import operations +from .bundle import Bundle +from .concurrency import WorkerPool +from .deps import ( + DummyItem, + find_item, +) +from .exceptions import ( + DontCache, + FaultUnavailable, + ItemDependencyError, + NodeLockedException, + NoSuchBundle, + RepositoryError, +) +from .group import GROUP_ATTR_DEFAULTS +from .itemqueue import ItemQueue, ItemTestQueue +from .items import Item +from .lock import NodeLock +from .metadata import check_for_unsolvable_metadata_key_conflicts, hash_metadata +from .utils import cached_property, graph_for_items, names +from .utils.statedict import hash_statedict +from .utils.text import blue, bold, cyan, green, red, validate_name, yellow +from .utils.text import force_text, mark_for_translation as _ +from .utils.ui import io + + +class ApplyResult(object): + """ + Holds information about an apply run for a node. + """ + def __init__(self, node, item_results): + self.node_name = node.name + self.correct = 0 + self.fixed = 0 + self.skipped = 0 + self.failed = 0 + self.profiling_info = [] + + for item_id, result, time_elapsed in item_results: + self.profiling_info.append((time_elapsed, item_id)) + if result == Item.STATUS_ACTION_SUCCEEDED: + self.correct += 1 + elif result == Item.STATUS_OK: + self.correct += 1 + elif result == Item.STATUS_FIXED: + self.fixed += 1 + elif result == Item.STATUS_SKIPPED: + self.skipped += 1 + elif result == Item.STATUS_FAILED: + self.failed += 1 + else: + raise RuntimeError(_( + "can't make sense of results for {} on {}: {}" + ).format(item_id, self.node_name, result)) + + self.profiling_info.sort() + self.profiling_info.reverse() + + self.start = None + self.end = None + + @property + def duration(self): + return self.end - self.start + + +def format_node_result(result): + output = [] + output.append(("{count} OK").format(count=result.correct)) + + if result.fixed: + output.append(green(_("{count} fixed").format(count=result.fixed))) + else: + output.append(_("{count} fixed").format(count=result.fixed)) + + if result.skipped: + output.append(yellow(_("{count} skipped").format(count=result.skipped))) + else: + output.append(_("{count} skipped").format(count=result.skipped)) + + if result.failed: + output.append(red(_("{count} failed").format(count=result.failed))) + else: + output.append(_("{count} failed").format(count=result.failed)) + + return ", ".join(output) + + +def handle_apply_result(node, item, status_code, interactive, changes=None): + formatted_result = format_item_result( + status_code, + node.name, + item.bundle.name if item.bundle else "", # dummy items don't have bundles + item.id, + interactive=interactive, + changes=changes, + ) + if formatted_result is not None: + if status_code == Item.STATUS_FAILED: + io.stderr(formatted_result) + else: + io.stdout(formatted_result) + + +def apply_items( + node, + autoskip_selector="", + my_soft_locks=(), + other_peoples_soft_locks=(), + workers=1, + interactive=False, + profiling=False, +): + with io.job(_(" {node} processing dependencies...").format(node=node.name)): + item_queue = ItemQueue(node.items) + + results = [] + + def tasks_available(): + return bool(item_queue.items_without_deps) + + def next_task(): + item, skipped_items = item_queue.pop() + for skipped_item in skipped_items: + handle_apply_result( + node, + skipped_item, + Item.STATUS_SKIPPED, + interactive, + changes=[_("no pre-trigger")], + ) + results.append((skipped_item.id, Item.STATUS_SKIPPED, timedelta(0))) + + return { + 'task_id': "{}:{}".format(node.name, item.id), + 'target': item.apply, + 
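+                # presumably consumed by the WorkerPool, which would call
+                # target(**kwargs) for each task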
'kwargs': { + 'autoskip_selector': autoskip_selector, + 'my_soft_locks': my_soft_locks, + 'other_peoples_soft_locks': other_peoples_soft_locks, + 'interactive': interactive, + }, + } + + def handle_result(task_id, return_value, duration): + item_id = task_id.split(":", 1)[1] + item = find_item(item_id, item_queue.pending_items) + + status_code, changes = return_value + + if status_code == Item.STATUS_FAILED: + for skipped_item in item_queue.item_failed(item): + handle_apply_result( + node, + skipped_item, + Item.STATUS_SKIPPED, + interactive, + changes=[_("dep failed")], + ) + results.append((skipped_item.id, Item.STATUS_SKIPPED, timedelta(0))) + elif status_code in (Item.STATUS_FIXED, Item.STATUS_ACTION_SUCCEEDED): + item_queue.item_fixed(item) + elif status_code == Item.STATUS_OK: + item_queue.item_ok(item) + elif status_code == Item.STATUS_SKIPPED: + for skipped_item in item_queue.item_skipped(item): + skipped_reason = [_("dep skipped")] + for lock in other_peoples_soft_locks: + for selector in lock['items']: + if skipped_item.covered_by_autoskip_selector(selector): + skipped_reason = [_("soft locked")] + break + handle_apply_result( + node, + skipped_item, + Item.STATUS_SKIPPED, + interactive, + changes=skipped_reason, + ) + results.append((skipped_item.id, Item.STATUS_SKIPPED, timedelta(0))) + else: + raise AssertionError(_( + "unknown item status returned for {item}: {status}".format( + item=item.id, + status=repr(status_code), + ), + )) + + handle_apply_result(node, item, status_code, interactive, changes=changes) + if not isinstance(item, DummyItem): + results.append((item.id, status_code, duration)) + + worker_pool = WorkerPool( + tasks_available, + next_task, + handle_result=handle_result, + pool_id="apply_{}".format(node.name), + workers=workers, + ) + worker_pool.run() + + # we have no items without deps left and none are processing + # there must be a loop + if item_queue.items_with_deps: + io.debug(_( + "There was a dependency problem. Look at the debug.svg generated " + "by the following command and try to find a loop:\n" + "printf '{}' | dot -Tsvg -odebug.svg" + ).format("\\n".join(graph_for_items(node.name, item_queue.items_with_deps)))) + + raise ItemDependencyError( + _("bad dependencies between these items: {}").format( + ", ".join([i.id for i in item_queue.items_with_deps]), + ) + ) + + return results + + +def _flatten_group_hierarchy(groups): + """ + Takes a list of groups and returns a list of group names ordered so + that parent groups will appear before any of their subgroups. 
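+
+    For example (hypothetical group names): if group2 and group3 are
+    subgroups of group1, a valid result is
+    ["group1", "group2", "group3"] - group1 must come first, while the
+    relative order of its subgroups is arbitrary.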
+ """ + # dict mapping groups to subgroups + child_groups = {} + for group in groups: + child_groups[group.name] = list(names(group.subgroups)) + + # dict mapping groups to parent groups + parent_groups = {} + for child_group in child_groups.keys(): + parent_groups[child_group] = [] + for parent_group, subgroups in child_groups.items(): + if child_group in subgroups: + parent_groups[child_group].append(parent_group) + + order = [] + + while True: + top_level_group = None + for group, parents in parent_groups.items(): + if parents: + continue + else: + top_level_group = group + break + if not top_level_group: + if parent_groups: + raise RuntimeError( + _("encountered subgroup loop that should have been detected") + ) + else: + break + order.append(top_level_group) + del parent_groups[top_level_group] + for group in parent_groups.keys(): + if top_level_group in parent_groups[group]: + parent_groups[group].remove(top_level_group) + + return order + + +def format_item_result(result, node, bundle, item, interactive=False, changes=None): + if changes is True: + changes_text = "({})".format(_("create")) + elif changes is False: + changes_text = "({})".format(_("remove")) + elif changes is None: + changes_text = "" + else: + changes_text = "({})".format(", ".join(sorted(changes))) + if result == Item.STATUS_FAILED: + return "{x} {node} {bundle} {item} {status} {changes}".format( + bundle=bold(bundle), + changes=changes_text, + item=item, + node=bold(node), + status=red(_("failed")), + x=bold(red("✘")), + ) + elif result == Item.STATUS_ACTION_SUCCEEDED: + return "{x} {node} {bundle} {item} {status}".format( + bundle=bold(bundle), + item=item, + node=bold(node), + status=green(_("succeeded")), + x=bold(green("✓")), + ) + elif result == Item.STATUS_SKIPPED: + return "{x} {node} {bundle} {item} {status} {changes}".format( + bundle=bold(bundle), + changes=changes_text, + item=item, + node=bold(node), + x=bold(yellow("»")), + status=yellow(_("skipped")), + ) + elif result == Item.STATUS_FIXED: + return "{x} {node} {bundle} {item} {status} {changes}".format( + bundle=bold(bundle), + changes=changes_text, + item=item, + node=bold(node), + x=bold(green("✓")), + status=green(_("fixed")), + ) + + +class Node(object): + OS_FAMILY_BSD = ( + 'freebsd', + 'macos', + 'netbsd', + 'openbsd', + ) + OS_FAMILY_DEBIAN = ( + 'debian', + 'ubuntu', + 'raspbian', + ) + OS_FAMILY_REDHAT = ( + 'rhel', + 'centos', + 'fedora', + ) + + OS_FAMILY_LINUX = ( + 'amazonlinux', + 'arch', + 'opensuse', + 'gentoo', + 'linux', + 'oraclelinux', + ) + \ + OS_FAMILY_DEBIAN + \ + OS_FAMILY_REDHAT + + OS_KNOWN = OS_FAMILY_BSD + OS_FAMILY_LINUX + + def __init__(self, name, infodict=None): + if infodict is None: + infodict = {} + + if not validate_name(name): + raise RepositoryError(_("'{}' is not a valid node name").format(name)) + + self.name = name + self._bundles = infodict.get('bundles', []) + self._compiling_metadata = Lock() + self._dynamic_group_lock = Lock() + self._dynamic_groups_resolved = False # None means we're currently doing it + self._metadata_so_far = {} + self._node_metadata = infodict.get('metadata', {}) + self._ssh_conn_established = False + self._ssh_first_conn_lock = Lock() + self.add_ssh_host_keys = False + self.hostname = infodict.get('hostname', self.name) + + for attr in GROUP_ATTR_DEFAULTS: + setattr(self, "_{}".format(attr), infodict.get(attr)) + + def __lt__(self, other): + return self.name < other.name + + def __repr__(self): + return "".format(self.name) + + @cached_property + def bundles(self): + with 
io.job(_(" {node} loading bundles...").format(node=self.name)): + added_bundles = [] + found_bundles = [] + for group in self.groups: + for bundle_name in group.bundle_names: + found_bundles.append(bundle_name) + + for bundle_name in found_bundles + list(self._bundles): + if bundle_name not in added_bundles: + added_bundles.append(bundle_name) + try: + yield Bundle(self, bundle_name) + except NoSuchBundle: + raise NoSuchBundle(_( + "Node '{node}' wants bundle '{bundle}', but it doesn't exist." + ).format( + bundle=bundle_name, + node=self.name, + )) + + @cached_property + def cdict(self): + node_dict = {} + for item in self.items: + try: + node_dict[item.id] = item.hash() + except AttributeError: # actions have no cdict + pass + return node_dict + + def covered_by_autoskip_selector(self, autoskip_selector): + """ + True if this node should be skipped based on the given selector + string (e.g. "node:foo,group:bar"). + """ + components = [c.strip() for c in autoskip_selector.split(",")] + if "node:{}".format(self.name) in components: + return True + for group in self.groups: + if "group:{}".format(group.name) in components: + return True + return False + + def group_membership_hash(self): + return hash_statedict(sorted(names(self.groups))) + + @cached_property + def groups(self): + _groups = set(self.repo._static_groups_for_node(self)) + # lock to avoid infinite recursion when .members_add/remove + # use stuff like node.in_group() that in turn calls this function + if self._dynamic_group_lock.acquire(False): + cache_result = True + self._dynamic_groups_resolved = None + # first we remove ourselves from all static groups whose + # .members_remove matches us + for group in list(_groups): + if group.members_remove is not None and group.members_remove(self): + try: + _groups.remove(group) + except KeyError: + pass + # now add all groups whose .members_add (but not .members_remove) + # matches us + _groups = _groups.union(self._groups_dynamic) + self._dynamic_groups_resolved = True + self._dynamic_group_lock.release() + else: + cache_result = False + + # we have to add parent groups at the very end, since we might + # have added or removed subgroups thru .members_add/remove + for group in list(_groups): + for parent_group in group.parent_groups: + if cache_result: + with self._dynamic_group_lock: + self._dynamic_groups_resolved = None + if ( + not parent_group.members_remove or + not parent_group.members_remove(self) + ): + _groups.add(parent_group) + self._dynamic_groups_resolved = True + else: + _groups.add(parent_group) + + if cache_result: + return sorted(_groups) + else: + raise DontCache(sorted(_groups)) + + @property + def _groups_dynamic(self): + """ + Returns all groups whose members_add matches this node. 
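+    A group whose members_remove also matches the node is excluded
+    again, i.e. members_remove takes precedence over members_add.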
+ """ + _groups = set() + for group in self.repo.groups: + if group.members_add is not None and group.members_add(self): + _groups.add(group) + if group.members_remove is not None and group.members_remove(self): + try: + _groups.remove(group) + except KeyError: + pass + return _groups + + def has_any_bundle(self, bundle_list): + for bundle_name in bundle_list: + if self.has_bundle(bundle_name): + return True + return False + + def has_bundle(self, bundle_name): + for bundle in self.bundles: + if bundle.name == bundle_name: + return True + return False + + def hash(self): + return hash_statedict(self.cdict) + + def in_any_group(self, group_list): + for group_name in group_list: + if self.in_group(group_name): + return True + return False + + def in_group(self, group_name): + for group in self.groups: + if group.name == group_name: + return True + return False + + @cached_property + def items(self): + if not self.dummy: + for bundle in self.bundles: + for item in bundle.items: + yield item + + @property + def _static_items(self): + for bundle in self.bundles: + for item in bundle._static_items: + yield item + + def apply( + self, + autoskip_selector="", + interactive=False, + force=False, + workers=4, + profiling=False, + ): + if not list(self.items): + io.stdout(_("{x} {node} has no items").format(node=bold(self.name), x=yellow("!"))) + return None + + if self.covered_by_autoskip_selector(autoskip_selector): + io.debug(_("skipping {}, matches autoskip selector").format(self.name)) + return None + + start = datetime.now() + + io.stdout(_("{x} {node} run started at {time}").format( + node=bold(self.name), + time=start.strftime("%Y-%m-%d %H:%M:%S"), + x=blue("i"), + )) + self.repo.hooks.node_apply_start( + self.repo, + self, + interactive=interactive, + ) + + try: + with NodeLock(self, interactive=interactive, ignore=force) as lock: + item_results = apply_items( + self, + autoskip_selector=autoskip_selector, + my_soft_locks=lock.my_soft_locks, + other_peoples_soft_locks=lock.other_peoples_soft_locks, + workers=workers, + interactive=interactive, + profiling=profiling, + ) + except NodeLockedException as e: + if not interactive: + io.stderr(_("{x} {node} already locked by {user} at {date} ({duration} ago, `bw apply -f` to override)").format( + date=bold(e.args[0]['date']), + duration=e.args[0]['duration'], + node=bold(self.name), + user=bold(e.args[0]['user']), + x=red("!"), + )) + item_results = [] + result = ApplyResult(self, item_results) + result.start = start + result.end = datetime.now() + + io.stdout(_("{x} {node} run completed after {time}s").format( + node=bold(self.name), + time=(result.end - start).total_seconds(), + x=blue("i"), + )) + io.stdout(_("{x} {node} stats: {stats}").format( + node=bold(self.name), + stats=format_node_result(result), + x=blue("i"), + )) + + self.repo.hooks.node_apply_end( + self.repo, + self, + duration=result.duration, + interactive=interactive, + result=result, + ) + + return result + + def download(self, remote_path, local_path, ignore_failure=False): + return operations.download( + self.hostname, + remote_path, + local_path, + add_host_keys=True if environ.get('BW_ADD_HOST_KEYS', False) == "1" else False, + wrapper_inner=self.cmd_wrapper_inner, + wrapper_outer=self.cmd_wrapper_outer, + ) + + def get_item(self, item_id): + return find_item(item_id, self.items) + + @property + def metadata(self): + """ + Returns full metadata for a node. MUST NOT be used from inside a + metadata processor. Use .partial_metadata instead. 
+ """ + if self._dynamic_groups_resolved is None: + # return only metadata set directly at the node level if + # we're still in the process of figuring out which groups + # we belong to + return self._node_metadata + else: + return self.repo._metadata_for_node(self.name, partial=False) + + def metadata_hash(self): + return hash_metadata(self.metadata) + + @property + def metadata_processors(self): + for bundle in self.bundles: + for metadata_processor in bundle.metadata_processors: + yield ( + "{}.{}".format( + bundle.name, + metadata_processor.__name__, + ), + metadata_processor, + ) + + @property + def partial_metadata(self): + """ + Only to be used from inside metadata processors. Can't use the + normal .metadata there because it might deadlock when nodes + have interdependent metadata. + + It's OK for metadata processors to work with partial metadata + because they will be fed all metadata updates until no more + changes are made by any metadata processor. + """ + return self.repo._metadata_for_node(self.name, partial=True) + + def run(self, command, may_fail=False, log_output=False): + if log_output: + def log_function(msg): + io.stdout("{x} {node} {msg}".format( + node=bold(self.name), + msg=force_text(msg).rstrip("\n"), + x=cyan("›"), + )) + else: + log_function = None + + add_host_keys = True if environ.get('BW_ADD_HOST_KEYS', False) == "1" else False + + if not self._ssh_conn_established: + # Sometimes we're opening SSH connections to a node too fast + # for OpenSSH to establish the ControlMaster socket for the + # second and following connections to use. + # To prevent this, we just wait until a first dummy command + # has completed on the node before trying to reuse the + # multiplexed connection. + if self._ssh_first_conn_lock.acquire(False): + try: + operations.run(self.hostname, "true", add_host_keys=add_host_keys) + self._ssh_conn_established = True + finally: + self._ssh_first_conn_lock.release() + else: + # we didn't get the lock immediately, now we just wait + # until it is released before we proceed + with self._ssh_first_conn_lock: + pass + + return operations.run( + self.hostname, + command, + add_host_keys=add_host_keys, + ignore_failure=may_fail, + log_function=log_function, + wrapper_inner=self.cmd_wrapper_inner, + wrapper_outer=self.cmd_wrapper_outer, + ) + + def test(self, ignore_missing_faults=False, workers=4): + with io.job(_(" {node} checking for metadata collisions...").format(node=self.name)): + check_for_unsolvable_metadata_key_conflicts(self) + io.stdout(_("{x} {node} has no metadata collisions").format( + x=green("✓"), + node=bold(self.name), + )) + if self.items: + test_items(self, ignore_missing_faults=ignore_missing_faults, workers=workers) + else: + io.stdout(_("{x} {node} has no items").format(node=bold(self.name), x=yellow("!"))) + + self.repo.hooks.test_node(self.repo, self) + + def upload(self, local_path, remote_path, mode=None, owner="", group=""): + return operations.upload( + self.hostname, + local_path, + remote_path, + add_host_keys=True if environ.get('BW_ADD_HOST_KEYS', False) == "1" else False, + group=group, + mode=mode, + owner=owner, + wrapper_inner=self.cmd_wrapper_inner, + wrapper_outer=self.cmd_wrapper_outer, + ) + + def verify(self, show_all=False, workers=4): + bad = 0 + good = 0 + if not self.items: + io.stdout(_("{x} {node} has no items").format(node=bold(self.name), x=yellow("!"))) + else: + for item_status in verify_items( + self, + show_all=show_all, + workers=workers, + ): + if item_status: + good += 1 + else: + bad += 1 + + 
return {'good': good, 'bad': bad} + + +def build_attr_property(attr, default): + def method(self): + attr_source = None + attr_value = None + group_order = [ + self.repo.get_group(group_name) + for group_name in _flatten_group_hierarchy(self.groups) + ] + + for group in group_order: + if getattr(group, attr) is not None: + attr_source = "group:{}".format(group.name) + attr_value = getattr(group, attr) + + if getattr(self, "_{}".format(attr)) is not None: + attr_source = "node" + attr_value = getattr(self, "_{}".format(attr)) + + if attr_value is None: + attr_source = "default" + attr_value = default + + io.debug(_("node {node} gets its {attr} attribute from: {source}").format( + node=self.name, + attr=attr, + source=attr_source, + )) + if self._dynamic_groups_resolved: + return attr_value + else: + raise DontCache(attr_value) + method.__name__ = str("_group_attr_{}".format(attr)) # required for cached_property + # str() for Python 2 compatibility + return cached_property(method) + +for attr, default in GROUP_ATTR_DEFAULTS.items(): + setattr(Node, attr, build_attr_property(attr, default)) + + +def test_items(node, ignore_missing_faults=False, workers=1): + item_queue = ItemTestQueue(node.items) + + def tasks_available(): + return bool(item_queue.items_without_deps) + + def next_task(): + try: + # Get the next non-DummyItem in the queue. + while True: + item = item_queue.pop() + if not isinstance(item, DummyItem): + break + except IndexError: # no more items available right now + return None + else: + return { + 'task_id': item.node.name + ":" + item.bundle.name + ":" + item.id, + 'target': item._test, + } + + def handle_result(task_id, return_value, duration): + node_name, bundle_name, item_id = task_id.split(":", 2) + io.stdout("{x} {node} {bundle} {item}".format( + bundle=bold(bundle_name), + item=item_id, + node=bold(node_name), + x=green("✓"), + )) + + def handle_exception(task_id, exception, traceback): + node_name, bundle_name, item_id = task_id.split(":", 2) + if ignore_missing_faults and isinstance(exception, FaultUnavailable): + io.stderr(_("{x} {node} {bundle} {item} ({msg})").format( + bundle=bold(bundle_name), + item=item_id, + msg=yellow(_("Fault unavailable")), + node=bold(node_name), + x=yellow("»"), + )) + else: + io.stderr("{x} {node} {bundle} {item}".format( + bundle=bold(bundle_name), + item=item_id, + node=bold(node_name), + x=red("!"), + )) + io.stderr(traceback) + io.stderr("{}: {}".format(type(exception), str(exception))) + exit(1) + + worker_pool = WorkerPool( + tasks_available, + next_task, + handle_result=handle_result, + handle_exception=handle_exception, + pool_id="test_{}".format(node.name), + workers=workers, + ) + worker_pool.run() + + if item_queue.items_with_deps: + io.stderr(_( + "There was a dependency problem. 
Look at the debug.svg generated " + "by the following command and try to find a loop:\n" + "printf '{}' | dot -Tsvg -odebug.svg" + ).format("\\n".join(graph_for_items(node.name, item_queue.items_with_deps)))) + + raise ItemDependencyError( + _("bad dependencies between these items: {}").format( + ", ".join([i.id for i in item_queue.items_with_deps]), + ) + ) + + +def verify_items(node, show_all=False, workers=1): + items = [] + for item in node.items: + if ( + not item.ITEM_TYPE_NAME == 'action' and + not item.triggered + ): + items.append(item) + + def tasks_available(): + return bool(items) + + def next_task(): + while True: + try: + item = items.pop() + except IndexError: + return None + if item._faults_missing_for_attributes: + if item.error_on_missing_fault: + item._raise_for_faults() + else: + io.stdout(_("{x} {node} {bundle} {item} ({msg})").format( + bundle=bold(item.bundle.name), + item=item.id, + msg=yellow(_("Fault unavailable")), + node=bold(node.name), + x=yellow("»"), + )) + else: + return { + 'task_id': node.name + ":" + item.bundle.name + ":" + item.id, + 'target': item.get_status, + } + + def handle_result(task_id, item_status, duration): + node_name, bundle_name, item_id = task_id.split(":", 2) + if not item_status.correct: + if item_status.must_be_created: + changes_text = _("create") + elif item_status.must_be_deleted: + changes_text = _("remove") + else: + changes_text = ", ".join(sorted(item_status.keys_to_fix)) + io.stderr("{x} {node} {bundle} {item} ({changes})".format( + bundle=bold(bundle_name), + changes=changes_text, + item=item_id, + node=bold(node_name), + x=red("✘"), + )) + return False + else: + if show_all: + io.stdout("{x} {node} {bundle} {item}".format( + bundle=bold(bundle_name), + item=item_id, + node=bold(node_name), + x=green("✓"), + )) + return True + + worker_pool = WorkerPool( + tasks_available, + next_task, + handle_result, + pool_id="verify_{}".format(node.name), + workers=workers, + ) + return worker_pool.run() diff --git a/bundlewrap/operations.py b/bundlewrap/operations.py new file mode 100644 index 0000000..d22302a --- /dev/null +++ b/bundlewrap/operations.py @@ -0,0 +1,295 @@ +# -*- coding: utf-8 -*- +from __future__ import unicode_literals + +from pipes import quote +from select import select +from shlex import split +from subprocess import Popen, PIPE +from threading import Event, Thread +from os import close, environ, pipe, read, setpgrp + +from .exceptions import RemoteException +from .utils import cached_property +from .utils.text import force_text, LineBuffer, mark_for_translation as _, randstr +from .utils.ui import io + + +def output_thread_body(line_buffer, read_fd, quit_event, read_until_eof): + # see run() for details + while True: + r, w, x = select([read_fd], [], [], 0.1) + if r: + chunk = read(read_fd, 1024) + if chunk: + line_buffer.write(chunk) + else: # EOF + return + elif quit_event.is_set() and not read_until_eof: + # one last chance to read output after the child process + # has died + while True: + r, w, x = select([read_fd], [], [], 0) + if r: + line_buffer.write(read(read_fd, 1024)) + else: + break + return + + +def download( + hostname, + remote_path, + local_path, + add_host_keys=False, + wrapper_inner="{}", + wrapper_outer="{}", +): + """ + Download a file. + """ + io.debug(_("downloading {host}:{path} -> {target}").format( + host=hostname, path=remote_path, target=local_path)) + + result = run( + hostname, + "cat {}".format(quote(remote_path)), # See issue #39. 
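+        # (presumably: reading with cat lets the command pass through
+        # wrapper_inner/wrapper_outer, e.g. for sudo, which scp cannot)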
+ add_host_keys=add_host_keys, + wrapper_inner=wrapper_inner, + wrapper_outer=wrapper_outer, + ) + + if result.return_code == 0: + with open(local_path, "wb") as f: + f.write(result.stdout) + else: + raise RemoteException(_( + "reading file '{path}' on {host} failed: {error}" + ).format( + error=force_text(result.stderr) + force_text(result.stdout), + host=hostname, + path=remote_path, + )) + + +class RunResult(object): + def __init__(self): + self.return_code = None + self.stderr = None + self.stdout = None + + @cached_property + def stderr_text(self): + return force_text(self.stderr) + + @cached_property + def stdout_text(self): + return force_text(self.stdout) + + +def run( + hostname, + command, + add_host_keys=False, + ignore_failure=False, + log_function=None, + wrapper_inner="{}", + wrapper_outer="{}", +): + """ + Runs a command on a remote system. + """ + # LineBuffer objects take care of always printing complete lines + # which have been properly terminated by a newline. This is only + # relevant when using `bw run`. + # Does nothing when log_function is None. + stderr_lb = LineBuffer(log_function) + stdout_lb = LineBuffer(log_function) + + # Create pipes which will be used by the SSH child process. We do + # not use subprocess.PIPE because we need to be able to continuously + # check those pipes for new output, so we can feed it to the + # LineBuffers during `bw run`. + stdout_fd_r, stdout_fd_w = pipe() + stderr_fd_r, stderr_fd_w = pipe() + + # Launch OpenSSH. It's important that SSH gets a dummy stdin, i.e. + # it must *not* read from the terminal. Otherwise, it can steal user + # input. + ssh_command = [ + "ssh", + "-o", "KbdInteractiveAuthentication=no", + "-o", "PasswordAuthentication=no", + "-o", "StrictHostKeyChecking=no" if add_host_keys else "StrictHostKeyChecking=yes", + ] + extra_args = environ.get("BW_SSH_ARGS", "").strip() + if extra_args: + ssh_command.extend(split(extra_args)) + ssh_command.append(hostname) + ssh_command.append(wrapper_outer.format(quote(wrapper_inner.format(command)))) + cmd_id = randstr(length=4).upper() + io.debug("running command with ID {}: {}".format(cmd_id, " ".join(ssh_command))) + + ssh_process = Popen( + ssh_command, + preexec_fn=setpgrp, + stdin=PIPE, + stderr=stderr_fd_w, + stdout=stdout_fd_w, + ) + io._ssh_pids.append(ssh_process.pid) + + quit_event = Event() + stdout_thread = Thread( + args=(stdout_lb, stdout_fd_r, quit_event, True), + target=output_thread_body, + ) + stderr_thread = Thread( + args=(stderr_lb, stderr_fd_r, quit_event, False), + target=output_thread_body, + ) + stdout_thread.start() + stderr_thread.start() + + try: + ssh_process.communicate() + finally: + # Once we end up here, the OpenSSH process has terminated. + # + # Now, the big question is: Why do we need an Event here? + # + # Problem is, a user could use SSH multiplexing with + # auto-forking (e.g., "ControlPersist 10m"). In this case, + # OpenSSH forks another process which holds the "master" + # connection. This forked process *inherits* our pipes (at least + # for stderr). Thus, only when that master process finally + # terminates (possibly after many minutes), we will be informed + # about EOF on our stderr pipe. That doesn't work. bw will hang. + # + # So, instead, we use a busy loop in output_thread_body() which + # checks for quit_event being set. Unfortunately there is no way + # to be absolutely sure that we received all output from stderr + # because we never get a proper EOF there. 
All we can do is hope + # that all output has arrived on the reading end of the pipe by + # the time the quit_event is checked in the thread. + # + # Luckily stdout is a somewhat simpler affair: we can just close + # the writing end of the pipe, causing the reader thread to + # shut down as it sees the EOF. + io._ssh_pids.remove(ssh_process.pid) + quit_event.set() + close(stdout_fd_w) + stdout_thread.join() + stderr_thread.join() + stdout_lb.close() + stderr_lb.close() + for fd in (stdout_fd_r, stderr_fd_r, stderr_fd_w): + close(fd) + + io.debug("command with ID {} finished with return code {}".format( + cmd_id, + ssh_process.returncode, + )) + + result = RunResult() + result.stdout = stdout_lb.record.getvalue() + result.stderr = stderr_lb.record.getvalue() + result.return_code = ssh_process.returncode + + if result.return_code != 0: + error_msg = _( + "Non-zero return code ({rcode}) running '{command}' " + "with ID {id} on '{host}':\n\n{result}\n\n" + ).format( + command=command, + host=hostname, + id=cmd_id, + rcode=result.return_code, + result=force_text(result.stdout) + force_text(result.stderr), + ) + io.debug(error_msg) + if not ignore_failure or result.return_code == 255: + raise RemoteException(error_msg) + return result + + +def upload( + hostname, + local_path, + remote_path, + add_host_keys=False, + group="", + mode=None, + owner="", + wrapper_inner="{}", + wrapper_outer="{}", +): + """ + Upload a file. + """ + io.debug(_("uploading {path} -> {host}:{target}").format( + host=hostname, path=local_path, target=remote_path)) + temp_filename = ".bundlewrap_tmp_" + randstr() + + scp_process = Popen( + [ + "scp", + "-o", + "StrictHostKeyChecking=no" if add_host_keys else "StrictHostKeyChecking=yes", + local_path, + "{}:{}".format(hostname, temp_filename), + ], + preexec_fn=setpgrp, + stdin=PIPE, + stdout=PIPE, + stderr=PIPE, + ) + io._ssh_pids.append(scp_process.pid) + stdout, stderr = scp_process.communicate() + io._ssh_pids.remove(scp_process.pid) + + if scp_process.returncode != 0: + raise RemoteException(_( + "Upload to {host} failed for {failed}:\n\n{result}\n\n" + ).format( + failed=remote_path, + host=hostname, + result=force_text(stdout) + force_text(stderr), + )) + + if owner or group: + if group: + group = ":" + quote(group) + run( + hostname, + "chown {}{} {}".format( + quote(owner), + group, + quote(temp_filename), + ), + add_host_keys=add_host_keys, + wrapper_inner=wrapper_inner, + wrapper_outer=wrapper_outer, + ) + + if mode: + run( + hostname, + "chmod {} {}".format( + mode, + quote(temp_filename), + ), + add_host_keys=add_host_keys, + wrapper_inner=wrapper_inner, + wrapper_outer=wrapper_outer, + ) + + run( + hostname, + "mv -f {} {}".format( + quote(temp_filename), + quote(remote_path), + ), + add_host_keys=add_host_keys, + wrapper_inner=wrapper_inner, + wrapper_outer=wrapper_outer, + ) diff --git a/bundlewrap/plugins.py b/bundlewrap/plugins.py new file mode 100644 index 0000000..a629b6e --- /dev/null +++ b/bundlewrap/plugins.py @@ -0,0 +1,188 @@ +# -*- coding: utf-8 -*- +from __future__ import unicode_literals + +from json import dumps, loads +from os import chmod, remove +from os.path import exists, join +from stat import S_IREAD, S_IRGRP, S_IROTH + +from requests import get + +from .exceptions import NoSuchPlugin, PluginError, PluginLocalConflict +from .utils import download, hash_local_file +from .utils.text import mark_for_translation as _ +from .utils.ui import io + + +BASE_URL = "https://raw.githubusercontent.com/bundlewrap/plugins/master" + + +class 
PluginManager(object): + def __init__(self, path, base_url=BASE_URL): + self.base_url = base_url + self.path = path + if exists(join(self.path, "plugins.json")): + with open(join(self.path, "plugins.json")) as f: + self.plugin_db = loads(f.read()) + else: + self.plugin_db = {} + + @property + def index(self): + return get( + "{}/index.json".format(self.base_url) + ).json() + + def install(self, plugin, force=False): + if plugin in self.plugin_db: + raise PluginError(_("plugin '{plugin}' is already installed").format(plugin=plugin)) + + manifest = self.manifest_for_plugin(plugin) + + for file in manifest['provides']: + target_path = join(self.path, file) + if exists(target_path) and not force: + raise PluginLocalConflict(_( + "cannot install '{plugin}' because it provides " + "'{path}' which already exists" + ).format(path=target_path, plugin=plugin)) + + url = "{}/{}/{}".format(self.base_url, plugin, file) + download(url, target_path) + + # make file read-only to discourage users from editing them + # which will block future updates of the plugin + chmod(target_path, S_IREAD | S_IRGRP | S_IROTH) + + self.record_as_installed(plugin, manifest) + + return manifest + + def list(self): + for plugin, info in self.plugin_db.items(): + yield (plugin, info['version']) + + def local_modifications(self, plugin): + try: + plugin_data = self.plugin_db[plugin] + except KeyError: + raise NoSuchPlugin(_( + "The plugin '{plugin}' is not installed." + ).format(plugin=plugin)) + local_changes = [] + for filename, checksum in plugin_data['files'].items(): + target_path = join(self.path, filename) + actual_checksum = hash_local_file(target_path) + if actual_checksum != checksum: + local_changes.append(( + target_path, + actual_checksum, + checksum, + )) + return local_changes + + def manifest_for_plugin(self, plugin): + r = get( + "{}/{}/manifest.json".format(self.base_url, plugin) + ) + if r.status_code == 404: + raise NoSuchPlugin(plugin) + else: + return r.json() + + def record_as_installed(self, plugin, manifest): + file_hashes = {} + + for file in manifest['provides']: + target_path = join(self.path, file) + file_hashes[file] = hash_local_file(target_path) + + self.plugin_db[plugin] = { + 'files': file_hashes, + 'version': manifest['version'], + } + self.write_db() + + def remove(self, plugin, force=False): + if plugin not in self.plugin_db: + raise NoSuchPlugin(_("plugin '{plugin}' is not installed").format(plugin=plugin)) + + for file, db_checksum in self.plugin_db[plugin]['files'].items(): + file_path = join(self.path, file) + if not exists(file_path): + continue + + current_checksum = hash_local_file(file_path) + if db_checksum != current_checksum and not force: + io.stderr(_( + "not removing '{path}' because it has been modified since installation" + ).format(path=file_path)) + continue + + remove(file_path) + + del self.plugin_db[plugin] + self.write_db() + + def search(self, term): + term = term.lower() + for plugin_name, plugin_data in self.index.items(): + if term in plugin_name.lower() or term in plugin_data['desc'].lower(): + yield (plugin_name, plugin_data['desc']) + + def update(self, plugin, check_only=False, force=False): + if plugin not in self.plugin_db: + raise PluginError(_("plugin '{plugin}' is not installed").format(plugin=plugin)) + + # before updating anything, we need to check for local modifications + local_changes = self.local_modifications(plugin) + if local_changes and not force: + files = [path for path, c1, c2 in local_changes] + raise PluginLocalConflict(_( + "cannot update 
'{plugin}' because the following files have been modified locally:" + "\n{files}" + ).format(files="\n".join(files), plugin=plugin)) + + manifest = self.manifest_for_plugin(plugin) + + for file in manifest['provides']: + file_path = join(self.path, file) + if exists(file_path) and file not in self.plugin_db[plugin]['files'] and not force: + # new version added a file that already existed locally + raise PluginLocalConflict(_( + "cannot update '{plugin}' because it would overwrite '{path}'" + ).format(path=file, plugin=plugin)) + + old_version = self.plugin_db[plugin]['version'] + new_version = manifest['version'] + + if not check_only and old_version != new_version: + # actually install files + for file in manifest['provides']: + target_path = join(self.path, file) + url = "{}/{}/{}".format(self.base_url, plugin, file) + download(url, target_path) + + # make file read-only to discourage users from editing them + # which will block future updates of the plugin + chmod(target_path, S_IREAD | S_IRGRP | S_IROTH) + + # check for files that have been removed in the new version + for file, db_checksum in self.plugin_db[plugin]['files'].items(): + if file not in manifest['provides']: + file_path = join(self.path, file) + current_checksum = hash_local_file(file_path) + if db_checksum != current_checksum and not force: + io.stderr(_( + "not removing '{path}' because it has been modified since installation" + ).format(path=file_path)) + continue + remove(file_path) + + self.record_as_installed(plugin, manifest) + + return (old_version, new_version) + + def write_db(self): + with open(join(self.path, "plugins.json"), 'w') as f: + f.write(dumps(self.plugin_db, indent=4, sort_keys=True)) diff --git a/bundlewrap/repo.py b/bundlewrap/repo.py new file mode 100644 index 0000000..6518c7d --- /dev/null +++ b/bundlewrap/repo.py @@ -0,0 +1,649 @@ +# -*- coding: utf-8 -*- +from __future__ import unicode_literals + +from imp import load_source +from os import listdir, mkdir +from os.path import isdir, isfile, join +from threading import Lock + +from pkg_resources import DistributionNotFound, require, VersionConflict + +from . 
import items, utils, VERSION_STRING +from .bundle import FILENAME_BUNDLE +from .exceptions import ( + BundleError, + NoSuchGroup, + NoSuchNode, + NoSuchRepository, + MissingRepoDependency, + RepositoryError, +) +from .group import Group +from .metadata import deepcopy_metadata +from .node import _flatten_group_hierarchy, Node +from .secrets import FILENAME_SECRETS, generate_initial_secrets_cfg, SecretProxy +from .utils import cached_property, merge_dict, names +from .utils.scm import get_rev +from .utils.statedict import hash_statedict +from .utils.text import mark_for_translation as _, red, validate_name +from .utils.ui import io, QUIT_EVENT + +DIRNAME_BUNDLES = "bundles" +DIRNAME_DATA = "data" +DIRNAME_HOOKS = "hooks" +DIRNAME_ITEM_TYPES = "items" +DIRNAME_LIBS = "libs" +FILENAME_GROUPS = "groups.py" +FILENAME_NODES = "nodes.py" +FILENAME_REQUIREMENTS = "requirements.txt" + +HOOK_EVENTS = ( + 'action_run_start', + 'action_run_end', + 'apply_start', + 'apply_end', + 'item_apply_start', + 'item_apply_end', + 'node_apply_start', + 'node_apply_end', + 'node_run_start', + 'node_run_end', + 'run_start', + 'run_end', + 'test', + 'test_node', +) + +INITIAL_CONTENT = { + FILENAME_GROUPS: _(""" +groups = { + #'group1': { + # 'bundles': ( + # 'bundle1', + # ), + # 'members': ( + # 'node1', + # ), + # 'subgroups': ( + # 'group2', + # ), + #}, + 'all': { + 'member_patterns': ( + r".*", + ), + }, +} + """), + + FILENAME_NODES: _(""" +nodes = { + 'node1': { + 'hostname': "localhost", + }, +} + """), + FILENAME_REQUIREMENTS: "bundlewrap>={}\n".format(VERSION_STRING), + FILENAME_SECRETS: generate_initial_secrets_cfg, +} +META_PROC_MAX_ITER = 1000 # maximum iterations for metadata processors + + +def groups_from_file(filepath, libs, repo_path, vault): + """ + Returns all groups as defined in the given groups.py. + """ + try: + flat_group_dict = utils.getattr_from_file( + filepath, + 'groups', + base_env={ + 'libs': libs, + 'repo_path': repo_path, + 'vault': vault, + }, + ) + except KeyError: + raise RepositoryError(_( + "{} must define a 'groups' variable" + ).format(filepath)) + for groupname, infodict in flat_group_dict.items(): + yield Group(groupname, infodict) + + +class HooksProxy(object): + def __init__(self, path): + self.__hook_cache = {} + self.__module_cache = {} + self.__path = path + self.__registered_hooks = None + + def __getattr__(self, attrname): + if attrname not in HOOK_EVENTS: + raise AttributeError + + if self.__registered_hooks is None: + self._register_hooks() + + event = attrname + + if event not in self.__hook_cache: + # build a list of files that define a hook for the event + files = [] + for filename, events in self.__registered_hooks.items(): + if event in events: + files.append(filename) + + # define a function that calls all hook functions + def hook(*args, **kwargs): + for filename in files: + self.__module_cache[filename][event](*args, **kwargs) + self.__hook_cache[event] = hook + + return self.__hook_cache[event] + + def _register_hooks(self): + """ + Builds an internal dictionary of defined hooks. + + Priming __module_cache here is just a performance shortcut and + could be left out. 
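+
+        A hook file is any .py file in the hooks/ directory whose name
+        does not start with an underscore; it simply defines functions
+        named after the events in HOOK_EVENTS. A minimal sketch
+        (hypothetical file name and body; argument names assumed):
+
+            # hooks/logging.py
+            def node_apply_start(repo, node, interactive=False, **kwargs):
+                print("applying to {}".format(node.name))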
+        """
+        self.__registered_hooks = {}
+
+        if not isdir(self.__path):
+            return
+
+        for filename in listdir(self.__path):
+            filepath = join(self.__path, filename)
+            if not filename.endswith(".py") or \
+                    not isfile(filepath) or \
+                    filename.startswith("_"):
+                continue
+            self.__module_cache[filename] = {}
+            self.__registered_hooks[filename] = []
+            for name, obj in utils.get_all_attrs_from_file(filepath).items():
+                if name not in HOOK_EVENTS:
+                    continue
+                self.__module_cache[filename][name] = obj
+                self.__registered_hooks[filename].append(name)
+
+
+def items_from_path(path):
+    """
+    Looks for Item subclasses in the given path.
+
+    An alternative method would involve metaclasses (as Django
+    does it), but then it gets very hard to have two separate repos
+    in the same process, because both of them would register config
+    item classes globally.
+    """
+    if not isdir(path):
+        # simply end the generator instead of raising StopIteration,
+        # which is an error inside generators as of PEP 479
+        return
+    for filename in listdir(path):
+        filepath = join(path, filename)
+        if not filename.endswith(".py") or \
+                not isfile(filepath) or \
+                filename.startswith("_"):
+            continue
+        for name, obj in \
+                utils.get_all_attrs_from_file(filepath).items():
+            if obj == items.Item or name.startswith("_"):
+                continue
+            try:
+                if issubclass(obj, items.Item):
+                    yield obj
+            except TypeError:
+                pass
+
+
+class LibsProxy(object):
+    def __init__(self, path):
+        self.__module_cache = {}
+        self.__path = path
+
+    def __getattr__(self, attrname):
+        if attrname.startswith("__") and attrname.endswith("__"):
+            raise AttributeError(attrname)
+        if attrname not in self.__module_cache:
+            filename = attrname + ".py"
+            filepath = join(self.__path, filename)
+            try:
+                m = load_source('bundlewrap.repo.libs_{}'.format(attrname), filepath)
+            except:
+                io.stderr(_("Exception while trying to load {}:").format(filepath))
+                raise
+            self.__module_cache[attrname] = m
+        return self.__module_cache[attrname]
+
+
+def nodes_from_file(filepath, libs, repo_path, vault):
+    """
+    Yields nodes as defined in the given nodes.py.
+    """
+    try:
+        flat_node_dict = utils.getattr_from_file(
+            filepath,
+            'nodes',
+            base_env={
+                'libs': libs,
+                'repo_path': repo_path,
+                'vault': vault,
+            },
+        )
+    except KeyError:
+        raise RepositoryError(
+            _("{} must define a 'nodes' variable").format(filepath)
+        )
+    for nodename, infodict in flat_node_dict.items():
+        yield Node(nodename, infodict)
+
+
+class Repository(object):
+    def __init__(self, repo_path=None):
+        self.path = "/dev/null" if repo_path is None else repo_path
+
+        self._set_path(self.path)
+
+        self.bundle_names = []
+        self.group_dict = {}
+        self.node_dict = {}
+        self._node_metadata_complete = {}
+        self._node_metadata_partial = {}
+        self._node_metadata_static_complete = set()
+        self._node_metadata_lock = Lock()
+
+        if repo_path is not None:
+            self.populate_from_path(repo_path)
+        else:
+            self.item_classes = list(items_from_path(items.__path__[0]))
+
+    def __eq__(self, other):
+        if self.path == "/dev/null":
+            # in-memory repos are never equal
+            return False
+        return self.path == other.path
+
+    def __repr__(self):
+        return "<Repository at '{}'>".format(self.path)
+
+    @staticmethod
+    def is_repo(path):
+        """
+        Validates whether the given path is a bundlewrap repository.
+        """
+        try:
+            assert isdir(path)
+            assert isfile(join(path, "nodes.py"))
+            assert isfile(join(path, "groups.py"))
+        except AssertionError:
+            return False
+        return True
+
+    def add_group(self, group):
+        """
+        Adds the given group object to this repo.
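+
+        Example (hypothetical group and node names):
+
+            repo.add_group(Group("group1", {'members': ["node1"]}))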
+ """ + if group.name in utils.names(self.nodes): + raise RepositoryError(_("you cannot have a node and a group " + "both named '{}'").format(group.name)) + if group.name in utils.names(self.groups): + raise RepositoryError(_("you cannot have two groups " + "both named '{}'").format(group.name)) + group.repo = self + self.group_dict[group.name] = group + + def add_node(self, node): + """ + Adds the given node object to this repo. + """ + if node.name in utils.names(self.groups): + raise RepositoryError(_("you cannot have a node and a group " + "both named '{}'").format(node.name)) + if node.name in utils.names(self.nodes): + raise RepositoryError(_("you cannot have two nodes " + "both named '{}'").format(node.name)) + + node.repo = self + self.node_dict[node.name] = node + + @cached_property + def cdict(self): + repo_dict = {} + for node in self.nodes: + repo_dict[node.name] = node.hash() + return repo_dict + + @classmethod + def create(cls, path): + """ + Creates and returns a repository at path, which must exist and + be empty. + """ + for filename, content in INITIAL_CONTENT.items(): + if callable(content): + content = content() + with open(join(path, filename), 'w') as f: + f.write(content.strip() + "\n") + + mkdir(join(path, DIRNAME_BUNDLES)) + mkdir(join(path, DIRNAME_ITEM_TYPES)) + + return cls(path) + + def create_bundle(self, bundle_name): + """ + Creates an empty bundle. + """ + if not validate_name(bundle_name): + raise ValueError(_("'{}' is not a valid bundle name").format(bundle_name)) + + bundle_dir = join(self.bundles_dir, bundle_name) + + # deliberately not using makedirs() so this will raise an + # exception if the directory exists + mkdir(bundle_dir) + mkdir(join(bundle_dir, "files")) + + open(join(bundle_dir, FILENAME_BUNDLE), 'a').close() + + def create_node(self, node_name): + """ + Creates an adhoc node with the given name. + """ + node = Node(node_name) + self.add_node(node) + return node + + def get_group(self, group_name): + try: + return self.group_dict[group_name] + except KeyError: + raise NoSuchGroup(group_name) + + def get_node(self, node_name): + try: + return self.node_dict[node_name] + except KeyError: + raise NoSuchNode(node_name) + + def group_membership_hash(self): + return hash_statedict(sorted(names(self.groups))) + + @property + def groups(self): + return sorted(self.group_dict.values()) + + def _static_groups_for_node(self, node): + for group in self.groups: + if node in group._static_nodes: + yield group + + def hash(self): + return hash_statedict(self.cdict) + + @property + def nodes(self): + return sorted(self.node_dict.values()) + + def nodes_in_all_groups(self, group_names): + """ + Returns a list of nodes where every node is a member of every + group given. + """ + base_group = set(self.get_group(group_names[0]).nodes) + for group_name in group_names[1:]: + if not base_group: + # quit early if we have already eliminated every node + break + base_group.intersection_update(set(self.get_group(group_name).nodes)) + result = list(base_group) + result.sort() + return result + + def nodes_in_any_group(self, group_names): + """ + Returns all nodes that are a member of at least one of the given + groups. + """ + for node in self.nodes: + if node.in_any_group(group_names): + yield node + + def nodes_in_group(self, group_name): + """ + Returns a list of nodes in the given group. + """ + return self.nodes_in_all_groups([group_name]) + + def _metadata_for_node(self, node_name, partial=False): + """ + Returns full or partial metadata for this node. 
+ + Partial metadata may only be requested from inside a metadata + processor. + + If necessary, this method will build complete metadata for this + node and all related nodes. Related meaning nodes that this node + depends on in one of its metadata processors. + """ + try: + return self._node_metadata_complete[node_name] + except KeyError: + pass + + if partial: + self._node_metadata_partial.setdefault(node_name, {}) + return self._node_metadata_partial[node_name] + + with self._node_metadata_lock: + try: + # maybe our metadata got completed while waiting for the lock + return self._node_metadata_complete[node_name] + except KeyError: + pass + + self._node_metadata_partial[node_name] = {} + self._build_node_metadata() + + # now that we have completed all metadata for this + # node and all related nodes, copy that data over + # to the complete dict + self._node_metadata_complete.update(self._node_metadata_partial) + + # reset temporary vars + self._node_metadata_partial = {} + self._node_metadata_static_complete = set() + + return self._node_metadata_complete[node_name] + + def _build_node_metadata(self): + """ + Builds complete metadata for all nodes that appear in + self._node_metadata_partial.keys(). + """ + iterations = {} + while ( + not iterations or max(iterations.values()) <= META_PROC_MAX_ITER + ) and not QUIT_EVENT.is_set(): + # First, get the static metadata out of the way + for node_name in list(self._node_metadata_partial): + if QUIT_EVENT.is_set(): + break + node = self.get_node(node_name) + # check if static metadata for this node is already done + if node_name in self._node_metadata_static_complete: + continue + else: + self._node_metadata_static_complete.add(node_name) + + with io.job(_(" {node} building group metadata...").format(node=node.name)): + group_order = _flatten_group_hierarchy(node.groups) + for group_name in group_order: + self._node_metadata_partial[node.name] = merge_dict( + self._node_metadata_partial[node.name], + self.get_group(group_name).metadata, + ) + + with io.job(_(" {node} merging node metadata...").format(node=node.name)): + self._node_metadata_partial[node.name] = merge_dict( + self._node_metadata_partial[node.name], + node._node_metadata, + ) + + # Now for the interesting part: We run all metadata processors + # in sequence until none of them return changed metadata. 
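+            # A metadata processor receives the node's current metadata
+            # dict and must return a dict, e.g. (hypothetical sketch):
+            #
+            #     def assign_ports(metadata):
+            #         metadata.setdefault('port', 80)
+            #         return metadata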
+            modified = False
+            for node_name in list(self._node_metadata_partial):
+                if QUIT_EVENT.is_set():
+                    break
+                node = self.get_node(node_name)
+                with io.job(_(" {node} running metadata processors...").format(node=node.name)):
+                    for metadata_processor_name, metadata_processor in node.metadata_processors:
+                        iterations.setdefault((node.name, metadata_processor_name), 1)
+                        io.debug(_(
+                            "running metadata processor {metaproc} for node {node}, "
+                            "iteration #{i}"
+                        ).format(
+                            metaproc=metadata_processor_name,
+                            node=node.name,
+                            i=iterations[(node.name, metadata_processor_name)],
+                        ))
+                        processed = metadata_processor(
+                            deepcopy_metadata(self._node_metadata_partial[node.name]),
+                        )
+                        iterations[(node.name, metadata_processor_name)] += 1
+                        if not isinstance(processed, dict):
+                            raise ValueError(_(
+                                "metadata processor {metaproc} for node {node} did not return "
+                                "a dictionary"
+                            ).format(
+                                metaproc=metadata_processor_name,
+                                node=node.name,
+                            ))
+                        if processed != self._node_metadata_partial[node.name]:
+                            io.debug(_(
+                                "metadata processor {metaproc} for node {node} changed metadata, "
+                                "rerunning all metadata processors for this node"
+                            ).format(
+                                metaproc=metadata_processor_name,
+                                node=node.name,
+                            ))
+                            self._node_metadata_partial[node.name] = processed
+                            modified = True
+            if not modified:
+                if self._node_metadata_static_complete != set(self._node_metadata_partial.keys()):
+                    # During metadata processor execution, partial metadata may
+                    # have been requested for nodes we did not previously
+                    # consider. Since partial metadata defaults to just an
+                    # empty dict, we still need to make sure to generate
+                    # static metadata for these new nodes, as that may
+                    # trigger additional runs of metadata processors.
+                    continue
+                else:
+                    break
+
+        for culprit, number_of_iterations in iterations.items():
+            if number_of_iterations >= META_PROC_MAX_ITER:
+                node, metadata_processor = culprit
+                raise BundleError(_(
+                    "Metadata processor '{proc}' stopped after too many iterations "
+                    "({max_iter}) for node '{node}' to prevent an infinite loop. "
+                    "This usually means one of two things: "
+                    "1) You have two metadata processors that keep overwriting each other's "
+                    "data or 2) You have a single metadata processor that keeps changing its own "
+                    "data. "
+                    "To fix this, use `bw --debug metadata {node}` and look for repeated messages "
+                    "indicating that the same metadata processor keeps changing metadata. Then "
+                    "rewrite that metadata processor to eventually stop changing metadata.".format(
+                        max_iter=META_PROC_MAX_ITER,
+                        node=node,
+                        proc=metadata_processor,
+                    ),
+                ))
+
+    def metadata_hash(self):
+        repo_dict = {}
+        for node in self.nodes:
+            repo_dict[node.name] = node.metadata_hash()
+        return hash_statedict(repo_dict)
+
+    def populate_from_path(self, path):
+        if not self.is_repo(path):
+            raise NoSuchRepository(
+                _("'{}' is not a bundlewrap repository").format(path)
+            )
+
+        if path != self.path:
+            self._set_path(path)
+
+        # check requirements.txt
+        try:
+            with open(join(path, FILENAME_REQUIREMENTS)) as f:
+                lines = f.readlines()
+        except:
+            pass
+        else:
+            try:
+                require(lines)
+            except DistributionNotFound as exc:
+                raise MissingRepoDependency(_(
+                    "{x} Python package '{pkg}' is listed in {filename}, but wasn't found. "
+                    "You probably have to install it with `pip install {pkg}`."
+ ).format( + filename=FILENAME_REQUIREMENTS, + pkg=exc.req, + x=red("!"), + )) + except VersionConflict as exc: + raise MissingRepoDependency(_( + "{x} Python package '{required}' is listed in {filename}, " + "but only '{existing}' was found. " + "You probably have to upgrade it with `pip install {required}`." + ).format( + existing=exc.dist, + filename=FILENAME_REQUIREMENTS, + required=exc.req, + x=red("!"), + )) + + self.vault = SecretProxy(self) + + # populate bundles + self.bundle_names = [] + for dir_entry in listdir(self.bundles_dir): + if validate_name(dir_entry): + self.bundle_names.append(dir_entry) + + # populate groups + self.group_dict = {} + for group in groups_from_file(self.groups_file, self.libs, self.path, self.vault): + self.add_group(group) + + # populate items + self.item_classes = list(items_from_path(items.__path__[0])) + for item_class in items_from_path(self.items_dir): + self.item_classes.append(item_class) + + # populate nodes + self.node_dict = {} + for node in nodes_from_file(self.nodes_file, self.libs, self.path, self.vault): + self.add_node(node) + + @utils.cached_property + def revision(self): + return get_rev() + + def _set_path(self, path): + self.path = path + self.bundles_dir = join(self.path, DIRNAME_BUNDLES) + self.data_dir = join(self.path, DIRNAME_DATA) + self.hooks_dir = join(self.path, DIRNAME_HOOKS) + self.items_dir = join(self.path, DIRNAME_ITEM_TYPES) + self.groups_file = join(self.path, FILENAME_GROUPS) + self.libs_dir = join(self.path, DIRNAME_LIBS) + self.nodes_file = join(self.path, FILENAME_NODES) + + self.hooks = HooksProxy(self.hooks_dir) + self.libs = LibsProxy(self.libs_dir) diff --git a/bundlewrap/secrets.py b/bundlewrap/secrets.py new file mode 100644 index 0000000..aa06e50 --- /dev/null +++ b/bundlewrap/secrets.py @@ -0,0 +1,260 @@ +from base64 import b64encode, urlsafe_b64decode +try: + from configparser import SafeConfigParser +except ImportError: # Python 2 + from ConfigParser import SafeConfigParser +import hashlib +import hmac +from os import environ +from os.path import join +from string import ascii_letters, punctuation, digits + +from cryptography.fernet import Fernet + +from .exceptions import FaultUnavailable +from .utils import Fault, get_file_contents +from .utils.text import mark_for_translation as _ +from .utils.ui import io + + +FILENAME_SECRETS = ".secrets.cfg" + + +def generate_initial_secrets_cfg(): + return ( + "# DO NOT COMMIT THIS FILE\n" + "# share it with your team through a secure channel\n\n" + "[generate]\nkey = {}\n\n" + "[encrypt]\nkey = {}\n" + ).format( + SecretProxy.random_key(), + SecretProxy.random_key(), + ) + + +def random(seed): + """ + Provides a way to get repeatable random numbers from the given seed. + Unlike random.seed(), this approach provides consistent results + across platforms. + See also http://stackoverflow.com/a/18992474 + """ + while True: + seed = hashlib.sha512(seed).digest() + for character in seed: + try: + yield ord(character) + except TypeError: # Python 3 + yield character + + +class SecretProxy(object): + @staticmethod + def random_key(): + """ + Provided as a helper to generate new keys from `bw debug`. + """ + return Fernet.generate_key().decode('utf-8') + + def __init__(self, repo): + self.repo = repo + self.keys = self._load_keys() + + def _decrypt(self, cryptotext=None, key='encrypt'): + """ + Decrypts a given encrypted password. 
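+
+        Usually reached through the vault proxy, e.g. (hypothetical
+        cryptotext):
+
+            repo.vault.decrypt("gAAAAA...")  # returns a Fault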
+        """
+        if environ.get("BW_VAULT_DUMMY_MODE", "0") != "0":
+            return "decrypted text"
+        try:
+            key = self.keys[key]
+        except KeyError:
+            raise FaultUnavailable(_(
+                "Key '{key}' not available for decryption of the following cryptotext, "
+                "check your {file}: {cryptotext}"
+            ).format(
+                cryptotext=cryptotext,
+                file=FILENAME_SECRETS,
+                key=key,
+            ))
+
+        return Fernet(key).decrypt(cryptotext.encode('utf-8')).decode('utf-8')
+
+    def _decrypt_file(self, source_path=None, key='encrypt'):
+        """
+        Decrypts the file at source_path (relative to data/) and
+        returns the plaintext as unicode.
+        """
+        if environ.get("BW_VAULT_DUMMY_MODE", "0") != "0":
+            return "decrypted file"
+        try:
+            key = self.keys[key]
+        except KeyError:
+            raise FaultUnavailable(_(
+                "Key '{key}' not available for decryption of the following file, "
+                "check your {file}: {source_path}"
+            ).format(
+                file=FILENAME_SECRETS,
+                key=key,
+                source_path=source_path,
+            ))
+
+        f = Fernet(key)
+        return f.decrypt(get_file_contents(join(self.repo.data_dir, source_path))).decode('utf-8')
+
+    def _decrypt_file_as_base64(self, source_path=None, key='encrypt'):
+        """
+        Decrypts the file at source_path (relative to data/) and
+        returns the plaintext as base64.
+        """
+        if environ.get("BW_VAULT_DUMMY_MODE", "0") != "0":
+            # b64encode() requires bytes, not str
+            return b64encode(b"decrypted file as base64").decode('utf-8')
+        try:
+            key = self.keys[key]
+        except KeyError:
+            raise FaultUnavailable(_(
+                "Key '{key}' not available for decryption of the following file, "
+                "check your {file}: {source_path}"
+            ).format(
+                file=FILENAME_SECRETS,
+                key=key,
+                source_path=source_path,
+            ))
+
+        f = Fernet(key)
+        return b64encode(f.decrypt(get_file_contents(
+            join(self.repo.data_dir, source_path),
+        ))).decode('utf-8')
+
+    def _generate_password(self, identifier=None, key='generate', length=32, symbols=False):
+        """
+        Derives a password from the given identifier and the shared key
+        in the repository.
+
+        This is done by seeding a random generator with an SHA512 HMAC built
+        from the key and the given identifier.
+        One could just use the HMAC digest itself as a password, but the
+        PRNG allows for more control over password length and complexity.
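+
+        Example (hypothetical identifier):
+
+            vault.password_for("node1 postgres")  # same result on every call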
+ """ + if environ.get("BW_VAULT_DUMMY_MODE", "0") != "0": + return "generatedpassword" + try: + key_encoded = self.keys[key] + except KeyError: + raise FaultUnavailable(_( + "Key '{key}' not available to generate password '{password}', check your {file}" + ).format( + file=FILENAME_SECRETS, + key=key, + password=identifier, + )) + + alphabet = ascii_letters + digits + if symbols: + alphabet += punctuation + + h = hmac.new(urlsafe_b64decode(key_encoded), digestmod=hashlib.sha512) + h.update(identifier.encode('utf-8')) + prng = random(h.digest()) + return "".join([alphabet[next(prng) % (len(alphabet) - 1)] for i in range(length)]) + + def _load_keys(self): + config = SafeConfigParser() + secrets_file = join(self.repo.path, FILENAME_SECRETS) + try: + config.read(secrets_file) + except IOError: + io.debug(_("unable to read {}").format(secrets_file)) + return {} + result = {} + for section in config.sections(): + result[section] = config.get(section, 'key').encode('utf-8') + return result + + def decrypt(self, cryptotext, key='encrypt'): + return Fault( + self._decrypt, + cryptotext=cryptotext, + key=key, + ) + + def decrypt_file(self, source_path, key='encrypt'): + return Fault( + self._decrypt_file, + source_path=source_path, + key=key, + ) + + def decrypt_file_as_base64(self, source_path, key='encrypt'): + return Fault( + self._decrypt_file_as_base64, + source_path=source_path, + key=key, + ) + + def encrypt(self, plaintext, key='encrypt'): + """ + Encrypts a given plaintext password and returns a string that can + be fed into decrypt() to get the password back. + """ + try: + key = self.keys[key] + except KeyError: + raise KeyError(_( + "Key '{key}' not available for encryption, check your {file}" + ).format( + file=FILENAME_SECRETS, + key=key, + )) + + return Fernet(key).encrypt(plaintext.encode('utf-8')).decode('utf-8') + + def encrypt_file(self, source_path, target_path, key='encrypt'): + """ + Encrypts the file at source_path and places the result at + target_path. The source_path is relative to CWD or absolute, + while target_path is relative to data/. + """ + try: + key = self.keys[key] + except KeyError: + raise KeyError(_( + "Key '{key}' not available for file encryption, check your {file}" + ).format( + file=FILENAME_SECRETS, + key=key, + )) + + plaintext = get_file_contents(source_path) + fernet = Fernet(key) + target_file = join(self.repo.data_dir, target_path) + with open(target_file, 'wb') as f: + f.write(fernet.encrypt(plaintext)) + return target_file + + def _format(self, format_str=None, faults=None): + return format_str.format(*[fault.value for fault in faults]) + + def format(self, format_str, *faults): + """ + Returns a Fault for a string formatted with the given Faults, + e.g.: + + vault.format("password: {}", vault.password_for("something")) + + DEPRECATED, remove in 3.0, use Fault.format_into instead. 
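+
+        The equivalent with Fault.format_into looks like this:
+
+            vault.password_for("something").format_into("password: {}")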
+        """
+        return Fault(
+            self._format,
+            format_str=format_str,
+            faults=faults,
+        )
+
+    def password_for(self, identifier, key='generate', length=32, symbols=False):
+        return Fault(
+            self._generate_password,
+            identifier=identifier,
+            key=key,
+            length=length,
+            symbols=symbols,
+        )
diff --git a/bundlewrap/utils/__init__.py b/bundlewrap/utils/__init__.py
new file mode 100644
index 0000000..d5c950e
--- /dev/null
+++ b/bundlewrap/utils/__init__.py
@@ -0,0 +1,388 @@
+# -*- coding: utf-8 -*-
+from __future__ import unicode_literals
+
+from codecs import getwriter
+from contextlib import contextmanager
+import hashlib
+from inspect import isgenerator
+from os import chmod, close, makedirs, remove
+from os.path import dirname, exists
+import stat
+from sys import stderr, stdout
+from tempfile import mkstemp
+
+from requests import get
+
+from ..exceptions import DontCache, FaultUnavailable
+
+__GETATTR_CACHE = {}
+__GETATTR_NODEFAULT = "very_unlikely_default_value"
+
+
+MODE644 = stat.S_IRUSR | stat.S_IWUSR | stat.S_IRGRP | stat.S_IROTH
+
+try:
+    STDERR_WRITER = getwriter('utf-8')(stderr.buffer)
+    STDOUT_WRITER = getwriter('utf-8')(stdout.buffer)
+except AttributeError:  # Python 2
+    STDERR_WRITER = getwriter('utf-8')(stderr)
+    STDOUT_WRITER = getwriter('utf-8')(stdout)
+
+
+def cached_property(prop):
+    """
+    A replacement for the property decorator that will only compute the
+    attribute's value on the first call and serve a cached copy from
+    then on.
+    """
+    def cache_wrapper(self):
+        if not hasattr(self, "_cache"):
+            self._cache = {}
+        if prop.__name__ not in self._cache:
+            try:
+                return_value = prop(self)
+                if isgenerator(return_value):
+                    return_value = tuple(return_value)
+            except DontCache as exc:
+                return exc.obj
+            else:
+                self._cache[prop.__name__] = return_value
+        return self._cache[prop.__name__]
+    return property(cache_wrapper)
+
+
+def download(url, path):
+    if not exists(dirname(path)):
+        makedirs(dirname(path))
+    if exists(path):
+        chmod(path, MODE644)
+    with open(path, 'wb') as f:
+        r = get(url, stream=True)
+        r.raise_for_status()
+        for block in r.iter_content(1024):
+            if not block:
+                break
+            else:
+                f.write(block)
+
+
+class Fault(object):
+    """
+    A proxy object for lazy access to things that may not really be
+    available at the time of use.
+
+    This lets us gracefully skip items that require information that's
+    currently not available.
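+
+    A rough usage sketch (hypothetical callback):
+
+        f = Fault(lambda: fetch_secret())
+        if f.is_available:
+            print(f.value)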
+ """ + def __init__(self, callback, **kwargs): + self._available = None + self._exc = None + self._value = None + self.callback = callback + self.kwargs = kwargs + + def _resolve(self): + if self._available is None: + try: + self._value = self.callback(**self.kwargs) + self._available = True + except FaultUnavailable as exc: + self._available = False + self._exc = exc + + def __add__(self, other): + if isinstance(other, Fault): + def callback(): + return self.value + other.value + return Fault(callback) + else: + def callback(): + return self.value + other + return Fault(callback) + + def __len__(self): + return len(self.value) + + def __str__(self): + return str(self.value) + + def format_into(self, format_string): + def callback(): + return format_string.format(self.value) + return Fault(callback) + + @property + def is_available(self): + self._resolve() + return self._available + + @property + def value(self): + self._resolve() + if not self._available: + raise self._exc + return self._value + + +def _make_method_callback(method_name): + def method(self, *args, **kwargs): + def callback(): + return getattr(self.value, method_name)(*args, **kwargs) + return Fault(callback) + return method + + +for method_name in ( + 'format', + 'lower', + 'lstrip', + 'replace', + 'rstrip', + 'strip', + 'upper', + 'zfill', +): + setattr(Fault, method_name, _make_method_callback(method_name)) + + +def get_file_contents(path): + with open(path, 'rb') as f: + content = f.read() + return content + + +def get_all_attrs_from_file(path, cache=True, base_env=None): + """ + Reads all 'attributes' (if it were a module) from a source file. + """ + if base_env is None: + base_env = {} + if base_env: + # do not allow caching when passing in a base env because that + # breaks repeated calls with different base envs for the same + # file + cache = False + if path not in __GETATTR_CACHE or not cache: + source = get_file_contents(path) + env = base_env.copy() + try: + exec(source, env) + except: + from .ui import io + io.stderr("Exception while executing {}".format(path)) + raise + if cache: + __GETATTR_CACHE[path] = env + else: + env = __GETATTR_CACHE[path] + return env + + +def getattr_from_file(path, attrname, base_env=None, cache=True, default=__GETATTR_NODEFAULT): + """ + Reads a specific 'attribute' (if it were a module) from a source + file. 
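+
+    Example (hypothetical file contents):
+
+        # given a config.py containing `port = 443`:
+        getattr_from_file("config.py", 'port')  # returns 443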
+    """
+    env = get_all_attrs_from_file(path, base_env=base_env, cache=cache)
+    if default == __GETATTR_NODEFAULT:
+        return env[attrname]
+    else:
+        return env.get(attrname, default)
+
+
+def graph_for_items(
+    title,
+    items,
+    cluster=True,
+    concurrency=True,
+    static=True,
+    regular=True,
+    reverse=True,
+    auto=True,
+):
+    items = sorted(items)
+
+    yield "digraph bundlewrap"
+    yield "{"
+
+    # Print subgraphs *below* each other
+    yield "rankdir = LR"
+
+    # Global attributes
+    yield ("graph [color=\"#303030\"; "
+           "fontname=Helvetica; "
+           "penwidth=2; "
+           "shape=box; "
+           "style=\"rounded,dashed\"]")
+    yield ("node [color=\"#303030\"; "
+           "fillcolor=\"#303030\"; "
+           "fontcolor=white; "
+           "fontname=Helvetica; "
+           "shape=box; "
+           "style=\"rounded,filled\"]")
+    yield "edge [arrowhead=vee]"
+
+    item_ids = []
+    for item in items:
+        item_ids.append(item.id)
+
+    if cluster:
+        # Define which items belong to which bundle
+        bundle_number = 0
+        bundles_seen = []
+        for item in items:
+            if item.bundle is None or item.bundle.name in bundles_seen:
+                continue
+            yield "subgraph cluster_{}".format(bundle_number)
+            bundle_number += 1
+            yield "{"
+            yield "label = \"{}\"".format(item.bundle.name)
+            yield "\"bundle:{}\"".format(item.bundle.name)
+            for bitem in item.bundle.items:
+                if bitem.id in item_ids:
+                    yield "\"{}\"".format(bitem.id)
+            yield "}"
+            bundles_seen.append(item.bundle.name)
+
+    # Define dependencies between items
+    for item in items:
+        if regular:
+            for dep in item.needs:
+                if dep in item_ids:
+                    yield "\"{}\" -> \"{}\" [color=\"#C24948\",penwidth=2]".format(item.id, dep)
+
+        if auto:
+            for dep in sorted(item._deps):
+                if dep in item._concurrency_deps:
+                    if concurrency:
+                        yield "\"{}\" -> \"{}\" [color=\"#714D99\",penwidth=2]".format(item.id, dep)
+                elif dep in item._reverse_deps:
+                    if reverse:
+                        yield "\"{}\" -> \"{}\" [color=\"#D18C57\",penwidth=2]".format(item.id, dep)
+                elif dep not in item.needs:
+                    if dep in item_ids:
+                        yield "\"{}\" -> \"{}\" [color=\"#6BB753\",penwidth=2]".format(item.id, dep)
+
+    # Global graph title
+    yield "fontsize = 28"
+    yield "label = \"{}\"".format(title)
+    yield "labelloc = \"t\""
+    yield "}"
+
+
+def hash_local_file(path):
+    """
+    Returns the SHA1 hash of a file on the local machine.
+    """
+    return sha1(get_file_contents(path))
+
+
+class _Atomic(object):
+    """
+    This and the following related classes are used to mark objects as
+    non-mergeable for the purposes of merge_dict().
+    """
+    pass
+
+
+class _AtomicDict(dict, _Atomic):
+    pass
+
+
+class _AtomicList(list, _Atomic):
+    pass
+
+
+class _AtomicSet(set, _Atomic):
+    pass
+
+
+class _AtomicTuple(tuple, _Atomic):
+    pass
+
+
+ATOMIC_TYPES = {
+    dict: _AtomicDict,
+    list: _AtomicList,
+    set: _AtomicSet,
+    tuple: _AtomicTuple,
+}
+
+
+def merge_dict(base, update):
+    """
+    Recursively merges the update dict into the base dict.
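+
+    Lists are extended, scalars overwritten by the update value:
+
+        merge_dict({'a': [1], 'b': 1}, {'a': [2], 'b': 2})
+        # returns {'a': [1, 2], 'b': 2}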
+    """
+    if not isinstance(update, dict):
+        return update
+
+    merged = base.copy()
+
+    for key, value in update.items():
+        merge = key in base and not isinstance(value, _Atomic)
+        if merge and isinstance(base[key], dict):
+            merged[key] = merge_dict(base[key], value)
+        elif (
+            merge and
+            isinstance(base[key], list) and
+            (
+                isinstance(value, list) or
+                isinstance(value, set) or
+                isinstance(value, tuple)
+            )
+        ):
+            extended = base[key][:]
+            extended.extend(value)
+            merged[key] = extended
+        elif (
+            merge and
+            isinstance(base[key], tuple) and
+            (
+                isinstance(value, list) or
+                isinstance(value, set) or
+                isinstance(value, tuple)
+            )
+        ):
+            merged[key] = base[key] + tuple(value)
+        elif (
+            merge and
+            isinstance(base[key], set) and
+            (
+                isinstance(value, list) or
+                isinstance(value, set) or
+                isinstance(value, tuple)
+            )
+        ):
+            merged[key] = base[key].union(set(value))
+        else:
+            merged[key] = value
+
+    return merged
+
+
+def names(obj_list):
+    """
+    Iterator over the name properties of a given list of objects.
+
+    repo.nodes will give you node objects
+    names(repo.nodes) will give you node names
+    """
+    for obj in obj_list:
+        yield obj.name
+
+
+def sha1(data):
+    """
+    Returns hex SHA1 hash for input.
+    """
+    hasher = hashlib.sha1()
+    hasher.update(data)
+    return hasher.hexdigest()
+
+
+@contextmanager
+def tempfile():
+    handle, path = mkstemp()
+    close(handle)
+    yield path
+    remove(path)
diff --git a/bundlewrap/utils/cmdline.py b/bundlewrap/utils/cmdline.py
new file mode 100644
index 0000000..6a9d320
--- /dev/null
+++ b/bundlewrap/utils/cmdline.py
@@ -0,0 +1,90 @@
+# -*- coding: utf-8 -*-
+from __future__ import unicode_literals
+from sys import exit
+
+from ..exceptions import NoSuchItem, NoSuchNode, NoSuchGroup
+from . import names
+from .text import mark_for_translation as _, red
+from .ui import io
+
+
+def get_group(repo, group_name):
+    try:
+        return repo.get_group(group_name)
+    except NoSuchGroup:
+        io.stderr(_("{x} No such group: {group}").format(
+            group=group_name,
+            x=red("!!!"),
+        ))
+        exit(1)
+
+
+def get_item(node, item_id):
+    try:
+        return node.get_item(item_id)
+    except NoSuchItem:
+        io.stderr(_("{x} No such item on node '{node}': {item}").format(
+            item=item_id,
+            node=node.name,
+            x=red("!!!"),
+        ))
+        exit(1)
+
+
+def get_node(repo, node_name, adhoc_nodes=False):
+    try:
+        return repo.get_node(node_name)
+    except NoSuchNode:
+        if adhoc_nodes:
+            return repo.create_node(node_name)
+        else:
+            io.stderr(_("{x} No such node: {node}").format(
+                node=node_name,
+                x=red("!!!"),
+            ))
+            exit(1)
+
+
+def get_target_nodes(repo, target_string, adhoc_nodes=False):
+    """
+    Returns a list of nodes. The input is a string like this:
+
+    "node1,node2,group3,bundle:foo"
+
+    Meaning: Targets are 'node1', 'node2', all nodes in 'group3',
+    and all nodes with the bundle 'foo'.
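+
+    The prefixes '!bundle:' and '!group:' select all nodes *without*
+    the given bundle or group. Example (hypothetical names):
+
+        get_target_nodes(repo, "node1,group3,!bundle:foo")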
+ """ + targets = [] + for name in target_string.split(","): + name = name.strip() + if name.startswith("bundle:"): + bundle_name = name.split(":", 1)[1] + for node in repo.nodes: + if bundle_name in names(node.bundles): + targets.append(node) + elif name.startswith("!bundle:"): + bundle_name = name.split(":", 1)[1] + for node in repo.nodes: + if bundle_name not in names(node.bundles): + targets.append(node) + elif name.startswith("!group:"): + group_name = name.split(":", 1)[1] + for node in repo.nodes: + if group_name not in names(node.groups): + targets.append(node) + else: + try: + targets.append(repo.get_node(name)) + except NoSuchNode: + try: + targets += list(repo.get_group(name).nodes) + except NoSuchGroup: + if adhoc_nodes: + targets.append(repo.create_node(name)) + else: + io.stderr(_("{x} No such node or group: {name}").format( + x=red("!!!"), + name=name, + )) + exit(1) + return sorted(set(targets)) diff --git a/bundlewrap/utils/remote.py b/bundlewrap/utils/remote.py new file mode 100644 index 0000000..3b7c740 --- /dev/null +++ b/bundlewrap/utils/remote.py @@ -0,0 +1,150 @@ +# -*- coding: utf-8 -*- +from __future__ import unicode_literals + +from pipes import quote + +from . import cached_property +from .text import force_text, mark_for_translation as _ +from .ui import io + + +def _parse_file_output(file_output): + if file_output.startswith("cannot open "): + # required for Mac OS X, OpenBSD, and CentOS/RHEL + return ('nonexistent', "") + elif file_output.endswith("directory"): + return ('directory', file_output) + elif file_output.startswith("block special") or \ + file_output.startswith("character special"): + return ('other', file_output) + elif file_output.startswith("symbolic link to ") or \ + file_output.startswith("broken symbolic link to "): + return ('symlink', file_output) + else: + return ('file', file_output) + + +def get_path_type(node, path): + """ + Returns (TYPE, DESC) where TYPE is one of: + + 'directory', 'file', 'nonexistent', 'other', 'symlink' + + and DESC is the output of the 'file' command line utility. + """ + result = node.run("file -bh -- {}".format(quote(path)), may_fail=True) + file_output = force_text(result.stdout.strip()) + if ( + result.return_code != 0 or + "No such file or directory" in file_output # thanks CentOS + ): + return ('nonexistent', "") + + return _parse_file_output(file_output) + + +def stat(node, path): + if node.os in node.OS_FAMILY_BSD: + result = node.run("stat -f '%Su:%Sg:%p:%z' -- {}".format(quote(path))) + else: + result = node.run("stat -c '%U:%G:%a:%s' -- {}".format(quote(path))) + owner, group, mode, size = force_text(result.stdout).split(":") + mode = mode[-4:].zfill(4) # cut off BSD file type + file_stat = { + 'owner': owner, + 'group': group, + 'mode': mode, + 'size': int(size), + } + io.debug(_("stat for '{path}' on {node}: {result}".format( + node=node.name, + path=path, + result=repr(file_stat), + ))) + return file_stat + + +class PathInfo(object): + """ + Serves as a proxy to get_path_type. 
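+
+    A rough usage sketch (hypothetical path):
+
+        info = PathInfo(node, "/etc/motd")
+        if info.is_text_file:
+            print(info.sha1)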
+    """
+    def __init__(self, node, path):
+        self.node = node
+        self.path = path
+        self.path_type, self.desc = get_path_type(node, path)
+        self.stat = stat(node, path) if self.path_type != 'nonexistent' else {}
+
+    def __repr__(self):
+        return "<PathInfo for {}:{}>".format(self.node.name, quote(self.path))
+
+    @property
+    def exists(self):
+        return self.path_type != 'nonexistent'
+
+    @property
+    def group(self):
+        return self.stat['group']
+
+    @property
+    def is_binary_file(self):
+        return self.is_file and not self.is_text_file
+
+    @property
+    def is_directory(self):
+        return self.path_type == 'directory'
+
+    @property
+    def is_file(self):
+        return self.path_type == 'file'
+
+    @property
+    def is_symlink(self):
+        return self.path_type == 'symlink'
+
+    @property
+    def is_text_file(self):
+        return self.is_file and (
+            "text" in self.desc or
+            self.desc in (
+                "empty",
+                "OpenSSH RSA public key",
+                "OpenSSH DSA public key",
+            )
+        )
+
+    @property
+    def mode(self):
+        return self.stat['mode']
+
+    @property
+    def owner(self):
+        return self.stat['owner']
+
+    @cached_property
+    def sha1(self):
+        if self.node.os == 'macos':
+            result = self.node.run("shasum -a 1 -- {}".format(quote(self.path)))
+        elif self.node.os in self.node.OS_FAMILY_BSD:
+            result = self.node.run("sha1 -q -- {}".format(quote(self.path)))
+        else:
+            result = self.node.run("sha1sum -- {}".format(quote(self.path)))
+        return force_text(result.stdout).strip().split()[0]
+
+    @property
+    def size(self):
+        return self.stat['size']
+
+    @property
+    def symlink_target(self):
+        if not self.is_symlink:
+            raise ValueError("{} is not a symlink".format(quote(self.path)))
+        if self.desc.startswith("symbolic link to `"):
+            return self.desc[18:-1]
+        elif self.desc.startswith("broken symbolic link to `"):
+            return self.desc[25:-1]
+        elif self.desc.startswith("symbolic link to "):
+            return self.desc[17:]
+        elif self.desc.startswith("broken symbolic link to "):
+            return self.desc[24:]
+        else:
+            raise ValueError("unable to find target for {}".format(quote(self.path)))
diff --git a/bundlewrap/utils/scm.py b/bundlewrap/utils/scm.py
new file mode 100644
index 0000000..140e692
--- /dev/null
+++ b/bundlewrap/utils/scm.py
@@ -0,0 +1,45 @@
+# -*- coding: utf-8 -*-
+from __future__ import unicode_literals
+
+from subprocess import CalledProcessError, check_output, STDOUT
+
+
+def get_bzr_rev():
+    try:
+        # check_output() returns bytes, decode to text before use
+        return check_output(
+            "bzr revno",
+            shell=True,
+            stderr=STDOUT,
+        ).decode('utf-8').strip()
+    except CalledProcessError:
+        return None
+
+
+def get_git_rev():
+    try:
+        return check_output(
+            "git rev-parse HEAD",
+            shell=True,
+            stderr=STDOUT,
+        ).decode('utf-8').strip()
+    except CalledProcessError:
+        return None
+
+
+def get_hg_rev():
+    try:
+        return check_output(
+            "hg --debug id -i",
+            shell=True,
+            stderr=STDOUT,
+        ).decode('utf-8').strip().rstrip("+")
+    except CalledProcessError:
+        return None
+
+
+def get_rev():
+    for scm_rev in (get_git_rev, get_hg_rev, get_bzr_rev):
+        rev = scm_rev()
+        if rev is not None:
+            return rev
+    return None
diff --git a/bundlewrap/utils/statedict.py b/bundlewrap/utils/statedict.py
new file mode 100644
index 0000000..7d08c75
--- /dev/null
+++ b/bundlewrap/utils/statedict.py
@@ -0,0 +1,193 @@
+# -*- coding: utf-8 -*-
+from __future__ import unicode_literals
+
+from difflib import unified_diff
+from hashlib import sha1
+from json import dumps, JSONEncoder
+
+from . import Fault
+from .text import bold, green, red
+from .text import force_text, mark_for_translation as _
+
+
+try:
+    text_type = unicode
+    byte_type = str
+except NameError:
+    text_type = str
+    byte_type = bytes
+
+DIFF_MAX_INLINE_LENGTH = 36
+DIFF_MAX_LINE_LENGTH = 1024
+
+
+def diff_keys(sdict1, sdict2):
+    """
+    Compares the keys of two statedicts and returns the keys with
+    differing values.
+
+    Note that only keys in the first statedict are considered. If a key
+    only exists in the second one, it is disregarded.
+    """
+    if sdict1 is None:
+        return []
+    if sdict2 is None:
+        return sdict1.keys()
+    differing_keys = []
+    for key, value in sdict1.items():
+        if value != sdict2[key]:
+            differing_keys.append(key)
+    return differing_keys
+
+
+def diff_value_bool(title, value1, value2):
+    return diff_value_text(
+        title,
+        "yes" if value1 else "no",
+        "yes" if value2 else "no",
+    )
+
+
+def diff_value_int(title, value1, value2):
+    return diff_value_text(
+        title,
+        "{}".format(value1),
+        "{}".format(value2),
+    )
+
+
+def diff_value_list(title, value1, value2):
+    if isinstance(value1, set):
+        value1 = sorted(value1)
+        value2 = sorted(value2)
+    elif isinstance(value1, tuple):
+        value1 = list(value1)
+        value2 = list(value2)
+    # make sure that *if* we have lines, the last one will also end with
+    # a newline
+    if value1:
+        value1.append("")
+    if value2:
+        value2.append("")
+    return diff_value_text(
+        title,
+        "\n".join([str(i) for i in value1]),
+        "\n".join([str(i) for i in value2]),
+    )
+
+
+def diff_value_text(title, value1, value2):
+    max_length = max(len(value1), len(value2))
+    value1, value2 = force_text(value1), force_text(value2)
+    if (
+        "\n" not in value1 and
+        "\n" not in value2
+    ):
+        if max_length < DIFF_MAX_INLINE_LENGTH:
+            return "{} {} → {}".format(
+                bold(title),
+                red(value1),
+                green(value2),
+            )
+        elif max_length < DIFF_MAX_LINE_LENGTH:
+            return "{} {}\n{}→ {}".format(
+                bold(title),
+                red(value1),
+                " " * (len(title) - 1),
+                green(value2),
+            )
+    output = bold(title) + "\n"
+    for line in unified_diff(
+        value1.splitlines(True),
+        value2.splitlines(True),
+        fromfile=_("<before>"),
+        tofile=_("<after>"),
+    ):
+        suffix = ""
+        if len(line) > DIFF_MAX_LINE_LENGTH:
+            suffix += _(" (line truncated after {} characters)").format(DIFF_MAX_LINE_LENGTH)
+        if not line.endswith("\n"):
+            suffix += _(" (no newline at end of file)")
+        line = line[:DIFF_MAX_LINE_LENGTH].rstrip("\n")
+        if line.startswith("+"):
+            line = green(line)
+        elif line.startswith("-"):
+            line = red(line)
+        output += line + suffix + "\n"
+    return output
+
+
+TYPE_DIFFS = {
+    bool: diff_value_bool,
+    byte_type: diff_value_text,
+    float: diff_value_int,
+    int: diff_value_int,
+    list: diff_value_list,
+    set: diff_value_list,
+    text_type: diff_value_text,
+    tuple: diff_value_list,
+}
+
+
+def diff_value(title, value1, value2):
+    value_type = type(value1)
+    assert value_type == type(value2)
+    diff_func = TYPE_DIFFS[value_type]
+    return diff_func(title, value1, value2)
+
+
+class FaultResolvingJSONEncoder(JSONEncoder):
+    def default(self, obj):
+        if isinstance(obj, Fault):
+            return obj.value
+        else:
+            return JSONEncoder.default(self, obj)
+
+
+def hash_statedict(sdict):
+    """
+    Returns a canonical SHA1 hash to describe this dict.
+    """
+    return sha1(statedict_to_json(sdict).encode('utf-8')).hexdigest()
+
+
+def statedict_to_json(sdict, pretty=False):
+    """
+    Returns a canonical JSON representation of the given statedict.
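+
+    Example:
+
+        statedict_to_json({'owner': "root", 'mode': "0644"})
+        # returns '{"mode": "0644", "owner": "root"}'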
+ """ + if sdict is None: + return "" + else: + return dumps( + sdict, + cls=FaultResolvingJSONEncoder, + indent=4 if pretty else None, + sort_keys=True, + ) + + +def validate_statedict(sdict): + """ + Raises ValueError if the given statedict is invalid. + """ + if sdict is None: + return + for key, value in sdict.items(): + if not isinstance(force_text(key), text_type): + raise ValueError(_("non-text statedict key: {}").format(key)) + + if type(value) not in TYPE_DIFFS and value is not None: + raise ValueError( + _("invalid statedict value for key '{k}': {v}").format(k=key, v=value) + ) + + if type(value) in (list, tuple): + for index, element in enumerate(value): + if type(element) not in TYPE_DIFFS and element is not None: + raise ValueError(_( + "invalid element #{i} in statedict key '{k}': {e}" + ).format( + e=element, + i=index, + k=key, + )) diff --git a/bundlewrap/utils/testing.py b/bundlewrap/utils/testing.py new file mode 100644 index 0000000..b7ea819 --- /dev/null +++ b/bundlewrap/utils/testing.py @@ -0,0 +1,54 @@ +import platform +from subprocess import Popen, PIPE + +from ..bundle import FILENAME_BUNDLE +from ..secrets import FILENAME_SECRETS + + +HOST_OS = { + "Darwin": 'macos', + "Linux": 'linux', +} + + +def host_os(): + return HOST_OS[platform.system()] + + +def make_repo(tmpdir, bundles=None, groups=None, nodes=None): + bundles = {} if bundles is None else bundles + groups = {} if groups is None else groups + nodes = {} if nodes is None else nodes + + bundles_dir = tmpdir.mkdir("bundles") + for bundle, items in bundles.items(): + bundle_dir = bundles_dir.mkdir(bundle) + bundle_dir.mkdir("files") + bundlepy = bundle_dir.join(FILENAME_BUNDLE) + bundle_content = "" + for itemtype, itemconfig in items.items(): + bundle_content += "{} = {}\n".format(itemtype, repr(itemconfig)) + bundlepy.write(bundle_content) + + tmpdir.mkdir("data") + tmpdir.mkdir("hooks") + + groupspy = tmpdir.join("groups.py") + groupspy.write("groups = {}\n".format(repr(groups))) + + nodespy = tmpdir.join("nodes.py") + nodespy.write("nodes = {}\n".format(repr(nodes))) + + secrets = tmpdir.join(FILENAME_SECRETS) + secrets.write("[generate]\nkey = {}\n\n[encrypt]\nkey = {}\n".format( + "Fl53iG1czBcaAPOKhSiJE7RjFU9nIAGkiKDy0k_LoTc=", + "DbYiUu5VMfrdeSiKYiAH4rDOAUISipvLSBJI-T0SpeY=", + )) + + +def run(command, path=None): + process = Popen(command, cwd=path, shell=True, stderr=PIPE, stdout=PIPE) + stdout, stderr = process.communicate() + print(stdout.decode('utf-8')) + print(stderr.decode('utf-8')) + return (stdout, stderr, process.returncode) diff --git a/bundlewrap/utils/text.py b/bundlewrap/utils/text.py new file mode 100644 index 0000000..db77cea --- /dev/null +++ b/bundlewrap/utils/text.py @@ -0,0 +1,170 @@ +# -*- coding: utf-8 -*- +from __future__ import unicode_literals + +from io import BytesIO +from os import environ +from os.path import normpath +from random import choice +import re +from string import digits, ascii_letters + +from . 
import Fault, STDERR_WRITER + + +ANSI_ESCAPE = re.compile(r'\x1b[^m]*m') +VALID_NAME_CHARS = digits + ascii_letters + "-_.+" + + +def ansi_wrapper(colorizer): + if environ.get("BW_COLORS", "1") != "0": + return colorizer + else: + return lambda s, **kwargs: s + + +@ansi_wrapper +def blue(text): + return "\033[34m{}\033[0m".format(text) + + +@ansi_wrapper +def bold(text): + return "\033[1m{}\033[0m".format(text) + + +@ansi_wrapper +def cyan(text): + return "\033[36m{}\033[0m".format(text) + + +@ansi_wrapper +def inverse(text): + return "\033[0m\033[7m{}\033[0m".format(text) + + +@ansi_wrapper +def green(text): + return "\033[32m{}\033[0m".format(text) + + +@ansi_wrapper +def red(text): + return "\033[31m{}\033[0m".format(text) + + +@ansi_wrapper +def yellow(text): + return "\033[33m{}\033[0m".format(text) + + +def error_summary(errors): + if not errors: + return + + if len(errors) == 1: + STDERR_WRITER.write(_("\n{x} There was an error, repeated below.\n\n").format( + x=red("!!!"), + )) + STDERR_WRITER.flush() + else: + STDERR_WRITER.write(_("\n{x} There were {count} errors, repeated below.\n\n").format( + count=len(errors), + x=red("!!!"), + )) + STDERR_WRITER.flush() + + for e in errors: + STDERR_WRITER.write(e) + STDERR_WRITER.write("\n") + STDERR_WRITER.flush() + + +def force_text(data): + """ + Try to return a text aka unicode object from the given data. + Also has Python 2/3 compatibility baked in. Oh the humanity. + """ + if isinstance(data, bytes): + return data.decode('utf-8', 'replace') + elif isinstance(data, Fault): + return data.value + return data + + +def is_subdirectory(parent, child): + """ + Returns True if the given child is a subdirectory of the parent. + """ + parent = normpath(parent) + child = normpath(child) + + if not parent.startswith("/") or not child.startswith("/"): + raise ValueError(_("directory paths must be absolute")) + + if parent == child: + return False + + if parent == "/": + return True + + return child.startswith(parent + "/") + + +def mark_for_translation(s): + return s +_ = mark_for_translation + + +def randstr(length=24): + """ + Returns a random alphanumeric string of the given length. + """ + return ''.join(choice(ascii_letters + digits) for c in range(length)) + + +def validate_name(name): + """ + Checks whether the given string is a valid name for a node, group, + or bundle. 
+ """ + try: + for char in name: + assert char in VALID_NAME_CHARS + assert not name.startswith(".") + except AssertionError: + return False + return True + + +def wrap_question(title, body, question, prefix=""): + output = ("{0}\n" + "{0} ╭─ {1}\n" + "{0} │\n".format(prefix, title)) + for line in body.splitlines(): + output += "{0} │ {1}\n".format(prefix, line) + output += ("{0} │\n" + "{0} ╰─ ".format(prefix) + question) + return output + + +class LineBuffer(object): + def __init__(self, target): + self.buffer = b"" + self.record = BytesIO() + self.target = target if target else lambda s: None + + def close(self): + self.flush() + if self.buffer: + self.record.write(self.buffer) + self.target(self.buffer) + + def flush(self): + while b"\n" in self.buffer: + chunk, self.buffer = self.buffer.split(b"\n", 1) + self.record.write(chunk + b"\n") + self.target(chunk + b"\n") + + def write(self, msg): + self.buffer += msg + self.flush() diff --git a/bundlewrap/utils/time.py b/bundlewrap/utils/time.py new file mode 100644 index 0000000..2cc4827 --- /dev/null +++ b/bundlewrap/utils/time.py @@ -0,0 +1,49 @@ +from datetime import datetime, timedelta + +from .text import mark_for_translation as _ + + +def format_duration(duration): + """ + Takes a timedelta and returns something like "1d 5h 4m 3s". + """ + components = [] + if duration.days > 0: + components.append(_("{}d").format(duration.days)) + seconds = duration.seconds + if seconds >= 3600: + hours = int(seconds / 3600) + seconds -= hours * 3600 + components.append(_("{}h").format(hours)) + if seconds >= 60: + minutes = int(seconds / 60) + seconds -= minutes * 60 + components.append(_("{}m").format(minutes)) + if seconds > 0 or not components: + components.append(_("{}s").format(seconds)) + return " ".join(components) + + +def format_timestamp(timestamp): + return datetime.fromtimestamp(timestamp).strftime("%Y-%m-%d %H:%M:%S") + + +def parse_duration(duration): + """ + Parses a string like "1d 5h 4m 3s" into a timedelta. + """ + days = 0 + seconds = 0 + for component in duration.strip().split(" "): + component = component.strip() + if component[-1] == "d": + days += int(component[:-1]) + elif component[-1] == "h": + seconds += int(component[:-1]) * 3600 + elif component[-1] == "m": + seconds += int(component[:-1]) * 60 + elif component[-1] == "s": + seconds += int(component[:-1]) + else: + raise ValueError(_("{} is not a valid duration string").format(repr(duration))) + return timedelta(days=days, seconds=seconds) diff --git a/bundlewrap/utils/ui.py b/bundlewrap/utils/ui.py new file mode 100644 index 0000000..31fe7a2 --- /dev/null +++ b/bundlewrap/utils/ui.py @@ -0,0 +1,289 @@ +from contextlib import contextmanager +from datetime import datetime +from errno import EPIPE +import fcntl +from functools import wraps +from os import _exit, environ, getpid, kill +from os.path import join +from select import select +from signal import signal, SIG_DFL, SIGINT, SIGTERM +import struct +import sys +import termios +from threading import Event, Lock, Thread + +from . 
import STDERR_WRITER, STDOUT_WRITER +from .text import ANSI_ESCAPE, blue, bold, inverse, mark_for_translation as _ + +QUIT_EVENT = Event() +SHUTDOWN_EVENT_HARD = Event() +SHUTDOWN_EVENT_SOFT = Event() +TTY = STDOUT_WRITER.isatty() + + +if sys.version_info >= (3, 0): + broken_pipe_exception = BrokenPipeError +else: + broken_pipe_exception = IOError + + +def add_debug_indicator(f): + @wraps(f) + def wrapped(self, msg, **kwargs): + return f(self, "[DEBUG] " + msg, **kwargs) + return wrapped + + +def add_debug_timestamp(f): + @wraps(f) + def wrapped(self, msg, **kwargs): + if self.debug_mode: + msg = datetime.now().strftime("[%Y-%m-%d %H:%M:%S.%f] ") + msg + return f(self, msg, **kwargs) + return wrapped + + +def capture_for_debug_logfile(f): + @wraps(f) + def wrapped(self, msg, **kwargs): + if self.debug_log_file: + self.debug_log_file.write( + datetime.now().strftime("[%Y-%m-%d %H:%M:%S.%f] ") + + ANSI_ESCAPE.sub("", msg).rstrip("\n") + "\n" + ) + return f(self, msg, **kwargs) + return wrapped + + +def clear_formatting(f): + """ + Makes sure formatting from cut-off lines can't bleed into next one + """ + @wraps(f) + def wrapped(self, msg, **kwargs): + if TTY and environ.get("BW_COLORS", "1") != "0": + msg = "\033[0m" + msg + return f(self, msg, **kwargs) + return wrapped + + +def sigint_handler(*args, **kwargs): + """ + This handler is kept short since it interrupts execution of the + main thread. It's safer to handle these events in their own thread + because the main thread might be holding the IO lock while it is + interrupted. + """ + if not SHUTDOWN_EVENT_SOFT.is_set(): + SHUTDOWN_EVENT_SOFT.set() + else: + SHUTDOWN_EVENT_HARD.set() + + +def term_width(): + if not TTY: + return 0 + + fd = sys.stdout.fileno() + _, width = struct.unpack('hh', fcntl.ioctl(fd, termios.TIOCGWINSZ, 'aaaa')) + return width + + +def write_to_stream(stream, msg): + try: + if TTY: + stream.write(msg) + else: + stream.write(ANSI_ESCAPE.sub("", msg)) + stream.flush() + except broken_pipe_exception as e: + if broken_pipe_exception == IOError: + if e.errno != EPIPE: + raise + + +class DrainableStdin(object): + def get_input(self): + while True: + if QUIT_EVENT.is_set(): + return None + if select([sys.stdin], [], [], 0.1)[0]: + return sys.stdin.readline().strip() + + def drain(self): + if sys.stdin.isatty(): + termios.tcflush(sys.stdin, termios.TCIFLUSH) + + +class IOManager(object): + """ + Threadsafe singleton class that handles all IO. 
+ """ + def __init__(self): + self._active = False + self.debug_log_file = None + self.debug_mode = False + self.jobs = [] + self.lock = Lock() + self._signal_handler_thread = Thread( + target=self._signal_handler_thread_body, + ) + # daemon mode is required because we need to keep the thread + # around until the end of a soft shutdown to wait for a hard + # shutdown signal, but don't have a feasible way of stopping + # the thread once the soft shutdown has completed + self._signal_handler_thread.daemon = True + self._ssh_pids = [] + + def activate(self): + self._active = True + if 'BW_DEBUG_LOG_DIR' in environ: + self.debug_log_file = open(join( + environ['BW_DEBUG_LOG_DIR'], + "{}_{}.log".format( + datetime.now().strftime("%Y-%m-%d_%H-%M-%S"), + getpid(), + ), + ), 'a') + self._signal_handler_thread.start() + signal(SIGINT, sigint_handler) + + def ask(self, question, default, epilogue=None, input_handler=DrainableStdin()): + assert self._active + answers = _("[Y/n]") if default else _("[y/N]") + question = question + " " + answers + " " + with self.lock: + if QUIT_EVENT.is_set(): + sys.exit(0) + self._clear_last_job() + while True: + write_to_stream(STDOUT_WRITER, "\a" + question) + + input_handler.drain() + answer = input_handler.get_input() + if answer is None: + if epilogue: + write_to_stream(STDOUT_WRITER, "\n" + epilogue + "\n") + QUIT_EVENT.set() + sys.exit(0) + elif answer.lower() in (_("y"), _("yes")) or ( + not answer and default + ): + answer = True + break + elif answer.lower() in (_("n"), _("no")) or ( + not answer and not default + ): + answer = False + break + write_to_stream( + STDOUT_WRITER, + _("Please answer with 'y(es)' or 'n(o)'.\n"), + ) + if epilogue: + write_to_stream(STDOUT_WRITER, epilogue + "\n") + self._write_current_job() + return answer + + def deactivate(self): + self._active = False + signal(SIGINT, SIG_DFL) + self._signal_handler_thread.join() + if self.debug_log_file: + self.debug_log_file.close() + + @clear_formatting + @add_debug_indicator + @capture_for_debug_logfile + @add_debug_timestamp + def debug(self, msg, append_newline=True): + if self.debug_mode: + with self.lock: + self._write(msg, append_newline=append_newline) + + def job_add(self, msg): + if not self._active: + return + with self.lock: + if TTY: + self._clear_last_job() + write_to_stream(STDOUT_WRITER, inverse("{} ".format(msg)[:term_width() - 1])) + self.jobs.append(msg) + + def job_del(self, msg): + if not self._active: + return + with self.lock: + self._clear_last_job() + self.jobs.remove(msg) + self._write_current_job() + + @clear_formatting + @capture_for_debug_logfile + @add_debug_timestamp + def stderr(self, msg, append_newline=True): + with self.lock: + self._write(msg, append_newline=append_newline, err=True) + + @clear_formatting + @capture_for_debug_logfile + @add_debug_timestamp + def stdout(self, msg, append_newline=True): + with self.lock: + self._write(msg, append_newline=append_newline) + + @contextmanager + def job(self, job_text): + self.job_add(job_text) + try: + yield + finally: + self.job_del(job_text) + + def _clear_last_job(self): + if self.jobs and TTY: + write_to_stream(STDOUT_WRITER, "\r\033[K") + + def _signal_handler_thread_body(self): + while self._active: + if QUIT_EVENT.is_set(): + if SHUTDOWN_EVENT_HARD.wait(0.1): + self.stderr(_("{x} {signal} cleanup interrupted, exiting...").format( + signal=bold(_("SIGINT")), + x=blue("i"), + )) + for ssh_pid in self._ssh_pids: + self.debug(_("killing SSH session with PID {pid}").format(pid=ssh_pid)) + try: + 
kill(ssh_pid, SIGTERM) + except ProcessLookupError: + pass + self._clear_last_job() + _exit(1) + else: + if SHUTDOWN_EVENT_SOFT.wait(0.1): + QUIT_EVENT.set() + self.stderr(_( + "{x} {signal} canceling pending tasks... " + "(hit CTRL+C again for immediate dirty exit)" + ).format( + signal=bold(_("SIGINT")), + x=blue("i"), + )) + + def _write(self, msg, append_newline=True, err=False): + if not self._active: + return + if self.jobs and TTY: + write_to_stream(STDOUT_WRITER, "\r\033[K") + if msg is not None: + if append_newline: + msg += "\n" + write_to_stream(STDERR_WRITER if err else STDOUT_WRITER, msg) + self._write_current_job() + + def _write_current_job(self): + if self.jobs and TTY: + write_to_stream(STDOUT_WRITER, inverse("{} ".format(self.jobs[-1])[:term_width() - 1])) + +io = IOManager() diff --git a/docs/content/CNAME b/docs/content/CNAME new file mode 100644 index 0000000..9086904 --- /dev/null +++ b/docs/content/CNAME @@ -0,0 +1 @@ +docs.bundlewrap.org diff --git a/docs/content/guide/api.md b/docs/content/guide/api.md new file mode 100644 index 0000000..958b13c --- /dev/null +++ b/docs/content/guide/api.md @@ -0,0 +1,240 @@ +# API + +While most users will interact with BundleWrap through the `bw` command line utility, you can also use it from your own code to extract data or further automate config management tasks. + +Even within BundleWrap itself (e.g. templates, libs, and hooks) you are often given repo and/or node objects to work with. Their methods and attributes are documented below. + +Some general notes on using BundleWrap's API: + +* There can be an arbitrary amount of `bundlewrap.repo.Repository` objects per process. +* Repositories are read as needed and not re-read when something changes. Modifying files in a repo during the lifetime of the matching Repository object may result in undefined behavior. + +
+ +## Example + +Here's a short example of how to use BundleWrap to get the uptime for a node. + + from bundlewrap.repo import Repository + + repo = Repository("/path/to/my/repo") + node = repo.get_node("mynode") + uptime = node.run("uptime") + print(uptime.stdout) + +
+ +## Reference + + +### bundlewrap.repo.Repository(path) + +The starting point of any interaction with BundleWrap. An object of this class represents the repository at the given path. + +
+ +**`.groups`** + +A list of all groups in the repo (instances of `bundlewrap.group.Group`) + +
+ +**`.group_names`** + +A list of all group names in this repo. + +
+ +**`.nodes`** + +A list of all nodes in the repo (instances of `bundlewrap.node.Node`) + +
+ +**`.node_names`** + +A list of all node names in this repo + +
+ +**`.revision`** + +The current git, hg or bzr revision of this repo. `None` if no SCM was detected. + +
+ +**`.get_group(group_name)`** + +Returns the Group object for the given name. + +
+ +**`.get_node(node_name)`** + +Returns the Node object with the given name. + +
+ +**`.nodes_in_all_groups(group_names)`** + +Returns a list of Node objects where every node is a member of every group name given. + +
+ +**`.nodes_in_any_group(group_names)`** + +Returns all Node objects that are a member of at least one of the given group names. + +
+ +**`.nodes_in_group(group_name)`** + +Returns a list of Node objects in the named group. + +
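As a quick, hypothetical sketch (group names are made up), the query methods above can be combined like this:

    from bundlewrap.repo import Repository

    repo = Repository("/path/to/my/repo")

    # all nodes in a single group
    for node in repo.nodes_in_group("webservers"):
        print(node.name)

    # only nodes that are members of *every* given group
    critical = repo.nodes_in_all_groups(["webservers", "production"])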
+ +### bundlewrap.node.Node() + +A system managed by BundleWrap. + +
+ +**`.bundles`** + +A list of all bundles associated with this node (instances of `bundlewrap.bundle.Bundle`) + +
+ +**`.groups`** + +A list of `bundlewrap.group.Group` objects this node belongs to + +
+ +**`.hostname`** + +The DNS name BundleWrap uses to connect to this node + +
+ +**`.items`** + +A list of items on this node (instances of subclasses of `bundlewrap.items.Item`) + +
+ +**`.metadata`** + +A dictionary of custom metadata, merged from information in [nodes.py](../repo/nodes.py.md) and [groups.py](../repo/groups.py.md) + +
+ +**`.name`** + +The internal identifier for this node + +
+ +**`.download(remote_path, local_path)`** + +Downloads a file from the node. + +`remote_path` Which file to get from the node +`local_path` Where to put the file + +
+ +**`.get_item(item_id)`** + +Get the Item object with the given ID (e.g. "file:/etc/motd"). + +
+ +**`.has_bundle(bundle_name)`** + +`True` if the node has a bundle with the given name. + +
+ +**`.has_any_bundle(bundle_names)`** + +`True` if the node has a bundle with any of the given names. + +
+ +**`.in_group(group_name)`** + +`True` if the node is in a group with the given name. + +
+ +**`.in_any_group(group_names)`** + +`True` if the node is in a group with any of the given names. + +
+ +**`.run(command, may_fail=False)`** + +Runs a command on the node. Returns an instance of `bundlewrap.operations.RunResult`. + +`command` What should be executed on the node +`may_fail` If `False`, `bundlewrap.exceptions.RemoteException` will be raised if the command does not return 0. + +
+ +**`.upload(local_path, remote_path, mode=None, owner="", group="")`** + +Uploads a file to the node. + +`local_path` Which file to upload +`remote_path` Where to put the file on the target node +`mode` File mode, e.g. "0644" +`owner` Username of the file owner +`group` Group name of the file group + +
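Tying a few of these methods together, here's an illustrative sketch (paths and names are made up, and the `return_code` attribute on the returned `RunResult` is an assumption):

    node = repo.get_node("mynode")
    if node.has_bundle("nginx"):
        # push a prepared config file and verify it
        node.upload("/tmp/nginx.conf", "/etc/nginx/nginx.conf", mode="0644", owner="root")
        result = node.run("nginx -t", may_fail=True)
        if result.return_code != 0:
            print("config check failed on {}".format(node.name))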
+ +### bundlewrap.group.Group + +A user-defined group of nodes. + +
+ +**`.name`** + +The name of this group + +
+ +**`.nodes`** + +A list of all nodes in this group (instances of `bundlewrap.node.Node`, includes subgroup members) + +
+ 

### bundlewrap.utils.Fault 

A Fault acts as a lazy stand-in object for the result of a given callback function. These objects are returned from the "vault" attached to `Repository` objects: 

    >>> repo.vault.password_for("demo")
    <bundlewrap.utils.Fault object at 0x...>

The callback function is only executed when the `value` property of a Fault is accessed or when the Fault is converted to a string. In the example above, this means that the password is only generated when it is really required (e.g. when used in a template). This is particularly useful when used in metadata in connection with [secrets](secrets.md). Users will be able to generate metadata with Faults in it, even if they lack the required keys for the decryption operation represented by the Fault. The key will only be required for files etc. that actually use it. If a Fault cannot be resolved (e.g. for lack of the required key), BundleWrap can just skip the item using the Fault, while still allowing other items on the same node to be applied. 

Faults also support some rudimentary string operations such as appending a string or another Fault, as well as some string methods: 

    >>> f = repo.vault.password_for("1") + ":" + repo.vault.password_for("2")
    >>> f
    <bundlewrap.utils.Fault object at 0x...>
    >>> f.value
    'VOd5PC:JUgYUb'
    >>> f += " "
    >>> f.value
    'VOd5PC:JUgYUb '
    >>> f.strip().value
    'VOd5PC:JUgYUb'
    >>> repo.vault.password_for("1").format_into("Password: {}").value
    'Password: VOd5PC'

These string methods are supported on Faults: `format`, `lower`, `lstrip`, `replace`, `rstrip`, `strip`, `upper`, `zfill` diff --git a/docs/content/guide/cli.md b/docs/content/guide/cli.md new file mode 100644 index 0000000..62a3046 --- /dev/null +++ b/docs/content/guide/cli.md @@ -0,0 +1,77 @@ +# Command Line Interface 

The `bw` utility is BundleWrap's command line interface. 
This page is not meant as a complete reference. It provides a starting point to explore the various subcommands. If you're looking for details, `--help` is your friend.
+ +## bw apply + +
bw apply -i mynode
+ +The most important and most used part of BundleWrap, `bw apply` will apply your configuration to a set of [nodes](../repo/nodes.py.md). By default, it operates in a non-interactive mode. When you're trying something new or are otherwise unsure of some changes, use the `-i` switch to have BundleWrap interactively ask before each change is made. + +
+ +## bw run + +
$ bw run mygroup "uname -a"
+ +Unsurprisingly, the `run` subcommand is used to run commands on nodes. + +As with most commands that accept node names, you can also give a `group` name or any combination of node and group names, separated by commas (without spaces, e.g. `node1,group2,node3`). A third option is to use a bundle selector like `bundle:my_bundle`. It will select all nodes with the named `bundle`. You can freely mix and match node names, group names, and bundle selectors. + +Negation is also possible for bundles and groups. `!bundle:foo` will add all nodes without the foo bundle, while `!group:foo` will add all nodes that aren't in the foo group. + +
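For example, all of the following are valid invocations (names are made up):

    $ bw run node1,group2,node3 "uptime"
    $ bw run bundle:my_bundle "uptime"
    $ bw run '!group:foo' "uptime"

Note the quotes around the negated selector; they keep your shell from interpreting the `!`.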
+ +## bw nodes and bw groups + +
$ bw nodes --hostnames | xargs -n 1 ping -c 1
+ +With these commands you can quickly get a list of all nodes and groups in your [repository](../repo/layout.md). The example above uses `--hostnames` to get a list of all DNS names for your nodes and send a ping to each one. + +
+ +## bw debug + + $ bw debug + bundlewrap X.Y.Z interactive repository inspector + > You can access the current repository as 'repo'. + >>> len(repo.nodes) + 121 + +This command will drop you into a Python shell with direct access to BundleWrap's [API](api.md). Once you're familiar with it, it can be a very powerful tool. + +
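Since `bw debug` is also used elsewhere in these docs with `-c`, here's the non-interactive form for one-off expressions (output shown is illustrative):

    $ bw debug -c "print(len(repo.nodes))"
    121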
+ +## bw plot + +
You'll need Graphviz installed on your machine for this to be useful.
+ +
$ bw plot node mynode | dot -Tsvg -omynode.svg
+ +You won't be using this every day, but it's pretty cool. The above command will create an SVG file (you can open these in your browser) that shows the item dependency graph for the given node. You will see bundles as dashed rectangles, static dependencies (defined in BundleWrap itself) in green, auto-generated dependencies (calculated dynamically each time you run `bw apply`) in blue and dependencies you defined yourself in red. + +It offers an interesting view into the internal complexities BundleWrap has to deal with when figuring out the order in which your items can be applied to your node. + +
+ +## bw test + +
$ bw test
✓ node1  samba  pkg_apt:samba
✘ node1  samba  file:/etc/samba/smb.conf

[...]

+----- traceback from worker ------
|
|  Traceback (most recent call last):
|    File "bundlewrap/concurrency.py", line 78, in _worker_process
|      return_value = target(*msg['args'], **msg['kwargs'])
|    File "<string>", line 378, in test
|  BundleError: file:/etc/samba/smb.conf from bundle 'samba' refers to missing file '/path/to/bundlewrap/repo/bundles/samba/files/smb.conf'
|
+----------------------------------
+This command is meant to be run automatically like a test suite after every commit. It will try to catch any errors in your bundles and file templates by initializing every item for every node (but without touching the network). diff --git a/docs/content/guide/dev_item.md b/docs/content/guide/dev_item.md new file mode 100644 index 0000000..7f1355d --- /dev/null +++ b/docs/content/guide/dev_item.md @@ -0,0 +1,134 @@ +# Custom item types + + +## Step 0: Understand statedicts + +To represent supposed vs. actual state, BundleWrap uses statedicts. These are +normal Python dictionaries with some restrictions: + +* keys must be Unicode text +* every value must be of one of these simple data types: + * bool + * float + * int + * Unicode text + * None +* ...or a list/tuple containing only instances of one of the types above + +Additional information can be stored in statedicts by using keys that start with an underscore. You may only use this for caching purposes (e.g. storing rendered file template content while the "real" sdict information only contains a hash of this content). BundleWrap will ignore these keys and hide them from the user. The type restrictions noted above do not apply. + + +## Step 1: Create an item module + +Create a new file called `/your/bundlewrap/repo/items/foo.py`. You can use this as a template: + + from bundlewrap.items import Item + + + class Foo(Item): + """ + A foo. + """ + BLOCK_CONCURRENT = [] + BUNDLE_ATTRIBUTE_NAME = "foo" + ITEM_ATTRIBUTES = { + 'attribute': "default value", + } + ITEM_TYPE_NAME = "foo" + REQUIRED_ATTRIBUTES = ['attribute'] + + def __repr__(self): + return "".format(self.attributes['attribute']) + + def cdict(self): + """ + Return a statedict that describes the target state of this item + as configured in the repo. An empty dict means that the item + should not exist. + + Implementing this method is optional. The default implementation + uses the attributes as defined in the bundle. + """ + raise NotImplementedError + + def sdict(self): + """ + Return a statedict that describes the actual state of this item + on the node. An empty dict means that the item does not exist + on the node. + + For the item to validate as correct, the values for all keys in + self.cdict() have to match this statedict. + """ + raise NotImplementedError + + def display_dicts(self, cdict, sdict, keys): + """ + Given cdict and sdict as implemented above, modify them to better + suit interactive presentation. The keys parameter is the return + value of display_keys (see below) and provided for reference only. + + Implementing this method is optional. + """ + return (cdict, sdict) + + def display_keys(self, cdict, sdict, keys): + """ + Given a list of keys whose values differ between cdict and sdict, + modify them to better suit presentation to the user. + + Implementing this method is optional. + """ + return keys + + def fix(self, status): + """ + Do whatever is necessary to correct this item. The given ItemStatus + object has the following useful information: + + status.keys list of cdict keys that need fixing + status.cdict cached copy of self.cdict() + status.sdict cached copy of self.sdict() + """ + raise NotImplementedError + +
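To give you a rough idea of where this is going (details in step 3 below), here is a minimal, hypothetical way to fill in `sdict()` and `fix()` for an item managing the single-line content of `/etc/foo`. The `return_code` attribute on the result of `self.node.run()` is an assumption, and a real item would need to quote the value before interpolating it into a shell command:

    def sdict(self):
        result = self.node.run("cat /etc/foo", may_fail=True)
        if result.return_code != 0:
            # the file is missing, i.e. the item does not exist on the node
            return {}
        return {'attribute': result.stdout.decode('utf-8').strip()}

    def fix(self, status):
        # rewrite the file from the configured attribute
        self.node.run("echo '{}' > /etc/foo".format(self.attributes['attribute']))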
+ 

## Step 2: Define attributes 

`BUNDLE_ATTRIBUTE_NAME` is the name of the variable defined in a bundle module that holds the items of this type. If your bundle looks like this: 

    foo = { [...] }

...then you should put `BUNDLE_ATTRIBUTE_NAME = "foo"` here. 


`ITEM_ATTRIBUTES` is a dictionary of the attributes users will be able to configure for your item. For files, that would be stuff like owner, group, and permissions. Every attribute (even if it's mandatory) needs a default value; `None` is totally acceptable: 

    ITEM_ATTRIBUTES = {'attr1': "default1"}


`ITEM_TYPE_NAME` sets the first part of an item's ID. For file items, this is "file". Therefore, file IDs look like this: `file:/path`. The second part is the name a user assigns to your item in a bundle. Example: 

    ITEM_TYPE_NAME = "foo"


`BLOCK_CONCURRENT` is a list of item types (e.g. `pkg_apt`) that cannot be applied in parallel with this type of item. It may include this very item type itself. For most items this is not an issue (e.g. creating multiple files at the same time), but some types of items have to be applied sequentially (e.g. package managers usually employ locks to ensure only one package is installed at a time): 

    BLOCK_CONCURRENT = ["pkg_apt"]


`REQUIRED_ATTRIBUTES` is a list of attribute names that must be set on each item of this type. If BundleWrap encounters an item without all these attributes during bundle inspection, an exception will be raised. Example: 

    REQUIRED_ATTRIBUTES = ['attr1', 'attr2']
+ 

## Step 3: Implement methods 

You should probably start with `sdict()`. Use `self.node.run("command")` to run shell commands on the current node and check the `stdout` property of the returned object. 

The only other method you have to implement is `fix`. It doesn't have to return anything and just uses `self.node.run()` to fix the item. To do this efficiently, it can use the provided `status` object, whose `keys` attribute lists the keys that differ between the should-be state and the actual one. Cached copies of both statedicts are also available as `status.cdict` and `status.sdict` in case you need their values. 

If you're having trouble, try looking at the [source code for the items that come with BundleWrap](https://github.com/bundlewrap/bundlewrap/tree/master/bundlewrap/items). The `pkg_*` items are pretty simple and easy to understand, while `files` is the most complex to date. Or just drop by on [IRC](irc://chat.freenode.net/bundlewrap), we're glad to help. diff --git a/docs/content/guide/dev_plugin.md b/docs/content/guide/dev_plugin.md new file mode 100644 index 0000000..68d1d4b --- /dev/null +++ b/docs/content/guide/dev_plugin.md @@ -0,0 +1,71 @@ +# Writing your own plugins 

[Plugins](../repo/plugins.md) can provide almost any file in a BundleWrap repository: bundles, custom items, hooks, libs, etc. 

Notable exceptions are `nodes.py` and `groups.py`. If your plugin wants to extend those, use a [lib](../repo/libs.md) instead and ask users to add the result of a function call in your lib to their nodes or groups dicts. 
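A sketch of that pattern (file and function names are hypothetical):

    # libs/mycloud.py, shipped by your plugin
    def nodes_from_mycloud(api_token):
        # query your inventory here and return a
        # BundleWrap-compatible dict of nodes
        return {
            'mycloud-1': {
                'hostname': "mycloud-1.example.com",
            },
        }

Users would then merge the returned dict into the `nodes` dict in their `nodes.py`.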
If your plugin depends on other libraries, make sure that it catches `ImportError`s in a way that makes it obvious for the user what's missing. Keep in mind that people will often just `git pull` their repo and not install your plugin themselves.
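One way to do that (the `dnspython` dependency here is just an example) is to fail loudly at import time:

    try:
        import dns.resolver
    except ImportError:
        raise ImportError(
            "this plugin requires dnspython (try: pip install dnspython)"
        )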
+ +
+ +## Starting a new plugin + +### Step 1: Clone the plugins repo + +Create a clone of the [official plugins repo](https://github.com/bundlewrap/plugins) on GitHub. + +### Step 2: Create a branch + +You should work on a branch specific to your plugin. + +### Step 3: Copy your plugin files + +Now take the files that make up your plugin and move them into a subfolder of the plugins repo. The subfolder must be named like your plugin. + +### Step 4: Create required files + +In your plugin subfolder, create a file called `manifest.json` from this template: + + { + "desc": "Concise description (keep it somewhere around 80 characters)", + "help": "Optional verbose help text to be displayed after installing. May\ninclude\nnewlines.", + "provides": [ + "bundles/example/items.py", + "hooks/example.py" + ], + "version": 1 + } + +The `provides` section must contain a list of all files provided by your plugin. + +You also have to create an `AUTHORS` file containing your name and email address. + +Last but not least we require a `LICENSE` file with an OSI-approved Free Software license. + +### Step 5: Update the plugin index + +Run the `update_index.py` script at the root of the plugins repo. + +### Step 6: Run tests + +Run the `test.py` script at the root of the plugins repo. It will tell you if there is anything wrong with your plugin. + +### Step 7: Commit + +Commit all changes to your branch + +### Step 8: Create pull request + +Create a pull request on GitHub to request inclusion of your new plugin in the official repo. Only then will your plugin become available to be installed by `bw repo plugin install yourplugin`. + +
+ 

## Updating an existing plugin 

To release a new version of your plugin: 

* Increase the version number in `manifest.json` 
* Update the list of provided files in `manifest.json` 
* If you're updating someone else's plugin, you should get their consent and add your name to `AUTHORS` 

Then just follow the instructions above from step 5 onward. diff --git a/docs/content/guide/env.md b/docs/content/guide/env.md new file mode 100644 index 0000000..ec84e3d --- /dev/null +++ b/docs/content/guide/env.md @@ -0,0 +1,55 @@ +# Environment Variables 

## `BW_ADD_HOST_KEYS` 

As BundleWrap uses OpenSSH to connect to hosts, host key checking is involved. By default, strict host key checking is activated. This might not be suitable for your setup. You can set this variable to `1` to cause BundleWrap to set the OpenSSH option `StrictHostKeyChecking=no`. 

You can also use `bw -a ...` to achieve the same effect. 


## `BW_COLORS` 

Colors are enabled by default. Setting this variable to `0` tells BundleWrap to never use any ANSI color escape sequences. 


## `BW_DEBUG_LOG_DIR` 

Set this to an existing directory path to have BundleWrap write debug logs there (even when you're running `bw` without `--debug`). 
Debug logs are verbose and BundleWrap does not rotate them for you. Putting them on a tmpfs or ramdisk will save your SSD and get rid of old logs every time you reboot your machine.
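For example (the directory is up to you, but it must exist):

    $ mkdir -p /tmp/bw_logs
    $ BW_DEBUG_LOG_DIR=/tmp/bw_logs bw apply mynode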
+ + +## `BW_HARDLOCK_EXPIRY` + +[Hard locks](locks.md) are automatically ignored after some time. By default, it's `"8h"`. You can use this variable to override that default. + + +## `BW_IDENTITY` + +When BundleWrap [locks](locks.md) a node, it stores a short description about "you". By default, this is the string `$USER@$HOSTNAME`, e.g. `john@mymachine`. You can use `BW_IDENTITY` to specify a custom string. (No variables will be evaluated in user supplied strings.) + + +## `BW_ITEM_WORKERS` and `BW_NODE_WORKERS` + +BundleWrap attempts to parallelize work. These two options specify the number of nodes and items, respectively, which will be handled concurrently. To be more precise, when setting `BW_NODE_WORKERS=8` and `BW_ITEM_WORKERS=2`, BundleWrap will work on eight nodes in parallel, each handling two items in parallel. + +You can also use the command line options `-p` and `-P`, e.g. `bw apply -p ... -P ... ...`, to achieve the same effect. Command line arguments override environment variables. + +There is no single default for these values. For example, when running `bw apply`, four nodes are being handled by default. However, when running `bw test`, only one node will be tested by default. `BW_NODE_WORKERS` and `BW_ITEM_WORKERS` apply to *all* these operations. + +Note that you should not set these variables to very high values. First, it can cause high memory consumption on your machine. Second, not all SSH servers can handle massive parallelism. Please refer to your OpenSSH documentation on how to tune your servers for these situations. + + +## `BW_SOFTLOCK_EXPIRY` + +[Soft locks](locks.md) are automatically removed from nodes after some time. By default, it's `"8h"`. You can use this variable to override that default. + + +## `BW_SSH_ARGS` + +Extra arguments to include in every call to `ssh` BundleWrap makes. Set this to "-F ~/.ssh/otherconf" to use a different SSH config with BundleWrap. + + +## `BW_VAULT_DUMMY_MODE` + +Setting this to `1` will make `repo.vault` return dummy values for every [secret](secrets.md). This is useful for running `bw test` on a CI server that you don't want to trust with your `.secrets.cfg`. diff --git a/docs/content/guide/installation.md b/docs/content/guide/installation.md new file mode 100644 index 0000000..9ee376f --- /dev/null +++ b/docs/content/guide/installation.md @@ -0,0 +1,84 @@ +# Installation + +
You may need to install pip first. This can be accomplished through your distribution's package manager, e.g.:

aptitude install python-pip

or by following the pip project's own installation instructions.
+ +## Using pip + +It's as simple as: + +
pip install bundlewrap
+ +Note that you need at least Python 2.7 to run BundleWrap. Python 3 is supported as long as it's >= 3.3. + +
+ +## From git + +
This type of install will give you the very latest (and thus possibly broken) bleeding edge version of BundleWrap. You should only use this if you know what you're doing.
+ +
The instructions below are for installing on Ubuntu Server 12.10 (Quantal), but should also work for other versions of Ubuntu/Debian. If you're on some other distro, you will obviously have to adjust the package install commands.
+ +
The instructions assume you have root privileges.
+ +Install basic requirements: + +
aptitude install build-essential git python-dev python-pip
+ +Clone the GitHub repository: + +
cd /opt
git clone https://github.com/bundlewrap/bundlewrap.git
+ +Use `pip install -e` to install in "development mode": + +
pip install -e /opt/bundlewrap
+ 

You can now try running the `bw` command line utility: 
bw --help
+ 

That's it. 

To update your install, just pull the git repository and have `setup.py` check for new dependencies: 
cd /opt/bundlewrap
git pull
python setup.py develop
+ +
+ 

# Requirements for managed systems 

While the following list might appear long, even very minimal systems should provide everything that's needed. 

* `apt-get` (only used with [pkg_apt](../items/pkg_apt.md) items) 
* `cat` 
* `chmod` 
* `chown` 
* `dpkg` (only used with [pkg_apt](../items/pkg_apt.md) items) 
* `echo` 
* `file` 
* `find` (only used with [directory purging](../items/directory.md#purge)) 
* `grep` 
* `groupadd` 
* `groupmod` 
* `id` 
* `initctl` (only used with [svc_upstart](../items/svc_upstart.md) items) 
* `mkdir` 
* `mv` 
* `pacman` (only used with [pkg_pacman](../items/pkg_pacman.md) items) 
* `rm` 
* sftp-enabled SSH server (your home directory must be writable) 
* `sudo` 
* `sha1sum` 
* `stat` 
* `systemctl` (only used with [svc_systemd](../items/svc_systemd.md) items) 
* `useradd` 
* `usermod` 

Additionally, you need to pre-configure your SSH client so that it can connect to your nodes without having to type a password (including `sudo` on the node, which also must *not* have the `requiretty` option set). diff --git a/docs/content/guide/item_file_templates.md b/docs/content/guide/item_file_templates.md new file mode 100644 index 0000000..e599e77 --- /dev/null +++ b/docs/content/guide/item_file_templates.md @@ -0,0 +1,113 @@ +# Writing file templates 

BundleWrap can use [Mako](http://www.makotemplates.org) or [Jinja2](http://jinja.pocoo.org) for file templating. This enables you to dynamically construct your config files. Templates reside in the `files` subdirectory of a bundle and are bound to a file item using the `source` [attribute](../items/file.md#source). This page explains how to get started with Mako. 

The most basic example would be: 
Hello, this is ${node.name}!
+ 

After template rendering, it would look like this: 
Hello, this is myexamplenodename!
+ +As you can see, `${...}` can be used to insert the value of a context variable into the rendered file. By default, you have access to two variables in every template: `node` and `repo`. They are `bundlewrap.node.Node` and `bundlewrap.repo.Repository` objects, respectively. You can learn more about the attributes and methods of these objects in the [API docs](api.md), but here are a few examples: + +
+ +## Examples + +inserts the DNS hostname of the current node + + ${node.hostname} + +
+ +a list of all nodes in your repo + + % for node in repo.nodes: + ${node.name} + % endfor + +
+ +make exceptions for certain nodes + + % if node.name == "node1": + option = foo + % elif node.name in ("node2", "node3"): + option = bar + % else: + option = baz + % endif + +
+ +check for group membership + + % if node.in_group("sparkle"): + enable_sparkles = 1 + % endif + +
+ +check for membership in any of several groups + + % if node.in_any_group(("sparkle", "shiny")): + enable_fancy = 1 + % endif + +
+ +check for bundle + + % if node.has_bundle("sparkle"): + enable_sparkles = 1 + % endif + +
+ +check for any of several bundles + + % if node.has_any_bundle(("sparkle", "shiny")): + enable_fancy = 1 + % endif + +
+ +list all nodes in a group + + % for gnode in repo.get_group("mygroup").nodes: + ${gnode.name} + % endfor + +
+ +## Working with node metadata + +Quite often you will attach custom metadata to your nodes in `nodes.py`, e.g.: + + nodes = { + "node1": { + "metadata": { + "interfaces": { + "eth0": "10.1.1.47", + "eth1": "10.1.2.47", + }, + }, + }, + } + +You can easily access this information in templates: + + % for interface, ip in sorted(node.metadata["interfaces"].items()): + interface ${interface} + ip = ${ip} + % endfor + +This template will render to: + + interface eth0 + ip = 10.1.1.47 + interface eth1 + ip = 10.1.2.47 + diff --git a/docs/content/guide/locks.md b/docs/content/guide/locks.md new file mode 100644 index 0000000..1fa44bc --- /dev/null +++ b/docs/content/guide/locks.md @@ -0,0 +1,32 @@ +# Locking + +BundleWrap's decentralized nature makes it necessary to coordinate actions between users of a shared repository. Locking is an important part of collaborating using BundleWrap. + +## Hard locks + +Since very early in the history of BundleWrap, what we call "hard locks" were used to prevent multiple users from using `bw apply` on the same node at the same time. When BundleWrap finds a hard lock on a node in interactive mode, it will display information about who acquired the lock (and when) and will ask whether to ignore the lock or abort the process. In non-interactive mode, the operation is always cancelled for the node in question unless `--force` is used. + +## Soft locks + +Many teams these days are using a workflow based on pull requests. A common problem here is that changes from a feature branch might already have been applied to a set of nodes, while the master branch is still lacking these changes. While the pull request is open and waiting for review, other users might rightly use the master branch to apply to all nodes, reverting changes made by the feature branch. This can be a major nuisance. + +As of version 2.6.0, BundleWrap provides "soft locks" to prevent this. The author of a feature branch can now lock the node so only he or she can use `bw apply` on it: + +
$ bw lock add node1
✓ node1  locked with ID B9JS (expires in 8h)
+ +This will prevent all other users from changing any items on the node for the next 8 hours. BundleWrap will tell users apart by their [BW_IDENTITY](env.md#BW_IDENTITY). Now say someone else is reviewing the pull request and wants to use `bw apply`, while still keeping others out and the original author in. This can be done by simply locking the node *again* as the reviewer. Nodes can have many soft locks. Soft locks act as an exemption from a general ban on changing items that goes into effect as soon as one or more soft locks are present on the node. Of course, if no soft locks are present, anyone can change any item. + +You can list all soft locks on a node with: + +
$ bw lock show node1
i node1  ID    Created              Expires              User   Items  Comment
› node1  Y1KD  2016-05-25 21:30:25  2016-05-26 05:30:25  alice  *      locks are awesome
› node1  B9JS  2016-05-24 13:10:11  2016-05-27 08:10:11  bob    *      me too
+ +Note that each lock is identified by a case-insensitive 4-character ID that can be used to remove the lock: + +
$ bw lock remove node1 y1kd
✓ node1  lock Y1KD removed
+ +Expired locks are automatically and silently purged whenever BundleWrap has the opportunity. Be sure to check out `bw lock add --help` for how to customize expiration time, add a short comment explaining the reason for the lock, or lock only certain items. Using `bw apply` on a soft locked node is not an error and affected items will simply be skipped. diff --git a/docs/content/guide/migrate_12.md b/docs/content/guide/migrate_12.md new file mode 100644 index 0000000..cfeb850 --- /dev/null +++ b/docs/content/guide/migrate_12.md @@ -0,0 +1,61 @@ +# Migrating from BundleWrap 1.x to 2.x + +As per [semver](http://semver.org), BundleWrap 2.0 breaks compatibility with repositories created for BundleWrap 1.x. This document provides a guide on how to upgrade your repositories to BundleWrap 2.x. Please read the entire document before proceeding. To aid with the transition, BundleWrap 1.6.0 has been released along with 2.0.0. It contains no new features over 1.5.x, but has builtin helpers to aid your migration to 2.0. + +
+ +## items.py + +In every bundle, rename `bundle.py` to `items.py`. BundleWrap 1.6.0 can do this for you by running `bw migrate`. + +
+ +## Default file content type + +The default `content_type` for [file items](../items/file.md) has changed from "mako" to "text". This means that you need to check all file items that do not define an explicit content type of "mako". Some of them might be fine because you didn't really need templating, while others may need to have their `content_type` set to "mako" explicitly. + +BundleWrap 1.6.0 will print warnings for every file item affected when running `bw test`. + +
+ +## Metadata merging + +The merging behavior for node and group metadata has changed. Instead of a simple `dict.update()`, metadata dicts are now merged recursively. See [the docs](../repo/groups.py.md#metadata) for details. + +
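A quick illustration of the difference (hypothetical metadata; see the linked docs for the exact rules):

    # group metadata
    {'ntp': {'enabled': True, 'server': "10.0.0.1"}}

    # node metadata
    {'ntp': {'server': "10.0.0.2"}}

    # 1.x result (dict.update() replaced the whole 'ntp' dict)
    {'ntp': {'server': "10.0.0.2"}}

    # 2.x result (nested dicts are merged recursively)
    {'ntp': {'enabled': True, 'server': "10.0.0.2"}}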
+ +## Metadata processors and item generators + +These two advanced features have been replaced by a single new mechanism: [metadata.py](../repo/bundles.md#metadatapy) You will need to rethink and rewrite them. + +BundleWrap 1.6.0 will print warnings for every group that uses metadata processors and any item generators when running `bw test`. + +
+ +## Custom item types + +The API for defining your own items has changed. Generally, you should be able to upgrade your items with relatively little effort. Refer to [the docs](dev_item.md) for details. + +
+ +## Deterministic templates + +While not a strict requirement, it is highly recommended to ensure your entire configuration can be created deterministically (i.e. remains exactly the same no matter how often you generate it). Otherwise, you won't be able to take advantage of the new functionality provided by `bw hash`. + +A common pitfall here is iteration over dictionaries in templates: + + % for key, value in my_dict.items(): + ${value} + % endfor + +Standard dictionaries in Python have no defined order. This may result in lines occasionally changing their position. To solve this, you can simply use `sorted()`: + + % for key, value in sorted(my_dict.items()): + ${value} + % endfor + +
+ +## Hook arguments + +Some [hooks](../repo/hooks.md) had their arguments adjusted slightly. diff --git a/docs/content/guide/os_compatibility.md b/docs/content/guide/os_compatibility.md new file mode 100644 index 0000000..a58e340 --- /dev/null +++ b/docs/content/guide/os_compatibility.md @@ -0,0 +1,15 @@ +# OS compatibility + +BundleWrap by necessity takes a pragmatic approach to supporting different operating systems and distributions. Our main target is Linux, but support for other UNIXes is also evolving. We cannot guarantee to be compatible with every distribution and BSD flavor under the sun, but we try to cover the common ones. + +
+ +## node.os and node.os_version + +You should set these attributes for every node. Giving BundleWrap this information allows us to adapt some built-in behavior. + +
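A sketch of how that might look in your `nodes.py` (values are illustrative):

    nodes = {
        'node-1': {
            'hostname': "node-1.example.com",
            'os': 'debian',
            'os_version': (8,),
        },
    }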
+ +## other node attributes + +In some cases (e.g. when not using sudo) you will need to manually adjust some things. Check the docs [on node-level OS overrides](../repo/nodes.py.md#os-compatibility-overrides). diff --git a/docs/content/guide/quickstart.md b/docs/content/guide/quickstart.md new file mode 100644 index 0000000..d1d10a6 --- /dev/null +++ b/docs/content/guide/quickstart.md @@ -0,0 +1,143 @@ +Quickstart +========== + +This is the 10 minute intro into BundleWrap. Fasten your seatbelt. + + +Installation +------------ + +First, open a terminal and install BundleWrap: + +
pip install bundlewrap
+ + +Create a repository +------------------- + +Now you'll need to create your [repository](../repo/layout.md): + +
mkdir my_bundlewrap_repo
cd my_bundlewrap_repo
bw repo create
+ +You will note that some files have been created. Let's check them out: + +
cat nodes.py
cat groups.py
+ +The contents should be fairly self-explanatory, but you can always check the [docs](../repo/layout.md) on these files if you want to go deeper. + +
It is highly recommended to use git or a similar tool to keep track of your repository. You may want to start doing that right away.
+ +At this point you will want to edit `nodes.py` and maybe change "localhost" to the hostname of a system you have passwordless (including sudo) SSH access to. + +
BundleWrap will honor your `~/.ssh/config`, so if `ssh mynode.example.com sudo id` works without any password prompts in your terminal, you're good to go.
+ + +Run a command +------------- + +The first thing you can do is run a command on your army of one node: + +
bw -a run node-1 "uptime"
+ +
The `-a` switch tells `bw` to automatically trust unknown SSH host keys (when you're connecting to a new node). By default, only known host keys will be accepted.
+ +You should see something like this: + +
› node-1   20:16:26 up 34 days,  4:10,  0 users,  load average: 0.00, 0.01, 0.05
✓ node-1  completed successfully after 3.499531s
+ +Instead of a node name ("node-1" in this case) you can also use a group name (such as "all") from your `groups.py`. + + +Create a bundle +--------------- + +BundleWrap stores node configuration in [bundles](../repo/bundles.md). A bundle is a collection of *items* such as files, system packages or users. To create your first bundle, type: + +
bw repo bundle create mybundle
+ 

Now that you have created your bundle, it's important to tell BundleWrap which nodes will have this bundle. You can assign bundles to nodes using either `groups.py` or `nodes.py`; here we'll use the latter: 

    nodes = {
        'node-1': {
            'bundles': (
                "mybundle",
            ),
            'hostname': "mynode-1.local",
        },
    }


Create a file template 
---------------------- 

To manage a file, you need two things: 

1. a file item in your bundle 
2. a template for the file contents 

Add this to your `bundles/mybundle/items.py`: 

    files = {
        '/etc/motd': {
            'source': "etc/motd",
        },
    }

Then write the file template: 
mkdir bundles/mybundle/files/etc
vim bundles/mybundle/files/etc/motd
+ +You can use this for example content: + +
Welcome to ${node.name}!
+ +Note that the `source` attribute in `items.py` contains a path relative to the `files` directory of your bundle. + + +Apply configuration +------------------- + +Now all that's left is to run `bw apply`: + +
bw apply -i node-1
+ +BundleWrap will ask to replace your previous MOTD: + +
i node-1 run started at 2016-02-13 21:25:45
? node-1
? node-1  ╭─ file:/etc/motd
? node-1  │
? node-1  │  content
? node-1  │  --- <node>
? node-1  │  +++ <bundlewrap>
? node-1  │  @@ -1 +1 @@
? node-1  │  -your old motd
? node-1  │  +Welcome to node-1!
? node-1  │
? node-1  ╰─ Fix file:/etc/motd? [Y/n]
+ +That completes the quickstart tutorial! + + +Further reading +--------------- + +Here are some suggestions on what to do next: + +* set up [SSH multiplexing](https://en.wikibooks.org/wiki/OpenSSH/Cookbook/Multiplexing) for significantly better performance +* take a moment to think about what groups and bundles you will create +* read up on how a [BundleWrap repository](../repo/layout.md) is laid out +* ...especially what [types of items](../repo/bundles.md#item-types) you can add to your bundles +* familiarize yourself with [the Mako template language](http://www.makotemplates.org/) +* explore the [command line interface](cli.md) +* follow [@bundlewrap](https://twitter.com/bundlewrap) on Twitter + +Have fun! If you have any questions, feel free to drop by [on IRC](irc://chat.freenode.net/bundlewrap). diff --git a/docs/content/guide/secrets.md b/docs/content/guide/secrets.md new file mode 100644 index 0000000..bf052e2 --- /dev/null +++ b/docs/content/guide/secrets.md @@ -0,0 +1,102 @@ +# Handling secrets + +We strongly recommend **not** putting any sensitive information such as passwords or private keys into your repository. This page describes the helpers available in BundleWrap to manage those secrets without checking them into version control. + +
Most of the functions described here return lazy Fault objects.
+ +
+ +## .secrets.cfg + +When you initially ran `bw repo create`, a file called `.secrets.cfg` was put into the root level of your repo. It's an INI-style file that by default contains two random keys BundleWrap uses to protect your secrets. + +
You should never commit `.secrets.cfg`. Immediately add it to your `.gitignore` or equivalent.
+ +
+ +## Derived passwords + +In some cases, you can control (i.e. manage with BundleWrap) both ends of the authentication process. A common example is a config file for a web application that holds credentials for a database also managed by BundleWrap. In this case, you don't really care what the password is, you just want it to be the same on both sides. + +To accomplish that, just write this in your template (Mako syntax shown here): + +
database_user = "foo"
database_password = "${repo.vault.password_for("my database")}"
+ +In your bundle, you can then configure your database user like this: + + postgres_roles = { + "foo": { + 'password': repo.vault.password_for("my database"), + }, + } + +It doesn't really matter what string you call `password_for()` with, it just has to be the same on both ends. BundleWrap will then use that string, combine it with the default key called `generate` in your `.secrets.cfg` and derive a random password from that. + +This makes it easy to change all your passwords at once (e.g. when an employee leaves or when required for compliance reasons) by rotating keys. + +
However, it also means you have to guard your `.secrets.cfg` very closely. If it is compromised, so are all your passwords. Use your own judgement.
+ +
+ +## Static passwords + +When you need to store a specific password, you can encrypt it symmetrically: + +
$ bw debug -c "print(repo.vault.encrypt('my password'))"
gAAAA[...]mrVMA==
+ +You can then use this encrypted password in a template like this: + +
database_user = "foo"
database_password = "${repo.vault.decrypt("gAAAA[...]mrVMA==")}"
+ +
+ +## Files + +You can also encrypt entire files: + +
$ bw debug -c "repo.vault.encrypt_file('/my/secret.file', 'encrypted.file')"
+ +
Encrypted files are always read and written relative to the `data/` subdirectory of your repo.
+ +If the source file was encoded using UTF-8, you can then simply pass the decrypted content into a file item: + + files = { + "/secret": { + 'content': repo.vault.decrypt_file("encrypted.file"), + }, + } + +If the source file is binary however (or any encoding other than UTF-8), you must use base64: + + files = { + "/secret": { + 'content': repo.vault.decrypt_file_as_base64("encrypted.file"), + 'content_type': 'base64', + }, + } + +
+ +## Key management + +### Multiple keys + +You can always add more keys to your `.secrets.cfg`, but you should keep the defaults around. Adding more keys makes it possible to give different keys to different teams. **By default, BundleWrap will skip items it can't find the required keys for**. + +When using `.password_for()`, `.decrypt()` etc., you can provide a `key` argument to select the key: + + repo.vault.password_for("some database", key="devops") + +
+ +### Rotating keys + +
This is applicable mostly to `.password_for()`. The other methods use symmetric encryption and require manually updating the encrypted text after the key has changed.
+ +You can generate a new key by running `bw debug -c "print(repo.vault.random_key())"`. Place the result in your `.secrets.cfg`. Then you need to distribute the new key to your team and run `bw apply` for all your nodes. diff --git a/docs/content/index.md b/docs/content/index.md new file mode 100644 index 0000000..400dcca --- /dev/null +++ b/docs/content/index.md @@ -0,0 +1,43 @@ + + +BundleWrap documentation +======================== + +Check out the [quickstart tutorial](guide/quickstart.md) to get started. + +If you run into a problem that is not answered in these docs, please +find us on [IRC](irc://chat.freenode.net/bundlewrap) or [Twitter](https://twitter.com/bundlewrap). We’re happy to help! + +
+ +Is BundleWrap the right tool for you? +------------------------------------- + +We think you will enjoy BundleWrap a lot if you: + +- know some Python +- like to write your configuration from scratch and control every bit + of it +- have lots of unique nodes +- are trying to get a lot of existing systems under management +- are NOT trying to handle a massive amount of nodes (let’s say more + than 300) +- like to start small +- don’t want yet more stuff to run on your nodes (or mess with + appliances as little as possible) +- prefer a simple tool to a fancy one +- want as much as possible in git/hg/bzr +- have strongly segmented internal networks + +You might be better served with a different config management system if +you: + +- are already using a config management system and don’t have any + major issues +- hate Python and/or JSON +- like to use community-maintained configuration templates +- need unattended bootstrapping of nodes +- need to manage non-Linux systems +- don’t trust your coworkers + +We have also prepared a [comparison with other popular config management systems](misc/alternatives.md). diff --git a/docs/content/items/action.md b/docs/content/items/action.md new file mode 100644 index 0000000..49aff21 --- /dev/null +++ b/docs/content/items/action.md @@ -0,0 +1,49 @@ +# Actions + +Actions will be run on every `bw apply`. They differ from regular items in that they cannot be "correct" in the first place. They can only succeed or fail. + + actions = { + 'check_if_its_still_linux': { + 'command': "uname", + 'expected_return_code': 0, + 'expected_stdout': "Linux\n", + }, + } + +
+ +## Attribute reference + +See also: [The list of generic builtin item attributes](../repo/bundles.md#builtin-item-attributes) + +
+ +### command + +The only required attribute. This is the command that will be run on the node with root privileges. + +
+ +### expected_return_code + +Defaults to `0`. If the return code of your command is anything else, the action is considered failed. You can also set this to `None` and any return code will be accepted. + +
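+
+For example, this action (a sketch; the item name and command are made up for illustration) accepts any exit code:
+
+    actions = {
+        'stop_foo_if_running': {
+            # "service foo stop" may exit non-zero if foo wasn't running
+            'command': "service foo stop",
+            'expected_return_code': None,
+        },
+    }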
+ +### expected_stdout + +If this is given, the stdout output of the command must match the given string or the action is considered failed. + +
+ +### expected_stderr + +Same as `expected_stdout`, but with stderr. + +
+ +### interactive + +If set to `True`, this action will be skipped in non-interactive mode. If set to `False`, this action will always be executed without asking (even in interactive mode). Defaults to `None`. + +
+Think hard before setting this to `False`. People might assume that interactive mode won't do anything without their consent.
diff --git a/docs/content/items/directory.md b/docs/content/items/directory.md new file mode 100644 index 0000000..b98f6cf --- /dev/null +++ b/docs/content/items/directory.md @@ -0,0 +1,37 @@ +# Directory items + + directories = { + "/path/to/directory": { + "mode": "0644", + "owner": "root", + "group": "root", + }, + } + +## Attribute reference + +See also: [The list of generic builtin item attributes](../repo/bundles.md#builtin-item-attributes) + +
+ +### group + +Name of the group this directory belongs to. Defaults to `None` (don't care about group). + +
+
+### mode
+
+Directory mode as returned by `stat -c %a <directory>`. Defaults to `None` (don't care about mode).
+
+ +### owner + +Username of the directory's owner. Defaults to `None` (don't care about owner). + +
+ +### purge + +Set this to `True` to remove everything from this directory that is not managed by BundleWrap. Defaults to `False`. diff --git a/docs/content/items/file.md b/docs/content/items/file.md new file mode 100644 index 0000000..32ff275 --- /dev/null +++ b/docs/content/items/file.md @@ -0,0 +1,97 @@ +# File items + +Manage regular files. + + files = { + "/path/to/file": { + "mode": "0644", + "owner": "root", + "group": "root", + "content_type": "mako", + "encoding": "utf-8", + "source": "my_template", + }, + } + +
+ +Attribute reference +------------------- + +See also: [The list of generic builtin item attributes](../repo/bundles.md#builtin-item-attributes) + +
+ +### content + +May be used instead of `source` to provide file content without a template file. + +
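+
+A minimal sketch (path and text are made up):
+
+    files = {
+        "/etc/motd": {
+            'content': "This node is managed by BundleWrap.\n",
+        },
+    }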
+
+### content_type
+
+How the file pointed to by `source` or the string given to `content` should be interpreted.
+
+| Value | Effect |
+|-------|--------|
+| `any` | only cares about file owner, group, and mode |
+| `base64` | content is decoded from base64 |
+| `binary` | file is uploaded verbatim, no content processing occurs |
+| `jinja2` | content is interpreted by the Jinja2 template engine |
+| `mako` | content is interpreted by the Mako template engine |
+| `text` (default) | like `binary`, but will be diffed in interactive mode |
+
+ +### context + +Only used with Mako and Jinja2 templates. The values of this dictionary will be available from within the template as variables named after the respective keys. + +
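+
+A short sketch (the `port` key is made up for illustration):
+
+    files = {
+        "/etc/foo.conf": {
+            'content_type': 'mako',
+            'context': {
+                'port': 8080,  # available as ${port} in the template
+            },
+        },
+    }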
+
+### delete
+
+When set to `True`, the path of this file will be removed. This works even if the path holds a directory or something else instead of a file. When using `delete`, no other attributes are allowed.
+
+ +### encoding + +Encoding of the target file. Note that this applies to the remote file only, your template is still conveniently written in UTF-8 and will be converted by BundleWrap. Defaults to "utf-8". Other possible values (e.g. "latin-1") can be found [here](http://docs.python.org/2/library/codecs.html#standard-encodings). + +
+ +### group + +Name of the group this file belongs to. Defaults to `None` (don't care about group). + +
+
+### mode
+
+File mode as returned by `stat -c %a <file>`. Defaults to `None` (don't care about mode).
+
+ +### owner + +Username of the file's owner. Defaults to `None` (don't care about owner). + +
+ +### source + +File name of the file template. If this says `my_template`, BundleWrap will look in `data/my_bundle/files/my_template` and then `bundles/my_bundle/files/my_template`. Most of the time, you will want to put config templates into the latter directory. The `data/` subdirectory is meant for files that are very specific to your infrastructure (e.g. DNS zone files). This separation allows you to write your bundles in a generic way so that they could be open-sourced and shared with other people. Defaults to the filename of this item (e.g. `foo.conf` when this item is `/etc/foo.conf`). + +See also: [Writing file templates](../guide/item_file_templates.md) + +
+ +### verify_with + +This can be used to run external validation commands on a file before it is applied to a node. The file is verified locally on the machine running BundleWrap. Verification is considered successful when the exit code of the verification command is 0. Use `{}` as a placeholder for the shell-quoted path to the temporary file. Here is an example for verifying sudoers files: + +
+    visudo -cf {}
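+
+A file item putting this to use might look like this (paths are made up for illustration):
+
+    files = {
+        "/etc/sudoers.d/example": {
+            'source': "sudoers_example",
+            'verify_with': "visudo -cf {}",
+        },
+    }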
+ +Keep in mind that all team members will have to have the verification command installed on their machines. diff --git a/docs/content/items/group.md b/docs/content/items/group.md new file mode 100644 index 0000000..4f617b9 --- /dev/null +++ b/docs/content/items/group.md @@ -0,0 +1,27 @@ +# Group items + +Manages system groups. Group members are managed through the [user item](user.md). + + groups = { + "acme": { + "gid": 2342, + }, + } + +
+ +## Attribute reference + +See also: [The list of generic builtin item attributes](../repo/bundles.md#builtin-item-attributes) + +
+ +### delete + +When set to `True`, this group will be removed from the system. When using `delete`, no other attributes are allowed. + +
+ +### gid + +Numerical ID of the group. diff --git a/docs/content/items/pkg_apt.md b/docs/content/items/pkg_apt.md new file mode 100644 index 0000000..77faecb --- /dev/null +++ b/docs/content/items/pkg_apt.md @@ -0,0 +1,24 @@ +# APT package items + +Handles packages installed by `apt-get` on Debian-based systems. + + pkg_apt = { + "foopkg": { + "installed": True, # default + }, + "bar": { + "installed": False, + }, + } + +
+ +## Attribute reference + +See also: [The list of generic builtin item attributes](../repo/bundles.md#builtin-item-attributes) + +
+ +### installed + +`True` when the package is expected to be present on the system; `False` if it should be purged. diff --git a/docs/content/items/pkg_dnf.md b/docs/content/items/pkg_dnf.md new file mode 100644 index 0000000..3639865 --- /dev/null +++ b/docs/content/items/pkg_dnf.md @@ -0,0 +1,24 @@ +# dnf package items + +Handles packages installed by `dnf` on RPM-based systems. + + pkg_dnf = { + "foopkg": { + "installed": True, # default + }, + "bar": { + "installed": False, + }, + } + +
+ +## Attribute reference + +See also: [The list of generic builtin item attributes](../repo/bundles.md#builtin-item-attributes) + +
+ +### installed + +`True` when the package is expected to be present on the system; `False` if it should be removed. diff --git a/docs/content/items/pkg_openbsd.md b/docs/content/items/pkg_openbsd.md new file mode 100644 index 0000000..9dc2fd9 --- /dev/null +++ b/docs/content/items/pkg_openbsd.md @@ -0,0 +1,34 @@ +# OpenBSD package items + +Handles packages installed by `pkg_add` on OpenBSD systems. + + pkg_openbsd = { + "foo": { + "installed": True, # default + }, + "bar": { + "installed": True, + "version": "1.0", + }, + "baz": { + "installed": False, + }, + } + +
+ +## Attribute reference + +See also: [The list of generic builtin item attributes](../repo/bundles.md#builtin-item-attributes) + +
+ +### installed + +`True` when the package is expected to be present on the system; `False` if it should be purged. + +
+ +### version + +Optional version string. Required for packages that offer multiple variants (like nginx or sudo). Ignored when `installed` is `False`. diff --git a/docs/content/items/pkg_pacman.md b/docs/content/items/pkg_pacman.md new file mode 100644 index 0000000..c76585a --- /dev/null +++ b/docs/content/items/pkg_pacman.md @@ -0,0 +1,35 @@ +# Pacman package items + +Handles packages installed by `pacman` (e.g. Arch Linux). + + pkg_pacman = { + "foopkg": { + "installed": True, # default + }, + "bar": { + "installed": False, + }, + "somethingelse": { + "tarball": "something-1.0.pkg.tar.gz", + } + } + +
+System updates on Arch Linux should always be performed manually and with great care. Thus, this item type installs packages with a simple `pacman -S $pkgname` instead of the commonly recommended `pacman -Syu $pkgname`. You should manually do a full system update before installing new packages via BundleWrap!
+ +
+ +## Attribute reference + +See also: [The list of generic builtin item attributes](../repo/bundles.md#builtin-item-attributes) + +
+ +### installed + +`True` when the package is expected to be present on the system; `False` if this package and all dependencies that are no longer needed should be removed. + +
+ +### tarball + +Upload a local file to the node and install it using `pacman -U`. The value of `tarball` must point to a file relative to the `pkg_pacman` subdirectory of the current bundle. diff --git a/docs/content/items/pkg_pip.md b/docs/content/items/pkg_pip.md new file mode 100644 index 0000000..86f076b --- /dev/null +++ b/docs/content/items/pkg_pip.md @@ -0,0 +1,36 @@ +# pip package items + +Handles Python packages installed by `pip`. + + pkg_pip = { + "foo": { + "installed": True, # default + "version": "1.0", # optional + }, + "bar": { + "installed": False, + }, + "/path/to/virtualenv/foo": { + # will install foo in the virtualenv at /path/to/virtualenv + }, + } + +
+ +## Attribute reference + +See also: [The list of generic builtin item attributes](../repo/bundles.md#builtin-item-attributes) + +
+ +### installed + +`True` when the package is expected to be present on the system; `False` if it should be removed. + +
+ +### version + +Force the given exact version to be installed. You can only specify a single version here, selectors like `>=1.0` are NOT supported. + +If it's not given, the latest version will be installed initially, but (like the other package items) upgrades will NOT be installed. diff --git a/docs/content/items/pkg_yum.md b/docs/content/items/pkg_yum.md new file mode 100644 index 0000000..72c2a9e --- /dev/null +++ b/docs/content/items/pkg_yum.md @@ -0,0 +1,24 @@ +# yum package items + +Handles packages installed by `yum` on RPM-based systems. + + pkg_yum = { + "foopkg": { + "installed": True, # default + }, + "bar": { + "installed": False, + }, + } + +
+ +## Attribute reference + +See also: [The list of generic builtin item attributes](../repo/bundles.md#builtin-item-attributes) + +
+ +### installed + +`True` when the package is expected to be present on the system; `False` if it should be removed. diff --git a/docs/content/items/pkg_zypper.md b/docs/content/items/pkg_zypper.md new file mode 100644 index 0000000..20edf2f --- /dev/null +++ b/docs/content/items/pkg_zypper.md @@ -0,0 +1,24 @@ +# zypper package items + +Handles packages installed by `zypper` on SUSE-based systems. + + pkg_zypper = { + "foopkg": { + "installed": True, # default + }, + "bar": { + "installed": False, + }, + } + +
+ +## Attribute reference + +See also: [The list of generic builtin item attributes](../repo/bundles.md#builtin-item-attributes) + +
+ +### installed + +`True` when the package is expected to be present on the system; `False` if it should be removed. diff --git a/docs/content/items/postgres_db.md b/docs/content/items/postgres_db.md new file mode 100644 index 0000000..a658463 --- /dev/null +++ b/docs/content/items/postgres_db.md @@ -0,0 +1,21 @@ +# Postgres database items + +Manages Postgres databases. + + postgres_dbs = { + "mydatabase": { + "owner": "me", + }, + } + +
+ +## Attribute reference + +See also: [The list of generic builtin item attributes](../repo/bundles.md#builtin-item-attributes) + +
+ +### owner + +Name of the role which owns this database (defaults to `"postgres"`). diff --git a/docs/content/items/postgres_role.md b/docs/content/items/postgres_role.md new file mode 100644 index 0000000..0532b9d --- /dev/null +++ b/docs/content/items/postgres_role.md @@ -0,0 +1,36 @@ +# Postgres role items + +Manages Postgres roles. + + postgres_roles = { + "me": { + "superuser": True, + "password": "itsamemario", + }, + } + +
+ +## Attribute reference + +See also: [The list of generic builtin item attributes](../repo/bundles.md#builtin-item-attributes) + +
+ +### superuser + +`True` if the role should be given superuser privileges (defaults to `False`). + +
+ +### password + +Plaintext password to set for this role (will be hashed using MD5). + +
+Please do not write any passwords into your bundles. This attribute is intended to be used with an external source of passwords and filled dynamically. If you don't have or want such an elaborate setup, specify passwords using the `password_hash` attribute instead.
+ +
+ +### password_hash + +As an alternative to `password`, this allows setting the raw hash as it will be stored in Postgres' internal database. Should start with "md5". diff --git a/docs/content/items/svc_openbsd.md b/docs/content/items/svc_openbsd.md new file mode 100644 index 0000000..c4bcd67 --- /dev/null +++ b/docs/content/items/svc_openbsd.md @@ -0,0 +1,47 @@ +# OpenBSD service items + +Handles services on OpenBSD. + + svc_openbsd = { + "bgpd": { + "enabled": True, # default + "running": True, # default + }, + "supervisord": { + "running": False, + }, + } + +
+ +## Attribute reference + +See also: [The list of generic builtin item attributes](../repo/bundles.md#builtin-item-attributes) + +
+
+### enabled
+
+`True` if the service shall be automatically started during system bootup; `False` otherwise. The default is `True`, because starting disabled services fails on OpenBSD.
+
+ +### running + +`True` if the service is expected to be running on the system; `False` if it should be stopped. + +
+ +## Canned actions + +See also: [Explanation of how canned actions work](../repo/bundles.md#canned-actions) + +### restart + +Restarts the service. + +
+ +### stopstart + +Stops and starts the service. diff --git a/docs/content/items/svc_systemd.md b/docs/content/items/svc_systemd.md new file mode 100644 index 0000000..38f4d7c --- /dev/null +++ b/docs/content/items/svc_systemd.md @@ -0,0 +1,47 @@ +# systemd service items + +Handles services managed by systemd. + + svc_systemd = { + "fcron.service": { + "enabled": True, + "running": True, # default + }, + "sgopherd.socket": { + "running": False, + }, + } + +
+ +## Attribute reference + +See also: [The list of generic builtin item attributes](../repo/bundles.md#builtin-item-attributes) + +
+ +### enabled + +`True` if the service shall be automatically started during system bootup; `False` otherwise. `None`, the default value, makes BundleWrap ignore this setting. + +
+ +### running + +`True` if the service is expected to be running on the system; `False` if it should be stopped. + +
+ +## Canned actions + +See also: [Explanation of how canned actions work](../repo/bundles.md#canned-actions) + +### reload + +Reloads the service. + +
+ +### restart + +Restarts the service. diff --git a/docs/content/items/svc_systemv.md b/docs/content/items/svc_systemv.md new file mode 100644 index 0000000..cbe8d75 --- /dev/null +++ b/docs/content/items/svc_systemv.md @@ -0,0 +1,40 @@ +# System V service items + +Handles services managed by traditional System V init scripts. + + svc_systemv = { + "apache2": { + "running": True, # default + }, + "mysql": { + "running": False, + }, + } + +
+ +## Attribute reference + +See also: [The list of generic builtin item attributes](../repo/bundles.md#builtin-item-attributes) + +
+ +### running + +`True` if the service is expected to be running on the system; `False` if it should be stopped. + +
+ +## Canned actions + +See also: [Explanation of how canned actions work](../repo/bundles.md#canned-actions) + +### reload + +Reloads the service. + +
+ +### restart + +Restarts the service. diff --git a/docs/content/items/svc_upstart.md b/docs/content/items/svc_upstart.md new file mode 100644 index 0000000..2611ab1 --- /dev/null +++ b/docs/content/items/svc_upstart.md @@ -0,0 +1,46 @@ +# Upstart service items + +Handles services managed by Upstart. + + svc_upstart = { + "gunicorn": { + "running": True, # default + }, + "celery": { + "running": False, + }, + } + +
+ +## Attribute reference + +See also: [The list of generic builtin item attributes](../repo/bundles.md#builtin-item-attributes) + +
+ +### running + +`True` if the service is expected to be running on the system; `False` if it should be stopped. + +
+ +## Canned actions + +See also: [Explanation of how canned actions work](../repo/bundles.md#canned-actions) + +### reload + +Reloads the service. + +
+ +### restart + +Restarts the service. + +
+ +### stopstart + +Stops and then starts the service. This is different from `restart` in that Upstart will pick up changes to the `/etc/init/SERVICENAME.conf` file, while `restart` will continue to use the version of that file that the service was originally started with. See [http://askubuntu.com/a/238069](http://askubuntu.com/a/238069). diff --git a/docs/content/items/symlink.md b/docs/content/items/symlink.md new file mode 100644 index 0000000..3fc4f52 --- /dev/null +++ b/docs/content/items/symlink.md @@ -0,0 +1,33 @@ +# Symlink items + + symlinks = { + "/some/symlink": { + "group": "root", + "owner": "root", + "target": "/target/file", + }, + } + +
+ +## Attribute reference + +See also: [The list of generic builtin item attributes](../repo/bundles.md#builtin-item-attributes) + +
+ +### target + +File or directory this symlink points to. **This attribute is required.** + +
+
+### group
+
+Name of the group this symlink belongs to. Defaults to `None` (don't care about group).
+
+
+### owner
+
+Username of the symlink's owner. Defaults to `None` (don't care about owner). diff --git a/docs/content/items/user.md b/docs/content/items/user.md new file mode 100644 index 0000000..11ce05e --- /dev/null +++ b/docs/content/items/user.md @@ -0,0 +1,101 @@ +# User items
+
+Manages system user accounts.
+
+    users = {
+        "jdoe": {
+            "full_name": "Jane Doe",
+            "gid": 2342,
+            "groups": ["admins", "users", "wheel"],
+            "home": "/home/jdoe",
+            "password_hash": "$6$abcdef$ghijklmnopqrstuvwxyz",
+            "shell": "/bin/zsh",
+            "uid": 4747,
+        },
+    }
+
+ +## Attribute reference + +See also: [The list of generic builtin item attributes](../repo/bundles.md#builtin-item-attributes) + +All attributes are optional. + +
+ +### delete + +When set to `True`, this user will be removed from the system. Note that because of how `userdel` works, the primary group of the user will be removed if it contains no other users. When using `delete`, no other attributes are allowed. + +
+ +### full_name + +Full name of the user. + +
+ +### gid + +Primary group of the user as numerical ID or group name. + +
+Due to how `useradd` works, this attribute is required whenever you don't want the default behavior of `useradd` (which usually means automatically creating a group with the same name as the user). Whether you want to use an unmanaged group that already exists on the node or a group managed by BundleWrap, you need to set this attribute. This is true even if the group in question is in fact named like the user.
+ +
+ +### groups + +List of groups (names, not GIDs) the user should belong to. Must NOT include the group referenced by `gid`. + +
+ +### hash_method + +One of: + +* `md5` +* `sha256` +* `sha512` + +Defaults to `sha512`. + +
+ +### home + +Path to home directory. Defaults to `/home/USERNAME`. + +
+ +### password + +The user's password in plaintext. + +
+Please do not write any passwords into your bundles. This attribute is intended to be used with an external source of passwords and filled dynamically. If you don't have or want such an elaborate setup, specify passwords using the `password_hash` attribute instead.
+ +
+If you don't specify a salt along with the password, BundleWrap will use a static salt. Be aware that this is basically the same as using no salt at all.
+ +
+ +### password_hash + +Hashed password as it would be returned by `crypt()` and written to `/etc/shadow`. + +
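+
+One way to generate such a hash, assuming the `mkpasswd` utility is available on your machine:
+
+    $ mkpasswd -m sha-512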
+ +### salt + +Recommended for use with the `password` attribute. BundleWrap will use 5000 rounds of SHA-512 on this salt and the provided password. + +
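+
+A sketch combining the three related attributes (values are made up):
+
+    users = {
+        "jdoe": {
+            'password': "hunter2",
+            'salt': "mmrq7we9rqfo",
+            'hash_method': 'sha512',
+        },
+    }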
+ +### shell + +Path to login shell executable. + +
+
+### uid
+
+Numerical user ID. It's your job to make sure it's unique. diff --git a/docs/content/misc/about.md b/docs/content/misc/about.md new file mode 100644 index 0000000..7497878 --- /dev/null +++ b/docs/content/misc/about.md @@ -0,0 +1,16 @@ +
+
+# About
+
+Development on BundleWrap started in July 2012, borrowing some ideas from [Bcfg2](http://bcfg2.org/). Some key features that are meant to set BundleWrap apart from other config management systems are:
+
+* decentralized architecture
+* pythonic and easily extendable
+* easy to get started with
+* true item-level parallelism (in addition to working on multiple nodes simultaneously, BundleWrap will continue to fix config files while installing a package on the same node)
+* very customizable item dependencies
+* collaboration features like [node locking](../guide/locks.md) (to prevent simultaneous applies to the same node) and hooks for chat notifications
+* built-in testing facility (`bw test`)
+* can be used as a library
+
+BundleWrap is a "pure" free software project licensed under the terms of the [GPLv3](http://www.gnu.org/licenses/gpl.html), with no *Enterprise Edition* or commercial support. diff --git a/docs/content/misc/alternatives.md b/docs/content/misc/alternatives.md new file mode 100644 index 0000000..08ccd8f --- /dev/null +++ b/docs/content/misc/alternatives.md @@ -0,0 +1,60 @@ +# Alternatives
+
+This page is an effort to compare BundleWrap to other config management systems. It is very hard to keep this information complete and up to date, so please feel free to raise issues or create pull requests if something is amiss.
+
+BundleWrap has the following properties that are unique to it or at least not common among other solutions:
+
+* server- and agent-less architecture
+* item-level parallelism to speed up convergence of complex nodes
+* interactive mode to review configuration as it is being applied
+* [Mako file templates](../items/file_templates)
+* verifies that each action taken actually fixed the item in question
+* verify mode to assess the state of your configuration without mutating it
+* useful and actionable error messages
+* can apply actions (and other items) prior to fixing an item (and only then)
+* built-in visualization of node configuration
+* nice [Python API](../guide/api.md)
+* designed to be mastered quickly and easily remembered
+* for better or worse: no commercial agenda/support
+* no support for non-Linux target nodes (BundleWrap itself can be run from Mac OS as well)
+
+
+## Ansible
+
+[Ansible](http://ansible.com) is very similar to BundleWrap in how it communicates with nodes. Both systems do not use server or agent processes, but SSH. Ansible can optionally use OpenSSH instead of a Python SSH implementation to speed up performance. On the other hand, BundleWrap will always use the Python implementation, but with multiple connections to each node. This should give BundleWrap a performance advantage on very complex systems with many items, since each connection can work on a different item simultaneously.
+
+To apply configuration, Ansible uploads pieces of code called modules to each node and runs them there. Many Ansible modules depend on the node having a Python 2.x interpreter installed. In some cases, third-party Python libraries are needed as well, increasing the footprint on the node. BundleWrap runs commands on the target node just as you would in an interactive SSH session. Most of the [commands needed](../guide/installation.md#requirements-for-managed-systems) by BundleWrap are provided by coreutils and should be present on all standard Linux systems.
+
+Ansible ships with loads of modules while BundleWrap will only give you the most needed primitives to work with. For example, we will not add an item type for remote downloads because you can easily build that yourself using an [action](../items/action.md) with `wget`.
+
+Ansible's playbooks roughly correspond to BundleWrap's bundles, but are written in YAML using a special playbook language. BundleWrap uses Python for this purpose, so if you know some basic Python you only need to learn the schema of the dictionaries you're building. This also means that you will never run into a problem the playbook language cannot solve. Anything you can do in Python, you can do in BundleWrap.
+
+While you can automate application deployments in BundleWrap, Ansible is much more capable in that regard as it combines config management and sophisticated deployment mechanisms (multi-stage, rolling updates).
+
+File templates in Ansible are [Jinja2](http://jinja2.pocoo.org), while BundleWrap offers both [Mako](http://makotemplates.org) and Jinja2.
+
+Ansible, Inc. offers paid support for Ansible and an optional web-based addon called [Ansible Tower](http://ansible.com/tower). No such offerings are available for BundleWrap.
+
+
+BCFG2
+-----
+
+BCFG2's bundles obviously were an inspiration for BundleWrap. One important difference is that BundleWrap's bundles are usually completely isolated and self-contained within their directory while BCFG2 bundles may need resources (e.g. file templates) from elsewhere in the repository.
+
+On a practical level BundleWrap prefers pure Python and Mako over the XML- and text-variants of Genshi used for bundle and file templating in BCFG2.
+
+And of course BCFG2 has a very traditional client/server model while BundleWrap runs only on the operator's computer.
+
+
+Chef
+----
+
+[Chef](http://www.getchef.com) has basically two modes of operation: the most widely used one involves a server component and the `chef-client` agent. The second option is `chef-solo`, which will apply configuration from a local repository to the node the repository is located on. BundleWrap supports neither of these modes and always applies configuration over SSH.
+
+Overall, Chef is harder to get into, but will scale to thousands of nodes.
+
+The community around Chef is quite large and probably the largest of all config management systems. This means lots of community-maintained cookbooks to choose from. BundleWrap does have a [plugin system](../repo/plugins.md) to provide almost anything in a repository, but there aren't many plugins to choose from yet.
+
+Chef is written in Ruby and uses the popular [ERB](http://www.kuwata-lab.com/erubis/) template language. BundleWrap is heavily invested in Python and offers support for Mako and Jinja2 templates.
+
+OpsCode offers paid support for Chef and SaaS hosting for the server component. [AWS OpsWorks](http://aws.amazon.com/opsworks/) also integrates Chef cookbooks. diff --git a/docs/content/misc/contributing.md b/docs/content/misc/contributing.md new file mode 100644 index 0000000..8ffed3d --- /dev/null +++ b/docs/content/misc/contributing.md @@ -0,0 +1,41 @@ +# Contributing
+
+We welcome all input and contributions to BundleWrap. If you've never done this sort of thing before, maybe check out [contribution-guide.org](http://www.contribution-guide.org). But don't be afraid to make mistakes, nobody expects your first contribution to be perfect. We'll gladly help you out.
+
+ +## Submitting bug reports + +Please use the [GitHub issue tracker](https://github.com/bundlewrap/bundlewrap/issues) and take a few minutes to look for existing reports of the same problem (open or closed!). + +
+If you've found a security issue or are not at all sure, just contact [trehn@bundlewrap.org](mailto:trehn@bundlewrap.org).
+ +
+ +## Contributing code + +
+Before working on new features, try reaching out to one of the core authors first. We are very concerned with keeping BundleWrap lean and not introducing bloat. If your idea is not a good fit for all or most BundleWrap users, it can still be included as a plugin.
+ +Here are the steps: + +1. Write your code. Awesome! +2. If you haven't already done so, please consider writing tests. Otherwise, someone else will have to do it for you. +3. Same goes for documentation. +4. Set up a [virtualenv](http://virtualenv.readthedocs.org/en/latest/) and run `pip install -r requirements.txt`. +5. Make sure you can connect to your localhost via `ssh` without using a password and that you are able to run `sudo`. +6. Run `tox`. +7. Review and sign the Copyright Assignment Agreement (CAA) by adding your name and email to the `AUTHORS` file. (This step can be skipped if your contribution is too small to be considered intellectual property, e.g. spelling fixes) +8. Open a pull request on [GitHub](https://github.com/bundlewrap/bundlewrap). +9. Feel great. Thank you. + +
+ +## Contributing documentation + +The process is essentially the same as detailed above for code contributions. You will find the docs in `docs/content/` and can preview them using `cd docs && mkdocs serve`. + +
+ +## Help + +If at any point you need help or are not sure what to do, just drop by in [#bundlewrap on Freenode](irc://chat.freenode.net/bundlewrap) or poke [@bundlewrap on Twitter](https://twitter.com/bundlewrap). diff --git a/docs/content/misc/faq.md b/docs/content/misc/faq.md new file mode 100644 index 0000000..0bb2013 --- /dev/null +++ b/docs/content/misc/faq.md @@ -0,0 +1,63 @@ +# FAQ + +## Technical + +### BundleWrap says an item failed to apply, what do I do now? + +Try running `bw apply -i nodename` to see which attribute of the item could not be fixed. If that doesn't tell you enough, try `bw --debug apply -i nodename` and look for the command BundleWrap is using to fix the item in question. Then try running that command yourself and check for any errors. + +
+ +### What happens when two people start applying configuration to the same node? + +BundleWrap uses a [locking mechanism](../guide/locks.md) to prevent collisions like this. + +
+ +### How can I have BundleWrap reload my services after config changes? + +See [canned actions](../repo/bundles.md#canned_actions) and [triggers](../repo/bundles.md#triggers). + +
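+
+A minimal sketch (file and service names are made up; this assumes a systemd-managed nginx):
+
+    files = {
+        "/etc/nginx/nginx.conf": {
+            'source': "nginx.conf",
+            'triggers': ["svc_systemd:nginx.service:reload"],
+        },
+    }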
+ +### Will BundleWrap keep track of package updates? + +No. BundleWrap will only care about whether a package is installed or not. Updates will have to be installed through a separate mechanism (I like to create an [action](../items/action.md) with the `interactive` attribute set to `True`). Selecting specific versions should be done through your package manager. + +
+ +### Is there a probing mechanism like Ohai? + +No. BundleWrap is meant to be very push-focused. The node should not have any say in what configuration it will receive. + +
+
+### Is BundleWrap secure?
+
+BundleWrap is more concerned with safety than security. Due to its design, it is possible for your coworkers to introduce malicious code into a BundleWrap repository that could compromise your machine. You should only use trusted repositories and plugins. We also recommend keeping an eye on the commit logs of your repos.
+
+ +## The BundleWrap Project + +### Why doesn't BundleWrap provide pre-built community bundles? + +In our experience, bundles for even the most common pieces of software always contain some opinionated bits specific to local infrastructure. Making bundles truly universal (e.g. in terms of supported Linux distributions) would mean a lot of bloat. And since local modifications are hard to reconcile with an upstream community repository, bundles would have to be very feature-complete to be useful to the majority of users, increasing bloat even more. + +Maintaining bundles and thus configuration for different pieces of software is therefore out of scope for the BundleWrap project. While it might seem tedious when you're getting started, with some practice, writing your own bundles will become both easy and precise in terms of infrastructure fit. + +
+
+### Why do contributors have to sign a Copyright Assignment Agreement?
+
+While it sounds scary, copyright assignment is used to improve the enforceability of the GPL. Even the FSF does it, [read their explanation why](http://www.gnu.org/licenses/why-assign.html). The agreement used by BundleWrap is from [harmonyagreements.org](http://harmonyagreements.org).
+
+If you're still concerned, please do not hesitate to contact [@trehn](https://twitter.com/trehn).
+
+ +### Isn't this all very similar to Ansible? + +Some parts are, but there are significant differences as well. Check out the [alternatives page](alternatives.md#ansible) for a writeup of the details. + +
diff --git a/docs/content/misc/glossary.md b/docs/content/misc/glossary.md new file mode 100644 index 0000000..5f96d11 --- /dev/null +++ b/docs/content/misc/glossary.md @@ -0,0 +1,55 @@ +# Glossary + +## action + +Actions are a special kind of item used for running shell commands during each `bw apply`. They allow you to do things that aren't persistent in nature. + +
+
+## apply
+
+An "apply" is what we call the process otherwise known as "converging": making the actual status quo on the node match the state described in your repository.
+
+ +## bundle + +A collection of items. Most of the time, you will create one bundle per application. For example, an Apache bundle will include the httpd service, the virtual host definitions and the apache2 package. + +
+ +## group + +Used for organizing your nodes. + +
+ +## hook + +[Hooks](../repo/hooks.md) can be used to run your own code automatically during various stages of BundleWrap operations. + +
+ +## item + +A single piece of configuration on a node, e.g. a file or an installed package. + +You might be interested in [this overview of item types](../repo/bundles.md#item_types). + +
+ +## lib + +[Libs](../repo/libs.md) are a way to store Python modules in your repository and make them accessible to your bundles and templates. + +
+ +## node + +A managed system, no matter if physical or virtual. + +
+ +## repo + +A repository is a directory with [some stuff](../repo/layout.md) in it that tells BundleWrap everything it needs to know about your infrastructure. diff --git a/docs/content/repo/bundles.md b/docs/content/repo/bundles.md new file mode 100644 index 0000000..cf8c912 --- /dev/null +++ b/docs/content/repo/bundles.md @@ -0,0 +1,278 @@ +
+# Bundles
+ +Bundles are subdirectories of the `bundles/` directory of your BundleWrap repository. + +# items.py + +Within each bundle, there may be a file called `items.py`. It defines any number of magic attributes that are automatically processed by BundleWrap. Each attribute is a dictionary mapping an item name (such as a file name) to a dictionary of attributes (e.g. file ownership information). + +A typical bundle might look like this: + + files = { + '/etc/hosts': { + 'owner': "root", + 'group': "root", + 'mode': "0664", + [...] + }, + } + + users = { + 'janedoe': { + 'home': "/home/janedoe", + 'shell': "/bin/zsh", + [...] + }, + 'johndoe': { + 'home': "/home/johndoe", + 'shell': "/bin/bash", + [...] + }, + } + +This bundle defines the attributes `files` and `users`. Within the `users` attribute, there are two `user` items. Each item maps its name to a dictionary that is understood by the specific kind of item. Below you will find a reference of all builtin item types and the attributes they understand. You can also [define your own item types](../guide/dev_item.md). + +
+
+## Item types
+
+This table lists all item types included in BundleWrap along with the bundle attributes they understand.
+
+| Type | Bundle attribute | Description |
+|------|------------------|-------------|
+| action | `actions` | Actions allow you to run commands on every `bw apply` |
+| directory | `directories` | Manages permissions and ownership for directories |
+| file | `files` | Manages contents, permissions, and ownership for files |
+| group | `groups` | Manages groups by wrapping `groupadd`, `groupmod` and `groupdel` |
+| pkg_apt | `pkg_apt` | Installs and removes packages with APT |
+| pkg_dnf | `pkg_dnf` | Installs and removes packages with dnf |
+| pkg_openbsd | `pkg_openbsd` | Installs and removes OpenBSD packages with `pkg_add`/`pkg_delete` |
+| pkg_pacman | `pkg_pacman` | Installs and removes packages with pacman |
+| pkg_pip | `pkg_pip` | Installs and removes Python packages with pip |
+| pkg_yum | `pkg_yum` | Installs and removes packages with yum |
+| pkg_zypper | `pkg_zypper` | Installs and removes packages with zypper |
+| postgres_db | `postgres_dbs` | Manages Postgres databases |
+| postgres_role | `postgres_roles` | Manages Postgres roles |
+| svc_openbsd | `svc_openbsd` | Starts and stops services with OpenBSD's rc |
+| svc_systemd | `svc_systemd` | Starts and stops services with systemd |
+| svc_systemv | `svc_systemv` | Starts and stops services with traditional System V init scripts |
+| svc_upstart | `svc_upstart` | Starts and stops services with Upstart |
+| symlink | `symlinks` | Manages symbolic links and their ownership |
+| user | `users` | Manages users by wrapping `useradd`, `usermod` and `userdel` |
+
+ +## Builtin item attributes + +There are also attributes that can be applied to any kind of item. + +
+
+### error_on_missing_fault
+
+By default, BundleWrap will simply skip an item instead of raising an error when a Fault used for an attribute on the item is unavailable. Set this attribute to `True` to raise an error instead. Faults are special objects used by `repo.vault` to [handle secrets](../guide/secrets.md). A Fault being unavailable can mean you're missing the secret key required to decrypt a secret you're trying to use as an item attribute value.
+
+Defaults to `False`.
+
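+
+A sketch of what this might look like (file path and secret name are made up):
+
+    files = {
+        "/etc/app/secret.conf": {
+            'content': repo.vault.password_for("app secret"),
+            'error_on_missing_fault': True,  # fail loudly instead of silently skipping
+        },
+    }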
+
+### needs
+
+One such attribute is `needs`. It allows for setting up dependencies between items. This is not something you will have to do very often, because there are already implicit dependencies between item types (e.g. all files depend on the users owning them). Here are two examples:
+
+    my_items = {
+        'item1': {
+            [...]
+            'needs': [
+                'file:/etc/foo.conf',
+            ],
+        },
+        'item2': {
+            [...]
+            'needs': [
+                'pkg_apt:',
+                'bundle:foo',
+            ],
+        },
+    }
+
+The first item (`item1`, specific attributes have been omitted) depends on a file called `/etc/foo.conf`, while `item2` depends on all APT packages being installed and every item in the foo bundle.
+
+ +### needed_by + +This attribute is an alternative way of defining dependencies. It works just like `needs`, but in the other direction. There are only three scenarios where you should use `needed_by` over `needs`: + +* if you need all items of a certain type to depend on something or +* if you need all items in a bundle to depend on something or +* if you need an item in a bundle you can't edit (e.g. because it's provided by a community-maintained [plugin](plugins.md)) to depend on something in your bundles + +
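+
+For example, this sketch (item and bundle names are made up) makes every item in the bundle "myapp" wait for a single action by declaring the relationship on the action itself:
+
+    actions = {
+        'provision_check': {
+            'command': "test -e /opt/provisioned",
+            'needed_by': ["bundle:myapp"],
+        },
+    }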
+ +### tags + +A list of strings to tag an item with. Tagging has no immediate effect in itself, but can be useful in a number of places. For example, you can add dependencies on all items with a given tag: + + pkg_apt = { + "mysql-server-{}".format(node.metadata.get('mysql_version', "5.5")): { + 'tags': ["provides-mysqld"], + }, + } + + svc_systemd = { + "myapp": { + 'needs': ["tag:provides-mysqld"], + }, + } + +In this simplified example we save ourselves from duplicating the logic that gets the current MySQL version from metadata (which is probably overkill here, but you might encounter more complex situations). + +
+
+### triggers and triggered
+
+In some scenarios, you may want to execute an [action](../items/action.md) only when an item is fixed (e.g. restart a daemon after a config file has changed or run `postmap` after updating an alias file). To do this, BundleWrap has the builtin attribute `triggers`. You can use it to point to any item that has its `triggered` attribute set to `True`. Such items will only be checked (or in the case of actions: run) if the triggering item is fixed (or a triggering action completes successfully).
+
+    files = {
+        '/etc/daemon.conf': {
+            [...]
+            'triggers': [
+                'action:restart_daemon',
+            ],
+        },
+    }
+
+    actions = {
+        'restart_daemon': {
+            'command': "service daemon restart",
+            'triggered': True,
+        },
+    }
+
+The above example will run `service daemon restart` every time BundleWrap successfully applies a change to `/etc/daemon.conf`. If an action is triggered multiple times, it will only be run once.
+
+Similar to `needed_by`, `triggered_by` can be used to define a `triggers` relationship from the opposite direction.
+
+
+### preceded_by
+
+Operates like `triggers`, but will apply the triggered item *before* the triggering item. Let's look at an example:
+
+    files = {
+        '/etc/example.conf': {
+            [...]
+            'preceded_by': [
+                'action:backup_example',
+            ],
+        },
+    }
+
+    actions = {
+        'backup_example': {
+            'command': "cp /etc/example.conf /etc/example.conf.bak",
+            'triggered': True,
+        },
+    }
+
+In this configuration, `/etc/example.conf` will always be backed up right before BundleWrap changes it (and only then). You would probably also want to set `cascade_skip` to `False` on the action so you can skip it in interactive mode when you're sure you don't need the backup copy.
+
+Similar to `needed_by`, `precedes` can be used to define a `preceded_by` relationship from the opposite direction.
+
+ +### unless + +Another builtin item attribute is `unless`. For example, it can be used to construct a one-off file item where BundleWrap will only create the file once, but won't check or modify its contents once it exists. + + files = { + "/path/to/file": { + [...] + "unless": "test -x /path/to/file", + }, + } + +This will run `test -x /path/to/file` before doing anything with the item. If the command returns 0, no action will be taken to "correct" the item. + +Another common use for `unless` is with actions that perform some sort of install operation. In this case, the `unless` condition makes sure the install operation is only performed when it is needed instead of every time you run `bw apply`. In scenarios like this you will probably want to set `cascade_skip` to `False` so that skipping the installation (because the thing is already installed) will not cause every item that depends on the installed thing to be skipped. Example: + + actions = { + 'download_thing': { + 'command': "wget http://example.com/thing.bin -O /opt/thing.bin && chmod +x /opt/thing.bin", + 'unless': "test -x /opt/thing.bin", + 'cascade_skip': False, + }, + 'run_thing': { + 'command': "/opt/thing.bin", + 'needs': ["action:download_thing"], + }, + } + +If `action:download_thing` would not set `cascade_skip` to `False`, `action:run_thing` would only be executed once: directly after the thing has been downloaded. On subsequent runs, `action:download_thing` will fail the `unless` condition and be skipped. This would also cause all items that depend on it to be skipped, including `action:run_thing`. + +
+
+### cascade_skip
+
+There are some situations where you don't want the default behavior of skipping everything that depends on a skipped item. That's where `cascade_skip` comes in. Set it to `False` and skipping an item won't skip those that depend on it. Note that items can be skipped
+
+* interactively or
+* because they haven't been triggered or
+* because one of their dependencies failed or
+* because they failed their `unless` condition or
+* because an [action](../items/action.md) had its `interactive` attribute set to `True` during a non-interactive run
+
+The following example will offer to run an `apt-get update` before installing a package, but continue to install the package even if the update is declined interactively.
+
+    actions = {
+        'apt_update': {
+            'cascade_skip': False,
+            'command': "apt-get update",
+        },
+    }
+
+    pkg_apt = {
+        'somepkg': {
+            'needs': ["action:apt_update"],
+        },
+    }
+
+`cascade_skip` defaults to `True`. However, if the item uses the `unless` attribute or is triggered, the default changes to `False`. Most of the time, this is what you'll want.
+
+ +## Canned actions + +Some item types have what we call "canned actions". Those are pre-defined actions attached directly to an item. Take a look at this example: + + svc_upstart = {'mysql': {'running': True}} + + files = { + "/etc/mysql/my.cnf": { + 'source': "my.cnf", + 'triggers': [ + "svc_upstart:mysql:reload", # this triggers the canned action + ], + }, + } + +Canned actions always have to be triggered in order to run. In the example above, a change in the file `/etc/mysql/my.cnf` will trigger the `reload` action defined by the [svc_upstart item type](../items/svc_upstart.md) for the mysql service. + +
+ +# metadata.py + +Alongside `items.py` you may create another file called `metadata.py`. It can be used to do advanced processing of the metadata you configured for your nodes and groups. Specifically, it allows each bundle to modify metadata before `items.py` is evaluated. To do that, you simply write any number of functions whose name doesn't start with an underscore and put them into `metadata.py`. + +
+Understand that any function will be used as a metadata processor, unless its name starts with an underscore. This is also true for imported functions, so you'll need to import them like this: `from module import func as _func`.
+
+These functions take the metadata dictionary generated so far as their single argument. You must then return the same dictionary with any modifications you need to make. These functions are called metadata processors. Every metadata processor from every bundle is called *repeatedly* with the latest metadata dictionary until no more changes are made to the metadata. Here's an example of what a `metadata.py` could look like (note that you have access to `repo` and `node` just like in `items.py`):
+
+    def my_metadata_processor(metadata):
+        metadata["foo"] = node.name
+        return metadata
+
+To avoid deadlocks when accessing other nodes' metadata from within a metadata processor, use `other_node.partial_metadata` instead of `other_node.metadata`. For the same reason, always use the `metadata` parameter to access the current node's metadata, never `node.metadata`.
diff --git a/docs/content/repo/groups.py.md b/docs/content/repo/groups.py.md new file mode 100644 index 0000000..7e69b0f --- /dev/null +++ b/docs/content/repo/groups.py.md @@ -0,0 +1,128 @@ +# groups.py + +This file lets you specify or dynamically build groups of [nodes](nodes.py.md) in your environment. + +As with `nodes.py`, you define your groups as a dictionary: + + groups = { + 'all': { + 'member_patterns': ( + r".*", + ), + }, + 'group1': { + 'members': ( + 'node1', + ), + }, + } + +All group attributes are optional. + +
+ +# Group attribute reference + +This section is a reference for all possible attributes you can define for a group: + + groups = { + 'group1': { + # THIS PART IS EXPLAINED HERE + 'bundles': ["bundle1", "bundle2"], + 'members': ["node1"], + 'members_add': lambda node: node.os == 'debian', + 'members_remove': lambda node: node.os == 'ubuntu', + 'member_patterns': [r"^cluster1\."], + 'metadata': {'foo': "bar"}, + 'os': 'linux', + 'subgroups': ["group2", "group3"], + 'subgroup_patterns': [r"^group.*pattern$"], + }, + } + +Note that many attributes from [nodes.py](nodes.py.md) (e.g. `bundles`) may also be set at group level, but aren't explicitly documented here again. + +
+ +## member_patterns + +A list of regular expressions. Node names matching these expressions will be added to the group members. + +Matches are determined using [the search() method](http://docs.python.org/2/library/re.html#re.RegexObject.search). + +
+ +## members + +A tuple or list of node names that belong to this group. + +
+ +## members_add and members_remove + +For these attributes you can provide a function that takes a node object as its only argument. The function must return a boolean. The function will be called once for every node in the repo. If `True`, this node will be added (`members_add`) to or removed (`members_remove`) from this group. + +
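+
+For example (a sketch with a made-up group name):
+
+    groups = {
+        'debian-nodes': {
+            'members_add': lambda node: node.os == 'debian',
+        },
+    }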
+Inside your function, you may query node attributes and groups, but you will not see groups or attributes added as a result of a different `members_add`/`members_remove` function. Only attributes and groups that have been set statically will be available. You can, however, remove a node with `members_remove` that you added with `members_add` (but not vice versa).
+
+You should also avoid using `node.metadata` here. Since metadata ultimately depends on group memberships, only metadata set in `nodes.py` will be returned here.
+ +
+
+## metadata
+
+A dictionary that will be accessible from each node's `node.metadata`. For each node, BundleWrap will merge the metadata of all of the node's groups first, then merge in the metadata from the node itself.
+
+Metadata is merged recursively by default, meaning nested dicts will overlay each other. Lists will be appended to each other, but not recursed into. In some cases, you want to overwrite instead of merge a piece of metadata. This is accomplished through the use of `bundlewrap.metadata.atomic()` and is best illustrated with an example:
+
+    from bundlewrap.metadata import atomic
+
+    groups = {
+        'all': {
+            'metadata': {
+                'interfaces': {
+                    'eth0': {},
+                },
+                'nameservers': ["8.8.8.8", "8.8.4.4"],
+                'ntp_servers': ["pool.ntp.org"],
+            },
+        },
+        'internal': {
+            'metadata': {
+                'interfaces': {
+                    'eth1': {},
+                },
+                'nameservers': atomic(["10.0.0.1", "10.0.0.2"]),
+                'ntp_servers': ["10.0.0.1", "10.0.0.2"],
+            },
+        },
+    }
+
+A node in both groups will end up with `eth0` *and* `eth1`.
+
+The nameservers however are overwritten, so that nodes that are in both the "all" *and* the "internal" group will only have the `10.0.0.x` ones, while nodes just in the "all" group will have the `8.8.x.x` nameservers.
+
+The NTP servers are appended: a node in both groups will have all three NTP servers.
+
+BundleWrap will consider group hierarchy when merging metadata. For example, it is possible to define a default nameserver for the "eu" group and then override it for the "eu.frankfurt" subgroup. The catch is that this only works for groups that are connected through a subgroup hierarchy. Independent groups will have their metadata merged in an undefined order. `bw test` will report conflicting metadata in independent groups as a metadata collision.
+ +
+Also see the documentation for `node.metadata` for more information.
+ +
+ +## subgroups + +A tuple or list of group names whose members should be recursively included in this group. + +
+
+## subgroup_patterns
+
+A list of regular expressions. Groups with names matching these expressions will be added to this group's subgroups.
+
+Matches are determined using [the search() method](http://docs.python.org/2/library/re.html#re.RegexObject.search).
+
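+
+For example (a sketch with made-up group names):
+
+    groups = {
+        'all-webservers': {
+            'subgroup_patterns': [r"^webservers-"],
+        },
+    }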
+ +## use_shadow_passwords + +See [node attribute documentation](nodes.py.md#use_shadow_passwords). May be overridden by subgroups or individual nodes. + +
diff --git a/docs/content/repo/hooks.md b/docs/content/repo/hooks.md new file mode 100644 index 0000000..a00f8d3 --- /dev/null +++ b/docs/content/repo/hooks.md @@ -0,0 +1,219 @@ +# Hooks + +Hooks enable you to execute custom code at certain points during a BundleWrap run. This is useful for integrating with other systems e.g. for team notifications, logging or statistics. + +To use hooks, you need to create a subdirectory in your repo called `hooks`. In that directory you can place an arbitrary number of Python source files. If those source files define certain functions, these functions will be called at the appropriate time. + + +## Example + +`hooks/my_awesome_notification.py`: + + from my_awesome_notification_system import post_message + + def node_apply_start(repo, node, interactive=False, **kwargs): + post_message("Starting apply on {}, everything is gonna be OK!".format(node.name)) + +
+Always define your hooks with `**kwargs` so we can pass in more information in future updates without breaking your hook.
+ +
+
+## Functions
+
+This is a list of all functions a hook file may implement.
+
+---
+
+**`action_run_start(repo, node, action, **kwargs)`**
+
+Called each time a `bw apply` command reaches a new action.
+
+`repo` The current repository (instance of `bundlewrap.repo.Repository`).
+
+`node` The current node (instance of `bundlewrap.node.Node`).
+
+`action` The current action.
+
+---
+
+**`action_run_end(repo, node, action, duration=None, status=None, **kwargs)`**
+
+Called each time a `bw apply` command completes processing an action.
+
+`repo` The current repository (instance of `bundlewrap.repo.Repository`).
+
+`node` The current node (instance of `bundlewrap.node.Node`).
+
+`action` The current action.
+
+`duration` How long the action was running (timedelta).
+
+`status` One of `bundlewrap.items.Item.STATUS_FAILED`, `bundlewrap.items.Item.STATUS_SKIPPED`, or `bundlewrap.items.Item.STATUS_ACTION_SUCCEEDED`.
+
+---
+
+**`apply_start(repo, target, nodes, interactive=False, **kwargs)`**
+
+Called when you start a `bw apply` command.
+
+`repo` The current repository (instance of `bundlewrap.repo.Repository`).
+
+`target` The group or node name you gave on the command line.
+
+`nodes` A list of node objects affected (list of `bundlewrap.node.Node` instances).
+
+`interactive` Indicates whether the apply is interactive or not.
+
+---
+
+**`apply_end(repo, target, nodes, duration=None, **kwargs)`**
+
+Called when a `bw apply` command completes.
+
+`repo` The current repository (instance of `bundlewrap.repo.Repository`).
+
+`target` The group or node name you gave on the command line.
+
+`nodes` A list of node objects affected (list of `bundlewrap.node.Node` instances).
+
+`duration` How long the apply took (timedelta).
+
+---
+
+**`item_apply_start(repo, node, item, **kwargs)`**
+
+Called each time a `bw apply` command reaches a new item.
+
+`repo` The current repository (instance of `bundlewrap.repo.Repository`).
+
+`node` The current node (instance of `bundlewrap.node.Node`).
+
+`item` The current item.
+
+---
+
+**`item_apply_end(repo, node, item, duration=None, status_code=None, status_before=None, status_after=None, **kwargs)`**
+
+Called each time a `bw apply` command completes processing an item.
+
+`repo` The current repository (instance of `bundlewrap.repo.Repository`).
+
+`node` The current node (instance of `bundlewrap.node.Node`).
+
+`item` The current item.
+
+`duration` How long the apply took (timedelta).
+
+`status_code` One of `bundlewrap.items.Item.STATUS_FAILED`, `bundlewrap.items.Item.STATUS_SKIPPED`, `bundlewrap.items.Item.STATUS_OK`, or `bundlewrap.items.Item.STATUS_FIXED`.
+
+`status_before` An instance of `bundlewrap.items.ItemStatus`.
+
+`status_after` See `status_before`.
+
+---
+
+**`node_apply_start(repo, node, interactive=False, **kwargs)`**
+
+Called each time a `bw apply` command reaches a new node.
+
+`repo` The current repository (instance of `bundlewrap.repo.Repository`).
+
+`node` The current node (instance of `bundlewrap.node.Node`).
+
+`interactive` `True` if this is an interactive apply run.
+
+---
+
+**`node_apply_end(repo, node, duration=None, interactive=False, result=None, **kwargs)`**
+
+Called each time a `bw apply` command finishes processing a node.
+
+`repo` The current repository (instance of `bundlewrap.repo.Repository`).
+
+`node` The current node (instance of `bundlewrap.node.Node`).
+
+`duration` How long the apply took (timedelta).
+
+`interactive` `True` if this was an interactive apply run.
+ +`result` An instance of `bundlewrap.node.ApplyResult`. + +--- + +**`node_run_start(repo, node, command, **kwargs)`** + +Called each time a `bw run` command reaches a new node. + +`repo` The current repository (instance of `bundlewrap.repo.Repository`). + +`node` The current node (instance of `bundlewrap.node.Node`). + +`command` The command that will be run on the node. + +--- + +**`node_run_end(repo, node, command, duration=None, return_code=None, stdout="", stderr="", **kwargs)`** + +Called each time a `bw run` command finishes on a node. + +`repo` The current repository (instance of `bundlewrap.repo.Repository`). + +`node` The current node (instance of `bundlewrap.node.Node`). + +`command` The command that was run on the node. + +`duration` How long it took to run the command (timedelta). + +`return_code` Return code of the remote command. + +`stdout` The captured stdout stream of the remote command. + +`stderr` The captured stderr stream of the remote command. + +--- + +**`run_start(repo, target, nodes, command, **kwargs)`** + +Called each time a `bw run` command starts. + +`repo` The current repository (instance of `bundlewrap.repo.Repository`). + +`target` The group or node name you gave on the command line. + +`nodes` A list of node objects affected (list of `bundlewrap.node.Node` instances). + +`command` The command that will be run on the nodes. + +--- + +**`run_end(repo, target, nodes, command, duration=None, **kwargs)`** + +Called each time a `bw run` command finishes. + +`repo` The current repository (instance of `bundlewrap.repo.Repository`). + +`target` The group or node name you gave on the command line. + +`nodes` A list of node objects affected (list of `bundlewrap.node.Node` instances). + +`command` The command that was run. + +`duration` How long it took to run the command on all nodes (timedelta). + +--- + +**`test(repo, **kwargs)`** + +Called at the end of a full `bw test`. + +`repo` The current repository (instance of `bundlewrap.repo.Repository`). + +--- + +**`test_node(repo, node, **kwargs)`** + +Called during `bw test` for each node. + +`repo` The current repository (instance of `bundlewrap.repo.Repository`). + +`node` The current node (instance of `bundlewrap.node.Node`). diff --git a/docs/content/repo/layout.md b/docs/content/repo/layout.md new file mode 100644 index 0000000..862f3d0 --- /dev/null +++ b/docs/content/repo/layout.md @@ -0,0 +1,41 @@ + + +Repository layout +================= + +A BundleWrap repository contains everything you need to construct the configuration for your systems. + +This page describes the various subdirectories and files that can exist inside a repo. +
+
+| File/directory | Description |
+| --- | --- |
+| `nodes.py` | This file tells BundleWrap what nodes (servers, VMs, ...) there are in your environment and lets you configure options such as hostnames. |
+| `groups.py` | This file allows you to organize your nodes into groups. |
+| `bundles/` | This required subdirectory contains the bulk of your configuration, organized into bundles of related items. |
+| `data/` | This optional subdirectory contains data files that are not generic enough to be included in bundles (which are meant to be shareable). |
+| `hooks/` | This optional subdirectory contains hooks you can use to act on certain events when using BundleWrap. |
+| `items/` | This optional subdirectory contains the code for your custom item types. |
+| `libs/` | This optional subdirectory contains reusable custom code for your bundles. |
diff --git a/docs/content/repo/libs.md b/docs/content/repo/libs.md new file mode 100644 index 0000000..353a0b3 --- /dev/null +++ b/docs/content/repo/libs.md @@ -0,0 +1,9 @@ + + +# Custom code + +The `libs/` subdirectory of your repository provides a convenient place to put reusable code used throughout your bundles and hooks. + +A Python module called `example.py` placed in this directory will be available as `repo.libs.example` wherever you have access to a `bundlewrap.repo.Repository` object. In `nodes.py` and `groups.py`, you can do the same thing with just `libs.example`. + +
Only single files, no subdirectories or packages, are supported at the moment.
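+
+For example, a minimal sketch (the module name `example.py` and the `fqdn` helper are made up for illustration):
+
+    # libs/example.py
+    def fqdn(name):
+        # turn a short node name into a DNS name
+        return name + ".example.com"
+
+Then, in `nodes.py`:
+
+    nodes = {
+        "node-1": {
+            'hostname': libs.example.fqdn("node-1"),
+        },
+    }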
diff --git a/docs/content/repo/nodes.py.md b/docs/content/repo/nodes.py.md new file mode 100644 index 0000000..44aef45 --- /dev/null +++ b/docs/content/repo/nodes.py.md @@ -0,0 +1,133 @@ +# nodes.py + +This file lets you specify or dynamically build a list of nodes in your environment. + +All you have to do here is define a Python dictionary called `nodes`. It should look something like this: + + nodes = { + "node-1": { + 'hostname': "node-1.example.com", + }, + } + + + +With BundleWrap, the DNS name and the internal identifier for a node ("node-1" in this case) are two separate things. + +All fields for a node (including `hostname`) are optional. If you don't give one, BundleWrap will attempt to use the internal identifier to connect to a node: + + nodes = { + "node-1.example.com": {}, + } + +
+ +# Dynamic node list + +You are not confined to the static way of defining a node list as shown above. You can also assemble the `nodes` dictionary dynamically: + + def get_my_nodes_from_ldap(): + [...] + return ldap_nodes + + nodes = get_my_nodes_from_ldap() + +
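+Any data source works, as long as the result is a dictionary. Here is a minimal sketch (the `nodes.json` filename is an assumption) that builds the node list from a JSON file instead of LDAP:
+
+    from json import load
+
+    # nodes.json maps node names to their attribute dictionaries
+    with open("nodes.json") as f:
+        nodes = load(f)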
+ +# Node attribute reference + +This section is a reference for all possible attributes you can define for a node: + + nodes = { + 'node-1': { + # THIS PART IS EXPLAINED HERE + }, + } + +All attributes can also be set at the group level, unless noted otherwise. + +
+ +## Regular attributes + +### bundles + +A list of [bundle names](bundles.md) to be assigned to this node. Bundles set at [group level](groups.py.md) will be added. + +
+ +### dummy + +Set this to `True` to prevent BundleWrap from creating items for and connecting to this node. This is useful for unmanaged nodes: you can still assign bundles and metadata to them just like regular nodes and access that data from managed nodes (e.g. for monitoring). +
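+For example, a sketch of a hypothetical unmanaged device (the metadata keys are made up):
+
+    nodes = {
+        "switch-1": {
+            'dummy': True,
+            'metadata': {'ip': "10.0.0.2"},
+        },
+    }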
+ +### hostname + +A string used as a DNS name when connecting to this node. May also be an IP address. + +
The username and SSH private key used to connect to the node cannot be configured in BundleWrap. If you need to customize those, do so in your ~/.ssh/config; BundleWrap will honor it.
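+For example, a sketch of an entry in `~/.ssh/config` (the user and key names are made up):
+
+    Host node-1.example.com
+        User deploy
+        IdentityFile ~/.ssh/deploy_key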
+ +Cannot be set at group level. + + +### metadata + +This can be a dictionary of arbitrary data (some type restrictions apply). You can access it from your templates as `node.metadata`. Use this to attach custom data (such as a list of IP addresses that should be configured on the target node) to the node. Note that you can also define metadata at the [group level](groups.py.md#metadata), but node metadata has higher priority. + +You are restricted to using only the following types in metadata: + +* `dict` +* `list` +* `tuple` +* `set` +* `bool` +* `text` / `unicode` +* `bytes` / `str` (only if decodable into text using UTF-8) +* `int` +* `None` +* `bundlewrap.utils.Fault` + +
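+A short sketch combining several of these types in `nodes.py` (all keys are made up; `vault.password_for()` returns a `bundlewrap.utils.Fault`):
+
+    nodes = {
+        "node-1": {
+            'metadata': {
+                'interfaces': {
+                    'eth0': {'ips': ["10.0.0.1"]},
+                },
+                'backup': True,
+                'secret': vault.password_for("node-1 secret"),
+            },
+        },
+    }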
See the documentation for [group.metadata](groups.py.md#metadata) for more details.
+ +
+ +### os + +Defaults to `"linux"`. + +A list of supported OSes can be obtained with `bw debug -n ANY_NODE_NAME -c "print(node.OS_KNOWN)"`. + +
+ +### os_version + +Set this to your OS version. Note that it must be a tuple of integers, e.g. if you're running Ubuntu 16.04 LTS, it should be `(16, 4)`. + +Tuples of integers can be used for easy comparison of versions: `(12, 4) < (16, 4)` + +
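+For instance, a sketch of such a comparison in a Mako template (the config line itself is made up):
+
+    % if node.os_version >= (16, 4):
+    new_feature = enabled
+    % endif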
+ +## OS compatibility overrides + +### cmd_wrapper_outer + +Used whenever a command needs to be run on a node. Defaults to `"sudo sh -c {}"`. `{}` will be replaced by the quoted command to be run (after `cmd_wrapper_inner` has been applied). + +You will need to override this if you're not using `sudo` to gain root privileges on the node (e.g. if you're using `doas`). +
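+For example, a sketch of a node using `doas` (assuming `doas` on the node is configured to allow this):
+
+    nodes = {
+        "node-1": {
+            'cmd_wrapper_outer': "doas sh -c {}",
+        },
+    }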
+ +### cmd_wrapper_inner + +Used whenever a command needs to be run on a node. Defaults to `"export LANG=C; {}"`. `{}` will be replaced by the command to be run. + +You will need to override this if the shell on your node sets environment variables differently. + +
+ +### use_shadow_passwords + +
Changing this setting will affect the security of the target system. Only do this for legacy systems that don't support shadow passwords.
This setting will affect how the [user](../items/user.md) item operates. If set to `False`, password hashes will be written directly to `/etc/passwd` and thus be accessible to any user on the system. If the OS of the node is set to "openbsd", this setting has no effect, as `master.shadow` is always used. diff --git a/docs/content/repo/plugins.md b/docs/content/repo/plugins.md new file mode 100644 index 0000000..33dea97 --- /dev/null +++ b/docs/content/repo/plugins.md @@ -0,0 +1,39 @@ +# Plugins + +The plugin system in BundleWrap is an easy way of integrating third-party code into your repository. +
While plugins are subject to some superficial code review by BundleWrap developers before being accepted, we cannot make any guarantees as to the quality and trustworthiness of plugins. Always do your due diligence before running third-party code.
+ +
+ +## Finding plugins + +It's as easy as `bw repo plugin search <term>`. Or you can browse [plugins.bundlewrap.org](http://plugins.bundlewrap.org). +
+ +## Installing plugins + +You probably guessed it: `bw repo plugin install <plugin>` + +Installing the first plugin in your repo will create a file called `plugins.json`. You should commit this file (and, of course, any files installed by the plugin) to version control. +
Avoid editing files provided by plugins at all costs. Local modifications will prevent future updates to the plugin.
+ +
+ +## Updating plugins + +You can update all installed plugins with this command: `bw repo plugin update` + +
+ +## Removing a plugin + +`bw repo plugin remove <plugin>` +
+ +## Writing your own + +See the [guide on publishing your own plugins](../guide/dev_plugin.md). diff --git a/docs/content/repo/requirements.txt.md b/docs/content/repo/requirements.txt.md new file mode 100644 index 0000000..c1ecc5a --- /dev/null +++ b/docs/content/repo/requirements.txt.md @@ -0,0 +1,13 @@ + + +# requirements.txt + +This optional file can be used to ensure minimum required versions of BundleWrap and other Python packages on every machine that uses a repository. + +`bw repo create` will initially add your current version of BundleWrap: + +
bundlewrap>=2.4.0
+ +You can add more packages as you like (you do not have to specify a version for each one); just put each package on a separate line. When someone then tries to use your repo without one of those packages, BundleWrap will exit early with a friendly error message: +
! Python package 'foo' is listed in requirements.txt, but wasn't found. You probably have to install it with `pip install foo`.
diff --git a/docs/mkdocs.yml b/docs/mkdocs.yml new file mode 100644 index 0000000..50faecd --- /dev/null +++ b/docs/mkdocs.yml @@ -0,0 +1,57 @@ +site_name: BundleWrap +docs_dir: content +site_dir: build +theme: cinder +repo_url: "https://github.com/bundlewrap/bundlewrap" +remote_name: github +copyright: "BundleWrap is published under the GPL license" +google_analytics: ['UA-33891245-2', 'docs.bundlewrap.org'] +pages: +- : index.md +- Guides: + - Quickstart: guide/quickstart.md + - Installation: guide/installation.md + - CLI: guide/cli.md + - Environment Variables: guide/env.md + - File templates: guide/item_file_templates.md + - Handling secrets: guide/secrets.md + - Locking: guide/locks.md + - Custom items: guide/dev_item.md + - Writing plugins: guide/dev_plugin.md + - Python API: guide/api.md + - OS compatibility: guide/os_compatibility.md + - Migrating to 2.0: guide/migrate_12.md +- Repository: + - Overview: repo/layout.md + - nodes.py: repo/nodes.py.md + - groups.py: repo/groups.py.md + - requirements.txt: repo/requirements.txt.md + - bundles/: repo/bundles.md + - hooks/: repo/hooks.md + - libs/: repo/libs.md + - Plugins: repo/plugins.md +- Items: + - action: items/action.md + - directory: items/directory.md + - file: items/file.md + - group: items/group.md + - pkg_apt: items/pkg_apt.md + - pkg_dnf: items/pkg_dnf.md + - pkg_pacman: items/pkg_pacman.md + - pkg_pip: items/pkg_pip.md + - pkg_yum: items/pkg_yum.md + - pkg_zypper: items/pkg_zypper.md + - postgres_db: items/postgres_db.md + - postgres_role: items/postgres_role.md + - svc_openbsd: items/svc_openbsd.md + - svc_systemd: items/svc_systemd.md + - svc_systemv: items/svc_systemv.md + - svc_upstart: items/svc_upstart.md + - symlink: items/symlink.md + - user: items/user.md +- Misc: + - About: misc/about.md + - Glossary: misc/glossary.md + - FAQ: misc/faq.md + - Alternatives: misc/alternatives.md + - Contributing: misc/contributing.md diff --git a/requirements.txt b/requirements.txt new file mode 100644 index 0000000..7692e66 --- /dev/null +++ b/requirements.txt @@ -0,0 +1,5 @@ +# deps in this file are for local dev purposes only +mkdocs +mkdocs-cinder +pytest +wheel diff --git a/setup.cfg b/setup.cfg new file mode 100644 index 0000000..7449aef --- /dev/null +++ b/setup.cfg @@ -0,0 +1,11 @@ +[flake8] +max-line-length = 100 +max-complexity = 10 + +[tool:pytest] +python_files=*.py +python_classes=Test +python_functions=test_* + +[bdist_wheel] +universal = 1 diff --git a/setup.py b/setup.py new file mode 100644 index 0000000..645aa7d --- /dev/null +++ b/setup.py @@ -0,0 +1,56 @@ +from sys import version_info + +from setuptools import find_packages, setup + + +dependencies = [ + "cryptography", + "Jinja2", + "Mako", + "passlib", + "requests >= 1.0.0", +] +if version_info < (3, 2, 0): + dependencies.append("futures") + +setup( + name="bundlewrap", + version="2.12.2", + description="Config management with Python", + long_description=( + "By allowing for easy and low-overhead config management, BundleWrap fills the gap between complex deployments using Chef or Puppet and old school system administration over SSH.\n" + "While most other config management systems rely on a client-server architecture, BundleWrap works off a repository cloned to your local machine. It then automates the process of SSHing into your servers and making sure everything is configured the way it's supposed to be. You won't have to install anything on managed servers." 
+ ), + author="Torsten Rehn", + author_email="torsten@rehn.email", + license="GPLv3", + url="http://bundlewrap.org", + packages=find_packages(), + entry_points={ + 'console_scripts': [ + "bw=bundlewrap.cmdline:main", + ], + }, + keywords=["configuration", "config", "management"], + classifiers=[ + "Development Status :: 5 - Production/Stable", + "Environment :: Console", + "Intended Audience :: System Administrators", + "License :: OSI Approved :: GNU General Public License v3 (GPLv3)", + "Natural Language :: English", + "Operating System :: POSIX :: Linux", + "Programming Language :: Python", + "Programming Language :: Python :: 2.7", + "Programming Language :: Python :: 3.3", + "Programming Language :: Python :: 3.4", + "Programming Language :: Python :: 3.5", + "Programming Language :: Python :: 3.6", + "Topic :: System :: Installation/Setup", + "Topic :: System :: Systems Administration", + ], + install_requires=dependencies, + extras_require={ # used for wheels + ':python_version=="2.7"': ["futures"], + }, + zip_safe=False, +) diff --git a/tests/integration/bw_adhoc_nodes.py b/tests/integration/bw_adhoc_nodes.py new file mode 100644 index 0000000..610e008 --- /dev/null +++ b/tests/integration/bw_adhoc_nodes.py @@ -0,0 +1,57 @@ +from os.path import exists, join + +from bundlewrap.utils.testing import host_os, make_repo, run + + +def test_apply(tmpdir): + make_repo( + tmpdir, + bundles={ + "bundle1": { + 'files': { + join(str(tmpdir), "test"): { + 'content': "test", + }, + }, + }, + }, + groups={ + "adhoc-localhost": { + 'bundles': ["bundle1"], + 'member_patterns': ["localhost"], + 'os': host_os(), + }, + }, + ) + + assert not exists(join(str(tmpdir), "test")) + stdout, stderr, rcode = run("bw -A apply localhost", path=str(tmpdir)) + assert rcode == 0 + assert exists(join(str(tmpdir), "test")) + + +def test_apply_fail(tmpdir): + make_repo( + tmpdir, + bundles={ + "bundle1": { + 'files': { + join(str(tmpdir), "test"): { + 'content': "test", + }, + }, + }, + }, + groups={ + "adhoc-localhost": { + 'bundles': ["bundle1"], + 'member_patterns': ["localhost"], + 'os': host_os(), + }, + }, + ) + + assert not exists(join(str(tmpdir), "test")) + stdout, stderr, rcode = run("bw apply localhost", path=str(tmpdir)) + assert rcode == 1 + assert not exists(join(str(tmpdir), "test")) diff --git a/tests/integration/bw_apply_actions.py b/tests/integration/bw_apply_actions.py new file mode 100644 index 0000000..e02423e --- /dev/null +++ b/tests/integration/bw_apply_actions.py @@ -0,0 +1,48 @@ +# -*- coding: utf-8 -*- +from __future__ import unicode_literals + +from bundlewrap.utils.testing import host_os, make_repo, run + + +def test_action_success(tmpdir): + make_repo( + tmpdir, + bundles={ + "test": { + 'actions': { + "success": { + 'command': "true", + }, + }, + }, + }, + nodes={ + "localhost": { + 'bundles': ["test"], + 'os': host_os(), + }, + }, + ) + run("bw apply localhost", path=str(tmpdir)) + + +def test_action_fail(tmpdir): + make_repo( + tmpdir, + bundles={ + "test": { + 'actions': { + "failure": { + 'command': "false", + }, + }, + }, + }, + nodes={ + "localhost": { + 'bundles': ["test"], + 'os': host_os(), + }, + }, + ) + run("bw apply localhost", path=str(tmpdir)) diff --git a/tests/integration/bw_apply_autoskip.py b/tests/integration/bw_apply_autoskip.py new file mode 100644 index 0000000..aa3c931 --- /dev/null +++ b/tests/integration/bw_apply_autoskip.py @@ -0,0 +1,148 @@ +# -*- coding: utf-8 -*- +from __future__ import unicode_literals + +from os.path import exists, join + +from 
bundlewrap.utils.testing import host_os, make_repo, run + + +def test_skip_bundle(tmpdir): + make_repo( + tmpdir, + bundles={ + "test": { + 'files': { + join(str(tmpdir), "foo"): { + 'content': "nope", + }, + }, + }, + }, + nodes={ + "localhost": { + 'bundles': ["test"], + 'os': host_os(), + }, + }, + ) + run("bw apply --skip bundle:test localhost", path=str(tmpdir)) + assert not exists(join(str(tmpdir), "foo")) + + +def test_skip_group(tmpdir): + make_repo( + tmpdir, + bundles={ + "test": { + 'files': { + join(str(tmpdir), "foo"): { + 'content': "nope", + }, + }, + }, + }, + nodes={ + "localhost": { + 'bundles': ["test"], + 'os': host_os(), + }, + }, + groups={ + "foo": {'members': ["localhost"]}, + }, + ) + run("bw apply --skip group:foo localhost", path=str(tmpdir)) + assert not exists(join(str(tmpdir), "foo")) + + +def test_skip_id(tmpdir): + make_repo( + tmpdir, + bundles={ + "test": { + 'files': { + join(str(tmpdir), "foo"): { + 'content': "nope", + }, + }, + }, + }, + nodes={ + "localhost": { + 'bundles': ["test"], + 'os': host_os(), + }, + }, + ) + run("bw apply --skip file:{} localhost".format(join(str(tmpdir), "foo")), path=str(tmpdir)) + assert not exists(join(str(tmpdir), "foo")) + + +def test_skip_node(tmpdir): + make_repo( + tmpdir, + bundles={ + "test": { + 'files': { + join(str(tmpdir), "foo"): { + 'content': "nope", + }, + }, + }, + }, + nodes={ + "localhost": { + 'bundles': ["test"], + 'os': host_os(), + }, + }, + ) + run("bw apply --skip node:localhost localhost", path=str(tmpdir)) + assert not exists(join(str(tmpdir), "foo")) + + +def test_skip_tag(tmpdir): + make_repo( + tmpdir, + bundles={ + "test": { + 'files': { + join(str(tmpdir), "foo"): { + 'content': "nope", + 'tags': ["nope"], + }, + }, + }, + }, + nodes={ + "localhost": { + 'bundles': ["test"], + 'os': host_os(), + }, + }, + ) + run("bw apply --skip tag:nope localhost", path=str(tmpdir)) + assert not exists(join(str(tmpdir), "foo")) + + +def test_skip_type(tmpdir): + make_repo( + tmpdir, + bundles={ + "test": { + 'files': { + join(str(tmpdir), "foo"): { + 'content': "nope", + }, + }, + }, + }, + nodes={ + "localhost": { + 'bundles': ["test"], + 'os': host_os(), + }, + }, + ) + run("bw apply --skip file: localhost", path=str(tmpdir)) + assert not exists(join(str(tmpdir), "foo")) diff --git a/tests/integration/bw_apply_directories.py b/tests/integration/bw_apply_directories.py new file mode 100644 index 0000000..347dc56 --- /dev/null +++ b/tests/integration/bw_apply_directories.py @@ -0,0 +1,100 @@ +# -*- coding: utf-8 -*- +from __future__ import unicode_literals + +from base64 import b64encode +from os import mkdir +from os.path import exists, join + +from bundlewrap.utils.testing import host_os, make_repo, run + + +def test_purge(tmpdir): + make_repo( + tmpdir, + bundles={ + "test": { + 'files': { + join(str(tmpdir), "purgedir", "managed_file"): { + 'content': "content", + }, + join(str(tmpdir), "purgedir", "subdir1", "managed_file"): { + 'content': "content", + }, + }, + 'directories': { + join(str(tmpdir), "purgedir"): { + 'purge': True, + }, + }, + }, + }, + nodes={ + "localhost": { + 'bundles': ["test"], + 'os': host_os(), + }, + }, + ) + + mkdir(join(str(tmpdir), "purgedir")) + mkdir(join(str(tmpdir), "purgedir", "subdir2")) + mkdir(join(str(tmpdir), "purgedir", "subdir3")) + + with open(join(str(tmpdir), "purgedir", "unmanaged_file"), 'w') as f: + f.write("content") + with open(join(str(tmpdir), "purgedir", "subdir3", "unmanaged_file"), 'w') as f: + f.write("content") + + run("bw apply localhost", 
path=str(tmpdir)) + + assert not exists(join(str(tmpdir), "purgedir", "unmanaged_file")) + assert not exists(join(str(tmpdir), "purgedir", "subdir3", "unmanaged_file")) + assert not exists(join(str(tmpdir), "purgedir", "subdir2")) + assert exists(join(str(tmpdir), "purgedir", "subdir1", "managed_file")) + assert exists(join(str(tmpdir), "purgedir", "managed_file")) + + +def test_purge_special_chars(tmpdir): + make_repo( + tmpdir, + bundles={ + "test": { + 'files': { + join(str(tmpdir), "purgedir", "mänäged_file"): { + 'content': "content", + }, + join(str(tmpdir), "purgedir", "managed_`id`_file"): { + 'content': "content", + }, + }, + 'directories': { + join(str(tmpdir), "purgedir"): { + 'purge': True, + }, + }, + }, + }, + nodes={ + "localhost": { + 'bundles': ["test"], + 'os': host_os(), + }, + }, + ) + + mkdir(join(str(tmpdir), "purgedir")) + + with open(join(str(tmpdir), "purgedir", "unmänäged_file"), 'w') as f: + f.write("content") + with open(join(str(tmpdir), "purgedir", "unmanaged_`uname`_file"), 'w') as f: + f.write("content") + with open(join(str(tmpdir), "purgedir", "unmanaged_:'_file"), 'w') as f: + f.write("content") + + run("bw apply localhost", path=str(tmpdir)) + + assert not exists(join(str(tmpdir), "purgedir", "unmänäged_file")) + assert not exists(join(str(tmpdir), "purgedir", "unmanaged_`uname`_file")) + assert not exists(join(str(tmpdir), "purgedir", "unmanaged_:'_file")) + assert exists(join(str(tmpdir), "purgedir", "mänäged_file")) + assert exists(join(str(tmpdir), "purgedir", "managed_`id`_file")) diff --git a/tests/integration/bw_apply_files.py b/tests/integration/bw_apply_files.py new file mode 100644 index 0000000..43188dd --- /dev/null +++ b/tests/integration/bw_apply_files.py @@ -0,0 +1,218 @@ +# -*- coding: utf-8 -*- +from __future__ import unicode_literals + +from base64 import b64encode +from os.path import exists, join + +from bundlewrap.utils.testing import host_os, make_repo, run + + +def test_any_content_create(tmpdir): + make_repo( + tmpdir, + bundles={ + "test": { + 'files': { + join(str(tmpdir), "foo"): { + 'content_type': 'any', + }, + }, + }, + }, + nodes={ + "localhost": { + 'bundles': ["test"], + 'os': host_os(), + }, + }, + ) + + run("bw apply localhost", path=str(tmpdir)) + with open(join(str(tmpdir), "foo"), 'rb') as f: + content = f.read() + assert content == b"" + + +def test_any_content_exists(tmpdir): + make_repo( + tmpdir, + bundles={ + "test": { + 'files': { + join(str(tmpdir), "foo"): { + 'content_type': 'any', + }, + }, + }, + }, + nodes={ + "localhost": { + 'bundles': ["test"], + 'os': host_os(), + }, + }, + ) + with open(join(str(tmpdir), "foo"), 'wb') as f: + f.write(b"existing content") + + run("bw apply localhost", path=str(tmpdir)) + with open(join(str(tmpdir), "foo"), 'rb') as f: + content = f.read() + assert content == b"existing content" + + +def test_binary_inline_content(tmpdir): + make_repo( + tmpdir, + bundles={ + "test": { + 'files': { + join(str(tmpdir), "foo.bin"): { + 'content_type': 'base64', + 'content': b64encode("ö".encode('latin-1')), + }, + }, + }, + }, + nodes={ + "localhost": { + 'bundles': ["test"], + 'os': host_os(), + }, + }, + ) + run("bw apply localhost", path=str(tmpdir)) + with open(join(str(tmpdir), "foo.bin"), 'rb') as f: + content = f.read() + assert content.decode('latin-1') == "ö" + + +def test_binary_template_content(tmpdir): + make_repo( + tmpdir, + bundles={ + "test": { + 'files': { + join(str(tmpdir), "foo.bin"): { + 'encoding': 'latin-1', + }, + }, + }, + }, + nodes={ + "localhost": { + 
'bundles': ["test"], + 'os': host_os(), + }, + }, + ) + with open(join(str(tmpdir), "bundles", "test", "files", "foo.bin"), 'wb') as f: + f.write("ö".encode('utf-8')) + + run("bw apply localhost", path=str(tmpdir)) + with open(join(str(tmpdir), "foo.bin"), 'rb') as f: + content = f.read() + assert content.decode('latin-1') == "ö" + + +def test_delete(tmpdir): + with open(join(str(tmpdir), "foo"), 'w') as f: + f.write("foo") + make_repo( + tmpdir, + bundles={ + "test": { + 'files': { + join(str(tmpdir), "foo"): { + 'delete': True, + }, + }, + }, + }, + nodes={ + "localhost": { + 'bundles': ["test"], + 'os': host_os(), + }, + }, + ) + run("bw apply localhost", path=str(tmpdir)) + assert not exists(join(str(tmpdir), "foo")) + + +def test_mako_template_content(tmpdir): + make_repo( + tmpdir, + bundles={ + "test": { + 'files': { + join(str(tmpdir), "foo"): { + 'content_type': 'mako', + 'content': "${node.name}", + }, + }, + }, + }, + nodes={ + "localhost": { + 'bundles': ["test"], + 'os': host_os(), + }, + }, + ) + run("bw apply localhost", path=str(tmpdir)) + with open(join(str(tmpdir), "foo"), 'rb') as f: + content = f.read() + assert content == b"localhost" + + +def test_mako_template_content_with_secret(tmpdir): + make_repo( + tmpdir, + bundles={ + "test": { + 'files': { + join(str(tmpdir), "foo"): { + 'content_type': 'mako', + 'content': "${repo.vault.password_for('testing')}", + }, + }, + }, + }, + nodes={ + "localhost": { + 'bundles': ["test"], + 'os': host_os(), + }, + }, + ) + run("bw apply localhost", path=str(tmpdir)) + with open(join(str(tmpdir), "foo"), 'rb') as f: + content = f.read() + assert content == b"faCTT76kagtDuZE5wnoiD1CxhGKmbgiX" + + +def test_text_template_content(tmpdir): + make_repo( + tmpdir, + bundles={ + "test": { + 'files': { + join(str(tmpdir), "foo"): { + 'content_type': 'text', + 'content': "${node.name}", + }, + }, + }, + }, + nodes={ + "localhost": { + 'bundles': ["test"], + 'os': host_os(), + }, + }, + ) + run("bw apply localhost", path=str(tmpdir)) + with open(join(str(tmpdir), "foo"), 'rb') as f: + content = f.read() + assert content == b"${node.name}" diff --git a/tests/integration/bw_apply_precedes.py b/tests/integration/bw_apply_precedes.py new file mode 100644 index 0000000..b33ebf0 --- /dev/null +++ b/tests/integration/bw_apply_precedes.py @@ -0,0 +1,43 @@ +# -*- coding: utf-8 -*- +from __future__ import unicode_literals +from os.path import join + +from bundlewrap.utils.testing import host_os, make_repo, run + + +def test_precedes(tmpdir): + make_repo( + tmpdir, + bundles={ + "test": { + 'files': { + join(str(tmpdir), "file"): { + 'content': "1\n", + 'triggered': True, + 'precedes': ["tag:tag1"], + }, + }, + 'actions': { + "action2": { + 'command': "echo 2 >> {}".format(join(str(tmpdir), "file")), + 'tags': ["tag1"], + }, + "action3": { + 'command': "echo 3 >> {}".format(join(str(tmpdir), "file")), + 'tags': ["tag1"], + 'needs': ["action:action2"], + }, + }, + }, + }, + nodes={ + "localhost": { + 'bundles': ["test"], + 'os': host_os(), + }, + }, + ) + run("bw apply localhost", path=str(tmpdir)) + with open(join(str(tmpdir), "file")) as f: + content = f.read() + assert content == "1\n2\n3\n" diff --git a/tests/integration/bw_apply_secrets.py b/tests/integration/bw_apply_secrets.py new file mode 100644 index 0000000..c8eda60 --- /dev/null +++ b/tests/integration/bw_apply_secrets.py @@ -0,0 +1,226 @@ +# -*- coding: utf-8 -*- +from __future__ import unicode_literals + +from os.path import exists, join + +from bundlewrap.utils.testing import host_os, 
make_repo, run + + +def test_fault_content(tmpdir): + make_repo( + tmpdir, + bundles={ + "test": {}, + }, + nodes={ + "localhost": { + 'bundles': ["test"], + 'os': host_os(), + }, + }, + ) + + with open(join(str(tmpdir), "bundles", "test", "items.py"), 'w') as f: + f.write(""" +files = {{ + "{}": {{ + 'content': repo.vault.password_for("test"), + }}, +}} +""".format(join(str(tmpdir), "secret"))) + + run("bw apply localhost", path=str(tmpdir)) + with open(join(str(tmpdir), "secret")) as f: + content = f.read() + assert content == "sQDdTXu5OmCki8gdGgYdfTxooevckXcB" + + +def test_fault_content_mako(tmpdir): + make_repo( + tmpdir, + bundles={ + "test": { + 'files': { + join(str(tmpdir), "secret"): { + 'content': "${repo.vault.password_for('test')}", + 'content_type': 'mako', + }, + }, + }, + }, + nodes={ + "localhost": { + 'bundles': ["test"], + 'os': host_os(), + }, + }, + ) + + run("bw apply localhost", path=str(tmpdir)) + with open(join(str(tmpdir), "secret")) as f: + content = f.read() + assert content == "sQDdTXu5OmCki8gdGgYdfTxooevckXcB" + + +def test_fault_content_mako_metadata(tmpdir): + make_repo( + tmpdir, + bundles={ + "test": { + 'files': { + join(str(tmpdir), "secret"): { + 'content': "${node.metadata['secret']}", + 'content_type': 'mako', + }, + }, + }, + }, + ) + + with open(join(str(tmpdir), "nodes.py"), 'w') as f: + f.write(""" +nodes = {{ + "localhost": {{ + 'bundles': ["test"], + 'metadata': {{'secret': vault.password_for("test")}}, + 'os': "{}", + }}, +}} +""".format(host_os())) + + run("bw apply localhost", path=str(tmpdir)) + with open(join(str(tmpdir), "secret")) as f: + content = f.read() + assert content == "sQDdTXu5OmCki8gdGgYdfTxooevckXcB" + + +def test_fault_content_jinja2(tmpdir): + make_repo( + tmpdir, + bundles={ + "test": { + 'files': { + join(str(tmpdir), "secret"): { + 'content': "{{ repo.vault.password_for('test') }}", + 'content_type': 'jinja2', + }, + }, + }, + }, + nodes={ + "localhost": { + 'bundles': ["test"], + 'os': host_os(), + }, + }, + ) + + run("bw apply localhost", path=str(tmpdir)) + with open(join(str(tmpdir), "secret")) as f: + content = f.read() + assert content == "sQDdTXu5OmCki8gdGgYdfTxooevckXcB" + + +def test_fault_content_skipped(tmpdir): + make_repo( + tmpdir, + bundles={ + "test": {}, + }, + nodes={ + "localhost": { + 'bundles': ["test"], + 'os': host_os(), + }, + }, + ) + + with open(join(str(tmpdir), "bundles", "test", "items.py"), 'w') as f: + f.write(""" +files = {{ + "{}": {{ + 'content': repo.vault.password_for("test", key='unavailable'), + }}, +}} +""".format(join(str(tmpdir), "secret"))) + + stdout, stderr, rcode = run("bw apply localhost", path=str(tmpdir)) + assert rcode == 0 + assert not exists(join(str(tmpdir), "secret")) + + +def test_fault_content_skipped_mako(tmpdir): + make_repo( + tmpdir, + bundles={ + "test": { + 'files': { + join(str(tmpdir), "secret"): { + 'content': "${repo.vault.password_for('test', key='unavailable')}", + 'content_type': 'mako', + }, + }, + }, + }, + nodes={ + "localhost": { + 'bundles': ["test"], + 'os': host_os(), + }, + }, + ) + + stdout, stderr, rcode = run("bw apply localhost", path=str(tmpdir)) + assert rcode == 0 + assert not exists(join(str(tmpdir), "secret")) + + +def test_fault_content_skipped_jinja2(tmpdir): + make_repo( + tmpdir, + bundles={ + "test": { + 'files': { + join(str(tmpdir), "secret"): { + 'content': "{{ repo.vault.password_for('test', key='unavailable') }}", + 'content_type': 'jinja2', + }, + }, + }, + }, + nodes={ + "localhost": { + 'bundles': ["test"], + 'os': host_os(), 
+ }, + }, + ) + + # like the mako variant above, the unavailable key should skip the file + stdout, stderr, rcode = run("bw apply localhost", path=str(tmpdir)) + assert rcode == 0 + assert not exists(join(str(tmpdir), "secret")) + + +def test_fault_content_error(tmpdir): + make_repo( + tmpdir, + bundles={ + "test": {}, + }, + nodes={ + "localhost": { + 'bundles': ["test"], + 'os': host_os(), + }, + }, + ) + + with open(join(str(tmpdir), "bundles", "test", "items.py"), 'w') as f: + f.write(""" +files = {{ + "{}": {{ + 'content': repo.vault.password_for("test", key='unavailable'), + 'error_on_missing_fault': True, + }}, +}} +""".format(join(str(tmpdir), "secret"))) + + stdout, stderr, rcode = run("bw -d apply localhost", path=str(tmpdir)) + print(stdout) + assert rcode == 1 diff --git a/tests/integration/bw_apply_symlinks.py b/tests/integration/bw_apply_symlinks.py new file mode 100644 index 0000000..166a918 --- /dev/null +++ b/tests/integration/bw_apply_symlinks.py @@ -0,0 +1,81 @@ +# -*- coding: utf-8 -*- +from __future__ import unicode_literals + +from os import mkdir, readlink, symlink +from os.path import join + +from bundlewrap.utils.testing import host_os, make_repo, run + + +def test_create(tmpdir): + make_repo( + tmpdir, + bundles={ + "test": { + 'symlinks': { + join(str(tmpdir), "foo"): { + 'target': "/dev/null", + }, + }, + }, + }, + nodes={ + "localhost": { + 'bundles': ["test"], + 'os': host_os(), + }, + }, + ) + stdout, stderr, rcode = run("bw apply localhost", path=str(tmpdir)) + assert rcode == 0 + assert readlink(join(str(tmpdir), "foo")) == "/dev/null" + + +def test_fix(tmpdir): + symlink(join(str(tmpdir), "bar"), join(str(tmpdir), "foo")) + make_repo( + tmpdir, + bundles={ + "test": { + 'symlinks': { + join(str(tmpdir), "foo"): { + 'target': "/dev/null", + }, + }, + }, + }, + nodes={ + "localhost": { + 'bundles': ["test"], + 'os': host_os(), + }, + }, + ) + stdout, stderr, rcode = run("bw apply localhost", path=str(tmpdir)) + assert rcode == 0 + assert readlink(join(str(tmpdir), "foo")) == "/dev/null" + + +def test_fix_dir(tmpdir): + mkdir(join(str(tmpdir), "foo")) + make_repo( + tmpdir, + bundles={ + "test": { + 'symlinks': { + join(str(tmpdir), "foo"): { + 'target': "/dev/null", + }, + }, + }, + }, + nodes={ + "localhost": { + 'bundles': ["test"], + 'os': host_os(), + }, + }, + ) + stdout, stderr, rcode = run("bw apply localhost", path=str(tmpdir)) + assert rcode == 0 + assert readlink(join(str(tmpdir), "foo")) == "/dev/null" diff --git a/tests/integration/bw_hash.py b/tests/integration/bw_hash.py new file mode 100644 index 0000000..9c799a9 --- /dev/null +++ b/tests/integration/bw_hash.py @@ -0,0 +1,332 @@ +from os.path import join + +from bundlewrap.utils.testing import make_repo, run + + +def test_empty(tmpdir): + make_repo(tmpdir) + stdout, stderr, rcode = run("bw hash", path=str(tmpdir)) + assert stdout == b"bf21a9e8fbc5a3846fb05b4fa0859e0917b2202f\n" + assert stderr == b"" + + +def test_nondeterministic(tmpdir): + make_repo( + tmpdir, + nodes={ + "node1": { + 'bundles': ["bundle1"], + }, + }, + bundles={ + "bundle1": { + 'files': { + "/test": { + 'content_type': 'mako', + 'content': "<% import random %>${random.randint(1, 9999)}", + }, + }, + }, + }, + ) + + hashes = set() + + for i in range(3): + stdout, stderr, rcode = run("bw hash", path=str(tmpdir)) + hashes.add(stdout.strip()) + + assert len(hashes) > 1 + + +def test_deterministic(tmpdir): + make_repo( + tmpdir, + nodes={ + "node1": { + 'bundles': ["bundle1"], + }, + }, + bundles={ + "bundle1": { + 'files': { + "/test": { + 'content': "${node.name}", + }, + }, + }, + }, + ) + + hashes = set() + + for i in range(3): + stdout, stderr, rcode = run("bw hash", path=str(tmpdir)) + hashes.add(stdout.strip()) + + assert 
len(hashes) == 1 + assert hashes.pop() == b"8c155b4e7056463eb2c8a8345f4f316f6d7359f6" + + +def test_dict(tmpdir): + make_repo( + tmpdir, + nodes={ + "node1": { + 'bundles': ["bundle1"], + }, + }, + bundles={ + "bundle1": { + 'files': { + "/test": { + 'content': "yes please", + }, + }, + }, + }, + ) + + stdout, stderr, rcode = run("bw hash -d", path=str(tmpdir)) + assert rcode == 0 + assert stdout == b"8ab35c696b63a853ccf568b27a50e24a69964487 node1\n" + + stdout, stderr, rcode = run("bw hash -d node1", path=str(tmpdir)) + assert rcode == 0 + assert stdout == b"503583964eadabacb18fda32cc9fb1e9f66e424b file:/test\n" + + stdout, stderr, rcode = run("bw hash -d node1 file:/test", path=str(tmpdir)) + assert rcode == 0 + assert stdout == ( + b"content_hash\tc05a36d547e2b1682472f76985018038d1feebc5\n" + b"type\tfile\n" + ) + + +def test_metadata_empty(tmpdir): + make_repo( + tmpdir, + nodes={ + "node1": { + 'metadata': {}, + }, + }, + ) + + stdout, stderr, rcode = run("bw hash -m node1", path=str(tmpdir)) + assert rcode == 0 + assert stdout == b"bf21a9e8fbc5a3846fb05b4fa0859e0917b2202f\n" + + +def test_metadata_fault(tmpdir): + make_repo(tmpdir) + with open(join(str(tmpdir), "nodes.py"), 'w') as f: + f.write(""" +nodes = { + 'node1': { + 'metadata': {'foo': vault.password_for("testing")}, + }, + 'node2': { + 'metadata': {'foo': vault.password_for("testing").value}, + }, + 'node3': { + 'metadata': {'foo': "faCTT76kagtDuZE5wnoiD1CxhGKmbgiX"}, + }, + 'node4': { + 'metadata': {'foo': "something else entirely"}, + }, +} +""") + print(run("bw debug -c 'print(repo.vault.password_for(\"testing\"))'", path=str(tmpdir))) + stdout1, stderr, rcode = run("bw hash -m node1", path=str(tmpdir)) + assert stdout1 == b"d0c998fd17a68322a03345954bb0a75301d3a127\n" + assert stderr == b"" + assert rcode == 0 + stdout2, stderr, rcode = run("bw hash -m node2", path=str(tmpdir)) + assert stdout2 == stdout1 + assert stderr == b"" + assert rcode == 0 + stdout3, stderr, rcode = run("bw hash -m node3", path=str(tmpdir)) + assert stdout3 == stdout1 + assert stderr == b"" + assert rcode == 0 + stdout4, stderr, rcode = run("bw hash -m node4", path=str(tmpdir)) + assert stdout4 != stdout1 + assert stderr == b"" + assert rcode == 0 + + +def test_metadata_nested_sort(tmpdir): + make_repo( + tmpdir, + nodes={ + "node1": { + 'metadata': { + 'nested': { + 'one': True, + 'two': False, + 'three': 3, + 'four': "four", + 'five': None, + }, + }, + }, + "node2": { + 'metadata': { + 'nested': { + 'five': None, + 'four': "four", + 'one': True, + 'three': 3, + 'two': False, + }, + }, + }, + }, + ) + + stdout1, stderr, rcode = run("bw hash -m node1", path=str(tmpdir)) + assert rcode == 0 + assert stdout1 == b"bc403a093ca3399cd3efa7a64ec420e0afef5e70\n" + + stdout2, stderr, rcode = run("bw hash -m node2", path=str(tmpdir)) + assert rcode == 0 + assert stdout1 == stdout2 + + +def test_metadata_repo(tmpdir): + make_repo( + tmpdir, + nodes={ + "node1": { + 'metadata': { + 'foo': 47, + }, + }, + }, + ) + + stdout, stderr, rcode = run("bw hash -m", path=str(tmpdir)) + assert rcode == 0 + assert stdout == b"c0cc160ab1b6e71155cd4f65139bc7f66304d7f3\n" + + +def test_metadata_repo_dict(tmpdir): + make_repo( + tmpdir, + nodes={ + "node1": { + 'metadata': { + 'foo': 47, + }, + }, + }, + ) + + stdout, stderr, rcode = run("bw hash -md", path=str(tmpdir)) + assert rcode == 0 + assert stdout == b"node1\t013b3a8199695eb45c603ea4e0a910148d80e7ed\n" + + +def test_groups_repo(tmpdir): + make_repo( + tmpdir, + groups={ + "group1": {}, + "group2": {}, + }, + ) + + 
stdout, stderr, rcode = run("bw hash -g", path=str(tmpdir)) + assert rcode == 0 + assert stdout == b"479c737e191339e5fae20ac8a8903a75f6b91f4d\n" + + +def test_groups_repo_dict(tmpdir): + make_repo( + tmpdir, + groups={ + "group1": {}, + "group2": {}, + }, + ) + + stdout, stderr, rcode = run("bw hash -dg", path=str(tmpdir)) + assert rcode == 0 + assert stdout == b"group1\ngroup2\n" + + +def test_groups(tmpdir): + make_repo( + tmpdir, + groups={ + "group1": {'members': ["node1", "node2"]}, + "group2": {'members': ["node3"]}, + }, + nodes={ + "node1": {}, + "node2": {}, + "node3": {}, + }, + ) + + stdout, stderr, rcode = run("bw hash -g group1", path=str(tmpdir)) + assert rcode == 0 + assert stdout == b"59f5a812acd22592b046b20e9afedc1cfcd37c77\n" + + +def test_groups_dict(tmpdir): + make_repo( + tmpdir, + groups={ + "group1": {'members': ["node1", "node2"]}, + "group2": {'members': ["node3"]}, + }, + nodes={ + "node1": {}, + "node2": {}, + "node3": {}, + }, + ) + + stdout, stderr, rcode = run("bw hash -dg group1", path=str(tmpdir)) + assert rcode == 0 + assert stdout == b"node1\nnode2\n" + + +def test_groups_node(tmpdir): + make_repo( + tmpdir, + groups={ + "group1": {'members': ["node1", "node2"]}, + "group2": {'members': ["node3"]}, + }, + nodes={ + "node1": {}, + "node2": {}, + "node3": {}, + }, + ) + + stdout, stderr, rcode = run("bw hash -g node1", path=str(tmpdir)) + assert rcode == 0 + assert stdout == b"6f4615dc71426549e22df7961bd2b88ba95ad1fc\n" + + +def test_groups_node_dict(tmpdir): + make_repo( + tmpdir, + groups={ + "group1": {'members': ["node1", "node2"]}, + "group2": {'members': ["node3"]}, + }, + nodes={ + "node1": {}, + "node2": {}, + "node3": {}, + }, + ) + + stdout, stderr, rcode = run("bw hash -dg node1", path=str(tmpdir)) + assert rcode == 0 + assert stdout == b"group1\n" diff --git a/tests/integration/bw_items.py b/tests/integration/bw_items.py new file mode 100644 index 0000000..aa902ef --- /dev/null +++ b/tests/integration/bw_items.py @@ -0,0 +1,28 @@ +# -*- coding: utf-8 -*- +from __future__ import unicode_literals + +from bundlewrap.utils.testing import make_repo, run + + +def test_file_preview(tmpdir): + make_repo( + tmpdir, + nodes={ + "node1": { + 'bundles': ["bundle1"], + }, + }, + bundles={ + "bundle1": { + 'files': { + "/test": { + 'content': "föö", + 'encoding': 'latin-1', + }, + }, + }, + }, + ) + + stdout, stderr, rcode = run("bw items -f /test node1", path=str(tmpdir)) + assert stdout == "föö".encode('utf-8') # our output is always utf-8 diff --git a/tests/integration/bw_metadata.py b/tests/integration/bw_metadata.py new file mode 100644 index 0000000..b167d35 --- /dev/null +++ b/tests/integration/bw_metadata.py @@ -0,0 +1,122 @@ +from json import loads +from os.path import join + +from bundlewrap.utils.testing import make_repo, run + + +def test_empty(tmpdir): + make_repo( + tmpdir, + nodes={ + "node1": {}, + }, + ) + stdout, stderr, rcode = run("bw metadata node1", path=str(tmpdir)) + assert stdout == b"{}\n" + assert stderr == b"" + assert rcode == 0 + + +def test_simple(tmpdir): + make_repo( + tmpdir, + nodes={ + "node1": {'metadata': {"foo": "bar"}}, + }, + ) + stdout, stderr, rcode = run("bw metadata node1", path=str(tmpdir)) + assert loads(stdout.decode()) == {"foo": "bar"} + assert stderr == b"" + assert rcode == 0 + + +def test_object(tmpdir): + make_repo(tmpdir) + with open(join(str(tmpdir), "nodes.py"), 'w') as f: + f.write("nodes = {'node1': {'metadata': {'foo': object}}}") + stdout, stderr, rcode = run("bw metadata node1", path=str(tmpdir)) + 
assert rcode == 1 + + +def test_merge(tmpdir): + make_repo( + tmpdir, + nodes={ + "node1": { + 'metadata': { + "foo": { + "bar": "baz", + }, + }, + }, + }, + groups={ + "group1": { + 'members': ["node1"], + 'metadata': { + "ding": 5, + "foo": { + "bar": "ZAB", + "baz": "bar", + }, + }, + }, + }, + ) + stdout, stderr, rcode = run("bw metadata node1", path=str(tmpdir)) + assert loads(stdout.decode()) == { + "ding": 5, + "foo": { + "bar": "baz", + "baz": "bar", + }, + } + assert stderr == b"" + assert rcode == 0 + + +def test_metadatapy(tmpdir): + make_repo( + tmpdir, + bundles={"test": {}}, + nodes={ + "node1": { + 'bundles': ["test"], + 'metadata': {"foo": "bar"}, + }, + }, + ) + with open(join(str(tmpdir), "bundles", "test", "metadata.py"), 'w') as f: + f.write( +"""def foo(metadata): + metadata["baz"] = node.name + return metadata +""") + stdout, stderr, rcode = run("bw metadata node1", path=str(tmpdir)) + assert loads(stdout.decode()) == { + "baz": "node1", + "foo": "bar", + } + assert stderr == b"" + assert rcode == 0 + + +def test_metadatapy_loop(tmpdir): + make_repo( + tmpdir, + bundles={"test": {}}, + nodes={ + "node1": { + 'bundles': ["test"], + 'metadata': {"foo": 1}, + }, + }, + ) + with open(join(str(tmpdir), "bundles", "test", "metadata.py"), 'w') as f: + f.write( +"""def foo(metadata): + metadata["foo"] += 1 + return metadata +""") + stdout, stderr, rcode = run("bw metadata node1", path=str(tmpdir)) + assert rcode == 1 diff --git a/tests/integration/bw_nodes.py b/tests/integration/bw_nodes.py new file mode 100644 index 0000000..e5f78c4 --- /dev/null +++ b/tests/integration/bw_nodes.py @@ -0,0 +1,333 @@ +from json import loads +from os.path import join + +from bundlewrap.utils.testing import make_repo, run + + +def test_empty(tmpdir): + make_repo(tmpdir) + stdout, stderr, rcode = run("bw nodes", path=str(tmpdir)) + assert stdout == b"" + assert stderr == b"" + assert rcode == 0 + + +def test_single(tmpdir): + make_repo(tmpdir, nodes={"node1": {}}) + stdout, stderr, rcode = run("bw nodes", path=str(tmpdir)) + assert stdout == b"node1\n" + assert stderr == b"" + assert rcode == 0 + + +def test_hostname(tmpdir): + make_repo(tmpdir, nodes={"node1": {'hostname': "node1.example.com"}}) + stdout, stderr, rcode = run("bw nodes --attrs | grep '\thostname' | cut -f 3", path=str(tmpdir)) + assert stdout == b"node1.example.com\n" + assert stderr == b"" + assert rcode == 0 + + +def test_inline(tmpdir): + make_repo( + tmpdir, + nodes={ + "node1": { + 'bundles': ["bundle1", "bundle2"], + }, + "node2": { + 'bundles': ["bundle1"], + }, + }, + bundles={ + "bundle1": {}, + "bundle2": {}, + }, + ) + stdout, stderr, rcode = run("bw nodes -ai | grep '\tbundle' | grep bundle2 | cut -f 1", path=str(tmpdir)) + assert stdout == b"node1\n" + assert stderr == b"" + assert rcode == 0 + + stdout, stderr, rcode = run("bw nodes -ai | grep '\tbundle' | grep -v bundle2 | cut -f 1", path=str(tmpdir)) + assert stdout == b"node2\n" + assert stderr == b"" + assert rcode == 0 + + +def test_in_group(tmpdir): + make_repo( + tmpdir, + groups={ + "group1": { + 'members': ["node2"], + }, + }, + nodes={ + "node1": {}, + "node2": {}, + }, + ) + stdout, stderr, rcode = run("bw nodes -g group1", path=str(tmpdir)) + assert stdout == b"node2\n" + assert stderr == b"" + assert rcode == 0 + + +def test_bundles(tmpdir): + make_repo( + tmpdir, + bundles={ + "bundle1": {}, + "bundle2": {}, + }, + nodes={ + "node1": {'bundles': ["bundle1", "bundle2"]}, + "node2": {'bundles': ["bundle2"]}, + }, + ) + stdout, stderr, rcode = run("bw 
nodes --bundles", path=str(tmpdir)) + assert stdout.decode().strip().split("\n") == [ + "node1: bundle1, bundle2", + "node2: bundle2", + ] + assert stderr == b"" + assert rcode == 0 + + +def test_groups(tmpdir): + make_repo( + tmpdir, + groups={ + "group1": { + 'members': ["node2"], + }, + "group2": { + 'members': ["node1"], + }, + "group3": { + 'subgroup_patterns': ["p2"], + }, + "group4": { + 'subgroups': ["group1"], + }, + }, + nodes={ + "node1": {}, + "node2": {}, + }, + ) + stdout, stderr, rcode = run("bw nodes --groups", path=str(tmpdir)) + assert stdout.decode().strip().split("\n") == [ + "node1: group2, group3", + "node2: group1, group4", + ] + assert stderr == b"" + assert rcode == 0 + + +def test_group_members_add(tmpdir): + make_repo( + tmpdir, + nodes={ + "node1": {'os': 'centos'}, + "node2": {'os': 'debian'}, + "node3": {'os': 'ubuntu'}, + }, + ) + with open(join(str(tmpdir), "groups.py"), 'w') as f: + f.write(""" +groups = { + "group1": { + 'members_add': lambda node: node.os == 'centos', + }, + "group2": { + 'members': ["node2"], + 'members_add': lambda node: node.os != 'centos', + }, + "group3": { + 'members_add': lambda node: not node.in_group("group2"), + }, + "group4": { + 'members': ["node3"], + }, +} + """) + stdout, stderr, rcode = run("bw nodes -a node1 | grep \tgroup | cut -f 3", path=str(tmpdir)) + assert stdout == b"group1\ngroup3\n" + assert stderr == b"" + assert rcode == 0 + + stdout, stderr, rcode = run("bw nodes -a node2 | grep \tgroup | cut -f 3", path=str(tmpdir)) + assert stdout == b"group2\n" + assert stderr == b"" + assert rcode == 0 + + stdout, stderr, rcode = run("bw nodes -a node3 | grep \tgroup | cut -f 3", path=str(tmpdir)) + assert stdout == b"group2\ngroup3\ngroup4\n" + assert stderr == b"" + assert rcode == 0 + + +def test_group_members_remove(tmpdir): + make_repo( + tmpdir, + nodes={ + "node1": {'os': 'centos'}, + "node2": {'os': 'debian'}, + "node3": {'os': 'ubuntu'}, + "node4": {'os': 'ubuntu'}, + }, + ) + with open(join(str(tmpdir), "groups.py"), 'w') as f: + f.write(""" +groups = { + "group1": { + 'members_add': lambda node: node.os == 'ubuntu', + }, + "group2": { + 'members_add': lambda node: node.os == 'ubuntu', + 'members_remove': lambda node: node.name == "node3", + }, + "group3": { + 'members_add': lambda node: not node.in_group("group3"), + }, + "group4": { + 'subgroups': ["group3"], + 'members_remove': lambda node: node.os == 'debian', + }, +} + """) + stdout, stderr, rcode = run("bw nodes -a node1 | grep \tgroup | cut -f 3", path=str(tmpdir)) + assert stdout == b"group3\ngroup4\n" + assert stderr == b"" + assert rcode == 0 + + stdout, stderr, rcode = run("bw nodes -a node2 | grep \tgroup | cut -f 3", path=str(tmpdir)) + assert stdout == b"group3\n" + assert stderr == b"" + assert rcode == 0 + + stdout, stderr, rcode = run("bw nodes -a node3 | grep \tgroup | cut -f 3", path=str(tmpdir)) + assert stdout == b"group1\ngroup3\ngroup4\n" + assert stderr == b"" + assert rcode == 0 + + stdout, stderr, rcode = run("bw nodes -a node4 | grep \tgroup | cut -f 3", path=str(tmpdir)) + assert stdout == b"group1\ngroup2\ngroup3\ngroup4\n" + assert stderr == b"" + assert rcode == 0 + + +def test_group_members_remove_bundle(tmpdir): + make_repo( + tmpdir, + bundles={ + "bundle1": {}, + "bundle2": {}, + }, + nodes={ + "node1": {}, + "node2": {}, + }, + ) + with open(join(str(tmpdir), "groups.py"), 'w') as f: + f.write(""" +groups = { + "group1": { + 'bundles': ["bundle1"], + 'members': ["node1", "node2"], + }, + "group2": { + 'bundles': ["bundle1", 
"bundle2"], + 'members': ["node1", "node2"], + 'members_remove': lambda node: node.name == "node2", + }, +} + """) + stdout, stderr, rcode = run("bw nodes -a node1 | grep \tbundle | cut -f 3", path=str(tmpdir)) + assert stdout == b"bundle1\nbundle2\n" + assert stderr == b"" + assert rcode == 0 + + stdout, stderr, rcode = run("bw nodes -a node2 | grep \tbundle | cut -f 3", path=str(tmpdir)) + assert stdout == b"bundle1\n" + assert stderr == b"" + assert rcode == 0 + + +def test_group_members_partial_metadata(tmpdir): + make_repo( + tmpdir, + nodes={ + "node1": { + 'metadata': {'foo': 1}, + }, + "node2": {}, + }, + ) + with open(join(str(tmpdir), "groups.py"), 'w') as f: + f.write(""" +groups = { + "group1": { + 'members_add': lambda node: node.metadata.get('foo') == 1, + }, + "group2": { + 'members': ["node2"], + 'metadata': {'foo': 1}, + }, +} + """) + stdout, stderr, rcode = run("bw nodes -a node1 | grep \tgroup | cut -f 3", path=str(tmpdir)) + assert stdout == b"group1\n" + assert stderr == b"" + assert rcode == 0 + + stdout, stderr, rcode = run("bw nodes -a node2 | grep \tgroup | cut -f 3", path=str(tmpdir)) + assert stdout == b"group2\n" + assert stderr == b"" + assert rcode == 0 + + +def test_group_members_remove_based_on_metadata(tmpdir): + make_repo( + tmpdir, + nodes={ + "node1": { + 'metadata': {'remove': False}, + }, + "node2": {}, + }, + ) + with open(join(str(tmpdir), "groups.py"), 'w') as f: + f.write(""" +groups = { + "group1": { + 'members_add': lambda node: not node.metadata.get('remove', False), + 'members_remove': lambda node: node.metadata.get('remove', False), + }, + "group2": { + 'members': ["node2"], + 'metadata': {'remove': True}, + }, + "group3": { + 'subgroups': ["group1"], + 'members_remove': lambda node: node.name.endswith("1") and node.metadata.get('redherring', True), + }, +} + """) + stdout, stderr, rcode = run("bw nodes -a node1 | grep \tgroup | cut -f 3", path=str(tmpdir)) + assert stdout == b"group1\n" + assert stderr == b"" + assert rcode == 0 + + stdout, stderr, rcode = run("bw nodes -a node2 | grep \tgroup | cut -f 3", path=str(tmpdir)) + assert stdout == b"group1\ngroup2\ngroup3\n" + assert stderr == b"" + assert rcode == 0 + + # make sure there is no metadata deadlock + stdout, stderr, rcode = run("bw metadata node1", path=str(tmpdir)) + assert loads(stdout.decode('utf-8')) == {'remove': False} + assert stderr == b"" + assert rcode == 0 diff --git a/tests/integration/bw_plot.py b/tests/integration/bw_plot.py new file mode 100644 index 0000000..6456ce9 --- /dev/null +++ b/tests/integration/bw_plot.py @@ -0,0 +1,67 @@ +from os.path import join + +from bundlewrap.utils.testing import make_repo, run + + +def test_groups_for_node(tmpdir): + make_repo( + tmpdir, + nodes={ + "node-foo": {}, + "node-bar": {}, + "node-baz": {}, + "node-pop": {}, + }, + ) + with open(join(str(tmpdir), "groups.py"), 'w') as f: + f.write(""" +groups = { + "group-foo": { + 'members': ["node-foo"], + 'member_patterns': [r".*-bar"], + }, + "group-bar": { + 'subgroups': ["group-foo"], + }, + "group-baz": { + 'members': ["node-pop"], + 'members_add': lambda node: node.name == "node-pop", + }, + "group-pop": { + 'subgroup_patterns': [r"ba"], + }, +} + """) + stdout, stderr, rcode = run("bw plot groups-for-node node-foo", path=str(tmpdir)) + assert stdout == b"""digraph bundlewrap +{ +rankdir = LR +node [color="#303030"; fillcolor="#303030"; fontname=Helvetica] +edge [arrowhead=vee] +"group-bar" [fontcolor=white,style=filled]; +"group-foo" [fontcolor=white,style=filled]; +"group-pop" 
[fontcolor=white,style=filled]; +"node-foo" [fontcolor="#303030",shape=box,style=rounded]; +"group-bar" -> "group-foo" [color="#6BB753",penwidth=2] +"group-pop" -> "group-bar" [color="#6BB753",penwidth=2] +"group-foo" -> "node-foo" [color="#D18C57",penwidth=2] +} +""" + assert stderr == b"" + assert rcode == 0 + + stdout, stderr, rcode = run("bw plot groups-for-node node-pop", path=str(tmpdir)) + assert stdout == b"""digraph bundlewrap +{ +rankdir = LR +node [color="#303030"; fillcolor="#303030"; fontname=Helvetica] +edge [arrowhead=vee] +"group-baz" [fontcolor=white,style=filled]; +"group-pop" [fontcolor=white,style=filled]; +"node-pop" [fontcolor="#303030",shape=box,style=rounded]; +"group-pop" -> "group-baz" [color="#6BB753",penwidth=2] +"group-baz" -> "node-pop" [color="#D18C57",penwidth=2] +} +""" + assert stderr == b"" + assert rcode == 0 diff --git a/tests/integration/bw_repo.py b/tests/integration/bw_repo.py new file mode 100644 index 0000000..7b1d554 --- /dev/null +++ b/tests/integration/bw_repo.py @@ -0,0 +1,15 @@ +from os.path import join + +from bundlewrap.utils.testing import make_repo, run + + +def test_not_a_repo_test(tmpdir): + assert run("bw nodes", path=str(tmpdir))[2] == 1 + + +def test_subdir_invocation(tmpdir): + make_repo(tmpdir, nodes={"node1": {}}) + stdout, stderr, rcode = run("bw nodes", path=join(str(tmpdir), "bundles")) + assert stdout == b"node1\n" + assert stderr == b"" + assert rcode == 0 diff --git a/tests/integration/bw_stats.py b/tests/integration/bw_stats.py new file mode 100644 index 0000000..a011dda --- /dev/null +++ b/tests/integration/bw_stats.py @@ -0,0 +1,31 @@ +from bundlewrap.utils.testing import make_repo, run + + +def test_nondeterministic(tmpdir): + make_repo( + tmpdir, + nodes={ + "node1": { + 'bundles': ["bundle1"], + }, + }, + bundles={ + "bundle1": { + 'files': { + "/test": { + 'content': "foo", + }, + "/test2": { + 'content': "foo", + }, + }, + }, + }, + ) + + stdout, stderr, rcode = run("bw stats", path=str(tmpdir)) + assert stdout == b"""1 nodes +0 groups +2 items + 2 file +""" diff --git a/tests/integration/bw_test.py b/tests/integration/bw_test.py new file mode 100644 index 0000000..8cf823a --- /dev/null +++ b/tests/integration/bw_test.py @@ -0,0 +1,493 @@ +from os.path import join + +from bundlewrap.utils.testing import make_repo, run + + +def test_empty(tmpdir): + make_repo(tmpdir) + stdout, stderr, rcode = run("bw test", path=str(tmpdir)) + assert stdout == b"" + + +def test_bundle_not_found(tmpdir): + make_repo( + tmpdir, + nodes={ + "node1": { + 'bundles': ["bundle1"], + }, + }, + ) + assert run("bw test", path=str(tmpdir))[2] == 1 + + +def test_hooks(tmpdir): + make_repo( + tmpdir, + nodes={ + "node1": {}, + "node2": {}, + }, + ) + with open(join(str(tmpdir), "hooks", "test.py"), 'w') as f: + f.write("""from bundlewrap.utils.ui import io +def test(repo, **kwargs): + io.stdout("AAA") + +def test_node(repo, node, **kwargs): + io.stdout("BBB") +""") + assert b"AAA" in run("bw test", path=str(tmpdir))[0] + assert b"BBB" in run("bw test", path=str(tmpdir))[0] + + +def test_circular_dep_direct(tmpdir): + make_repo( + tmpdir, + nodes={ + "node1": { + 'bundles': ["bundle1"], + }, + }, + bundles={ + "bundle1": { + "pkg_apt": { + "foo": { + 'needs': ["pkg_apt:bar"], + }, + "bar": { + 'needs': ["pkg_apt:foo"], + }, + }, + }, + }, + ) + assert run("bw test", path=str(tmpdir))[2] == 1 + + +def test_circular_dep_indirect(tmpdir): + make_repo( + tmpdir, + nodes={ + "node1": { + 'bundles': ["bundle1"], + }, + }, + bundles={ + "bundle1": { + 
"pkg_apt": { + "foo": { + 'needs': ["pkg_apt:bar"], + }, + "bar": { + 'needs': ["pkg_apt:baz"], + }, + "baz": { + 'needs': ["pkg_apt:foo"], + }, + }, + }, + }, + ) + assert run("bw test", path=str(tmpdir))[2] == 1 + + +def test_circular_dep_self(tmpdir): + make_repo( + tmpdir, + nodes={ + "node1": { + 'bundles': ["bundle1"], + }, + }, + bundles={ + "bundle1": { + "pkg_apt": { + "foo": { + 'needs': ["pkg_apt:foo"], + }, + }, + }, + }, + ) + assert run("bw test", path=str(tmpdir))[2] == 1 + + +def test_circular_trigger_self(tmpdir): + make_repo( + tmpdir, + nodes={ + "node1": { + 'bundles': ["bundle1"], + }, + }, + bundles={ + "bundle1": { + "pkg_apt": { + "foo": { + 'triggers': ["pkg_apt:foo"], + }, + }, + }, + }, + ) + assert run("bw test", path=str(tmpdir))[2] == 1 + + +def test_file_invalid_attribute(tmpdir): + make_repo( + tmpdir, + nodes={ + "node1": { + 'bundles': ["bundle1"], + }, + }, + bundles={ + "bundle1": { + "files": { + "/foo": { + "potato": "yes", + }, + }, + }, + }, + ) + assert run("bw test", path=str(tmpdir))[2] == 1 + + +def test_file_template_error(tmpdir): + make_repo( + tmpdir, + nodes={ + "node1": { + 'bundles': ["bundle1"], + }, + }, + bundles={ + "bundle1": { + "files": { + "/foo": { + 'content_type': 'mako', + 'content': "${broken", + }, + }, + }, + }, + ) + assert run("bw test", path=str(tmpdir))[2] == 1 + + +def test_group_loop(tmpdir): + make_repo( + tmpdir, + groups={ + "group1": { + 'subgroups': ["group2"], + }, + "group2": { + 'subgroups': ["group3"], + }, + "group3": { + 'subgroups': ["group1"], + }, + }, + ) + assert run("bw test", path=str(tmpdir))[2] == 1 + + +def test_group_metadata_collision(tmpdir): + make_repo( + tmpdir, + nodes={"node1": {}}, + groups={ + "group1": { + 'members': ["node1"], + 'metadata': { + 'foo': { + 'baz': 1, + }, + 'bar': 2, + }, + }, + "group2": { + 'metadata': { + 'foo': { + 'baz': 3, + }, + 'snap': 4, + }, + 'subgroups': ["group3"], + }, + "group3": { + 'members': ["node1"], + }, + }, + ) + assert run("bw test", path=str(tmpdir))[2] == 1 + + +def test_group_metadata_collision_subgroups(tmpdir): + make_repo( + tmpdir, + nodes={"node1": {}}, + groups={ + "group1": { + 'members': ["node1"], + 'metadata': { + 'foo': { + 'baz': 1, + }, + 'bar': 2, + }, + }, + "group2": { + 'metadata': { + 'foo': { + 'baz': 3, + }, + 'snap': 4, + }, + 'subgroups': ["group1", "group3"], + }, + "group3": { + 'members': ["node1"], + }, + }, + ) + assert run("bw test", path=str(tmpdir))[2] == 0 + + +def test_group_metadata_collision_list(tmpdir): + make_repo( + tmpdir, + nodes={"node1": {}}, + groups={ + "group1": { + 'members': ["node1"], + 'metadata': { + 'foo': [1], + }, + }, + "group2": { + 'members': ["node1"], + 'metadata': { + 'foo': [2], + }, + }, + }, + ) + assert run("bw test", path=str(tmpdir))[2] == 1 + + +def test_group_metadata_collision_dict(tmpdir): + make_repo( + tmpdir, + nodes={"node1": {}}, + groups={ + "group1": { + 'members': ["node1"], + 'metadata': { + 'foo': {'bar': 1}, + }, + }, + "group2": { + 'members': ["node1"], + 'metadata': { + 'foo': 2, + }, + }, + }, + ) + assert run("bw test", path=str(tmpdir))[2] == 1 + + +def test_group_metadata_collision_dict_ok(tmpdir): + make_repo( + tmpdir, + nodes={"node1": {}}, + groups={ + "group1": { + 'members': ["node1"], + 'metadata': { + 'foo': {'bar': 1}, + }, + }, + "group2": { + 'members': ["node1"], + 'metadata': { + 'foo': {'baz': 2}, + }, + }, + }, + ) + assert run("bw test", path=str(tmpdir))[2] == 0 + + +def test_group_metadata_collision_set(tmpdir): + make_repo( + tmpdir, + 
nodes={"node1": {}}, + groups={ + "group1": { + 'members': ["node1"], + 'metadata': { + 'foo': set([1]), + }, + }, + "group2": { + 'members': ["node1"], + 'metadata': { + 'foo': 2, + }, + }, + }, + ) + assert run("bw test", path=str(tmpdir))[2] == 1 + + +def test_group_metadata_collision_set_ok(tmpdir): + make_repo( + tmpdir, + nodes={"node1": {}}, + groups={ + "group1": { + 'members': ["node1"], + 'metadata': { + 'foo': set([1]), + }, + }, + "group2": { + 'members': ["node1"], + 'metadata': { + 'foo': set([2]), + }, + }, + }, + ) + assert run("bw test", path=str(tmpdir))[2] == 0 + + +def test_fault_missing(tmpdir): + make_repo( + tmpdir, + nodes={ + "node1": { + 'bundles': ["bundle1"], + }, + }, + bundles={ + "bundle1": { + "files": { + "/foo": { + 'content_type': 'mako', + 'content': "${repo.vault.decrypt('bzzt', key='unavailable')}", + }, + }, + }, + }, + ) + assert run("bw test", path=str(tmpdir))[2] == 1 + assert run("bw test --ignore-missing-faults", path=str(tmpdir))[2] == 0 + + +def test_metadata_determinism_ok(tmpdir): + make_repo( + tmpdir, + nodes={ + "node1": { + 'bundles': ["bundle1"], + }, + }, + bundles={ + "bundle1": {}, + }, + ) + with open(join(str(tmpdir), "bundles", "bundle1", "metadata.py"), 'w') as f: + f.write(""" +def test(metadata): + metadata['test'] = 1 + return metadata +""") + assert run("bw test -m 3", path=str(tmpdir))[2] == 0 + + +def test_metadata_determinism_broken(tmpdir): + make_repo( + tmpdir, + nodes={ + "node1": { + 'bundles': ["bundle1"], + }, + }, + bundles={ + "bundle1": {}, + }, + ) + with open(join(str(tmpdir), "bundles", "bundle1", "metadata.py"), 'w') as f: + f.write("""from random import randint as _randint + +def test(metadata): + metadata.setdefault('test', _randint(1, 99999)) + return metadata +""") + assert run("bw test -m 3", path=str(tmpdir))[2] == 1 + + +def test_config_determinism_ok(tmpdir): + make_repo( + tmpdir, + nodes={ + "node1": { + 'bundles': ["bundle1"], + }, + }, + bundles={ + "bundle1": { + "files": { + "/test": { + 'content': "1", + 'content_type': 'mako', + }, + }, + }, + }, + ) + assert run("bw test -d 3", path=str(tmpdir))[2] == 0 + + +def test_config_determinism_broken(tmpdir): + make_repo( + tmpdir, + nodes={ + "node1": { + 'bundles': ["bundle1"], + }, + }, + bundles={ + "bundle1": { + "files": { + "/test": { + 'content': "<% from random import randint %>\n${randint(1, 99999)\n}", + 'content_type': 'mako', + }, + }, + }, + }, + ) + assert run("bw test -d 3", path=str(tmpdir))[2] == 1 + + +def test_unknown_subgroup(tmpdir): + make_repo( + tmpdir, + nodes={ + "node1": {}, + }, + groups={ + "group1": {'subgroups': ["missing-group"]}, + "group2": {'members': ["node1"]}, + }, + ) + assert run("bw test", path=str(tmpdir))[2] == 1 + assert run("bw test group1", path=str(tmpdir))[2] == 1 + assert run("bw test group2", path=str(tmpdir))[2] == 1 diff --git a/tests/integration/bw_verify.py b/tests/integration/bw_verify.py new file mode 100644 index 0000000..502ccfe --- /dev/null +++ b/tests/integration/bw_verify.py @@ -0,0 +1,33 @@ +# -*- coding: utf-8 -*- +from __future__ import unicode_literals + +from os.path import join + +from bundlewrap.utils.testing import host_os, make_repo, run + + +def test_empty_verify(tmpdir): + make_repo( + tmpdir, + bundles={ + "test": { + 'files': { + join(str(tmpdir), "foo"): { + 'content_type': 'any', + }, + }, + }, + }, + nodes={ + "localhost": { + 'bundles': ["test"], + 'os': host_os(), + }, + }, + ) + + with open(join(str(tmpdir), "foo"), 'w') as f: + f.write("test") + + stdout, stderr, rcode 
= run("bw verify localhost", path=str(tmpdir)) + assert rcode == 0 diff --git a/tests/integration/secrets.py b/tests/integration/secrets.py new file mode 100644 index 0000000..ab1fd82 --- /dev/null +++ b/tests/integration/secrets.py @@ -0,0 +1,85 @@ +# -*- coding: utf-8 -*- +from __future__ import unicode_literals + +from base64 import b64decode +from os.path import join + +from bundlewrap.utils.testing import make_repo, run + + +def test_encrypt(tmpdir): + make_repo(tmpdir) + + stdout, stderr, rcode = run("bw debug -c 'print(repo.vault.encrypt(\"test\"))'", path=str(tmpdir)) + assert stderr == b"" + assert rcode == 0 + + stdout, stderr, rcode = run("bw debug -c 'print(repo.vault.decrypt(\"{}\"))'".format(stdout.decode('utf-8').strip()), path=str(tmpdir)) + assert stdout == b"test\n" + assert stderr == b"" + assert rcode == 0 + + +def test_encrypt_file(tmpdir): + make_repo(tmpdir) + + source_file = join(str(tmpdir), "data", "source") + with open(source_file, 'w') as f: + f.write("ohai") + + stdout, stderr, rcode = run( + "bw debug -c 'repo.vault.encrypt_file(\"{}\", \"{}\")'".format( + source_file, + "encrypted", + ), + path=str(tmpdir), + ) + assert stderr == b"" + assert rcode == 0 + + stdout, stderr, rcode = run( + "bw debug -c 'print(repo.vault.decrypt_file(\"{}\"))'".format( + "encrypted", + ), + path=str(tmpdir), + ) + assert stdout == b"ohai\n" + assert stderr == b"" + assert rcode == 0 + + +def test_encrypt_file_base64(tmpdir): + make_repo(tmpdir) + + source_file = join(str(tmpdir), "data", "source") + with open(source_file, 'wb') as f: + f.write("öhai".encode('latin-1')) + + stdout, stderr, rcode = run( + "bw debug -c 'repo.vault.encrypt_file(\"{}\", \"{}\")'".format( + source_file, + "encrypted", + ), + path=str(tmpdir), + ) + assert stderr == b"" + assert rcode == 0 + + stdout, stderr, rcode = run( + "bw debug -c 'print(repo.vault.decrypt_file_as_base64(\"{}\"))'".format( + "encrypted", + ), + path=str(tmpdir), + ) + assert b64decode(stdout.decode('utf-8')) == "öhai".encode('latin-1') + assert stderr == b"" + assert rcode == 0 + + +def test_format_password(tmpdir): + make_repo(tmpdir) + + stdout, stderr, rcode = run("bw debug -c 'print(repo.vault.format(\"format: {}\", repo.vault.password_for(\"testing\")))'", path=str(tmpdir)) + assert stdout == b"format: faCTT76kagtDuZE5wnoiD1CxhGKmbgiX\n" + assert stderr == b"" + assert rcode == 0 diff --git a/tests/unit/metadata.py b/tests/unit/metadata.py new file mode 100644 index 0000000..dc37456 --- /dev/null +++ b/tests/unit/metadata.py @@ -0,0 +1,23 @@ +from bundlewrap.metadata import atomic, dictionary_key_map + + +def test_dictmap(): + assert set(dictionary_key_map({ + 'key1': 1, + 'key2': { + 'key3': [3, 3, 3], + 'key4': atomic([4, 4, 4]), + 'key5': { + 'key6': "6", + }, + 'key7': set((7, 7, 7)), + }, + })) == set([ + ("key1",), + ("key2",), + ("key2", "key3"), + ("key2", "key4"), + ("key2", "key5"), + ("key2", "key5", "key6"), + ("key2", "key7"), + ]) diff --git a/tests/unit/utils_time.py b/tests/unit/utils_time.py new file mode 100644 index 0000000..e5b967d --- /dev/null +++ b/tests/unit/utils_time.py @@ -0,0 +1,37 @@ +# -*- coding: utf-8 -*- +from __future__ import unicode_literals + +from datetime import timedelta + +from bundlewrap.utils.time import format_duration, parse_duration + + +def test_format_duration(): + assert format_duration(timedelta()) == "0s" + assert format_duration(timedelta(seconds=10)) == "10s" + assert format_duration(timedelta(minutes=10)) == "10m" + assert format_duration(timedelta(hours=10)) == "10h" + 
+def test_format_password(tmpdir):
+    make_repo(tmpdir)
+
+    stdout, stderr, rcode = run("bw debug -c 'print(repo.vault.format(\"format: {}\", repo.vault.password_for(\"testing\")))'", path=str(tmpdir))
+    assert stdout == b"format: faCTT76kagtDuZE5wnoiD1CxhGKmbgiX\n"
+    assert stderr == b""
+    assert rcode == 0
diff --git a/tests/unit/metadata.py b/tests/unit/metadata.py
new file mode 100644
index 0000000..dc37456
--- /dev/null
+++ b/tests/unit/metadata.py
@@ -0,0 +1,23 @@
+from bundlewrap.metadata import atomic, dictionary_key_map
+
+
+def test_dictmap():
+    assert set(dictionary_key_map({
+        'key1': 1,
+        'key2': {
+            'key3': [3, 3, 3],
+            'key4': atomic([4, 4, 4]),
+            'key5': {
+                'key6': "6",
+            },
+            'key7': set((7, 7, 7)),
+        },
+    })) == set([
+        ("key1",),
+        ("key2",),
+        ("key2", "key3"),
+        ("key2", "key4"),
+        ("key2", "key5"),
+        ("key2", "key5", "key6"),
+        ("key2", "key7"),
+    ])
diff --git a/tests/unit/utils_time.py b/tests/unit/utils_time.py
new file mode 100644
index 0000000..e5b967d
--- /dev/null
+++ b/tests/unit/utils_time.py
@@ -0,0 +1,37 @@
+# -*- coding: utf-8 -*-
+from __future__ import unicode_literals
+
+from datetime import timedelta
+
+from bundlewrap.utils.time import format_duration, parse_duration
+
+
+def test_format_duration():
+    assert format_duration(timedelta()) == "0s"
+    assert format_duration(timedelta(seconds=10)) == "10s"
+    assert format_duration(timedelta(minutes=10)) == "10m"
+    assert format_duration(timedelta(hours=10)) == "10h"
+    assert format_duration(timedelta(days=10)) == "10d"
+    assert format_duration(timedelta(days=1, hours=2, minutes=3, seconds=4)) == "1d 2h 3m 4s"
+
+
+def test_parse_duration():
+    assert parse_duration("0s") == timedelta()
+    assert parse_duration("10s") == timedelta(seconds=10)
+    assert parse_duration("10m") == timedelta(minutes=10)
+    assert parse_duration("10h") == timedelta(hours=10)
+    assert parse_duration("10d") == timedelta(days=10)
+    assert parse_duration("1d 2h 3m 4s") == timedelta(days=1, hours=2, minutes=3, seconds=4)
+
+
+def test_parse_format_inverse():
+    for duration in (
+        "0s",
+        "1s",
+        "1m",
+        "1h",
+        "1d",
+        "1d 4h",
+        "1d 4h 7s",
+    ):
+        assert format_duration(parse_duration(duration)) == duration
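+        # round-tripping guards against the two functions drifting apart,
+        # e.g. one of them gaining a unit the other does not understand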