Imported Upstream version 1.7.2
SVN-Git Migration
0 | 2016-05-22 S. Thiell <sthiell@stanford.edu> | |
1 | ||
2 | * EngineClient.py: handle broken pipe on write() (ticket #196). | |
3 | ||
4 | 2016-04-24 S. Thiell <sthiell@stanford.edu> | |
5 | ||
6 | * NodeSet.py: allow empty string as valid argument for empty NodeSet | |
7 | objects (ticket #294). | |
8 | ||
9 | 2016-02-28 S. Thiell <sthiell@stanford.edu> | |
10 | ||
11 | * Version 1.7.1 released. | |
12 | ||
13 | 2016-02-27 S. Thiell <sthiell@stanford.edu> | |
14 | ||
15 | * Worker/Tree.py: implement tree mode reverse copy using tar commands to | |
16 | fix clush --rcopy (ticket #290). | |
17 | ||
18 | * Communication.py: remove 76-char base64 encoding fixed length | |
19 | restriction for tree XML payload communication. The default max length is | |
20 | now 64K, which gives good results. The environment variable | |
21 | 'CLUSTERSHELL_GW_B64_LINE_LENGTH' is propagated to gateways and may be | |
22 | used to override this value. | |
23 | ||
24 | 2016-02-12 S. Thiell <sthiell@stanford.edu> | |
25 | ||
26 | * RangeSet.py and NodeSet.py: fix bad 0-padding handling by RangeSetND or | |
27 | NodeSet objects in nD (ticket #286). | |
28 | ||
29 | 2016-02-09 S. Thiell <sthiell@stanford.edu> | |
30 | ||
31 | * NodeSet.py: fix parser issue when brackets were used with nodeset | |
32 | starting with a digit (ticket #284). | |
33 | ||
34 | 2015-11-30 S. Thiell <sthiell@stanford.edu> | |
35 | ||
36 | * CLI/Nodeset.py: fix --output-format / -O when folding (-f) by applying | |
37 | the provided format to each node (ticket #277). | |
38 | ||
0 | 39 | 2015-11-10 S. Thiell <sthiell@stanford.edu> |
1 | 40 | |
2 | 41 | * Version 1.7 released. |
0 | 0 | %{!?python_sitelib: %global python_sitelib %(%{__python} -c "from distutils.sysconfig import get_python_lib; print(get_python_lib())")} |
1 | 1 | |
2 | 2 | Name: clustershell |
3 | Version: 1.7 | |
3 | Version: 1.7.2 | |
4 | 4 | Release: 1%{?dist} |
5 | 5 | Summary: Python framework for efficient cluster administration |
6 | 6 | |
7 | 7 | Group: System Environment/Base |
8 | 8 | License: CeCILL-C |
9 | URL: http://clustershell.sourceforge.net/ | |
10 | Source0: http://downloads.sourceforge.net/%{name}/%{name}-%{version}.tar.gz | |
9 | URL: http://cea-hpc.github.io/clustershell/ | |
10 | Source0: https://github.com/cea-hpc/%{name}/archive/v%{version}.tar.gz#/%{name}-%{version}.tar.gz | |
11 | 11 | BuildRoot: %(mktemp -ud %{_tmppath}/%{name}-%{version}-%{release}-XXXXXX) |
12 | 12 | BuildArch: noarch |
13 | 13 | BuildRequires: python-devel python-setuptools |
100 | 100 | %{vimdatadir}/syntax/groupsconf.vim |
101 | 101 | |
102 | 102 | %changelog |
103 | * Sat Jun 18 2016 Stephane Thiell <sthiell@stanford.edu> 1.7.2-1 | |
104 | - update to 1.7.2 | |
105 | ||
106 | * Mon Feb 29 2016 Stephane Thiell <sthiell@stanford.edu> 1.7.1-1 | |
107 | - update to 1.7.1 | |
108 | ||
103 | 109 | * Tue Nov 10 2015 Stephane Thiell <sthiell@stanford.edu> 1.7-1 |
104 | 110 | - update to 1.7 |
105 | 111 |
7 | 7 | connect_timeout: 15 |
8 | 8 | command_timeout: 0 |
9 | 9 | color: auto |
10 | fd_max: 16384 | |
10 | fd_max: 8192 | |
11 | 11 | history_size: 100 |
12 | 12 | node_count: yes |
13 | 13 | verbosity: 1 |
12 | 12 | # prod-[0001-0144] |
13 | 13 | # |
14 | 14 | [ace] |
15 | map: cd /acefs/servers; for s in server-*; do [[ $(cat $s/type) == $GROUP && -f $s/host/name ]] && cat $s/host/name; done || true | |
16 | all: cat /acefs/servers/server-*/host/name | |
17 | list: cd /acefs/servers; for s in server-*; do [ -f $s/host/name ] && cat $s/type; done | sort -u | |
18 | ||
19 | # ACE @server -> host | |
20 | # | |
21 | # server is the permanent node name used by ACE | |
22 | # host is the dynamic/current cluster node name | |
23 | # | |
24 | # example: | |
25 | # $ nodeset -Gs ace-servers -r prod-0038 | |
26 | # @server-0042 | |
27 | # | |
28 | [ace-servers] | |
29 | map: [ -f /acefs/servers/$GROUP/host/name ] && cat /acefs/servers/$GROUP/host/name | |
30 | all: cat /acefs/servers/server-*/host/name | |
31 | list: cd /acefs/servers; for s in server-*; do [ -f $s/host/name ] && echo $s; done || true | |
15 | map: ace servers | awk '/$GROUP/ {gsub("*",""); print $11}' | |
16 | all: ace servers | awk '!/Type/ && $11 != "-" {gsub("*",""); print $11}' | |
17 | list: ace servers | awk '!/Type/ && $11 != "-" {print $2}' |
15 | 15 | [slurmstate] |
16 | 16 | map: if [ "$GROUP" == "broken" ]; then sinfo -h -o "%N" -t down,drained; else sinfo -h -o "%N" -t $GROUP; fi |
17 | 17 | all: sinfo -h -o "%N" |
18 | list: sinfo -h -o "%T" | |
18 | list: sinfo -h -o "%T" | tr -d '~#$*' | |
19 | 19 | reverse: sinfo -h -N -o "%T" -n $NODE |
20 | 20 | cache_time: 300 |
0 | 0 | .\" Man page generated from reStructuredText. |
1 | 1 | . |
2 | .TH CLUBAK 1 "2015-11-05" "1.7" "ClusterShell User Manual" | |
2 | .TH CLUBAK 1 "2016-02-28" "1.7.1" "ClusterShell User Manual" | |
3 | 3 | .SH NAME |
4 | 4 | clubak \- format output from clush/pdsh-like output and more |
5 | 5 | . |
0 | 0 | .\" Man page generated from reStructuredText. |
1 | 1 | . |
2 | .TH CLUSH 1 "2015-11-01" "1.7" "ClusterShell User Manual" | |
2 | .TH CLUSH 1 "2016-06-18" "1.7.2" "ClusterShell User Manual" | |
3 | 3 | .SH NAME |
4 | 4 | clush \- execute shell commands on a cluster |
5 | 5 | . |
189 | 189 | .TP |
190 | 190 | .BI \-\-topology\fB= FILE |
191 | 191 | topology configuration file to use for tree mode |
192 | .TP | |
193 | .BI \-\-pick\fB= N | |
194 | pick N node(s) at random in nodeset | |
192 | 195 | .UNINDENT |
193 | 196 | .TP |
194 | 197 | .B Output behaviour: |
272 | 275 | .TP |
273 | 276 | .BI \-R \ WORKER\fP,\fB \ \-\-worker\fB= WORKER |
274 | 277 | worker name to use for connection (\fBexec\fP, \fBssh\fP, \fBrsh\fP, \fBpdsh\fP), default is \fBssh\fP |
278 | .TP | |
279 | .BI \-\-remote\fB= REMOTE | |
280 | whether to enable remote execution: in tree mode, \(aqyes\(aq forces connections to the leaf nodes for execution, \(aqno\(aq establishes connections up to the leaf parent nodes for execution (default is \(aqyes\(aq) | |
275 | 281 | .UNINDENT |
276 | 282 | .UNINDENT |
277 | 283 | .sp |
0 | 0 | .\" Man page generated from reStructuredText. |
1 | 1 | . |
2 | .TH NODESET 1 "2015-11-05" "1.7" "ClusterShell User Manual" | |
2 | .TH NODESET 1 "2016-06-18" "1.7.2" "ClusterShell User Manual" | |
3 | 3 | .SH NAME |
4 | 4 | nodeset \- compute advanced nodeset operations |
5 | 5 | . |
40 | 40 | \fBnodeset\fP is a utility command provided with the ClusterShell library which
41 | 41 | implements some features of ClusterShell\(aqs NodeSet and RangeSet Python classes. |
42 | 42 | It provides easy manipulation of 1D or nD\-indexed cluster nodes and node |
43 | groups. | |
43 | groups and supports RFC 1123 (except that a node name can\(aqt be entirely numeric). | |
44 | 44 | .sp |
45 | 45 | Also, \fBnodeset\fP is automatically bound to the library node group resolution |
46 | 46 | mechanism. Thus, it is especially useful to enhance cluster aware |
136 | 136 | .TP |
137 | 137 | .BI \-\-axis\fB= RANGESET |
138 | 138 | for nD nodesets, fold along provided axis only. Axes are indexed from 1 to n and can be specified here either using the rangeset syntax, eg. \(aq1\(aq, \(aq1\-2\(aq, \(aq1,3\(aq, or by a single negative number meaning that the index is counted from the end. Because some nodesets may have several different dimensions, axis indices are silently truncated to fall in the allowed range.
139 | .TP | |
140 | .BI \-\-pick\fB= N | |
141 | pick N node(s) at random in nodeset | |
139 | 142 | .UNINDENT |
140 | 143 | .UNINDENT |
141 | 144 | .UNINDENT |
0 | 0 | .\" Man page generated from reStructuredText. |
1 | 1 | . |
2 | .TH CLUSH.CONF 5 "2015-08-27" "1.7" "ClusterShell User Manual" | |
2 | .TH CLUSH.CONF 5 "2016-06-18" "1.7.2" "ClusterShell User Manual" | |
3 | 3 | .SH NAME |
4 | 4 | clush.conf \- Configuration file for clush |
5 | 5 | . |
0 | 0 | .\" Man page generated from reStructuredText. |
1 | 1 | . |
2 | .TH GROUPS.CONF 5 "2015-11-06" "1.7" "ClusterShell User Manual" | |
2 | .TH GROUPS.CONF 5 "2016-06-18" "1.7.2" "ClusterShell User Manual" | |
3 | 3 | .SH NAME |
4 | 4 | groups.conf \- Configuration file for ClusterShell node groups |
5 | 5 | . |
40 | 40 | |
41 | 41 | # General information about the project. |
42 | 42 | project = u'clustershell' |
43 | copyright = u'2015, Stephane Thiell' | |
43 | copyright = u'2016, Stephane Thiell' | |
44 | 44 | |
45 | 45 | # The version info for the project you're documenting, acts as replacement for |
46 | 46 | # |version| and |release|, also used in various other places throughout the |
47 | 47 | # built documents. |
48 | 48 | # |
49 | 49 | # The short X.Y version. |
50 | version = '1.7' | |
50 | version = '1.7.2' | |
51 | 51 | # The full version, including alpha/beta/rc tags. |
52 | release = '1.7' | |
52 | release = '1.7.2' | |
53 | 53 | |
54 | 54 | # The language for content autogenerated by Sphinx. Refer to documentation |
55 | 55 | # for a list of supported languages. |
239 | 239 | # dir menu entry, description, category) |
240 | 240 | texinfo_documents = [ |
241 | 241 | ('index', 'clustershell', u'ClusterShell Documentation', |
242 | u'Stephane Thiell', 'clustershell', 'One line description of project.', | |
242 | u'Stephane Thiell', 'clustershell', | |
243 | 'Manage node sets, node groups and execute commands on cluster', | |
243 | 244 | 'Miscellaneous'), |
244 | 245 | ] |
245 | 246 |
1 | 1 | |
2 | 2 | Release Notes |
3 | 3 | ============= |
4 | ||
5 | Version 1.7.2 | |
6 | ------------- | |
7 | ||
8 | This minor version fixes a defect in :ref:`tree mode <clush-tree>` that led | |
9 | to broken pipe errors or unwanted backtraces. | |
10 | ||
11 | The :class:`.NodeSet` class now supports the empty string as input. In | |
12 | practice, you may now safely reuse the output of a | |
13 | :ref:`nodeset <nodeset-tool>` command as an input argument for another | |
14 | :ref:`nodeset <nodeset-tool>` command, even if the result is an empty string. | |
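
As a quick illustration of this behaviour from the Python API (a minimal
interactive sketch; only the :class:`.NodeSet` constructor is assumed here)::

    >>> from ClusterShell.NodeSet import NodeSet
    >>> ns = NodeSet("")    # empty string is now a valid input (ticket #294)
    >>> len(ns)
    0
    >>> str(ns)             # and it folds back to an empty string
    ''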
15 | ||
16 | A new option ``--pick`` is available for :ref:`clush <clush-pick>` and | |
17 | :ref:`nodeset <nodeset-pick>` to pick N node(s) at random from the resulting | |
18 | node set. | |
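
Internally the new option simply draws a random sample from the target set
(the clush and nodeset changes further below use ``random.sample()`` for
this). Here is a minimal sketch of the same idea with the Python API; the
node names below are only an example::

    >>> import random
    >>> from ClusterShell.NodeSet import NodeSet
    >>> ns = NodeSet("node[1-100]")
    >>> picked = NodeSet.fromlist(random.sample(ns, 3))
    >>> len(picked)
    3
    >>> str(picked)         # actual nodes vary, e.g. 'node[7,42,61]'
    'node[7,42,61]'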
19 | ||
20 | For more details, please have a look at `GitHub Issues for 1.7.2 milestone`_. | |
21 | ||
22 | ClusterShell 1.7.2 is compatible with Python 2.4 up to Python 2.7 (for | |
23 | example: from RedHat EL5 to EL7). Upgrades from versions 1.6 or 1.7 are | |
24 | supported. | |
25 | ||
26 | Version 1.7.1 | |
27 | ------------- | |
28 | ||
29 | This minor version contains a few bug fixes, mostly related to | |
30 | :ref:`guide-NodeSet`. | |
31 | ||
32 | This version also contains bug fixes and performance improvements in tree | |
33 | propagation mode. | |
34 | ||
35 | For more details, please have a look at `GitHub Issues for 1.7.1 milestone`_. | |
36 | ||
37 | ClusterShell 1.7.1 is compatible with Python 2.4 up to Python 2.7 (for | |
38 | example: from RedHat EL5 to EL7). Upgrades from versions 1.6 or 1.7 are | |
39 | supported. | |
4 | 40 | |
5 | 41 | Version 1.7 |
6 | 42 | ----------- |
24 | 60 | |
25 | 61 | Version 1.7 and possible future minor versions 1.7.x are compatible with |
26 | 62 | Python 2.4 up to Python 2.7 (for example: from RedHat EL5 to EL7). Upgrade |
27 | from version 1.6 to 1.7 should be painless and are fully supported. | |
63 | from version 1.6 to 1.7 should be painless and is fully supported. | |
28 | 64 | |
29 | 65 | The next major version of ClusterShell will require at least Python 2.6. We |
30 | 66 | will also soon start working on Python 3 support. |
229 | 265 | ClusterShell 1.7 is now fully compatible with PIP and supports user |
230 | 266 | configuration files:: |
231 | 267 | |
232 | $ pip --user clustershell | |
268 | $ pip install --user clustershell | |
233 | 269 | |
234 | 270 | Please see :ref:`install-pip-user`. |
271 | ||
272 | .. _GitHub Issues for 1.7.1 milestone: https://github.com/cea-hpc/clustershell/issues?utf8=%E2%9C%93&q=is%3Aissue+milestone%3A1.7.1 | |
273 | .. _GitHub Issues for 1.7.2 milestone: https://github.com/cea-hpc/clustershell/issues?utf8=%E2%9C%93&q=is%3Aissue+milestone%3A1.7.2 |
76 | 76 | Selecting all nodes |
77 | 77 | """"""""""""""""""" |
78 | 78 | |
79 | Finally, a special option ``-a`` (without argument) can be used to select | |
80 | **all** nodes, in the sense of ClusterShell node groups (see | |
79 | The special option ``-a`` (without argument) can be used to select **all** | |
80 | nodes, in the sense of ClusterShell node groups (see | |
81 | 81 | :ref:`node groups configuration <groups-config>` for more details on special |
82 | 82 | **all** external shell command upcall). If not properly configured, the |
83 | 83 | ``-a`` option may lead to a runtime error like:: |
84 | 84 | |
85 | 85 | clush: External error: Not enough working external calls (all, or map + |
86 | 86 | list) defined to get all node |
87 | ||
88 | .. _clush-pick: | |
89 | ||
90 | Picking node(s) at random | |
91 | """"""""""""""""""""""""" | |
92 | ||
93 | Use ``--pick`` with a maximum number of nodes you wish to pick randomly from | |
94 | the targeted node set. *clush* will then run only on the selected node(s). The | |
95 | following example will run a script on a single random node picked from the | |
96 | ``@compute`` group:: | |
97 | ||
98 | $ clush -w @compute --pick=1 ./nonreg-single-client-fs-io.sh | |
87 | 99 | |
88 | 100 | Host files |
89 | 101 | """""""""" |
155 | 167 | |
156 | 168 | |
157 | 169 | At runtime, ClusterShell will pick an initial propagation tree from this |
158 | topology graph definition. | |
170 | topology graph definition and the current root node. Multiple admin/root | |
171 | nodes may be defined in the file. | |
159 | 172 | |
160 | 173 | .. note:: The algorithm used in Tree mode does not rely on gateway system |
161 | 174 | hostnames anymore. In topology.conf, just use the hosts or aliases needed |
172 | 185 | target nodes that are defined there. The topology file path can be changed |
173 | 186 | using the ``--topology`` command line option. |
174 | 187 | |
175 | .. note:: If using ``clush -v`` (verbose option), clush will display an ASCII | |
188 | .. note:: If using ``clush -d`` (debug option), clush will display an ASCII | |
176 | 189 | representation of the initial propagation tree used. This is useful when |
177 | 190 | working on Tree mode configuration. |
178 | 191 |
9 | 9 | efficient, the *nodeset* command can quickly improve traditional cluster |
10 | 10 | shell scripts. It is also full-featured as it provides most of the |
11 | 11 | :class:`.NodeSet` and :class:`.RangeSet` class methods (see also |
12 | :ref:`class-NodeSet`, and :ref:`class-RangeSet`). Most of the examples in this | |
13 | section are using simple indexed node sets, however, *nodeset* supports | |
14 | multidimensional node sets, like *dc[1-2]n[1-99]*, introduced in version 1.7 | |
15 | (see :ref:`class-RangeSetND` for more info). | |
16 | ||
17 | This section will guide you through the basics and also advanced features of | |
18 | *nodeset*. | |
12 | :ref:`class-NodeSet`, and :ref:`class-RangeSet`). | |
13 | ||
14 | ||
15 | The *nodeset* command supports RFC 1123 (which defines naming standards for | |
16 | host names) except that a node name can't be entirely numeric. | |
17 | ||
18 | Most of the examples in this section use simple indexed node sets; however, | |
19 | *nodeset* also supports multidimensional node sets, like *dc[1-2]n[1-99]*, | |
20 | introduced in version 1.7 (see :ref:`class-RangeSetND` for more info). | |
21 | ||
22 | This section will guide you through the basics and also more advanced features | |
23 | of *nodeset*. | |
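
The same multidimensional node sets can also be handled directly from Python;
a minimal interactive sketch (only :class:`.NodeSet` is assumed here)::

    >>> from ClusterShell.NodeSet import NodeSet
    >>> ns = NodeSet("dc[1-2]n[1-99]")   # 2D node set from the example above
    >>> len(ns)                          # 2 * 99 nodes
    198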
19 | 24 | |
20 | 25 | Usage basics |
21 | 26 | ^^^^^^^^^^^^ |
170 | 175 | |
171 | 176 | The ``-O, --output-format`` option can be used to format output results of |
172 | 177 | most *nodeset* commands. The string passed to this option is used as a base |
173 | format pattern applied to each result. The default format string is *"%s"*. | |
174 | Formatting is performed using the Python builtin string formatting operator, | |
175 | so you must use one format operator of the right type (*%s* is guaranteed to | |
176 | work in all cases). A simple example when using the fold command is shown | |
177 | below:: | |
178 | ||
179 | $ nodeset --output-format='%s-ipmi' -f node1 node2 node3 | |
180 | node[1-3]-ipmi | |
181 | ||
182 | Another output formatting example when using the expand command:: | |
178 | format pattern applied to each node or each result (depending on the command | |
179 | and other options requested). The default format string is *"%s"*. Formatting | |
180 | is performed using the Python builtin string formatting operator, so you must | |
181 | use one format operator of the right type (*%s* is guaranteed to work in all | |
182 | cases). Here is an output formatting example when using the expand command:: | |
183 | 183 | |
184 | 184 | $ nodeset --output-format='%s-ipmi' -e node[1-2]x[1-2] |
185 | 185 | node1x1-ipmi node1x2-ipmi node2x1-ipmi node2x2-ipmi |
186 | 186 | |
187 | Output formatting and separator may be combined when using the expand | |
188 | command:: | |
187 | Combining output formatting with a separator can be useful when using the | |
188 | expand command, as shown here:: | |
189 | 189 | |
190 | 190 | $ nodeset -O '%s-ipmi' -S '\n' -e node[1-2]x[1-2] |
191 | 191 | node1x1-ipmi |
192 | 192 | node1x2-ipmi |
193 | 193 | node2x1-ipmi |
194 | 194 | node2x2-ipmi |
195 | ||
196 | When using the output formatting option along with the folding command, the | |
197 | format is applied to each node but the result is still folded:: | |
198 | ||
199 | $ nodeset -O '%s-ipmi' -f mgmt1 mgmt2 login[1-4] | |
200 | login[1-4]-ipmi,mgmt[1-2]-ipmi | |
195 | 201 | |
196 | 202 | |
197 | 203 | .. _nodeset-stepping: |
415 | 421 | Special operations |
416 | 422 | ^^^^^^^^^^^^^^^^^^ |
417 | 423 | |
418 | Three special operations are currently available: node set slicing, splitting | |
419 | on a predefined node count and splitting non-contiguous subsets. There are all | |
420 | explained below. | |
424 | A few special operations are currently available: node set slicing, splitting | |
425 | on a predefined node count, splitting non-contiguous subsets, choosing the fold | |
426 | axis (for multidimensional node sets) and picking N nodes randomly. They are | |
427 | all explained below. | |
421 | 428 | |
422 | 429 | Slicing |
423 | 430 | """"""" |
559 | 566 | $ nodeset --axis=-1 -f comp-[1-2]-[1-36],login-[1-2] |
560 | 567 | comp-1-[1-36],comp-2-[1-36],login-[1-2] |
561 | 568 | |
569 | .. _nodeset-pick: | |
570 | ||
571 | Picking N node(s) at random | |
572 | """"""""""""""""""""""""""" | |
573 | ||
574 | Use ``--pick`` with a maximum number of nodes you wish to pick randomly from | |
575 | the resulting node set (or from the resulting range set with ``-R``):: | |
576 | ||
577 | $ nodeset --pick=1 -f node11 node12 node13 | |
578 | node12 | |
579 | $ nodeset --pick=2 -f node11 node12 node13 | |
580 | node[11,13] | |
581 | ||
562 | 582 | |
563 | 583 | .. _nodeset-groups: |
564 | 584 |
6 | 6 | -------------------------------------------------- |
7 | 7 | |
8 | 8 | :Author: Stephane Thiell <sthiell@stanford.edu> |
9 | :Date: 2015-11-05 | |
9 | :Date: 2016-06-18 | |
10 | 10 | :Copyright: CeCILL-C V1 |
11 | :Version: 1.7 | |
11 | :Version: 1.7.2 | |
12 | 12 | :Manual section: 1 |
13 | 13 | :Manual group: ClusterShell User Manual |
14 | 14 |
6 | 6 | ------------------------------ |
7 | 7 | |
8 | 8 | :Author: Stephane Thiell, <sthiell@stanford.edu> |
9 | :Date: 2015-08-27 | |
9 | :Date: 2016-06-18 | |
10 | 10 | :Copyright: CeCILL-C V1 |
11 | :Version: 1.7 | |
11 | :Version: 1.7.2 | |
12 | 12 | :Manual section: 5 |
13 | 13 | :Manual group: ClusterShell User Manual |
14 | 14 |
6 | 6 | ----------------------------------- |
7 | 7 | |
8 | 8 | :Author: Stephane Thiell <sthiell@stanford.edu> |
9 | :Date: 2015-11-01 | |
9 | :Date: 2016-06-18 | |
10 | 10 | :Copyright: CeCILL-C V1 |
11 | :Version: 1.7 | |
11 | :Version: 1.7.2 | |
12 | 12 | :Manual section: 1 |
13 | 13 | :Manual group: ClusterShell User Manual |
14 | 14 | |
143 | 143 | --hostfile=FILE, --machinefile=FILE |
144 | 144 | path to a file containing a list of single hosts, node sets or node groups, separated by spaces and lines (may be specified multiple times, one per file) |
145 | 145 | --topology=FILE topology configuration file to use for tree mode |
146 | --pick=N pick N node(s) at random in nodeset | |
146 | 147 | |
147 | 148 | Output behaviour: |
148 | 149 | -q, --quiet be quiet, print essential output only |
180 | 181 | limit time for command to run on the node |
181 | 182 | -R WORKER, --worker=WORKER |
182 | 183 | worker name to use for connection (``exec``, ``ssh``, ``rsh``, ``pdsh``), default is ``ssh`` |
184 | --remote=REMOTE whether to enable remote execution: in tree mode, 'yes' forces connections to the leaf nodes for execution, 'no' establishes connections up to the leaf parent nodes for execution (default is 'yes') | |
183 | 185 | |
184 | 186 | For a short explanation of these options, see ``-h, --help``. |
185 | 187 |
6 | 6 | ----------------------------------------------- |
7 | 7 | |
8 | 8 | :Author: Stephane Thiell, <sthiell@stanford.edu> |
9 | :Date: 2015-11-06 | |
9 | :Date: 2016-06-18 | |
10 | 10 | :Copyright: CeCILL-C V1 |
11 | :Version: 1.7 | |
11 | :Version: 1.7.2 | |
12 | 12 | :Manual section: 5 |
13 | 13 | :Manual group: ClusterShell User Manual |
14 | 14 |
6 | 6 | ----------------------------------- |
7 | 7 | |
8 | 8 | :Author: Stephane Thiell <sthiell@stanford.edu> |
9 | :Date: 2015-11-05 | |
9 | :Date: 2016-06-18 | |
10 | 10 | :Copyright: CeCILL-C V1 |
11 | :Version: 1.7 | |
11 | :Version: 1.7.2 | |
12 | 12 | :Manual section: 1 |
13 | 13 | :Manual group: ClusterShell User Manual |
14 | 14 | |
24 | 24 | ``nodeset`` is a utility command provided with the ClusterShell library which
25 | 25 | implements some features of ClusterShell's NodeSet and RangeSet Python classes. |
26 | 26 | It provides easy manipulation of 1D or nD-indexed cluster nodes and node |
27 | groups. | |
27 | groups and supports RFC 1123 (except that a node name can't be entirely numeric). | |
28 | 28 | |
29 | 29 | Also, ``nodeset`` is automatically bound to the library node group resolution |
30 | 30 | mechanism. Thus, it is especially useful to enhance cluster aware |
74 | 74 | --split=MAXSPLIT split result into a number of subsets |
75 | 75 | --contiguous split result into contiguous subsets (ie. for nodeset, subsets will contain nodes with the same pattern name and a contiguous range of indexes, like foobar[1-100]; for rangeset, subsets will consist of contiguous index ranges)
76 | 76 | --axis=RANGESET for nD nodesets, fold along provided axis only. Axes are indexed from 1 to n and can be specified here either using the rangeset syntax, eg. '1', '1-2', '1,3', or by a single negative number meaning that the index is counted from the end. Because some nodesets may have several different dimensions, axis indices are silently truncated to fall in the allowed range.
77 | --pick=N pick N node(s) at random in nodeset | |
77 | 78 | |
78 | 79 | |
79 | 80 | For a short explanation of these options, see ``-h, --help``. |
0 | 0 | #!/usr/bin/env python |
1 | 1 | # |
2 | # Copyright CEA/DAM/DIF (2007-2015) | |
2 | # Copyright CEA/DAM/DIF (2007-2016) | |
3 | 3 | # Contributor: Stephane THIELL <sthiell@stanford.edu> |
4 | 4 | # |
5 | 5 | # This file is part of the ClusterShell library. |
52 | 52 | import signal |
53 | 53 | import time |
54 | 54 | import threading |
55 | import random | |
55 | 56 | |
56 | 57 | from ClusterShell.Defaults import DEFAULTS, _load_workerclass |
57 | 58 | from ClusterShell.CLI.Config import ClushConfig, ClushConfigError |
596 | 597 | def _stdin_thread_start(stdin_port, display): |
597 | 598 | """Standard input reader thread entry point.""" |
598 | 599 | try: |
599 | # Note: read length should be larger and a multiple of 4096 for best | |
600 | # performance to avoid excessive unreg/register of writer fd in | |
601 | # engine; however, it shouldn't be too large. | |
602 | bufsize = 4096 * 8 | |
600 | # Note: read length should be as large as possible for performance | |
601 | # yet not so large as to introduce artificial latency. | |
602 | # 64k seems to be perfect with an openssh backend (they issue 64k | |
603 | # reads) ; could consider making it an option for e.g. gsissh. | |
604 | bufsize = 64 * 1024 | |
603 | 605 | # thread loop: blocking read stdin + send messages to specified |
604 | 606 | # port object |
605 | 607 | buf = sys.stdin.read(bufsize) |
624 | 626 | # Launch a dedicated thread to read stdin in blocking mode. Indeed stdin |
625 | 627 | # can be a file, so we cannot use a WorkerSimple here as polling on file |
626 | 628 | # may result in different behaviors depending on selected engine. |
627 | threading.Thread(None, _stdin_thread_start, args=(port, display)).start() | |
629 | stdin_thread = threading.Thread(None, _stdin_thread_start, args=(port, display)) | |
630 | # setDaemon because we're sometimes left with data that has been read and | |
631 | # ssh connection already closed. | |
632 | # Syntax for compat with Python < 2.6 | |
633 | stdin_thread.setDaemon(True) | |
634 | stdin_thread.start() | |
628 | 635 | |
629 | 636 | def run_command(task, cmd, ns, timeout, display, remote): |
630 | 637 | """ |
632 | 639 | results in a dshbak way when gathering is used. |
633 | 640 | """ |
634 | 641 | task.set_default("USER_running", True) |
635 | ||
636 | if display.verbosity >= VERB_VERB and task.topology: | |
637 | print Display.COLOR_RESULT_FMT % '-' * 15 | |
638 | print Display.COLOR_RESULT_FMT % task.topology, | |
639 | print Display.COLOR_RESULT_FMT % '-' * 15 | |
640 | 642 | |
641 | 643 | if (display.gather or display.line_mode) and ns is not None: |
642 | 644 | if display.gather and display.line_mode: |
668 | 670 | task.set_default("USER_running", True) |
669 | 671 | task.set_default("USER_copies", len(sources)) |
670 | 672 | |
671 | if display.verbosity >= VERB_VERB and task.topology: | |
672 | print Display.COLOR_RESULT_FMT % '-' * 15 | |
673 | print Display.COLOR_RESULT_FMT % task.topology, | |
674 | print Display.COLOR_RESULT_FMT % '-' * 15 | |
675 | ||
676 | 673 | copyhandler = CopyOutputHandler(display) |
677 | 674 | if display.verbosity in (VERB_STD, VERB_VERB): |
678 | 675 | copyhandler.runtimer_init(task, len(ns) * len(sources)) |
680 | 677 | # Sources check |
681 | 678 | for source in sources: |
682 | 679 | if not exists(source): |
683 | display.vprint_err(VERB_QUIET, "ERROR: file \"%s\" not found" % \ | |
684 | source) | |
680 | display.vprint_err(VERB_QUIET, | |
681 | 'ERROR: file "%s" not found' % source) | |
685 | 682 | clush_exit(1, task) |
686 | 683 | task.copy(source, dest, ns, handler=copyhandler, timeout=timeout, |
687 | 684 | preserve=preserve_flag) |
694 | 691 | |
695 | 692 | # Sanity checks |
696 | 693 | if not exists(dest): |
697 | display.vprint_err(VERB_QUIET, "ERROR: directory \"%s\" not found" % \ | |
698 | dest) | |
694 | display.vprint_err(VERB_QUIET, | |
695 | 'ERROR: directory "%s" not found' % dest) | |
699 | 696 | clush_exit(1, task) |
700 | 697 | if not isdir(dest): |
701 | display.vprint_err(VERB_QUIET, \ | |
702 | "ERROR: destination \"%s\" is not a directory" % dest) | |
698 | display.vprint_err(VERB_QUIET, | |
699 | 'ERROR: destination "%s" is not a directory' % dest) | |
703 | 700 | clush_exit(1, task) |
704 | 701 | |
705 | 702 | copyhandler = CopyOutputHandler(display, True) |
707 | 704 | copyhandler.runtimer_init(task, len(ns) * len(sources)) |
708 | 705 | for source in sources: |
709 | 706 | task.rcopy(source, dest, ns, handler=copyhandler, timeout=timeout, |
710 | preserve=preserve_flag) | |
707 | stderr=True, preserve=preserve_flag) | |
711 | 708 | task.resume() |
712 | 709 | |
713 | 710 | def set_fdlimit(fd_max, display): |
714 | 711 | """Make open file descriptors soft limit the max.""" |
715 | 712 | soft, hard = resource.getrlimit(resource.RLIMIT_NOFILE) |
716 | 713 | if hard < fd_max: |
717 | display.vprint(VERB_DEBUG, "Warning: Consider increasing max open " \ | |
718 | "files hard limit (%d)" % hard) | |
714 | msgfmt = 'Warning: fd_max set to %d but max open files hard limit is %d' | |
715 | display.vprint_err(VERB_VERB, msgfmt % (fd_max, hard)) | |
719 | 716 | rlim_max = min(hard, fd_max) |
720 | 717 | if soft != rlim_max: |
721 | display.vprint(VERB_DEBUG, "Modifying max open files soft limit: " \ | |
722 | "%d -> %d" % (soft, rlim_max)) | |
723 | resource.setrlimit(resource.RLIMIT_NOFILE, (rlim_max, hard)) | |
718 | msgfmt = 'Changing max open files soft limit from %d to %d' | |
719 | display.vprint(VERB_DEBUG, msgfmt % (soft, rlim_max)) | |
720 | try: | |
721 | resource.setrlimit(resource.RLIMIT_NOFILE, (rlim_max, hard)) | |
722 | except (ValueError, resource.error), exc: | |
723 | # Most probably the requested limit exceeds the system imposed limit | |
724 | msgfmt = 'Warning: Failed to set max open files limit to %d (%s)' | |
725 | display.vprint_err(VERB_VERB, msgfmt % (rlim_max, exc)) | |
724 | 726 | |
725 | 727 | def clush_exit(status, task=None): |
726 | 728 | """Exit script, flushing stdio buffers and stopping ClusterShell task.""" |
730 | 732 | task.join() |
731 | 733 | sys.exit(status) |
732 | 734 | else: |
735 | # Best effort cleanup if no task is set | |
733 | 736 | for stream in [sys.stdout, sys.stderr]: |
734 | stream.flush() | |
737 | try: | |
738 | stream.flush() | |
739 | except IOError: | |
740 | pass | |
735 | 741 | # Use os._exit to avoid threads cleanup |
736 | 742 | os._exit(status) |
737 | 743 | |
885 | 891 | if len(nodeset_base) < 1: |
886 | 892 | parser.error('No node to run on.') |
887 | 893 | |
894 | if options.pick and options.pick < len(nodeset_base): | |
895 | # convert to string for sample as nsiter() is slower for big | |
896 | # nodesets; and we assume options.pick will remain small-ish | |
897 | keep = random.sample(nodeset_base, options.pick) | |
898 | nodeset_base.intersection_update(','.join(keep)) | |
899 | if config.verbosity >= VERB_VERB: | |
900 | msg = "Picked random nodes: %s" % nodeset_base | |
901 | print Display.COLOR_RESULT_FMT % msg | |
902 | ||
888 | 903 | # Set open files limit. |
889 | 904 | set_fdlimit(config.fd_max, display) |
890 | 905 | |
958 | 973 | clush_exit(1, task) |
959 | 974 | |
960 | 975 | if options.topofile or task._default_tree_is_enabled(): |
961 | if config.verbosity >= VERB_VERB: | |
962 | print Display.COLOR_RESULT_FMT % "TREE MODE enabled" | |
963 | 976 | if options.topofile: |
964 | 977 | task.load_topology(options.topofile) |
978 | if config.verbosity >= VERB_VERB: | |
979 | roots = len(task.topology.root.nodeset) | |
980 | gws = task.topology.inner_node_count() - roots | |
981 | msg = "enabling tree topology (%d gateways)" % gws | |
982 | print >> sys.stderr, "clush: %s" % msg | |
965 | 983 | |
966 | 984 | if options.grooming_delay: |
967 | 985 | if config.verbosity >= VERB_VERB: |
968 | print Display.COLOR_RESULT_FMT % ("Grooming delay: %f" % \ | |
986 | msg = Display.COLOR_RESULT_FMT % ("Grooming delay: %f" % | |
969 | 987 | options.grooming_delay) |
988 | print >> sys.stderr, msg | |
970 | 989 | task.set_info("grooming_delay", options.grooming_delay) |
990 | elif options.rcopy: | |
991 | # By default, --rcopy should inhibit grooming | |
992 | task.set_info("grooming_delay", 0) | |
971 | 993 | |
972 | 994 | if config.ssh_user: |
973 | 995 | task.set_info("ssh_user", config.ssh_user) |
1031 | 1053 | config.command_timeout, |
1032 | 1054 | op)) |
1033 | 1055 | if not task.default("USER_interactive"): |
1056 | if display.verbosity >= VERB_DEBUG and task.topology: | |
1057 | print Display.COLOR_RESULT_FMT % '-' * 15 | |
1058 | print Display.COLOR_RESULT_FMT % task.topology, | |
1059 | print Display.COLOR_RESULT_FMT % '-' * 15 | |
1034 | 1060 | if options.copy: |
1035 | 1061 | run_copy(task, args, options.dest_path, nodeset_base, timeout, |
1036 | 1062 | options.preserve_flag, display) |
0 | 0 | #!/usr/bin/env python |
1 | 1 | # |
2 | # Copyright CEA/DAM/DIF (2010-2015) | |
2 | # Copyright CEA/DAM/DIF (2010-2016) | |
3 | 3 | # Contributor: Stephane THIELL <sthiell@stanford.edu> |
4 | 4 | # |
5 | 5 | # This file is part of the ClusterShell library. |
63 | 63 | "color": THREE_CHOICES[-1], # auto |
64 | 64 | "verbosity": "%d" % VERB_STD, |
65 | 65 | "node_count": "yes", |
66 | "fd_max": "16384"} | |
66 | "fd_max": "8192"} | |
67 | 67 | |
68 | 68 | def __init__(self, options, filename=None): |
69 | 69 | """Initialize ClushConfig object from corresponding |
47 | 47 | from ClusterShell.NodeSet import NodeSetExternalError, NodeSetParseError |
48 | 48 | from ClusterShell.NodeSet import RangeSetParseError |
49 | 49 | from ClusterShell.Topology import TopologyError |
50 | from ClusterShell.Worker.EngineClient import EngineClientError | |
50 | 51 | from ClusterShell.Worker.Worker import WorkerError |
51 | 52 | |
52 | 53 | GENERIC_ERRORS = (EngineNotSupportedError, |
54 | EngineClientError, | |
53 | 55 | NodeSetExternalError, |
54 | 56 | NodeSetParseError, |
55 | 57 | RangeSetParseError, |
68 | 70 | try: |
69 | 71 | raise excobj |
70 | 72 | except EngineNotSupportedError, exc: |
71 | print >> sys.stderr, "%s: I/O events engine '%s' not supported on " \ | |
72 | "this host" % (prog, exc.engineid) | |
73 | msgfmt = "%s: I/O events engine '%s' not supported on this host" | |
74 | print >> sys.stderr, msgfmt % (prog, exc.engineid) | |
75 | except EngineClientError, exc: | |
76 | print >> sys.stderr, "%s: EngineClientError: %s" % (prog, exc) | |
73 | 77 | except NodeSetExternalError, exc: |
74 | 78 | print >> sys.stderr, "%s: External error:" % prog, exc |
75 | 79 | except (NodeSetParseError, RangeSetParseError), exc: |
76 | 80 | print >> sys.stderr, "%s: Parse error:" % prog, exc |
77 | 81 | except GroupResolverIllegalCharError, exc: |
78 | print >> sys.stderr, "%s: Illegal group character: \"%s\"" % (prog, exc) | |
82 | print >> sys.stderr, '%s: Illegal group character: "%s"' % (prog, exc) | |
79 | 83 | except GroupResolverSourceError, exc: |
80 | print >> sys.stderr, "%s: Unknown group source: \"%s\"" % (prog, exc) | |
84 | print >> sys.stderr, '%s: Unknown group source: "%s"' % (prog, exc) | |
81 | 85 | except GroupSourceNoUpcall, exc: |
82 | print >> sys.stderr, "%s: No %s upcall defined for group " \ | |
83 | "source \"%s\"" % (prog, exc, exc.group_source.name) | |
86 | msgfmt = '%s: No %s upcall defined for group source "%s"' | |
87 | print >> sys.stderr, msgfmt % (prog, exc, exc.group_source.name) | |
84 | 88 | except GroupSourceError, exc: |
85 | 89 | print >> sys.stderr, "%s: Group error:" % prog, exc |
86 | 90 | except TopologyError, exc: |
0 | 0 | #!/usr/bin/env python |
1 | 1 | # |
2 | # Copyright CEA/DAM/DIF (2008, 2009, 2010, 2011, 2012) | |
3 | # Contributor: Stephane THIELL <stephane.thiell@cea.fr> | |
2 | # Copyright CEA/DAM/DIF (2008-2016) | |
3 | # Contributor: Stephane THIELL <sthiell@stanford.edu> | |
4 | 4 | # |
5 | 5 | # This file is part of the ClusterShell library. |
6 | 6 | # |
38 | 38 | and RangeSet classes. |
39 | 39 | """ |
40 | 40 | |
41 | import logging | |
41 | 42 | import math |
42 | 43 | import sys |
44 | import random | |
43 | 45 | |
44 | 46 | from ClusterShell.CLI.Error import GENERIC_ERRORS, handle_generic_error |
45 | 47 | from ClusterShell.CLI.OptionParser import OptionParser |
169 | 171 | group_resolver = std_group_resolver() |
170 | 172 | |
171 | 173 | if options.debug: |
172 | group_resolver.set_verbosity(1) | |
174 | logging.basicConfig(level=logging.DEBUG) | |
173 | 175 | |
174 | 176 | # Check for command presence |
175 | 177 | cmdcount = int(options.count) + int(options.expand) + \ |
283 | 285 | if options.axis: |
284 | 286 | if not options.axis.startswith('-'): |
285 | 287 | # axis are 1-indexed in nodeset CLI (0 ignored) |
286 | xset.fold_axis = tuple(x - 1 for x in RangeSet(options.axis) if x > 0) | |
288 | xset.fold_axis = tuple(x-1 for x in RangeSet(options.axis) if x > 0) | |
287 | 289 | else: |
288 | 290 | # negative axis index (only single number supported) |
289 | 291 | xset.fold_axis = [int(options.axis)] |
292 | ||
293 | if options.pick and options.pick < len(xset): | |
294 | # convert to string for sample as nsiter() is slower for big | |
295 | # nodesets; and we assume options.pick will remain small-ish | |
296 | keep = random.sample(xset, options.pick) | |
297 | # explicit class_set creation and str() conversion for RangeSet | |
298 | keep = class_set(','.join([str(x) for x in keep])) | |
299 | xset.intersection_update(keep) | |
290 | 300 | |
291 | 301 | fmt = options.output_format # default to '%s' |
292 | 302 | |
294 | 304 | if options.expand: |
295 | 305 | xsubres = lambda x: separator.join((fmt % s for s in x.striter())) |
296 | 306 | elif options.fold: |
297 | xsubres = lambda x: fmt % x | |
307 | # Special case when folding using NodeSet and format is set (#277) | |
308 | if class_set is NodeSet and fmt != '%s': | |
309 | # Create a new set after format has been applied to each node | |
310 | xset = class_set._fromlist1((fmt % xnodestr for xnodestr in xset), | |
311 | autostep=xset.autostep) | |
312 | xsubres = lambda x: x | |
313 | else: | |
314 | xsubres = lambda x: fmt % x | |
298 | 315 | elif options.regroup: |
299 | 316 | xsubres = lambda x: fmt % x.regroup(options.groupsource, |
300 | 317 | noprefix=options.groupbase) |
127 | 127 | default=None, metavar='FILE', |
128 | 128 | help="topology configuration file to use for tree " |
129 | 129 | "mode") |
130 | optgrp.add_option("--pick", action="store", dest="pick", | |
131 | metavar="N", type="int", | |
132 | help="pick N node(s) at random in nodeset") | |
130 | 133 | self.add_option_group(optgrp) |
131 | 134 | |
132 | 135 | def install_display_options(self, |
334 | 337 | optgrp.add_option("--axis", action="store", dest="axis", |
335 | 338 | metavar="RANGESET", help="fold along these axis only " |
336 | 339 | "(axis 1..n for nD nodeset)") |
337 | self.add_option_group(optgrp) | |
340 | optgrp.add_option("--pick", action="store", dest="pick", | |
341 | metavar="N", type="int", | |
342 | help="pick N node(s) at random in nodeset") | |
343 | self.add_option_group(optgrp) |
0 | 0 | #!/usr/bin/env python |
1 | 1 | # |
2 | # Copyright CEA/DAM/DIF (2010-2015) | |
2 | # Copyright CEA/DAM/DIF (2010-2016) | |
3 | 3 | # Contributor: Henri DOREAU <henri.doreau@cea.fr> |
4 | 4 | # Contributor: Stephane THIELL <sthiell@stanford.edu> |
5 | 5 | # |
55 | 55 | import cPickle |
56 | 56 | import base64 |
57 | 57 | import logging |
58 | import os | |
58 | 59 | import xml.sax |
59 | 60 | |
60 | 61 | from xml.sax.handler import ContentHandler |
68 | 69 | from ClusterShell.Event import EventHandler |
69 | 70 | |
70 | 71 | |
72 | # XML character encoding | |
71 | 73 | ENCODING = 'utf-8' |
74 | ||
75 | # See Message.data_encode() | |
76 | DEFAULT_B64_LINE_LENGTH = 65536 | |
72 | 77 | |
73 | 78 | |
74 | 79 | class MessageProcessingError(Exception): |
224 | 229 | self._close() |
225 | 230 | return |
226 | 231 | except MessageProcessingError, ex: |
232 | self.logger.error("MessageProcessingError: %s", ex) | |
227 | 233 | if self.error_response: |
228 | 234 | self.send(ErrorMessage(str(ex))) |
229 | 235 | self._close() |
267 | 273 | |
268 | 274 | def data_encode(self, inst): |
269 | 275 | """serialize an instance and store the result""" |
270 | self.data = base64.encodestring(cPickle.dumps(inst)) | |
276 | # Base64 transfer encoding for MIME mandates a fixed line length | |
277 | # of 76 characters, which is way too small for our per-line ev_read | |
278 | # mechanism. So use b64encode() here instead of encodestring(). | |
279 | encoded = base64.b64encode(cPickle.dumps(inst)) | |
280 | ||
281 | # We now follow relaxed RFC-4648 for base64, but we still add some | |
282 | # newlines to very long lines to avoid memory pressure (eg. --rcopy). | |
283 | # In RFC-4648, CRLF characters constitute "non-alphabet characters" | |
284 | # and are ignored. | |
285 | line_length = int(os.environ.get('CLUSTERSHELL_GW_B64_LINE_LENGTH', | |
286 | DEFAULT_B64_LINE_LENGTH)) | |
287 | self.data = '\n'.join(encoded[pos:pos+line_length] | |
288 | for pos in xrange(0, len(encoded), line_length)) | |
271 | 289 | |
272 | 290 | def data_decode(self): |
273 | 291 | """deserialize a previously encoded instance and return it""" |
274 | 292 | # if self.data is None then an exception is raised here |
275 | 293 | try: |
276 | return cPickle.loads(base64.decodestring(self.data)) | |
294 | return cPickle.loads(base64.b64decode(self.data)) | |
277 | 295 | except (EOFError, TypeError): |
278 | 296 | # raised by cPickle.loads() if self.data is not valid |
279 | 297 | raise MessageProcessingError('Message %s has an invalid payload' |
0 | 0 | # |
1 | # Copyright 2015 Stephane Thiell <sthiell@stanford.edu> | |
1 | # Copyright 2015-2016 Stephane Thiell <sthiell@stanford.edu> | |
2 | 2 | # |
3 | 3 | # This file is part of the ClusterShell library. |
4 | 4 | # |
62 | 62 | """ |
63 | 63 | modname = "ClusterShell.Worker.%s" % workername.capitalize() |
64 | 64 | |
65 | # Import module if not yet loaded | |
66 | if modname.lower() not in [mod.lower() for mod in sys.modules]: | |
65 | # Do not iterate over sys.modules but use .keys() to avoid RuntimeError | |
66 | if modname.lower() not in [mod.lower() for mod in sys.modules.keys()]: | |
67 | # Import module if not yet loaded | |
67 | 68 | __import__(modname) |
68 | 69 | |
69 | 70 | # Get the class pointer |
79 | 80 | |
80 | 81 | def config_paths(config_name): |
81 | 82 | """Return default path list for a ClusterShell config file name.""" |
82 | return [# system-wide config file | |
83 | '/etc/clustershell/%s' % config_name, | |
83 | return ['/etc/clustershell/%s' % config_name, # system-wide config file | |
84 | 84 | # default pip --user config file |
85 | 85 | os.path.expanduser('~/.local/etc/clustershell/%s' % config_name), |
86 | 86 | # per-user config (top override) |
0 | 0 | # |
1 | # Copyright CEA/DAM/DIF (2007-2015) | |
2 | # Contributor: Stephane THIELL <stephane.thiell@cea.fr> | |
1 | # Copyright CEA/DAM/DIF (2007-2016) | |
2 | # Contributor: Stephane THIELL <sthiell@stanford.edu> | |
3 | 3 | # |
4 | 4 | # This file is part of the ClusterShell library. |
5 | 5 | # |
43 | 43 | import time |
44 | 44 | import traceback |
45 | 45 | |
46 | ||
47 | LOGGER = logging.getLogger(__name__) | |
48 | ||
46 | 49 | # Engine client fd I/O event interest bits |
47 | 50 | E_READ = 0x1 |
48 | 51 | E_WRITE = 0x2 |
49 | 52 | |
50 | 53 | # Define epsilon value for time float arithmetic operations |
51 | 54 | EPSILON = 1.0e-3 |
55 | ||
52 | 56 | |
53 | 57 | class EngineException(Exception): |
54 | 58 | """ |
218 | 222 | # Just print a debug message that could help detect issues |
219 | 223 | # coming from a long-running timer handler. |
220 | 224 | if self.fire_date < time_current: |
221 | logging.getLogger(__name__).debug( | |
222 | "Warning: passed interval time for %r (long running " | |
223 | "event handler?)", self.client) | |
225 | LOGGER.debug("Warning: passed interval time for %r " | |
226 | "(long running event handler?)", self.client) | |
224 | 227 | |
225 | 228 | def disarm(self): |
226 | 229 | client = self.client |
420 | 423 | if client._reg_epoch < self._current_loopcnt: |
421 | 424 | return client, stream |
422 | 425 | else: |
423 | self._debug("ENGINE _fd2client: ignoring just re-used FD %d" \ | |
424 | % stream.fd) | |
426 | LOGGER.debug("_fd2client: ignoring just re-used FD %d", | |
427 | stream.fd) | |
425 | 428 | return (None, None) |
426 | 429 | |
427 | 430 | def add(self, client): |
471 | 474 | needed read flush as needed. If no more retainable stream |
472 | 475 | remains for this client, this method automatically removes the |
473 | 476 | entire client from engine. |
474 | """ | |
477 | ||
478 | This function does nothing if the stream is not registered. | |
479 | """ | |
480 | if stream.fd not in self.reg_clifds: | |
481 | LOGGER.debug("remove_stream: %s not registered", stream) | |
482 | return | |
483 | ||
475 | 484 | self.unregister_stream(client, stream) |
485 | ||
476 | 486 | # _close_stream() will flush pending read buffers so may generate events |
477 | 487 | client._close_stream(stream.name) |
488 | ||
478 | 489 | # client may have been removed by previous events, if not check whether |
479 | 490 | # some retained streams still remain |
480 | 491 | if client in self._clients and not client.streams.retained(): |
595 | 606 | (stream.new_events, stream.events, client, stream.name)) |
596 | 607 | |
597 | 608 | if not client.registered: |
598 | logging.getLogger(__name__).debug( \ | |
599 | "set_events: client %s not registered" % self) | |
609 | LOGGER.debug("set_events: client %s not registered", self) | |
600 | 610 | return |
601 | 611 | |
602 | 612 | chgbits = stream.new_events ^ stream.events |
704 | 714 | # BaseException. For now, print a backtrace in debug to |
705 | 715 | # help detect the problem. |
706 | 716 | tbexc = traceback.format_exception(exc_t, exc_val, exc_tb) |
707 | logging.getLogger(__name__).debug(''.join(tbexc)) | |
717 | LOGGER.debug(''.join(tbexc)) | |
708 | 718 | raise |
709 | 719 | raise |
710 | 720 | finally: |
746 | 756 | return not self.running and self._exited |
747 | 757 | |
748 | 758 | def _debug(self, s): |
749 | """library engine debugging hook""" | |
750 | #logging.getLogger(__name__).debug(s) | |
759 | """library engine verbose debugging hook""" | |
760 | #LOGGER.debug(s) | |
751 | 761 | pass |
0 | 0 | # |
1 | # Copyright CEA/DAM/DIF (2009-2014) | |
2 | # Contributor: Stephane THIELL <stephane.thiell@cea.fr> | |
1 | # Copyright CEA/DAM/DIF (2009-2016) | |
2 | # Contributor: Stephane THIELL <sthiell@stanford.edu> | |
3 | 3 | # |
4 | 4 | # This file is part of the ClusterShell library. |
5 | 5 | # |
34 | 34 | version of Python and Operating System. |
35 | 35 | """ |
36 | 36 | |
37 | import sys | |
37 | import logging | |
38 | 38 | |
39 | 39 | from ClusterShell.Engine.Engine import EngineNotSupportedError |
40 | 40 | |
43 | 43 | from ClusterShell.Engine.Poll import EnginePoll |
44 | 44 | from ClusterShell.Engine.Select import EngineSelect |
45 | 45 | |
46 | ||
46 | 47 | class PreferredEngine(object): |
47 | 48 | """ |
48 | 49 | Preferred Engine selection metaclass (DP Abstract Factory). |
49 | 50 | """ |
50 | 51 | |
51 | engines = { EngineEPoll.identifier: EngineEPoll, | |
52 | EnginePoll.identifier: EnginePoll, | |
53 | EngineSelect.identifier: EngineSelect } | |
52 | engines = {EngineEPoll.identifier: EngineEPoll, | |
53 | EnginePoll.identifier: EnginePoll, | |
54 | EngineSelect.identifier: EngineSelect} | |
54 | 55 | |
55 | 56 | def __new__(cls, hint, info): |
56 | 57 | """ |
58 | 59 | """ |
59 | 60 | if not hint or hint == 'auto': |
60 | 61 | # in order or preference |
61 | for engine_class in [ EngineEPoll, EnginePoll, EngineSelect ]: | |
62 | for engine_class in [EngineEPoll, EnginePoll, EngineSelect]: | |
62 | 63 | try: |
63 | 64 | return engine_class(info) |
64 | 65 | except EngineNotSupportedError: |
76 | 77 | if len(engines) == 0: |
77 | 78 | raise |
78 | 79 | tryengine = engines.popitem()[1] |
79 | except KeyError, exc: | |
80 | print >> sys.stderr, "Invalid engine identifier", exc | |
80 | except KeyError: | |
81 | msg = "Invalid engine identifier: %s" % hint | |
82 | logging.getLogger(__name__).error(msg) | |
81 | 83 | raise |
0 | 0 | # |
1 | # Copyright CEA/DAM/DIF (2007-2015) | |
2 | # Contributor: Stephane THIELL <stephane.thiell@cea.fr> | |
1 | # Copyright CEA/DAM/DIF (2007-2016) | |
2 | # Contributor: Stephane THIELL <sthiell@stanford.edu> | |
3 | 3 | # |
4 | 4 | # This file is part of the ClusterShell library. |
5 | 5 | # |
36 | 36 | """ |
37 | 37 | |
38 | 38 | import errno |
39 | import logging | |
39 | 40 | import select |
40 | import sys | |
41 | 41 | import time |
42 | 42 | |
43 | 43 | from ClusterShell.Engine.Engine import Engine, E_READ, E_WRITE |
85 | 85 | def _modify_specific(self, fd, event, setvalue): |
86 | 86 | """ |
87 | 87 | Engine-specific modifications after a interesting event change for |
88 | a file descriptor. Called automatically by Engine register/unregister and | |
89 | set_events(). For the poll() engine, it reg/unreg or modifies the event mask | |
90 | associated to a file descriptor. | |
88 | a file descriptor. Called automatically by Engine register/unregister | |
89 | and set_events(). For the poll() engine, it reg/unreg or modifies the | |
90 | event mask associated to a file descriptor. | |
91 | 91 | """ |
92 | 92 | self._debug("MODSPEC fd=%d event=%x setvalue=%d" % (fd, event, |
93 | 93 | setvalue)) |
127 | 127 | if ex_errno == errno.EINTR: |
128 | 128 | continue |
129 | 129 | elif ex_errno == errno.EINVAL: |
130 | print >> sys.stderr, \ | |
131 | "EnginePoll: please increase RLIMIT_NOFILE" | |
130 | msg = "Increase RLIMIT_NOFILE?" | |
131 | logging.getLogger(__name__).error(msg) | |
132 | 132 | raise |
133 | 133 | |
134 | 134 | for fd, event in evlist: |
151 | 151 | if event & select.POLLERR: |
152 | 152 | self._debug("POLLERR %s" % client) |
153 | 153 | assert fdev & E_WRITE |
154 | self._debug("POLLERR: remove_stream sname %s fdev 0x%x" % (sname, fdev)) | |
154 | self._debug("POLLERR: remove_stream sname %s fdev 0x%x" | |
155 | % (sname, fdev)) | |
155 | 156 | self.remove_stream(client, stream) |
156 | 157 | self._current_stream = None |
157 | 158 | continue |
172 | 173 | # or check for end of stream (do not handle both at the same |
173 | 174 | # time because handle_read() may perform a partial read) |
174 | 175 | elif event & select.POLLHUP: |
175 | self._debug("POLLHUP fd=%d %s (%s)" % (fd, | |
176 | client.__class__.__name__, client.streams)) | |
176 | self._debug("POLLHUP fd=%d %s (%s)" % | |
177 | (fd, client.__class__.__name__, client.streams)) | |
177 | 178 | self.remove_stream(client, stream) |
178 | 179 | self._current_stream = None |
179 | 180 | continue |
180 | 181 | |
181 | 182 | # check for writing |
182 | 183 | if event & select.POLLOUT: |
183 | self._debug("POLLOUT fd=%d %s (%s)" % (fd, | |
184 | client.__class__.__name__, client.streams)) | |
184 | self._debug("POLLOUT fd=%d %s (%s)" % | |
185 | (fd, client.__class__.__name__, client.streams)) | |
185 | 186 | assert fdev == E_WRITE |
186 | 187 | assert stream.events & fdev |
187 | 188 | self.modify(client, sname, 0, fdev) |
0 | 0 | # |
1 | # Copyright CEA/DAM/DIF (2009-2015) | |
1 | # Copyright CEA/DAM/DIF (2009-2016) | |
2 | 2 | # Contributors: |
3 | 3 | # Henri DOREAU <henri.doreau@cea.fr> |
4 | 4 | # Aurelien DEGREMONT <aurelien.degremont@cea.fr> |
5 | # Stephane THIELL <stephane.thiell@cea.fr> | |
5 | # Stephane THIELL <sthiell@stanford.edu> | |
6 | 6 | # |
7 | 7 | # This file is part of the ClusterShell library. |
8 | 8 | # |
134 | 134 | if ex_errno == errno.EINTR: |
135 | 135 | continue |
136 | 136 | elif ex_errno in [errno.EINVAL, errno.EBADF, errno.ENOMEM]: |
137 | print >> sys.stderr, "EngineSelect: %s" % ex_strerror | |
137 | msg = "Increase RLIMIT_NOFILE?" | |
138 | logging.getLogger(__name__).error(msg) | |
138 | 139 | raise |
139 | 140 | |
140 | 141 | # iterate over fd on which events occured |
187 | 188 | # process clients timeout |
188 | 189 | self.fire_timers() |
189 | 190 | |
190 | self._debug("LOOP EXIT evlooprefcnt=%d (reg_clifds=%s) (timers=%d)" % \ | |
191 | (self.evlooprefcnt, self.reg_clifds, len(self.timerq))) | |
192 | ||
191 | self._debug("LOOP EXIT evlooprefcnt=%d (reg_clifds=%s) (timers=%d)" % | |
192 | (self.evlooprefcnt, self.reg_clifds, len(self.timerq))) |
0 | 0 | #!/usr/bin/env python |
1 | 1 | # |
2 | # Copyright CEA/DAM/DIF (2010-2015) | |
2 | # Copyright CEA/DAM/DIF (2010-2016) | |
3 | 3 | # Contributor: Henri DOREAU <henri.doreau@cea.fr> |
4 | 4 | # Contributor: Stephane THIELL <sthiell@stanford.edu> |
5 | 5 | # |
70 | 70 | |
71 | 71 | class WorkerTreeResponder(EventHandler): |
72 | 72 | """Gateway WorkerTree handler""" |
73 | ||
73 | 74 | def __init__(self, task, gwchan, srcwkr): |
74 | 75 | EventHandler.__init__(self) |
75 | 76 | self.gwchan = gwchan # gateway channel |
76 | 77 | self.srcwkr = srcwkr # id of distant parent WorkerTree |
77 | 78 | self.worker = None # local WorkerTree instance |
78 | # For messages grooming | |
79 | self.retcodes = {} # self-managed retcodes | |
80 | self.logger = logging.getLogger(__name__) | |
81 | ||
82 | # Grooming initialization | |
83 | self.timer = None | |
79 | 84 | qdelay = task.info("grooming_delay") |
80 | self.timer = task.timer(qdelay, self, qdelay, autoclose=True) | |
81 | self.logger = logging.getLogger(__name__) | |
82 | self.logger.debug("WorkerTreeResponder: initialized") | |
83 | # self-managed retcodes | |
84 | self.retcodes = {} | |
85 | if qdelay > 1.0e-3: | |
86 | # Enable messages and rc grooming - enable msgtree (#181) | |
87 | task.set_default("stdout_msgtree", True) | |
88 | task.set_default("stderr_msgtree", True) | |
89 | # create auto-closing timer object for grooming | |
90 | self.timer = task.timer(qdelay, self, qdelay, autoclose=True) | |
91 | ||
92 | self.logger.debug("WorkerTreeResponder initialized grooming=%f", qdelay) | |
85 | 93 | |
86 | 94 | def ev_start(self, worker): |
87 | 95 | self.logger.debug("WorkerTreeResponder: ev_start") |
95 | 103 | |
96 | 104 | # check for grooming opportunities for stdout/stderr |
97 | 105 | for msg_elem, nodes in self.worker.iter_errors(): |
98 | logger.debug("iter(stderr): %s: %d bytes" % \ | |
99 | (nodes, len(msg_elem.message()))) | |
100 | self.gwchan.send(StdErrMessage(nodes, msg_elem.message(), \ | |
106 | logger.debug("iter(stderr): %s: %d bytes", nodes, | |
107 | len(msg_elem.message())) | |
108 | self.gwchan.send(StdErrMessage(nodes, msg_elem.message(), | |
101 | 109 | self.srcwkr)) |
102 | 110 | for msg_elem, nodes in self.worker.iter_buffers(): |
103 | logger.debug("iter(stdout): %s: %d bytes" % \ | |
104 | (nodes, len(msg_elem.message()))) | |
105 | self.gwchan.send(StdOutMessage(nodes, msg_elem.message(), \ | |
111 | logger.debug("iter(stdout): %s: %d bytes", nodes, | |
112 | len(msg_elem.message())) | |
113 | self.gwchan.send(StdOutMessage(nodes, msg_elem.message(), | |
106 | 114 | self.srcwkr)) |
107 | 115 | # empty internal MsgTree buffers |
108 | 116 | self.worker.flush_buffers() |
112 | 120 | # retcodes to parent node, instead of doing it at ev_hup (no msg |
113 | 121 | # aggregation) or at ev_close (no parent node live updates) |
114 | 122 | for rc, nodes in self.retcodes.iteritems(): |
115 | self.logger.debug("iter(rc): %s: rc=%d" % (nodes, rc)) | |
123 | self.logger.debug("iter(rc): %s: rc=%d", nodes, rc) | |
116 | 124 | self.gwchan.send(RetcodeMessage(nodes, rc, self.srcwkr)) |
117 | 125 | self.retcodes.clear() |
118 | 126 | |
127 | def ev_read(self, worker): | |
128 | """message received on stdout""" | |
129 | if self.timer is None: | |
130 | self.gwchan.send(StdOutMessage(worker.current_node, | |
131 | worker.current_msg, | |
132 | self.srcwkr)) | |
133 | ||
119 | 134 | def ev_error(self, worker): |
120 | self.logger.debug("WorkerTreeResponder: ev_error %s" % \ | |
121 | worker.current_errmsg) | |
135 | """message received on stderr""" | |
136 | self.logger.debug("WorkerTreeResponder: ev_error %s %s", | |
137 | worker.current_node, | |
138 | worker.current_errmsg) | |
139 | if self.timer is None: | |
140 | self.gwchan.send(StdErrMessage(worker.current_node, | |
141 | worker.current_errmsg, | |
142 | self.srcwkr)) | |
122 | 143 | |
123 | 144 | def ev_timeout(self, worker): |
124 | 145 | """Received timeout event: some nodes did timeout""" |
125 | self.gwchan.send(TimeoutMessage( \ | |
126 | NodeSet._fromlist1(worker.iter_keys_timeout()), self.srcwkr)) | |
146 | msg = TimeoutMessage(NodeSet._fromlist1(worker.iter_keys_timeout()), | |
147 | self.srcwkr) | |
148 | self.gwchan.send(msg) | |
127 | 149 | |
128 | 150 | def ev_hup(self, worker): |
129 | 151 | """Received end of command from one node""" |
130 | if worker.current_rc in self.retcodes: | |
131 | self.retcodes[worker.current_rc].add(worker.current_node) | |
152 | if self.timer is None: | |
153 | self.gwchan.send(RetcodeMessage(worker.current_node, | |
154 | worker.current_rc, | |
155 | self.srcwkr)) | |
132 | 156 | else: |
133 | self.retcodes[worker.current_rc] = NodeSet(worker.current_node) | |
157 | # retcode grooming | |
158 | if worker.current_rc in self.retcodes: | |
159 | self.retcodes[worker.current_rc].add(worker.current_node) | |
160 | else: | |
161 | self.retcodes[worker.current_rc] = NodeSet(worker.current_node) | |
134 | 162 | |
135 | 163 | def ev_close(self, worker): |
136 | 164 | """End of CTL responder""" |
137 | 165 | self.logger.debug("WorkerTreeResponder: ev_close") |
138 | # finalize grooming | |
139 | self.ev_timer(None) | |
140 | self.timer.invalidate() | |
166 | if self.timer is not None: | |
167 | # finalize grooming | |
168 | self.ev_timer(None) | |
169 | self.timer.invalidate() | |
141 | 170 | |
142 | 171 | |
143 | 172 | class GatewayChannel(Channel): |
144 | 173 | """high level logic for gateways""" |
145 | 174 | def __init__(self, task): |
146 | """ | |
147 | """ | |
148 | 175 | Channel.__init__(self, error_response=True) |
149 | 176 | self.task = task |
150 | 177 | self.nodename = None |
221 | 248 | # topology |
222 | 249 | task_self().topology = self.topology = msg.data_decode() |
223 | 250 | self.logger.debug('decoded propagation tree') |
224 | self.logger.debug('\n%s' % self.topology) | |
251 | self.logger.debug('\n%s', self.topology) | |
225 | 252 | self.setup = True |
226 | 253 | self._ack(msg) |
227 | 254 | |
272 | 299 | self._ack(msg) |
273 | 300 | elif msg.action == 'write': |
274 | 301 | data = msg.data_decode() |
275 | self.logger.debug('GatewayChannel write: %d bytes', \ | |
302 | self.logger.debug('GatewayChannel write: %d bytes', | |
276 | 303 | len(data['buf'])) |
277 | 304 | self.propagation.write(data['buf']) |
278 | 305 | self._ack(msg) |
313 | 340 | sys.excepthook = gateway_excepthook |
314 | 341 | |
315 | 342 | logger.debug('Starting gateway on %s', host) |
316 | logger.debug("environ=%s" % os.environ) | |
343 | logger.debug("environ=%s", os.environ) | |
317 | 344 | |
318 | 345 | |
319 | 346 | set_nonblock_flag(sys.stdin.fileno()) |
322 | 349 | |
323 | 350 | task = task_self() |
324 | 351 | |
325 | # Pre-enable MsgTree buffering on gateway (FIXME) | |
326 | task.set_default("stdout_msgtree", True) | |
327 | task.set_default("stderr_msgtree", True) | |
352 | # Disable MsgTree buffering, it is enabled later when needed | |
353 | task.set_default("stdout_msgtree", False) | |
354 | task.set_default("stderr_msgtree", False) | |
328 | 355 | |
329 | 356 | if sys.stdin.isatty(): |
330 | 357 | logger.critical('Gateway failure: sys.stdin.isatty() is True') |
344 | 371 | except EngineAbortException, exc: |
345 | 372 | logger.debug('EngineAbortException') |
346 | 373 | except IOError, exc: |
347 | logger.debug('Broken pipe (%s)' % exc) | |
374 | logger.debug('Broken pipe (%s)', exc) | |
348 | 375 | raise |
349 | 376 | except Exception, exc: |
350 | logger.exception('Gateway failure: %s' % exc) | |
377 | logger.exception('Gateway failure: %s', exc) | |
351 | 378 | logger.debug('-------- The End --------') |
352 | 379 | |
353 | 380 | if __name__ == '__main__': |
0 | 0 | # |
1 | # Copyright CEA/DAM/DIF (2007-2015) | |
1 | # Copyright CEA/DAM/DIF (2007-2016) | |
2 | 2 | # Contributor: Stephane THIELL <sthiell@stanford.edu> |
3 | 3 | # Contributor: Aurelien DEGREMONT <aurelien.degremont@cea.fr> |
4 | 4 | # |
216 | 216 | |
217 | 217 | def nsiter(self): |
218 | 218 | """Object-based NodeSet iterator on single nodes.""" |
219 | for pat, ivec, pad, autostep in self._iter(): | |
219 | for pat, ivec, pads, autostep in self._iter(): | |
220 | 220 | nodeset = self.__class__() |
221 | 221 | if ivec is not None: |
222 | 222 | if len(ivec) == 1: |
223 | nodeset._add_new(pat, \ | |
224 | RangeSet.fromone(ivec[0], pad[0] or 0)) | |
223 | pad = pads[0] or 0 | |
224 | nodeset._add_new(pat, RangeSet.fromone(ivec[0], pad)) | |
225 | 225 | else: |
226 | nodeset._add_new(pat, RangeSetND([ivec], None, autostep)) | |
226 | nodeset._add_new(pat, RangeSetND([ivec], pads, autostep)) | |
227 | 227 | else: |
228 | 228 | nodeset._add_new(pat, None) |
229 | 229 | yield nodeset |
921 | 921 | |
922 | 922 | def _scan_string_single(self, nsstr, autostep): |
923 | 923 | """Single node scan, returns (pat, list of rangesets)""" |
924 | if len(nsstr) == 0: | |
925 | raise NodeSetParseError(nsstr, "empty node name") | |
926 | ||
927 | 924 | # single node parsing |
928 | 925 | pfx_nd = [mobj.groups() for mobj in self.base_node_re.finditer(nsstr)] |
929 | 926 | pfx_nd = pfx_nd[:-1] |
963 | 960 | def _scan_string(self, nsstr, autostep): |
964 | 961 | """Parsing engine's string scanner method (iterator).""" |
965 | 962 | next_op_code = 'update' |
966 | while nsstr is not None: | |
963 | while nsstr: | |
967 | 964 | # Ignore whitespace(s) for convenience |
968 | 965 | nsstr = nsstr.lstrip() |
969 | 966 | |
1004 | 1001 | |
1005 | 1002 | pfxlen, sfxlen = len(pfx), len(sfx) |
1006 | 1003 | |
1007 | # pfx + sfx cannot be empty | |
1008 | if pfxlen + sfxlen == 0: | |
1009 | raise NodeSetParseError(nsstr, "empty node name") | |
1010 | ||
1011 | 1004 | if sfxlen > 0: |
1012 | 1005 | # amending trailing digits generates /steps |
1013 | 1006 | sfx, rng = self._amend_trailing_digits(sfx, rng) |
1015 | 1008 | if pfxlen > 0: |
1016 | 1009 | # this method supports /steps |
1017 | 1010 | pfx, rng = self._amend_leading_digits(pfx, rng) |
1018 | ||
1019 | # scan pfx as a single node (no bracket) | |
1020 | pfx, pfxrvec = self._scan_string_single(pfx, autostep) | |
1021 | rsets += pfxrvec | |
1011 | if pfx: | |
1012 | # scan any nonempty pfx as a single node (no bracket) | |
1013 | pfx, pfxrvec = self._scan_string_single(pfx, autostep) | |
1014 | rsets += pfxrvec | |
1022 | 1015 | |
1023 | 1016 | # readahead for sanity check |
1024 | 1017 | bracket_idx = sfx.find(self.BRACKET_OPEN, |
1030 | 1023 | raise NodeSetParseError(sfx, "empty node name before") |
1031 | 1024 | |
1032 | 1025 | if len(sfx) > 0 and sfx[0] == '[': |
1033 | raise NodeSetParseError(sfx, | |
1034 | "illegal reopening bracket") | |
1026 | msg = "illegal reopening bracket" | |
1027 | raise NodeSetParseError(sfx, msg) | |
1035 | 1028 | |
1036 | 1029 | newpat += "%s%%s" % pfx |
1037 | 1030 | try: |
1038 | 1031 | rsets.append(RangeSet(rng, autostep)) |
1039 | 1032 | except RangeSetParseError, ex: |
1040 | 1033 | raise NodeSetParseRangeError(ex) |
1034 | ||
1035 | # the following test forbids fully numeric nodeset | |
1036 | if len(pfx) + len(sfx) == 0: | |
1037 | msg = "fully numeric nodeset" | |
1038 | raise NodeSetParseError(nsstr, msg) | |
1041 | 1039 | |
1042 | 1040 | # Check if we have a next op-separated node or pattern |
1043 | 1041 | op_idx, next_op_code = self._next_op(sfx) |
1057 | 1055 | sfx, sfxrvec = self._scan_string_single(sfx, autostep) |
1058 | 1056 | newpat += sfx |
1059 | 1057 | rsets += sfxrvec |
1060 | ||
1061 | # pfx + sfx cannot be empty | |
1062 | if len(newpat) == 0: | |
1063 | raise NodeSetParseError(nsstr, "empty node name") | |
1064 | ||
1065 | 1058 | else: |
1066 | 1059 | # In this case, either there is no comma and no bracket, |
1067 | 1060 | # or the bracket is after the comma, then just return |
1252 | 1245 | """Class method that returns a new NodeSet with all nodes from optional |
1253 | 1246 | groupsource.""" |
1254 | 1247 | inst = NodeSet(autostep=autostep, resolver=resolver) |
1255 | if not inst._resolver: | |
1256 | raise NodeSetExternalError("No node group resolver") | |
1257 | # Fill this nodeset with all nodes found by resolver | |
1258 | inst.updaten(inst._parser.all_nodes(groupsource)) | |
1248 | try: | |
1249 | if not inst._resolver: | |
1250 | raise NodeSetExternalError("Group resolver is not defined") | |
1251 | else: | |
1252 | # fill this nodeset with all nodes found by resolver | |
1253 | inst.updaten(inst._parser.all_nodes(groupsource)) | |
1254 | except NodeUtils.GroupResolverError, exc: | |
1255 | errmsg = "Group source error (%s: %s)" % (exc.__class__.__name__, | |
1256 | exc) | |
1257 | raise NodeSetExternalError(errmsg) | |
1259 | 1258 | return inst |
1260 | 1259 | |
1261 | 1260 | def __getstate__(self): |
0 | # | |
1 | # Copyright CEA/DAM/DIF (2007-2015) | |
2 | # Contributor: Stephane THIELL <stephane.thiell@cea.fr> | |
3 | # Contributor: Aurelien DEGREMONT <aurelien.degremont@cea.fr> | |
4 | # | |
5 | # This file is part of the ClusterShell library. | |
6 | # | |
7 | # This software is governed by the CeCILL-C license under French law and | |
8 | # abiding by the rules of distribution of free software. You can use, | |
9 | # modify and/ or redistribute the software under the terms of the CeCILL-C | |
10 | # license as circulated by CEA, CNRS and INRIA at the following URL | |
11 | # "http://www.cecill.info". | |
12 | # | |
13 | # As a counterpart to the access to the source code and rights to copy, | |
14 | # modify and redistribute granted by the license, users are provided only | |
15 | # with a limited warranty and the software's author, the holder of the | |
16 | # economic rights, and the successive licensors have only limited | |
17 | # liability. | |
18 | # | |
19 | # In this respect, the user's attention is drawn to the risks associated | |
20 | # with loading, using, modifying and/or developing or reproducing the | |
21 | # software by the user in light of its specific status of free software, | |
22 | # that may mean that it is complicated to manipulate, and that also | |
23 | # therefore means that it is reserved for developers and experienced | |
24 | # professionals having in-depth computer knowledge. Users are therefore | |
25 | # encouraged to load and test the software's suitability as regards their | |
26 | # requirements in conditions enabling the security of their systems and/or | |
27 | # data to be ensured and, more generally, to use and operate it in the | |
28 | # same conditions as regards security. | |
29 | # | |
30 | # The fact that you are presently reading this means that you have had | |
31 | # knowledge of the CeCILL-C license and that you accept its terms. | |
32 | ||
33 | """ | |
34 | Cluster node set module. | |
35 | ||
36 | A module to efficiently deal with node sets and node groups. | |
37 | Instances of NodeSet provide operations similar to the builtin set() type, |
38 | see http://www.python.org/doc/lib/set-objects.html | |
39 | ||
40 | Usage example | |
41 | ============= | |
42 | >>> # Import NodeSet class | |
43 | ... from ClusterShell.NodeSet import NodeSet | |
44 | >>> | |
45 | >>> # Create a new nodeset from string | |
46 | ... nodeset = NodeSet("cluster[1-30]") | |
47 | >>> # Add cluster32 to nodeset | |
48 | ... nodeset.update("cluster32") | |
49 | >>> # Remove from nodeset | |
50 | ... nodeset.difference_update("cluster[2-5,8-31]") | |
51 | >>> # Print nodeset as a pdsh-like pattern | |
52 | ... print nodeset | |
53 | cluster[1,6-7,32] | |
54 | >>> # Iterate over node names in nodeset | |
55 | ... for node in nodeset: | |
56 | ... print node | |
57 | cluster1 | |
58 | cluster6 | |
59 | cluster7 | |
60 | cluster32 | |
61 | """ | |
62 | ||
63 | import re | |
64 | import sys | |
65 | ||
66 | import ClusterShell.NodeUtils as NodeUtils | |
67 | ||
68 | # Import all RangeSet module public objects | |
69 | from ClusterShell.RangeSet import RangeSet, RangeSetND, AUTOSTEP_DISABLED | |
70 | from ClusterShell.RangeSet import RangeSetException, RangeSetParseError | |
71 | from ClusterShell.RangeSet import RangeSetPaddingError | |
72 | ||
73 | ||
74 | # Define default GroupResolver object used by NodeSet | |
75 | DEF_GROUPS_CONFIG = "/etc/clustershell/groups.conf" | |
76 | ILLEGAL_GROUP_CHARS = set("@,!&^*") | |
77 | _DEF_RESOLVER_STD_GROUP = NodeUtils.GroupResolverConfig(DEF_GROUPS_CONFIG, \ | |
78 | ILLEGAL_GROUP_CHARS) | |
79 | # Standard group resolver | |
80 | RESOLVER_STD_GROUP = _DEF_RESOLVER_STD_GROUP | |
81 | # Special constants for NodeSet's resolver parameter | |
82 | # RESOLVER_NOGROUP => avoid any group resolution at all | |
83 | # RESOLVER_NOINIT => reserved use for optimized copy() | |
84 | RESOLVER_NOGROUP = -1 | |
85 | RESOLVER_NOINIT = -2 | |
86 | # 1.5 compat (deprecated) | |
87 | STD_GROUP_RESOLVER = RESOLVER_STD_GROUP | |
88 | NOGROUP_RESOLVER = RESOLVER_NOGROUP | |
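For illustration, a quick sketch of what RESOLVER_NOGROUP does in practice: group resolution is skipped entirely, so an "@" pattern is kept as a literal node name (this is also how regroup() stores its results):

>>> ns = NodeSet("@compute", resolver=RESOLVER_NOGROUP)
>>> str(ns)
'@compute'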
89 | ||
90 | ||
91 | class NodeSetException(Exception): | |
92 | """Base NodeSet exception class.""" | |
93 | ||
94 | class NodeSetError(NodeSetException): | |
95 | """Raised when an error is encountered.""" | |
96 | ||
97 | class NodeSetParseError(NodeSetError): | |
98 | """Raised when NodeSet parsing cannot be done properly.""" | |
99 | def __init__(self, part, msg): | |
100 | if part: | |
101 | msg = "%s : \"%s\"" % (msg, part) | |
102 | NodeSetError.__init__(self, msg) | |
103 | # faulty part; this allows you to target the error | |
104 | self.part = part | |
105 | ||
106 | class NodeSetParseRangeError(NodeSetParseError): | |
107 | """Raised when bad range is encountered during NodeSet parsing.""" | |
108 | def __init__(self, rset_exc): | |
109 | NodeSetParseError.__init__(self, str(rset_exc), "bad range") | |
110 | ||
111 | class NodeSetExternalError(NodeSetError): | |
112 | """Raised when an external error is encountered.""" | |
113 | ||
114 | ||
115 | class NodeSetBase(object): | |
116 | """ | |
117 | Base class for NodeSet. | |
118 | ||
119 | This class allows node set base object creation from specified string | |
120 | pattern and rangeset object. If optional copy_rangeset boolean flag is | |
121 | set to True (default), provided rangeset object is copied (if needed), | |
122 | otherwise it may be referenced (should be seen as an ownership transfer | |
123 | upon creation). | |
124 | ||
125 | This class implements core node set arithmetic (no string parsing here). |
126 | ||
127 | Example: | |
128 | >>> nsb = NodeSetBase('node%s-ipmi', RangeSet('1-5,7'), False) | |
129 | >>> str(nsb) | |
130 | 'node[1-5,7]-ipmi' | |
131 | >>> nsb = NodeSetBase('node%s-ib%s', RangeSetND([['1-5,7', '1-2']]), False) | |
132 | >>> str(nsb) | |
133 | 'node[1-5,7]-ib[1-2]' | |
134 | """ | |
135 | def __init__(self, pattern=None, rangeset=None, copy_rangeset=True, | |
136 | autostep=None): | |
137 | """New NodeSetBase object initializer""" | |
138 | self.fold_axis = None | |
139 | self._autostep = autostep | |
140 | self._length = 0 | |
141 | self._patterns = {} | |
142 | if pattern: | |
143 | self._add(pattern, rangeset, copy_rangeset) | |
144 | elif rangeset: | |
145 | raise ValueError("missing pattern") | |
146 | ||
147 | def get_autostep(self): | |
148 | """Get autostep value (property)""" | |
149 | return self._autostep | |
150 | ||
151 | def set_autostep(self, val): | |
152 | """Set autostep value (property)""" | |
153 | if val is None: | |
154 | self._autostep = None | |
155 | else: | |
156 | # Work around the pickling issue of sys.maxint (+inf) in py2.4 | |
157 | self._autostep = min(int(val), AUTOSTEP_DISABLED) | |
158 | ||
159 | # Update our RangeSet/RangeSetND objects | |
160 | for pat, rset in self._patterns.iteritems(): | |
161 | if rset: | |
162 | rset.autostep = self._autostep | |
163 | ||
164 | autostep = property(get_autostep, set_autostep) | |
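A minimal autostep sketch: with autostep set to 3 or less, runs of at least that many equally spaced indexes are folded using the range/step syntax.

>>> str(NodeSet("node[2,4,6,8]"))
'node[2,4,6,8]'
>>> str(NodeSet("node[2,4,6,8]", autostep=3))
'node[2-8/2]'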
165 | ||
166 | def _iter(self): | |
167 | """Iterator on internal item tuples | |
168 | (pattern, indexes, padding, autostep).""" | |
169 | for pat, rset in sorted(self._patterns.iteritems()): | |
170 | if rset: | |
171 | autostep = rset.autostep | |
172 | if rset.dim() == 1: | |
173 | assert isinstance(rset, RangeSet) | |
174 | padding = rset.padding | |
175 | for idx in rset: | |
176 | yield pat, (idx,), (padding,), autostep | |
177 | else: | |
178 | for args, padding in rset.iter_padding(): | |
179 | yield pat, args, padding, autostep | |
180 | else: | |
181 | yield pat, None, None, None | |
182 | ||
183 | def _iterbase(self): | |
184 | """Iterator on single, one-item NodeSetBase objects.""" | |
185 | for pat, ivec, pad, autostep in self._iter(): | |
186 | rset = None # 'no node index' by default | |
187 | if ivec is not None: | |
188 | assert len(ivec) > 0 | |
189 | if len(ivec) == 1: | |
190 | rset = RangeSet.fromone(ivec[0], pad[0] or 0, autostep) | |
191 | else: | |
192 | rset = RangeSetND([ivec], pad, autostep) | |
193 | yield NodeSetBase(pat, rset) | |
194 | ||
195 | def __iter__(self): | |
196 | """Iterator on single nodes as string.""" | |
197 | # Does not call self._iterbase() + str() for better performance. | |
198 | for pat, ivec, pads, _ in self._iter(): | |
199 | if ivec is not None: | |
200 | # For performance reasons, add a special case for 1D RangeSet | |
201 | if len(ivec) == 1: | |
202 | yield pat % ("%0*d" % (pads[0] or 0, ivec[0])) | |
203 | else: | |
204 | yield pat % tuple(["%0*d" % (pad or 0, i) \ | |
205 | for pad, i in zip(pads, ivec)]) | |
206 | else: | |
207 | yield pat % () | |
208 | ||
209 | # define striter() alias for convenience (to match RangeSet.striter()) | |
210 | striter = __iter__ | |
211 | ||
212 | # define nsiter() as an object-based iterator that could be used for | |
213 | # __iter__() in the future... | |
214 | ||
215 | def nsiter(self): | |
216 | """Object-based NodeSet iterator on single nodes.""" | |
217 | for pat, ivec, pad, autostep in self._iter(): | |
218 | nodeset = self.__class__() | |
219 | if ivec is not None: | |
220 | if len(ivec) == 1: | |
221 | nodeset._add_new(pat, \ | |
222 | RangeSet.fromone(ivec[0], pad[0] or 0)) | |
223 | else: | |
224 | nodeset._add_new(pat, RangeSetND([ivec], None, autostep)) | |
225 | else: | |
226 | nodeset._add_new(pat, None) | |
227 | yield nodeset | |
228 | ||
229 | def contiguous(self): | |
230 | """Object-based NodeSet iterator on contiguous node sets. | |
231 | ||
232 | A contiguous node set contains nodes with the same pattern name and a |
233 | contiguous range of indexes, like foobar[1-100].""" | |
234 | for pat, rangeset in sorted(self._patterns.iteritems()): | |
235 | if rangeset: | |
236 | for cont_rset in rangeset.contiguous(): | |
237 | nodeset = self.__class__() | |
238 | nodeset._add_new(pat, cont_rset) | |
239 | yield nodeset | |
240 | else: | |
241 | nodeset = self.__class__() | |
242 | nodeset._add_new(pat, None) | |
243 | yield nodeset | |
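A short sketch of contiguous(), which splits a node set at every gap in its index ranges:

>>> [str(ns) for ns in NodeSet("node[1-3,7-9]").contiguous()]
['node[1-3]', 'node[7-9]']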
244 | ||
245 | def __len__(self): | |
246 | """Get the number of nodes in NodeSet.""" | |
247 | cnt = 0 | |
248 | for rangeset in self._patterns.itervalues(): | |
249 | if rangeset: | |
250 | cnt += len(rangeset) | |
251 | else: | |
252 | cnt += 1 | |
253 | return cnt | |
254 | ||
255 | def __str__(self): | |
256 | """Get ranges-based pattern of node list.""" | |
257 | results = [] | |
258 | try: | |
259 | for pat, rset in sorted(self._patterns.iteritems()): | |
260 | if not rset: | |
261 | results.append(pat % ()) | |
262 | elif rset.dim() == 1: | |
263 | rgs = str(rset) | |
264 | cnt = len(rset) | |
265 | if cnt > 1: | |
266 | rgs = "[%s]" % rgs | |
267 | results.append(pat % rgs) | |
268 | elif rset.dim() > 1: | |
269 | if self.fold_axis is not None: | |
270 | try: | |
271 | # user provided fold axis list | |
272 | fold_axis = list(self.fold_axis) | |
273 | except TypeError: | |
274 | # user provided fold axis max count | |
275 | max_dim = min(int(self.fold_axis), rset.dim()) | |
276 | ||
277 | # the following code block finds the best n fold axes ... |
278 | ||
279 | # create a matrix of rangeset length to select best axes | |
280 | lenmat = [[len(rg) for rg in rgvec] for rgvec in rset.vectors()] | |
281 | # sum columns | |
282 | colsumvec = [sum(colitem) for colitem in zip(*lenmat)] | |
283 | # get max_dim most used axes | |
284 | # NOTE: could use heapq.nlargest() with py2.5+ | |
285 | fold_axis = sorted(range(len(colsumvec)), | |
286 | key=lambda k: colsumvec[k], | |
287 | reverse=True)[0:max_dim] | |
288 | ||
289 | # cast NodeSet to string... | |
290 | for rgvec in rset.vectors(): | |
291 | ||
292 | #print "rgvec", rgvec | |
293 | ||
294 | rgnargs = [] | |
295 | i = 0 | |
296 | for rangeset in rgvec: | |
297 | ||
298 | expand = self.fold_axis is not None and i not in fold_axis | |
299 | ||
300 | i += 1 | |
301 | #print "i=%d" % i | |
302 | ||
303 | cnt = len(rangeset) | |
304 | if cnt > 1: | |
305 | #print "cnt > 1" | |
306 | if expand: | |
307 | new_rgnargs = [] | |
308 | #print "expand" | |
309 | for idx in rangeset.striter(): | |
310 | if rgnargs: | |
311 | for rga in rgnargs: | |
312 | new_rgnargs.append(rga + [idx]) | |
313 | else: | |
314 | new_rgnargs.append([idx]) | |
315 | else: | |
316 | new_rgnargs = [] | |
317 | if rgnargs: | |
318 | for rga in rgnargs: | |
319 | new_rgnargs.append(rga + ["[%s]" % rangeset]) | |
320 | else: | |
321 | new_rgnargs.append(["[%s]" % rangeset]) | |
322 | else: | |
323 | #print "cnt == 1" | |
324 | new_rgnargs = [] | |
325 | if rgnargs: | |
326 | for rga in rgnargs: | |
327 | new_rgnargs.append(rga + [str(rangeset)]) | |
328 | else: | |
329 | new_rgnargs.append([str(rangeset)]) | |
330 | rgnargs = list(new_rgnargs) | |
331 | #print "rgnargs", rgnargs | |
332 | for rgargs in rgnargs: | |
333 | #print "append", rgargs | |
334 | results.append(pat % tuple(rgargs)) | |
335 | #print "results", results | |
336 | except TypeError: | |
338 | raise NodeSetParseError(pat, "Internal error: " \ | |
339 | "node pattern and ranges mismatch") | |
340 | return ",".join(results) | |
341 | ||
342 | def copy(self): | |
343 | """Return a shallow copy.""" | |
344 | cpy = self.__class__() | |
345 | cpy._autostep = self._autostep | |
346 | cpy._length = self._length | |
347 | dic = {} | |
348 | for pat, rangeset in self._patterns.iteritems(): | |
349 | if rangeset is None: | |
350 | dic[pat] = None | |
351 | else: | |
352 | dic[pat] = rangeset.copy() | |
353 | cpy._patterns = dic | |
354 | return cpy | |
355 | ||
356 | def __contains__(self, other): | |
357 | """Is node contained in NodeSet ?""" | |
358 | return self.issuperset(other) | |
359 | ||
360 | def _binary_sanity_check(self, other): | |
361 | # check that the other argument to a binary operation is also | |
362 | # a NodeSet, raising a TypeError otherwise. | |
363 | if not isinstance(other, NodeSetBase): | |
364 | raise TypeError, \ | |
365 | "Binary operation only permitted between NodeSetBase" | |
366 | ||
367 | def issubset(self, other): | |
368 | """Report whether another nodeset contains this nodeset.""" | |
369 | self._binary_sanity_check(other) | |
370 | return other.issuperset(self) | |
371 | ||
372 | def issuperset(self, other): | |
373 | """Report whether this nodeset contains another nodeset.""" | |
374 | self._binary_sanity_check(other) | |
375 | status = True | |
376 | for pat, erangeset in other._patterns.iteritems(): | |
377 | rangeset = self._patterns.get(pat) | |
378 | if rangeset: | |
379 | status = rangeset.issuperset(erangeset) | |
380 | else: | |
381 | # might be an unnumbered node (key in dict but no value) | |
382 | status = self._patterns.has_key(pat) | |
383 | if not status: | |
384 | break | |
385 | return status | |
386 | ||
387 | def __eq__(self, other): | |
388 | """NodeSet equality comparison.""" | |
389 | # See comment for RangeSet.__eq__() |
390 | if not isinstance(other, NodeSetBase): | |
391 | return NotImplemented | |
392 | return len(self) == len(other) and self.issuperset(other) | |
393 | ||
394 | # inequality comparisons using the is-subset relation | |
395 | __le__ = issubset | |
396 | __ge__ = issuperset | |
397 | ||
398 | def __lt__(self, other): | |
399 | """x.__lt__(y) <==> x<y""" | |
400 | self._binary_sanity_check(other) | |
401 | return len(self) < len(other) and self.issubset(other) | |
402 | ||
403 | def __gt__(self, other): | |
404 | """x.__gt__(y) <==> x>y""" | |
405 | self._binary_sanity_check(other) | |
406 | return len(self) > len(other) and self.issuperset(other) | |
407 | ||
408 | def _extractslice(self, index): | |
409 | """Private utility function: extract slice parameters from slice object | |
410 | `index` for a list-like object of size `length`.""" |
411 | length = len(self) | |
412 | if index.start is None: | |
413 | sl_start = 0 | |
414 | elif index.start < 0: | |
415 | sl_start = max(0, length + index.start) | |
416 | else: | |
417 | sl_start = index.start | |
418 | if index.stop is None: | |
419 | sl_stop = sys.maxint | |
420 | elif index.stop < 0: | |
421 | sl_stop = max(0, length + index.stop) | |
422 | else: | |
423 | sl_stop = index.stop | |
424 | if index.step is None: | |
425 | sl_step = 1 | |
426 | elif index.step < 0: | |
427 | # We support negative step slicing with no start/stop, ie. r[::-n]. | |
428 | if index.start is not None or index.stop is not None: | |
429 | raise IndexError, \ | |
430 | "illegal start and stop when negative step is used" | |
431 | # As RangeSet elements are ordered internally, adjust sl_start | |
432 | # to fake backward stepping in case of negative slice step. | |
433 | stepmod = (length + -index.step - 1) % -index.step | |
434 | if stepmod > 0: | |
435 | sl_start += stepmod | |
436 | sl_step = -index.step | |
437 | else: | |
438 | sl_step = index.step | |
439 | if not isinstance(sl_start, int) or not isinstance(sl_stop, int) \ | |
440 | or not isinstance(sl_step, int): | |
441 | raise TypeError, "slice indices must be integers" | |
442 | return sl_start, sl_stop, sl_step | |
443 | ||
444 | def __getitem__(self, index): | |
445 | """Return the node at specified index or a subnodeset when a slice is | |
446 | specified.""" | |
447 | if isinstance(index, slice): | |
448 | inst = NodeSetBase() | |
449 | sl_start, sl_stop, sl_step = self._extractslice(index) | |
450 | sl_next = sl_start | |
451 | if sl_stop <= sl_next: | |
452 | return inst | |
453 | length = 0 | |
454 | for pat, rangeset in sorted(self._patterns.iteritems()): | |
455 | if rangeset: | |
456 | cnt = len(rangeset) | |
457 | offset = sl_next - length | |
458 | if offset < cnt: | |
459 | num = min(sl_stop - sl_next, cnt - offset) | |
460 | inst._add(pat, rangeset[offset:offset + num:sl_step]) | |
461 | else: | |
462 | #skip until sl_next is reached | |
463 | length += cnt | |
464 | continue | |
465 | else: | |
466 | cnt = num = 1 | |
467 | if sl_next > length: | |
468 | length += cnt | |
469 | continue | |
470 | inst._add(pat, None) | |
471 | # adjust sl_next... | |
472 | sl_next += num | |
473 | if (sl_next - sl_start) % sl_step: | |
474 | sl_next = sl_start + \ | |
475 | ((sl_next - sl_start)/sl_step + 1) * sl_step | |
476 | if sl_next >= sl_stop: | |
477 | break | |
478 | length += cnt | |
479 | return inst | |
480 | elif isinstance(index, int): | |
481 | if index < 0: | |
482 | length = len(self) | |
483 | if index >= -length: | |
484 | index = length + index # - -index | |
485 | else: | |
486 | raise IndexError, "%d out of range" % index | |
487 | length = 0 | |
488 | for pat, rangeset in sorted(self._patterns.iteritems()): | |
489 | if rangeset: | |
490 | cnt = len(rangeset) | |
491 | if index < length + cnt: | |
492 | # return a subrangeset of size 1 to manage padding | |
493 | if rangeset.dim() == 1: | |
494 | return pat % rangeset[index-length:index-length+1] | |
495 | else: | |
496 | sub = rangeset[index-length:index-length+1] | |
497 | for rgvec in sub.vectors(): | |
498 | return pat % (tuple(rgvec)) | |
499 | else: | |
500 | cnt = 1 | |
501 | if index == length: | |
502 | return pat | |
503 | length += cnt | |
504 | raise IndexError, "%d out of range" % index | |
505 | else: | |
506 | raise TypeError, "NodeSet indices must be integers" | |
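Indexing and slicing sketch (nodes are ordered by pattern, then by index):

>>> ns = NodeSet("node[10-19]")
>>> ns[3]
'node13'
>>> str(ns[0:5])
'node[10-14]'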
507 | ||
508 | def _add_new(self, pat, rangeset): | |
509 | """Add nodes from a (pat, rangeset) tuple. | |
510 | Predicate: pattern does not exist in current set. RangeSet object is | |
511 | referenced (not copied).""" | |
512 | assert pat not in self._patterns | |
513 | self._patterns[pat] = rangeset | |
514 | ||
515 | def _add(self, pat, rangeset, copy_rangeset=True): | |
516 | """Add nodes from a (pat, rangeset) tuple. | |
517 | `pat' may be an existing pattern and `rangeset' may be None. | |
518 | RangeSet or RangeSetND objects are copied if re-used internally | |
519 | when provided and if copy_rangeset flag is set. | |
520 | """ | |
521 | if pat in self._patterns: | |
522 | # existing pattern: get RangeSet or RangeSetND entry... | |
523 | pat_e = self._patterns[pat] | |
524 | # sanity checks | |
525 | if (pat_e is None) is not (rangeset is None): | |
526 | raise NodeSetError("Invalid operation") | |
527 | # entry may exist but set to None (single node) | |
528 | if pat_e: | |
529 | pat_e.update(rangeset) | |
530 | else: | |
531 | # new pattern... | |
532 | if rangeset and copy_rangeset: | |
533 | # default is to inherit rangeset autostep value | |
534 | rangeset = rangeset.copy() | |
535 | # but if set, self._autostep does override it | |
536 | if self._autostep is not None: | |
537 | # works with rangeset 1D or nD | |
538 | rangeset.autostep = self._autostep | |
539 | self._add_new(pat, rangeset) | |
540 | ||
541 | def union(self, other): | |
542 | """ | |
543 | s.union(t) returns a new set with elements from both s and t. | |
544 | """ | |
545 | self_copy = self.copy() | |
546 | self_copy.update(other) | |
547 | return self_copy | |
548 | ||
549 | def __or__(self, other): | |
550 | """ | |
551 | Implements the | operator. So s | t returns a new nodeset with | |
552 | elements from both s and t. | |
553 | """ | |
554 | if not isinstance(other, NodeSetBase): | |
555 | return NotImplemented | |
556 | return self.union(other) | |
557 | ||
558 | def add(self, other): | |
559 | """ | |
560 | Add node to NodeSet. | |
561 | """ | |
562 | self.update(other) | |
563 | ||
564 | def update(self, other): | |
565 | """ | |
566 | s.update(t) returns nodeset s with elements added from t. | |
567 | """ | |
568 | for pat, rangeset in other._patterns.iteritems(): | |
569 | self._add(pat, rangeset) | |
570 | ||
571 | def updaten(self, others): | |
572 | """ | |
573 | s.updaten(list) returns nodeset s with elements added from given list. | |
574 | """ | |
575 | for other in others: | |
576 | self.update(other) | |
577 | ||
578 | def clear(self): | |
579 | """ | |
580 | Remove all nodes from this nodeset. | |
581 | """ | |
582 | self._patterns.clear() | |
583 | ||
584 | def __ior__(self, other): | |
585 | """ | |
586 | Implements the |= operator. So s |= t returns nodeset s with | |
587 | elements added from t. (Python version 2.5+ required) | |
588 | """ | |
589 | self._binary_sanity_check(other) | |
590 | self.update(other) | |
591 | return self | |
592 | ||
593 | def intersection(self, other): | |
594 | """ | |
595 | s.intersection(t) returns a new set with elements common to s | |
596 | and t. | |
597 | """ | |
598 | self_copy = self.copy() | |
599 | self_copy.intersection_update(other) | |
600 | return self_copy | |
601 | ||
602 | def __and__(self, other): | |
603 | """ | |
604 | Implements the & operator. So s & t returns a new nodeset with | |
605 | elements common to s and t. | |
606 | """ | |
607 | if not isinstance(other, NodeSet): | |
608 | return NotImplemented | |
609 | return self.intersection(other) | |
610 | ||
611 | def intersection_update(self, other): | |
612 | """ | |
613 | s.intersection_update(t) returns nodeset s keeping only | |
614 | elements also found in t. | |
615 | """ | |
616 | if other is self: | |
617 | return | |
618 | ||
619 | tmp_ns = NodeSetBase() | |
620 | ||
621 | for pat, irangeset in other._patterns.iteritems(): | |
622 | rangeset = self._patterns.get(pat) | |
623 | if rangeset: | |
624 | irset = rangeset.intersection(irangeset) | |
625 | # ignore pattern if empty rangeset | |
626 | if len(irset) > 0: | |
627 | tmp_ns._add(pat, irset, copy_rangeset=False) | |
628 | elif not irangeset and pat in self._patterns: | |
629 | # intersect two nodes with no rangeset | |
630 | tmp_ns._add(pat, None) | |
631 | ||
632 | # Substitute | |
633 | self._patterns = tmp_ns._patterns | |
634 | ||
635 | def __iand__(self, other): | |
636 | """ | |
637 | Implements the &= operator. So s &= t returns nodeset s keeping | |
638 | only elements also found in t. (Python version 2.5+ required) | |
639 | """ | |
640 | self._binary_sanity_check(other) | |
641 | self.intersection_update(other) | |
642 | return self | |
643 | ||
644 | def difference(self, other): | |
645 | """ | |
646 | s.difference(t) returns a new NodeSet with elements in s but not | |
647 | in t. | |
648 | """ | |
649 | self_copy = self.copy() | |
650 | self_copy.difference_update(other) | |
651 | return self_copy | |
652 | ||
653 | def __sub__(self, other): | |
654 | """ | |
655 | Implement the - operator. So s - t returns a new nodeset with | |
656 | elements in s but not in t. | |
657 | """ | |
658 | if not isinstance(other, NodeSetBase): | |
659 | return NotImplemented | |
660 | return self.difference(other) | |
661 | ||
662 | def difference_update(self, other, strict=False): | |
663 | """ | |
664 | s.difference_update(t) returns nodeset s after removing | |
665 | elements found in t. If strict is True, raise KeyError | |
666 | if an element cannot be removed. | |
667 | """ | |
668 | # the purge of each empty pattern is done afterward to allow self = ns | |
669 | purge_patterns = [] | |
670 | ||
671 | # iterate first over the exclude nodeset's rangesets, as it is usually smaller |
672 | for pat, erangeset in other._patterns.iteritems(): | |
673 | # if pattern is found, deal with it | |
674 | rangeset = self._patterns.get(pat) | |
675 | if rangeset: | |
676 | # sub rangeset, raise KeyError if not found | |
677 | rangeset.difference_update(erangeset, strict) | |
678 | ||
679 | # check if no range left and add pattern to purge list | |
680 | if len(rangeset) == 0: | |
681 | purge_patterns.append(pat) | |
682 | else: | |
683 | # unnumbered node exclusion | |
684 | if self._patterns.has_key(pat): | |
685 | purge_patterns.append(pat) | |
686 | elif strict: | |
687 | raise KeyError, pat | |
688 | ||
689 | for pat in purge_patterns: | |
690 | del self._patterns[pat] | |
691 | ||
692 | def __isub__(self, other): | |
693 | """ | |
694 | Implement the -= operator. So s -= t returns nodeset s after | |
695 | removing elements found in t. (Python version 2.5+ required) | |
696 | """ | |
697 | self._binary_sanity_check(other) | |
698 | self.difference_update(other) | |
699 | return self | |
700 | ||
701 | def remove(self, elem): | |
702 | """ | |
703 | Remove element elem from the nodeset. Raise KeyError if elem | |
704 | is not contained in the nodeset. | |
705 | """ | |
706 | self.difference_update(elem, True) | |
707 | ||
708 | def symmetric_difference(self, other): | |
709 | """ | |
710 | s.symmetric_difference(t) returns the symmetric difference of | |
711 | two nodesets as a new NodeSet. | |
712 | ||
713 | (ie. all nodes that are in exactly one of the nodesets.) | |
714 | """ | |
715 | self_copy = self.copy() | |
716 | self_copy.symmetric_difference_update(other) | |
717 | return self_copy | |
718 | ||
719 | def __xor__(self, other): | |
720 | """ | |
721 | Implement the ^ operator. So s ^ t returns a new NodeSet with | |
722 | nodes that are in exactly one of the nodesets. | |
723 | """ | |
724 | if not isinstance(other, NodeSet): | |
725 | return NotImplemented | |
726 | return self.symmetric_difference(other) | |
727 | ||
728 | def symmetric_difference_update(self, other): | |
729 | """ | |
730 | s.symmetric_difference_update(t) returns nodeset s keeping all | |
731 | nodes that are in exactly one of the nodesets. | |
732 | """ | |
733 | purge_patterns = [] | |
734 | ||
735 | # iterate over our rangesets | |
736 | for pat, rangeset in self._patterns.iteritems(): | |
737 | brangeset = other._patterns.get(pat) | |
738 | if brangeset: | |
739 | rangeset.symmetric_difference_update(brangeset) | |
740 | else: | |
741 | if other._patterns.has_key(pat): | |
742 | purge_patterns.append(pat) | |
743 | ||
744 | # iterate over other's rangesets | |
745 | for pat, brangeset in other._patterns.iteritems(): | |
746 | rangeset = self._patterns.get(pat) | |
747 | if not rangeset and not pat in self._patterns: | |
748 | self._add(pat, brangeset) | |
749 | ||
750 | # check for patterns cleanup | |
751 | for pat, rangeset in self._patterns.iteritems(): | |
752 | if rangeset is not None and len(rangeset) == 0: | |
753 | purge_patterns.append(pat) | |
754 | ||
755 | # cleanup | |
756 | for pat in purge_patterns: | |
757 | del self._patterns[pat] | |
758 | ||
759 | def __ixor__(self, other): | |
760 | """ | |
761 | Implement the ^= operator. So s ^= t returns nodeset s after | |
762 | keeping all nodes that are in exactly one of the nodesets. | |
763 | (Python version 2.5+ required) | |
764 | """ | |
765 | self._binary_sanity_check(other) | |
766 | self.symmetric_difference_update(other) | |
767 | return self | |
768 | ||
769 | ||
770 | class ParsingEngine(object): | |
771 | """ | |
772 | Class that is able to transform a source into a NodeSetBase. | |
773 | """ | |
774 | OP_CODES = { 'update': ',', | |
775 | 'difference_update': '!', | |
776 | 'intersection_update': '&', | |
777 | 'symmetric_difference_update': '^' } | |
778 | ||
779 | BRACKET_OPEN = '[' | |
780 | BRACKET_CLOSE = ']' | |
781 | ||
782 | def __init__(self, group_resolver): | |
783 | """ | |
784 | Initialize Parsing Engine. | |
785 | """ | |
786 | self.group_resolver = group_resolver | |
787 | self.base_node_re = re.compile("(\D*)(\d*)") | |
788 | ||
789 | def parse(self, nsobj, autostep): | |
790 | """ | |
791 | Parse provided object if possible and return a NodeSetBase object. | |
792 | """ | |
793 | # passing None is supported | |
794 | if nsobj is None: | |
795 | return NodeSetBase() | |
796 | ||
797 | # is nsobj a NodeSetBase instance? | |
798 | if isinstance(nsobj, NodeSetBase): | |
799 | return nsobj | |
800 | ||
801 | # or is nsobj a string? | |
802 | if type(nsobj) is str: | |
803 | try: | |
804 | return self.parse_string(str(nsobj), autostep) | |
805 | except (NodeUtils.GroupSourceQueryFailed, RuntimeError), exc: | |
806 | raise NodeSetParseError(nsobj, str(exc)) | |
807 | ||
808 | raise TypeError("Unsupported NodeSet input %s" % type(nsobj)) | |
809 | ||
810 | def parse_string(self, nsstr, autostep, namespace=None): | |
811 | """Parse provided string in optional namespace. | |
812 | ||
813 | This method parses string, resolves all node groups, and | |
814 | computes set operations. | |
815 | ||
816 | Return a NodeSetBase object. | |
817 | """ | |
818 | nodeset = NodeSetBase() | |
819 | ||
820 | for opc, pat, rgnd in self._scan_string(nsstr, autostep): | |
821 | # Parser main debugging: | |
822 | #print "OPC %s PAT %s RANGESETS %s" % (opc, pat, rgnd) | |
823 | if self.group_resolver and pat[0] == '@': | |
824 | ns_group = NodeSetBase() | |
825 | for nodegroup in NodeSetBase(pat, rgnd): | |
826 | # parse/expand nodes group: get group string and namespace | |
827 | ns_str_ext, ns_nsp_ext = self.parse_group_string(nodegroup, | |
828 | namespace) | |
829 | if ns_str_ext: # may still contain groups | |
830 | # recursively parse and aggregate result | |
831 | ns_group.update(self.parse_string(ns_str_ext, | |
832 | autostep, | |
833 | ns_nsp_ext)) | |
834 | # perform operation | |
835 | getattr(nodeset, opc)(ns_group) | |
836 | else: | |
837 | getattr(nodeset, opc)(NodeSetBase(pat, rgnd, False)) | |
838 | ||
839 | return nodeset | |
840 | ||
841 | def parse_string_single(self, nsstr, autostep): | |
842 | """Parse provided string and return a NodeSetBase object.""" | |
843 | # ignore node boundary whitespace(s) | |
844 | pat, rangesets = self._scan_string_single(nsstr.strip(), autostep) | |
845 | if len(rangesets) > 1: | |
846 | rgobj = RangeSetND([rangesets], None, autostep, copy_rangeset=False) | |
847 | elif len(rangesets) == 1: | |
848 | rgobj = rangesets[0] | |
849 | else: # non-indexed nodename | |
850 | rgobj = None | |
851 | return NodeSetBase(pat, rgobj, False) | |
852 | ||
853 | def parse_group(self, group, namespace=None, autostep=None): | |
854 | """Parse provided single group name (without @ prefix).""" | |
855 | assert self.group_resolver is not None | |
856 | nodestr = self.group_resolver.group_nodes(group, namespace) | |
857 | return self.parse(",".join(nodestr), autostep) | |
858 | ||
859 | def parse_group_string(self, nodegroup, namespace=None): | |
860 | """Parse provided raw nodegroup string in optional namespace. | |
861 | ||
862 | Warning: 1 pass only, may still return groups. | |
863 | ||
864 | Return a tuple (grp_resolved_string, namespace). | |
865 | """ | |
866 | assert nodegroup[0] == '@' | |
867 | assert self.group_resolver is not None | |
868 | grpstr = group = nodegroup[1:] | |
869 | if grpstr.find(':') >= 0: | |
870 | # specified namespace does always override | |
871 | namespace, group = grpstr.split(':', 1) | |
872 | if group == '*': # @* or @source:* magic | |
873 | reslist = self.all_nodes(namespace) | |
874 | else: | |
875 | reslist = self.group_resolver.group_nodes(group, namespace) | |
876 | return ','.join(reslist), namespace | |
877 | ||
878 | def grouplist(self, namespace=None): | |
879 | """Return a sorted list of groups from current resolver (in optional | |
880 | group source / namespace).""" | |
881 | grpset = NodeSetBase() | |
882 | for grpstr in self.group_resolver.grouplist(namespace): | |
883 | # We scan each group string to expand any range seen... | |
884 | for opc, pat, rgnd in self._scan_string(grpstr, None): | |
885 | getattr(grpset, opc)(NodeSetBase(pat, rgnd, False)) | |
886 | return list(grpset) | |
887 | ||
888 | def all_nodes(self, namespace=None): | |
889 | """Get all nodes from group resolver as a list of strings.""" | |
890 | # namespace is the optional group source | |
891 | assert self.group_resolver is not None | |
892 | all = [] | |
893 | try: | |
894 | # Ask resolver to provide all nodes. | |
895 | all = self.group_resolver.all_nodes(namespace) | |
896 | except NodeUtils.GroupSourceNoUpcall: | |
897 | try: | |
898 | # As the resolver is not able to provide all nodes directly, | |
899 | # fall back to the list + map(s) method: |
900 | for grp in self.grouplist(namespace): | |
901 | all += self.group_resolver.group_nodes(grp, namespace) | |
902 | except NodeUtils.GroupSourceNoUpcall: | |
903 | # We are not able to find "all" nodes, definitely. | |
904 | raise NodeSetExternalError("Not enough working external " \ | |
905 | "calls (all, or map + list) defined to get all nodes") | |
906 | except NodeUtils.GroupSourceQueryFailed, exc: | |
907 | raise NodeSetExternalError("Unable to get all nodes due to the " \ | |
908 | "following external failure:\n\t%s" % exc) | |
909 | return all | |
910 | ||
911 | def _next_op(self, pat): | |
912 | """Opcode parsing subroutine.""" | |
913 | op_idx = -1 | |
914 | next_op_code = None | |
915 | for opc, idx in [(k, pat.find(v)) \ | |
916 | for k, v in ParsingEngine.OP_CODES.iteritems()]: | |
917 | if idx >= 0 and (op_idx < 0 or idx <= op_idx): | |
918 | next_op_code = opc | |
919 | op_idx = idx | |
920 | return op_idx, next_op_code | |
921 | ||
922 | def _scan_string_single(self, nsstr, autostep): | |
923 | """Single node scan, returns (pat, list of rangesets)""" | |
924 | if len(nsstr) == 0: | |
925 | raise NodeSetParseError(nsstr, "empty node name") | |
926 | ||
927 | # single node parsing | |
928 | pfx_nd = [mobj.groups() for mobj in self.base_node_re.finditer(nsstr)] | |
929 | pfx_nd = pfx_nd[:-1] | |
930 | if not pfx_nd: | |
931 | raise NodeSetParseError(nsstr, "parse error") | |
932 | ||
933 | # pfx+sfx cannot be empty | |
934 | if len(pfx_nd) == 1 and len(pfx_nd[0][0]) == 0: | |
935 | raise NodeSetParseError(nsstr, "empty node name") | |
936 | ||
937 | pat = "" | |
938 | rangesets = [] | |
939 | for pfx, idx in pfx_nd: | |
940 | if idx: | |
941 | # optimization: process single index padding directly | |
942 | pad = 0 | |
943 | if int(idx) != 0: | |
944 | idxs = idx.lstrip("0") | |
945 | if len(idx) - len(idxs) > 0: | |
946 | pad = len(idx) | |
947 | idxint = int(idxs) | |
948 | else: | |
949 | if len(idx) > 1: | |
950 | pad = len(idx) | |
951 | idxint = 0 | |
952 | if idxint > 1e100: | |
953 | raise NodeSetParseRangeError( \ | |
954 | RangeSetParseError(idx, "invalid rangeset index")) | |
955 | # optimization: use numerical RangeSet constructor | |
956 | pat += "%s%%s" % pfx | |
957 | rangesets.append(RangeSet.fromone(idxint, pad, autostep)) | |
958 | else: | |
959 | # undefined pad means no node index | |
960 | pat += pfx | |
961 | return pat, rangesets | |
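As an illustration of the (pattern, rangesets) form returned by this internal helper, including how zero-padding is preserved:

>>> pat, rsets = ParsingEngine(None)._scan_string_single("node001-ib0", None)
>>> pat, [str(rs) for rs in rsets]
('node%s-ib%s', ['001', '0'])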
962 | ||
963 | def _scan_string(self, nsstr, autostep): | |
964 | """Parsing engine's string scanner method (iterator).""" | |
965 | pat = nsstr.strip() | |
966 | # avoid misformatting | |
967 | if pat.find('%') >= 0: | |
968 | pat = pat.replace('%', '%%') | |
969 | next_op_code = 'update' | |
970 | while pat is not None: | |
971 | # Ignore whitespace(s) for convenience | |
972 | pat = pat.lstrip() | |
973 | ||
974 | rsets = [] | |
975 | op_code = next_op_code | |
976 | ||
977 | op_idx, next_op_code = self._next_op(pat) | |
978 | bracket_idx = pat.find(self.BRACKET_OPEN) | |
979 | ||
980 | # Check if the operator is after the bracket, or if there | |
981 | # is no operator at all but some brackets. | |
982 | if bracket_idx >= 0 and (op_idx > bracket_idx or op_idx < 0): | |
983 | # In this case, we have a pattern of potentially several | |
984 | # nodes. | |
985 | # Fill prefix, range and suffix from pattern | |
986 | # eg. "forbin[3,4-10]-ilo" -> "forbin", "3,4-10", "-ilo" | |
987 | newpat = "" | |
988 | sfx = pat | |
989 | while bracket_idx >= 0 and (op_idx > bracket_idx or op_idx < 0): | |
990 | pfx, sfx = sfx.split(self.BRACKET_OPEN, 1) | |
991 | try: | |
992 | rng, sfx = sfx.split(self.BRACKET_CLOSE, 1) | |
993 | except ValueError: | |
994 | raise NodeSetParseError(pat, "missing bracket") | |
995 | ||
996 | # illegal closing bracket checks | |
997 | if pfx.find(self.BRACKET_CLOSE) > -1: | |
998 | raise NodeSetParseError(pfx, "illegal closing bracket") | |
999 | ||
1000 | if len(sfx) > 0: | |
1001 | bra_end = sfx.find(self.BRACKET_CLOSE) | |
1002 | bra_start = sfx.find(self.BRACKET_OPEN) | |
1003 | if bra_start == -1: | |
1004 | bra_start = bra_end + 1 | |
1005 | if bra_end >= 0 and bra_end < bra_start: | |
1006 | raise NodeSetParseError(sfx, \ | |
1007 | "illegal closing bracket") | |
1008 | pfxlen = len(pfx) | |
1009 | ||
1010 | # pfx + sfx cannot be empty | |
1011 | if pfxlen + len(sfx) == 0: | |
1012 | raise NodeSetParseError(pat, "empty node name") | |
1013 | ||
1014 | # but pfx itself can | |
1015 | if pfxlen > 0: | |
1016 | if pfx[-1] in "0123456789": | |
1017 | raise NodeSetParseError(pfx + "[", "illegal opening" | |
1018 | " bracket after digit") | |
1019 | pfx, pfxrvec = self._scan_string_single(pfx, autostep) | |
1020 | rsets += pfxrvec | |
1021 | ||
1022 | # readahead for sanity check | |
1023 | bracket_idx = sfx.find(self.BRACKET_OPEN, | |
1024 | bracket_idx - pfxlen) | |
1025 | op_idx, next_op_code = self._next_op(sfx) | |
1026 | ||
1027 | # Check for empty component or sequenced ranges | |
1028 | if len(pfx) == 0 and op_idx == 0: | |
1029 | raise NodeSetParseError(sfx, "empty node name before") |
1030 | ||
1031 | if len(sfx) > 0 and sfx[0] in "0123456789[": | |
1032 | raise NodeSetParseError(sfx, \ | |
1033 | "illegal sequenced numeric ranges") | |
1034 | ||
1035 | newpat += "%s%%s" % pfx | |
1036 | try: | |
1037 | rsets.append(RangeSet(rng, autostep)) | |
1038 | except RangeSetParseError, ex: | |
1039 | raise NodeSetParseRangeError(ex) | |
1040 | ||
1041 | # Check if we have a next op-separated node or pattern | |
1042 | op_idx, next_op_code = self._next_op(sfx) | |
1043 | if op_idx < 0: | |
1044 | pat = None | |
1045 | else: | |
1046 | opc = self.OP_CODES[next_op_code] | |
1047 | sfx, pat = sfx.split(opc, 1) | |
1048 | # Detected character operator so right operand is mandatory | |
1049 | if not pat: | |
1050 | msg = "missing nodeset operand with '%s' operator" % opc | |
1051 | raise NodeSetParseError(None, msg) | |
1052 | ||
1053 | # Ignore whitespace(s) | |
1054 | sfx = sfx.rstrip() | |
1055 | if sfx: | |
1056 | sfx, sfxrvec = self._scan_string_single(sfx, autostep) | |
1057 | newpat += sfx | |
1058 | rsets += sfxrvec | |
1059 | ||
1060 | # pfx + sfx cannot be empty | |
1061 | if len(newpat) == 0: | |
1062 | raise NodeSetParseError(pat, "empty node name") | |
1063 | ||
1064 | else: | |
1065 | # In this case, either there is no comma and no bracket, | |
1066 | # or the bracket is after the comma, then just return | |
1067 | # the node. | |
1068 | if op_idx < 0: | |
1069 | node = pat | |
1070 | pat = None # break next time | |
1071 | else: | |
1072 | opc = self.OP_CODES[next_op_code] | |
1073 | node, pat = pat.split(opc, 1) | |
1074 | # Detected character operator so both operands are mandatory | |
1075 | if not node or not pat: | |
1076 | msg = "missing nodeset operand with '%s' operator" % opc | |
1077 | raise NodeSetParseError(node or pat, msg) | |
1078 | ||
1079 | # Check for illegal closing bracket | |
1080 | if node.find(self.BRACKET_CLOSE) > -1: | |
1081 | raise NodeSetParseError(node, "illegal closing bracket") | |
1082 | ||
1083 | # Ignore whitespace(s) | |
1084 | node = node.rstrip() | |
1085 | newpat, rsets = self._scan_string_single(node, autostep) | |
1086 | ||
1087 | if len(rsets) > 1: | |
1088 | yield op_code, newpat, RangeSetND([rsets], None, autostep, | |
1089 | copy_rangeset=False) | |
1090 | elif len(rsets) == 1: | |
1091 | yield op_code, newpat, rsets[0] | |
1092 | else: | |
1093 | yield op_code, newpat, None | |
1094 | ||
1095 | ||
1096 | class NodeSet(NodeSetBase): | |
1097 | """ | |
1098 | Iterable class of nodes with node ranges support. | |
1099 | ||
1100 | NodeSet creation examples: | |
1101 | >>> nodeset = NodeSet() # empty NodeSet | |
1102 | >>> nodeset = NodeSet("cluster3") # contains only cluster3 | |
1103 | >>> nodeset = NodeSet("cluster[5,10-42]") | |
1104 | >>> nodeset = NodeSet("cluster[0-10/2]") | |
1105 | >>> nodeset = NodeSet("cluster[0-10/2],othername[7-9,120-300]") | |
1106 | ||
1107 | NodeSet provides methods like update(), intersection_update() or |
1108 | difference_update(), which conform to the Python Set API. |
1109 | However, unlike RangeSet or the standard Set, NodeSet is somewhat |
1110 | less strict for convenience, and accepts either a NodeSet instance or |
1111 | a NodeSet string as argument. Also, there is no strict definition of |
1112 | one element; for example, it IS allowed to do: |
1113 | >>> nodeset = NodeSet("blue[1-50]") | |
1114 | >>> nodeset.remove("blue[36-40]") | |
1115 | >>> print nodeset | |
1116 | blue[1-35,41-50] | |
1117 | ||
1118 | Additionally, the NodeSet class recognizes the "extended string | |
1119 | pattern" which adds support for union (special character ","), | |
1120 | difference ("!"), intersection ("&") and symmetric difference ("^") | |
1121 | operations. String patterns are read from left to right, |
1122 | processing any character operators accordingly. |
1123 | ||
1124 | Extended string pattern usage examples: | |
1125 | >>> nodeset = NodeSet("node[0-10],node[14-16]") # union | |
1126 | >>> nodeset = NodeSet("node[0-10]!node[8-10]") # difference | |
1127 | >>> nodeset = NodeSet("node[0-10]&node[5-13]") # intersection | |
1128 | >>> nodeset = NodeSet("node[0-10]^node[5-13]") # xor | |
1129 | """ | |
1130 | ||
1131 | _VERSION = 2 | |
1132 | ||
1133 | def __init__(self, nodes=None, autostep=None, resolver=None): | |
1134 | """Initialize a NodeSet object. | |
1135 | ||
1136 | The `nodes' argument may be a valid nodeset string or a NodeSet | |
1137 | object. If no nodes are specified, an empty NodeSet is created. | |
1138 | ||
1139 | The optional `autostep' argument is passed to underlying RangeSet | |
1140 | objects and aims to enable and make use of the range/step syntax | |
1141 | (eg. node[1-9/2]) when converting NodeSet to string (using folding). | |
1142 | To enable this feature, autostep must be set there to the min number of | |
1143 | indexes that are found at equal distance from each other inside a range |
1144 | before NodeSet starts to use this syntax. For example, autostep=3 (or | |
1145 | less) will pack n[2,4,6] into n[2-6/2]. Default autostep value is None | |
1146 | which means "inherit whenever possible", ie. do not enable it unless | |
1147 | set in NodeSet objects passed as `nodes' here or during arithmetic | |
1148 | operations. | |
1149 | You may however use the special AUTOSTEP_DISABLED constant to force | |
1150 | turning off autostep feature. | |
1151 | ||
1152 | The optional `resolver' argument may be used to override the group | |
1153 | resolving behavior for this NodeSet object. It can either be set to a | |
1154 | GroupResolver object, to the RESOLVER_NOGROUP constant to disable any | |
1155 | group resolution, or to None (default) to use standard NodeSet group | |
1156 | resolver (see set_std_group_resolver() at the module level to change | |
1157 | it if needed). | |
1158 | """ | |
1159 | NodeSetBase.__init__(self, autostep=autostep) | |
1160 | ||
1161 | # Set group resolver. | |
1162 | if resolver in (RESOLVER_NOGROUP, RESOLVER_NOINIT): | |
1163 | self._resolver = None | |
1164 | else: | |
1165 | self._resolver = resolver or RESOLVER_STD_GROUP | |
1166 | ||
1167 | # Initialize default parser. | |
1168 | if resolver == RESOLVER_NOINIT: | |
1169 | self._parser = None | |
1170 | else: | |
1171 | self._parser = ParsingEngine(self._resolver) | |
1172 | self.update(nodes) | |
1173 | ||
1174 | @classmethod | |
1175 | def _fromlist1(cls, nodelist, autostep=None, resolver=None): | |
1176 | """Class method that returns a new NodeSet with single nodes from | |
1177 | provided list (optimized constructor).""" | |
1178 | inst = NodeSet(autostep=autostep, resolver=resolver) | |
1179 | for single in nodelist: | |
1180 | inst.update(inst._parser.parse_string_single(single, autostep)) | |
1181 | return inst | |
1182 | ||
1183 | @classmethod | |
1184 | def fromlist(cls, nodelist, autostep=None, resolver=None): | |
1185 | """Class method that returns a new NodeSet with nodes from provided | |
1186 | list.""" | |
1187 | inst = NodeSet(autostep=autostep, resolver=resolver) | |
1188 | inst.updaten(nodelist) | |
1189 | return inst | |
1190 | ||
1191 | @classmethod | |
1192 | def fromall(cls, groupsource=None, autostep=None, resolver=None): | |
1193 | """Class method that returns a new NodeSet with all nodes from optional | |
1194 | groupsource.""" | |
1195 | inst = NodeSet(autostep=autostep, resolver=resolver) | |
1196 | if not inst._resolver: | |
1197 | raise NodeSetExternalError("No node group resolver") | |
1198 | # Fill this nodeset with all nodes found by resolver | |
1199 | inst.updaten(inst._parser.all_nodes(groupsource)) | |
1200 | return inst | |
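Sketch of fromall() usage; the result depends entirely on the configured group source (the node names below are made up):

>>> str(NodeSet.fromall())
'node[1-1000]'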
1201 | ||
1202 | def __getstate__(self): | |
1203 | """Called when pickling: remove references to group resolver.""" | |
1204 | odict = self.__dict__.copy() | |
1205 | odict['_version'] = NodeSet._VERSION | |
1206 | del odict['_resolver'] | |
1207 | del odict['_parser'] | |
1208 | return odict | |
1209 | ||
1210 | def __setstate__(self, dic): | |
1211 | """Called when unpickling: restore parser using non group | |
1212 | resolver.""" | |
1213 | self.__dict__.update(dic) | |
1214 | self._resolver = None | |
1215 | self._parser = ParsingEngine(None) | |
1216 | if getattr(self, '_version', 1) <= 1: | |
1217 | self.fold_axis = None | |
1218 | # if setting state from first version, a conversion is needed to | |
1219 | # support native RangeSetND | |
1220 | old_patterns = self._patterns | |
1221 | self._patterns = {} | |
1222 | for pat, rangeset in sorted(old_patterns.iteritems()): | |
1223 | if rangeset: | |
1224 | assert isinstance(rangeset, RangeSet) | |
1225 | rgs = str(rangeset) | |
1226 | if len(rangeset) > 1: | |
1227 | rgs = "[%s]" % rgs | |
1228 | self.update(pat % rgs) | |
1229 | else: | |
1230 | self.update(pat) | |
1231 | ||
1232 | def copy(self): | |
1233 | """Return a shallow copy of a NodeSet.""" | |
1234 | cpy = self.__class__(resolver=RESOLVER_NOINIT) | |
1235 | dic = {} | |
1236 | for pat, rangeset in self._patterns.iteritems(): | |
1237 | if rangeset is None: | |
1238 | dic[pat] = None | |
1239 | else: | |
1240 | dic[pat] = rangeset.copy() | |
1241 | cpy._patterns = dic | |
1242 | cpy._autostep = self._autostep | |
1243 | cpy._resolver = self._resolver | |
1244 | cpy._parser = self._parser | |
1245 | return cpy | |
1246 | ||
1247 | __copy__ = copy # For the copy module | |
1248 | ||
1249 | def _find_groups(self, node, namespace, allgroups): | |
1250 | """Find groups of node by namespace.""" | |
1251 | if allgroups: | |
1252 | # find node groups using in-memory allgroups | |
1253 | for grp, nodeset in allgroups.iteritems(): | |
1254 | if node in nodeset: | |
1255 | yield grp | |
1256 | else: | |
1257 | # find node groups using resolver | |
1258 | for group in self._resolver.node_groups(node, namespace): | |
1259 | yield group | |
1260 | ||
1261 | def _groups2(self, groupsource=None, autostep=None): | |
1262 | """Find node groups this nodeset belongs to. [private]""" | |
1263 | if not self._resolver: | |
1264 | raise NodeSetExternalError("No node group resolver") | |
1265 | try: | |
1266 | # Get all groups in specified group source. | |
1267 | allgrplist = self._parser.grouplist(groupsource) | |
1268 | except NodeUtils.GroupSourceError: | |
1269 | # If list query failed, we still might be able to regroup | |
1270 | # using reverse. | |
1271 | allgrplist = None | |
1272 | groups_info = {} | |
1273 | allgroups = {} | |
1274 | # Check for external reverse presence, and also use the | |
1275 | # following heuristic: external reverse is used only when the number |
1276 | # of groups is at least the NodeSet size. |
1277 | if self._resolver.has_node_groups(groupsource) and \ | |
1278 | (not allgrplist or len(allgrplist) >= len(self)): | |
1279 | # use external reverse | |
1280 | pass | |
1281 | else: | |
1282 | if not allgrplist: # list query failed and no way to reverse! | |
1283 | return groups_info # empty | |
1284 | try: | |
1285 | # use internal reverse: populate allgroups | |
1286 | for grp in allgrplist: | |
1287 | nodelist = self._resolver.group_nodes(grp, groupsource) | |
1288 | allgroups[grp] = NodeSet(",".join(nodelist), | |
1289 | resolver=self._resolver) | |
1290 | except NodeUtils.GroupSourceQueryFailed, exc: | |
1291 | # External result inconsistency | |
1292 | raise NodeSetExternalError("Unable to map a group " \ | |
1293 | "previously listed\n\tFailed command: %s" % exc) | |
1294 | ||
1295 | # For each NodeSetBase in self, find its groups. | |
1296 | for node in self._iterbase(): | |
1297 | for grp in self._find_groups(node, groupsource, allgroups): | |
1298 | if grp not in groups_info: | |
1299 | nodes = self._parser.parse_group(grp, groupsource, autostep) | |
1300 | groups_info[grp] = (1, nodes) | |
1301 | else: | |
1302 | i, nodes = groups_info[grp] | |
1303 | groups_info[grp] = (i + 1, nodes) | |
1304 | return groups_info | |
1305 | ||
1306 | def groups(self, groupsource=None, noprefix=False): | |
1307 | """Find node groups this nodeset belongs to. | |
1308 | ||
1309 | Return a dictionary of the form: | |
1310 | group_name => (group_nodeset, contained_nodeset) | |
1311 | ||
1312 | Group names are always prefixed with "@". If groupsource is provided, | |
1313 | they are prefixed with "@groupsource:", unless noprefix is True. | |
1314 | """ | |
1315 | groups = self._groups2(groupsource, self._autostep) | |
1316 | result = {} | |
1317 | for grp, (_, nsb) in groups.iteritems(): | |
1318 | if groupsource and not noprefix: | |
1319 | key = "@%s:%s" % (groupsource, grp) | |
1320 | else: | |
1321 | key = "@" + grp | |
1322 | result[key] = (NodeSet(nsb, resolver=self._resolver), | |
1323 | self.intersection(nsb)) | |
1324 | return result | |
1325 | ||
1326 | def regroup(self, groupsource=None, autostep=None, overlap=False, | |
1327 | noprefix=False): | |
1328 | """Regroup nodeset using node groups. | |
1329 | ||
1330 | Try to find fully matching node groups (within specified groupsource) | |
1331 | and return a string that represents this node set (containing these | |
1332 | potential node groups). When no matching node groups are found, this | |
1333 | method returns the same result as str().""" | |
1334 | groups = self._groups2(groupsource, autostep) | |
1335 | if not groups: | |
1336 | return str(self) | |
1337 | ||
1338 | # Keep only groups that are full. | |
1339 | fulls = [] | |
1340 | for k, (i, nodes) in groups.iteritems(): | |
1341 | assert i <= len(nodes) | |
1342 | if i == len(nodes): | |
1343 | fulls.append((i, k)) | |
1344 | ||
1345 | rest = NodeSet(self, resolver=RESOLVER_NOGROUP) | |
1346 | regrouped = NodeSet(resolver=RESOLVER_NOGROUP) | |
1347 | ||
1348 | bigalpha = lambda x, y: cmp(y[0], x[0]) or cmp(x[1], y[1]) | |
1349 | ||
1350 | # Build regrouped NodeSet by selecting largest groups first. | |
1351 | for _, grp in sorted(fulls, cmp=bigalpha): | |
1352 | if not overlap and groups[grp][1] not in rest: | |
1353 | continue | |
1354 | if groupsource and not noprefix: | |
1355 | regrouped.update("@%s:%s" % (groupsource, grp)) | |
1356 | else: | |
1357 | regrouped.update("@" + grp) | |
1358 | rest.difference_update(groups[grp][1]) | |
1359 | if not rest: | |
1360 | return str(regrouped) | |
1361 | ||
1362 | if regrouped: | |
1363 | return "%s,%s" % (regrouped, rest) | |
1364 | ||
1365 | return str(rest) | |
1366 | ||
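A short usage sketch for groups() and regroup() above. Both methods depend on the configured group source, so the @compute group and its membership below are assumptions for illustration only:

from ClusterShell.NodeSet import NodeSet

ns = NodeSet("node[1-8],login1")
# groups() maps each matching group name to (group_nodeset, nodes of ns in it);
# regroup() substitutes fully matched groups and keeps the remaining nodes.
print ns.regroup()   # -> "@compute,login1" if @compute resolves to node[1-8]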
1367 | def issubset(self, other): | |
1368 | """ | |
1369 | Report whether another nodeset contains this nodeset. | |
1370 | """ | |
1371 | nodeset = self._parser.parse(other, self._autostep) | |
1372 | return NodeSetBase.issuperset(nodeset, self) | |
1373 | ||
1374 | def issuperset(self, other): | |
1375 | """ | |
1376 | Report whether this nodeset contains another nodeset. | |
1377 | """ | |
1378 | nodeset = self._parser.parse(other, self._autostep) | |
1379 | return NodeSetBase.issuperset(self, nodeset) | |
1380 | ||
1381 | def __getitem__(self, index): | |
1382 | """ | |
1383 | Return the node at specified index or a subnodeset when a slice | |
1384 | is specified. | |
1385 | """ | |
1386 | base = NodeSetBase.__getitem__(self, index) | |
1387 | if not isinstance(base, NodeSetBase): | |
1388 | return base | |
1389 | # return a real NodeSet | |
1390 | inst = NodeSet(autostep=self._autostep, resolver=self._resolver) | |
1391 | inst._patterns = base._patterns | |
1392 | return inst | |
1393 | ||
1394 | def split(self, nbr): | |
1395 | """ | |
1396 | Split the nodeset into nbr sub-nodesets (at most). Each | |
1397 | sub-nodeset will have the same number of elements, plus or | |
1398 | minus one. The current nodeset remains unmodified. | |
1399 | ||
1400 | >>> for nodeset in NodeSet("foo[1-5]").split(3): | |
1401 | ... print nodeset | |
1402 | foo[1-2] | |
1403 | foo[3-4] | |
1404 | foo5 | |
1405 | """ | |
1406 | assert(nbr > 0) | |
1407 | ||
1408 | # We put the same number of elements in each sub-nodeset. | |
1409 | slice_size = len(self) / nbr | |
1410 | left = len(self) % nbr | |
1411 | ||
1412 | begin = 0 | |
1413 | for i in range(0, min(nbr, len(self))): | |
1414 | length = slice_size + int(i < left) | |
1415 | yield self[begin:begin + length] | |
1416 | begin += length | |
1417 | ||
1418 | def update(self, other): | |
1419 | """ | |
1420 | s.update(t) returns nodeset s with elements added from t. | |
1421 | """ | |
1422 | nodeset = self._parser.parse(other, self._autostep) | |
1423 | NodeSetBase.update(self, nodeset) | |
1424 | ||
1425 | def intersection_update(self, other): | |
1426 | """ | |
1427 | s.intersection_update(t) returns nodeset s keeping only | |
1428 | elements also found in t. | |
1429 | """ | |
1430 | nodeset = self._parser.parse(other, self._autostep) | |
1431 | NodeSetBase.intersection_update(self, nodeset) | |
1432 | ||
1433 | def difference_update(self, other, strict=False): | |
1434 | """ | |
1435 | s.difference_update(t) returns nodeset s after removing | |
1436 | elements found in t. If strict is True, raise KeyError | |
1437 | if an element cannot be removed. | |
1438 | """ | |
1439 | nodeset = self._parser.parse(other, self._autostep) | |
1440 | NodeSetBase.difference_update(self, nodeset, strict) | |
1441 | ||
1442 | def symmetric_difference_update(self, other): | |
1443 | """ | |
1444 | s.symmetric_difference_update(t) returns nodeset s keeping all | |
1445 | nodes that are in exactly one of the nodesets. | |
1446 | """ | |
1447 | nodeset = self._parser.parse(other, self._autostep) | |
1448 | NodeSetBase.symmetric_difference_update(self, nodeset) | |
1449 | ||
1450 | ||
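A minimal sketch of the in-place set operations above; arguments are parsed like any other nodeset string and node names are illustrative:

from ClusterShell.NodeSet import NodeSet

ns = NodeSet("node[1-4]")
ns.update("node[5-6]")                        # -> node[1-6]
ns.difference_update("node2")                 # -> node[1,3-6]
ns.intersection_update("node[3-10]")          # -> node[3-6]
ns.symmetric_difference_update("node[5-8]")   # -> node[3-4,7-8]
print ns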
1451 | def expand(pat): | |
1452 | """ | |
1453 | Commodity function that expands a nodeset pattern into a list of nodes. | |
1454 | """ | |
1455 | return list(NodeSet(pat)) | |
1456 | ||
1457 | def fold(pat): | |
1458 | """ | |
1459 | Commodity function that cleans dups and folds the provided pattern with ranges | |
1460 | and "/step" support. | |
1461 | """ | |
1462 | return str(NodeSet(pat)) | |
1463 | ||
1464 | def grouplist(namespace=None, resolver=None): | |
1465 | """ | |
1466 | Commodity function that retrieves the list of raw groups for a specified | |
1467 | group namespace (or the default namespace). | |
1468 | Group names are not prefixed with "@". | |
1469 | """ | |
1470 | return ParsingEngine(resolver or RESOLVER_STD_GROUP).grouplist(namespace) | |
1471 | ||
1472 | def std_group_resolver(): | |
1473 | """ | |
1474 | Get the current resolver used for standard "@" group resolution. | |
1475 | """ | |
1476 | return RESOLVER_STD_GROUP | |
1477 | ||
1478 | def set_std_group_resolver(new_resolver): | |
1479 | """ | |
1480 | Override the resolver used for standard "@" group resolution. The | |
1481 | new resolver should be either an instance of | |
1482 | NodeUtils.GroupResolver or None. In the latter case, the group | |
1483 | resolver is restored to the default one. | |
1484 | """ | |
1485 | global RESOLVER_STD_GROUP | |
1486 | RESOLVER_STD_GROUP = new_resolver or _DEF_RESOLVER_STD_GROUP | |
1487 |
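A minimal usage sketch of the module-level helpers above; grouplist() output depends entirely on the configured group source, so only its call form is shown:

from ClusterShell.NodeSet import expand, fold, grouplist

print fold("node1,node2,node3,node5")   # -> node[1-3,5]
print expand("node[1-3]")               # -> ['node1', 'node2', 'node3']
print grouplist()                       # raw group names from the default source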
0 | # Copyright CEA/DAM/DIF (2010-2015) | |
0 | # | |
1 | # Copyright CEA/DAM/DIF (2010-2016) | |
1 | 2 | # Contributors: |
2 | 3 | # Stephane THIELL <sthiell@stanford.edu> |
3 | 4 | # Aurelien DEGREMONT <aurelien.degremont@cea.fr> |
41 | 42 | """ |
42 | 43 | |
43 | 44 | import glob |
45 | import logging | |
44 | 46 | import os |
45 | 47 | import shlex |
46 | import sys | |
47 | 48 | import time |
48 | 49 | |
49 | 50 | from ConfigParser import ConfigParser, NoOptionError, NoSectionError |
159 | 160 | list_upcall=None, reverse_upcall=None, cfgdir=None, |
160 | 161 | cache_time=None): |
161 | 162 | GroupSource.__init__(self, name) |
162 | self.verbosity = 0 | |
163 | self.verbosity = 0 # deprecated | |
163 | 164 | self.cfgdir = cfgdir |
165 | self.logger = logging.getLogger(__name__) | |
164 | 166 | |
165 | 167 | # Supported external upcalls |
166 | 168 | self.upcalls = {} |
190 | 192 | 'reverse': {} |
191 | 193 | } |
192 | 194 | |
193 | def _verbose_print(self, msg): | |
194 | """Print msg depending on the verbosity level.""" | |
195 | if self.verbosity > 0: | |
196 | print >> sys.stderr, "%s<%s> %s" % \ | |
197 | (self.__class__.__name__, self.name, msg) | |
198 | ||
199 | 195 | def _upcall_read(self, cmdtpl, args=dict()): |
200 | 196 | """ |
201 | 197 | Invoke the specified upcall command, raise an Exception if |
202 | 198 | something goes wrong and return the command output otherwise. |
203 | 199 | """ |
204 | 200 | cmdline = Template(self.upcalls[cmdtpl]).safe_substitute(args) |
205 | self._verbose_print("EXEC '%s'" % cmdline) | |
201 | self.logger.debug("EXEC '%s'", cmdline) | |
206 | 202 | proc = Popen(cmdline, stdout=PIPE, shell=True, cwd=self.cfgdir) |
207 | 203 | output = proc.communicate()[0].strip() |
208 | self._verbose_print("READ '%s'" % output) | |
204 | self.logger.debug("READ '%s'", output) | |
209 | 205 | if proc.returncode != 0: |
210 | self._verbose_print("ERROR '%s' returned %d" % (cmdline, \ | |
211 | proc.returncode)) | |
206 | self.logger.debug("ERROR '%s' returned %d", cmdline, | |
207 | proc.returncode) | |
212 | 208 | raise GroupSourceQueryFailed(cmdline, self) |
213 | 209 | return output |
214 | 210 | |
225 | 221 | |
226 | 222 | # Purge expired data from cache |
227 | 223 | if key in cache and cache[key][1] < time.time(): |
228 | self._verbose_print("PURGE EXPIRED (%d)'%s'" % (cache[key][1], key)) | |
224 | self.logger.debug("PURGE EXPIRED (%d)'%s'", cache[key][1], key) | |
229 | 225 | del cache[key] |
230 | 226 | |
231 | 227 | # Fetch the data if unknown or just purged
376 | 372 | self._sources[default_source.name] = default_source |
377 | 373 | |
378 | 374 | def set_verbosity(self, value): |
379 | """Set debugging verbosity value. """ | |
375 | """Set debugging verbosity value (DEPRECATED: use logging.DEBUG).""" | |
380 | 376 | for source in self._sources.itervalues(): |
381 | 377 | source.verbosity = value |
382 | 378 |
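The upcall invocation above builds its command line with string.Template before running it through a shell. A standalone sketch of that substitution step, using an illustrative map upcall similar to those found in groups.conf:

from string import Template

upcall = "sed -n 's/^$GROUP:\\(.*\\)/\\1/p' /etc/clustershell/groups"
print Template(upcall).safe_substitute({'GROUP': 'compute'})
# -> sed -n 's/^compute:\(.*\)/\1/p' /etc/clustershell/groups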
0 | 0 | #!/usr/bin/env python |
1 | 1 | # |
2 | # Copyright CEA/DAM/DIF (2010-2015) | |
2 | # Copyright CEA/DAM/DIF (2010-2016) | |
3 | 3 | # Contributor: Henri DOREAU <henri.doreau@cea.fr> |
4 | 4 | # Contributor: Stephane THIELL <sthiell@stanford.edu> |
5 | 5 | # |
262 | 262 | |
263 | 263 | def recv(self, msg): |
264 | 264 | """process incoming messages""" |
265 | self.logger.debug("[DBG] rcvd from: %s", msg) | |
265 | self.logger.debug("recv: %s", msg) | |
266 | 266 | if msg.type == EndMessage.ident: |
267 | 267 | #??#self.ptree.notify_close() |
268 | 268 | self.logger.debug("got EndMessage; closing") |
343 | 343 | |
344 | 344 | def recv_ctl(self, msg): |
345 | 345 | """handle incoming messages for state 'control'""" |
346 | self.logger.debug("recv_ctl") | |
347 | 346 | if msg.type == 'ACK': |
348 | 347 | self.logger.debug("got ack (%s)", msg.type) |
349 | 348 | # check if ack matches write history msgid to generate ev_written |
357 | 356 | elif isinstance(msg, RoutedMessageBase): |
358 | 357 | metaworker = self.workers[msg.srcid] |
359 | 358 | if msg.type == StdOutMessage.ident: |
360 | if metaworker.eh: | |
361 | nodeset = NodeSet(msg.nodes) | |
362 | decoded = msg.data_decode() + '\n' | |
363 | self.logger.debug("StdOutMessage: \"%s\"", decoded) | |
364 | for line in decoded.splitlines(): | |
365 | for node in nodeset: | |
366 | metaworker._on_remote_node_msgline(node, | |
367 | line, | |
368 | 'stdout', | |
369 | self.gateway) | |
359 | nodeset = NodeSet(msg.nodes) | |
360 | decoded = msg.data_decode() + '\n' | |
361 | for line in decoded.splitlines(): | |
362 | for node in nodeset: | |
363 | metaworker._on_remote_node_msgline(node, line, 'stdout', | |
364 | self.gateway) | |
370 | 365 | elif msg.type == StdErrMessage.ident: |
371 | if metaworker.eh: | |
372 | nodeset = NodeSet(msg.nodes) | |
373 | decoded = msg.data_decode() + '\n' | |
374 | self.logger.debug("StdErrMessage: \"%s\"", decoded) | |
375 | for line in decoded.splitlines(): | |
376 | for node in nodeset: | |
377 | metaworker._on_remote_node_msgline(node, | |
378 | line, | |
379 | 'stderr', | |
380 | self.gateway) | |
366 | nodeset = NodeSet(msg.nodes) | |
367 | decoded = msg.data_decode() + '\n' | |
368 | for line in decoded.splitlines(): | |
369 | for node in nodeset: | |
370 | metaworker._on_remote_node_msgline(node, line, 'stderr', | |
371 | self.gateway) | |
381 | 372 | elif msg.type == RetcodeMessage.ident: |
382 | 373 | rc = msg.retcode |
383 | 374 | for node in NodeSet(msg.nodes): |
0 | 0 | # |
1 | # Copyright CEA/DAM/DIF (2012-2015) | |
1 | # Copyright CEA/DAM/DIF (2012-2016) | |
2 | 2 | # Contributor: Stephane THIELL <sthiell@stanford.edu> |
3 | 3 | # Contributor: Aurelien DEGREMONT <aurelien.degremont@cea.fr> |
4 | 4 | # |
194 | 194 | ends = end |
195 | 195 | stop = int(ends) |
196 | 196 | except ValueError: |
197 | raise RangeSetParseError(subrange, | |
198 | "cannot convert string to integer") | |
197 | if len(subrange) == 0: | |
198 | msg = "empty range" | |
199 | else: | |
200 | msg = "cannot convert string to integer" | |
201 | raise RangeSetParseError(subrange, msg) | |
199 | 202 | |
200 | 203 | # check preconditions |
201 | 204 | if stop > 1e100 or start > stop or step < 1: |
938 | 941 | |
939 | 942 | def pads(self): |
940 | 943 | """Get a tuple of padding length info for each dimension.""" |
941 | try: | |
942 | return tuple(rg.padding for rg in self._veclist[0]) | |
943 | except IndexError: | |
944 | return () | |
944 | # return a tuple of max padding length for each axis | |
945 | pad_veclist = ((rg.padding for rg in vec) for vec in self._veclist) | |
946 | return tuple(max(pads) for pads in zip(*pad_veclist)) | |
945 | 947 | |
946 | 948 | def get_autostep(self): |
947 | 949 | """Get autostep value (property)""" |
1153 | 1155 | # Simple heuristic that makes us faster |
1154 | 1156 | if len(self._veclist) * (len(self._veclist) - 1) / 2 > max_length * 10: |
1155 | 1157 | # *** nD full expand is preferred *** |
1156 | self._veclist = [[RangeSet.fromone(i) for i in tvec] \ | |
1158 | pads = self.pads() | |
1159 | self._veclist = [[RangeSet.fromone(i, pad=pads[axis]) | |
1160 | for axis, i in enumerate(tvec)] | |
1157 | 1161 | for tvec in set(self._iter())] |
1158 | 1162 | return |
1159 | 1163 |
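A standalone sketch of the per-axis padding computation now used by pads() above; the plain tuples stand in for the padding attribute of each RangeSet in the vector list:

# one tuple of per-axis padding lengths per vector
pad_veclist = [(1, 2), (1, 3), (2, 2)]
print tuple(max(pads) for pads in zip(*pad_veclist))   # -> (2, 3)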
34 | 34 | |
35 | 35 | Simple example of use: |
36 | 36 | |
37 | >>> from ClusterShell.Task import task_self | |
37 | >>> from ClusterShell.Task import task_self, NodeSet | |
38 | 38 | >>> |
39 | 39 | >>> # get task associated with calling thread |
40 | 40 | ... task = task_self() |
44 | 44 | <ClusterShell.Worker.Ssh.WorkerSsh object at 0x7f41da71b890> |
45 | 45 | >>> |
46 | 46 | >>> # run task in calling thread |
47 | ... task.resume() | |
47 | ... task.run() | |
48 | 48 | >>> |
49 | 49 | >>> # get results |
50 | ... for buf, nodelist in task.iter_buffers(): | |
51 | ... print NodeSet.fromlist(nodelist), buf | |
50 | ... for output, nodelist in task.iter_buffers(): | |
51 | ... print '%s: %s' % (NodeSet.fromlist(nodelist), output) | |
52 | 52 | ... |
53 | 53 | |
54 | 54 | """ |
654 | 654 | self._engine.add(port) |
655 | 655 | |
656 | 656 | @tasksyncmethod() |
657 | def _remove_port(self, port): | |
658 | """Remove a port from Engine (private method).""" | |
657 | def remove_port(self, port): | |
658 | """Close and remove a port from task previously created with port().""" | |
659 | 659 | self._engine.remove(port) |
660 | 660 | |
661 | 661 | def port(self, handler=None, autoclose=False): |
1301 | 1301 | # create gateway channel if needed |
1302 | 1302 | if gateway not in self.gateways: |
1303 | 1303 | chan = PropagationChannel(self, gateway) |
1304 | logging.getLogger(__name__).info("pchannel: creating new channel") | |
1304 | logger = logging.getLogger(__name__) | |
1305 | logger.info("pchannel: creating new channel %s", chan) | |
1305 | 1306 | # invoke gateway |
1306 | 1307 | timeout = None # FIXME: handle timeout for gateway channels |
1307 | 1308 | wrkcls = self.default('distant_worker') |
1327 | 1328 | Lookup by gateway, decref associated metaworker set and release |
1328 | 1329 | channel worker if needed. |
1329 | 1330 | """ |
1330 | logging.getLogger(__name__).info("pchannel_release %s %s", gateway, | |
1331 | metaworker) | |
1331 | logger = logging.getLogger(__name__) | |
1332 | logger.debug("pchannel_release %s %s", gateway, metaworker) | |
1332 | 1333 | |
1333 | 1334 | if gateway not in self.gateways: |
1334 | logging.getLogger(__name__).error("pchannel_release: no pchannel" | |
1335 | "found for gateway %s", | |
1336 | gateway) | |
1335 | logger.error("pchannel_release: no pchannel found for gateway %s", | |
1336 | gateway) | |
1337 | 1337 | else: |
1338 | 1338 | # TODO: delay gateway closing when other gateways are running |
1339 | 1339 | chanworker, metaworkers = self.gateways[gateway] |
1340 | 1340 | metaworkers.remove(metaworker) |
1341 | 1341 | if len(metaworkers) == 0: |
1342 | logging.getLogger(__name__).info("worker finishing") | |
1342 | logger.info("pchannel_release: destroying channel %s", | |
1343 | chanworker.eh) | |
1343 | 1344 | chanworker.abort() |
1344 | 1345 | # delete gateway reference |
1345 | 1346 | del self.gateways[gateway] |
0 | 0 | #!/usr/bin/env python |
1 | 1 | # |
2 | # Copyright CEA/DAM/DIF (2010-2015) | |
2 | # Copyright CEA/DAM/DIF (2010-2016) | |
3 | 3 | # Contributor: Henri DOREAU <henri.doreau@cea.fr> |
4 | 4 | # Contributor: Stephane THIELL <sthiell@stanford.edu> |
5 | 5 | # |
219 | 219 | return group |
220 | 220 | raise TopologyError('TopologyNodeGroup not found for node %s' % node) |
221 | 221 | |
222 | def inner_node_count(self): | |
223 | """helper to get inner node count (root and gateway nodes)""" | |
224 | return sum(len(group.nodeset) for group in self.groups | |
225 | if group.children_len() > 0) | |
226 | ||
227 | def leaf_node_count(self): | |
228 | """helper to get leaf node count""" | |
229 | return sum(len(group.nodeset) for group in self.groups | |
230 | if group.children_len() == 0) | |
222 | 231 | |
223 | 232 | class TopologyRoute(object): |
224 | 233 | """A single route between two nodesets""" |
0 | 0 | # |
1 | # Copyright CEA/DAM/DIF (2009-2014) | |
2 | # Contributor: Stephane THIELL <stephane.thiell@cea.fr> | |
1 | # Copyright CEA/DAM/DIF (2009-2016) | |
2 | # Contributor: Stephane THIELL <sthiell@stanford.edu> | |
3 | 3 | # |
4 | 4 | # This file is part of the ClusterShell library. |
5 | 5 | # |
40 | 40 | """ |
41 | 41 | |
42 | 42 | import errno |
43 | import logging | |
43 | 44 | import os |
44 | 45 | import Queue |
45 | 46 | import thread |
48 | 49 | set_nonblock_flag |
49 | 50 | |
50 | 51 | from ClusterShell.Engine.Engine import EngineBaseTimer, E_READ, E_WRITE |
52 | ||
53 | ||
54 | LOGGER = logging.getLogger(__name__) | |
51 | 55 | |
52 | 56 | |
53 | 57 | class EngineClientException(Exception): |
318 | 322 | try: |
319 | 323 | wcnt = os.write(wfile.fd, wfile.wbuf) |
320 | 324 | except OSError, exc: |
321 | if (exc.errno == errno.EAGAIN): | |
325 | if exc.errno == errno.EAGAIN: | |
326 | # _handle_write() is not only called by the engine but also | |
327 | # by _write(), so this is legit: we just try again later | |
322 | 328 | self._set_writing(sname) |
329 | return | |
330 | if exc.errno == errno.EPIPE: | |
331 | # broken pipe: log warning message and do NOT retry | |
332 | LOGGER.warning('%s: %s', self, exc) | |
323 | 333 | return |
324 | 334 | raise |
325 | 335 | if wcnt > 0: |
326 | self.worker._on_written(self.key, wcnt, sname) | |
327 | 336 | # dequeue written buffer |
328 | 337 | wfile.wbuf = wfile.wbuf[wcnt:] |
329 | 338 | # check for possible ending |
330 | 339 | if wfile.eof and not wfile.wbuf: |
340 | self.worker._on_written(self.key, wcnt, sname) | |
331 | 341 | # remove stream from engine (not directly) |
332 | 342 | self._engine.remove_stream(self, wfile) |
333 | 343 | else: |
334 | 344 | self._set_writing(sname) |
345 | self.worker._on_written(self.key, wcnt, sname) | |
335 | 346 | |
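A standalone sketch of the errno handling pattern used by _handle_write() above (Python 2 syntax, to match the surrounding code): EAGAIN means try again later, EPIPE is logged and the write is dropped, anything else is re-raised:

import errno
import os

def try_write(fd, buf, logger):
    """best-effort non-blocking write, mirroring the pattern above"""
    try:
        return os.write(fd, buf)
    except OSError, exc:
        if exc.errno == errno.EAGAIN:
            return 0                            # retry later
        if exc.errno == errno.EPIPE:
            logger.warning('broken pipe: %s', exc)
            return None                         # do not retry
        raise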
336 | 347 | def _exec_nonblock(self, commandlist, shell=False, env=None): |
337 | 348 | """ |
477 | 488 | id(self), fd_in, fd_out) |
478 | 489 | |
479 | 490 | def _start(self): |
491 | """Start port.""" | |
480 | 492 | return self |
481 | 493 | |
482 | 494 | def _close(self, abort, timeout): |
483 | """ | |
484 | Close port pipes. | |
485 | """ | |
495 | """Close port.""" | |
486 | 496 | if not self._msgq.empty(): |
487 | 497 | # purge msgq |
488 | 498 | try: |
512 | 522 | """ |
513 | 523 | Port message send method that will wait for acknowledgement |
514 | 524 | unless the send_once parameter is set.
515 | """ | |
525 | ||
526 | May be called from another thread. Will generate ev_msg() on | |
527 | Port event handler (in Port task/thread). | |
528 | ||
529 | Return False if the message cannot be sent (e.g. port closed). | |
530 | """ | |
531 | if self._msgq is None: # called after port closed? | |
532 | return False | |
533 | ||
516 | 534 | pmsg = EnginePort._Msg(send_msg, not send_once) |
517 | 535 | self._msgq.put(pmsg, block=True, timeout=None) |
518 | 536 | try: |
524 | 542 | |
525 | 543 | def msg_send(self, send_msg): |
526 | 544 | """ |
527 | Port message send-once method (no acknowledgement). | |
528 | """ | |
529 | self.msg(send_msg, send_once=True) | |
545 | Port message send-once method (no acknowledgement). See msg(). | |
546 | ||
547 | Return False if the message cannot be sent (e.g. port closed). | |
548 | """ | |
549 | return self.msg(send_msg, send_once=True) |
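A usage sketch tying port(), msg_send() and the now-public Task.remove_port() together; the handler and payload are illustrative only:

from ClusterShell.Event import EventHandler
from ClusterShell.Task import task_self

class MsgHandler(EventHandler):
    def ev_msg(self, port, msg):
        print 'port received: %s' % msg

task = task_self()
port = task.port(handler=MsgHandler(), autoclose=True)
port.msg_send('hello')    # returns False if the port is already closed
# ... task.run() would dispatch ev_msg() in the task thread ...
task.remove_port(port)    # close and remove the port explicitly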
0 | 0 | # |
1 | # Copyright CEA/DAM/DIF (2007-2014) | |
2 | # Contributor: Stephane THIELL <stephane.thiell@cea.fr> | |
1 | # Copyright CEA/DAM/DIF (2007-2016) | |
2 | # Contributor: Stephane THIELL <sthiell@stanford.edu> | |
3 | 3 | # |
4 | 4 | # This file is part of the ClusterShell library. |
5 | 5 | # |
38 | 38 | import errno |
39 | 39 | import os |
40 | 40 | import shlex |
41 | import sys | |
42 | 41 | |
43 | 42 | from ClusterShell.NodeSet import NodeSet |
44 | 43 | from ClusterShell.Worker.EngineClient import EngineClientError |
140 | 139 | # 0 1 2 3 4 5 6 7 |
141 | 140 | # pdcp@cors113: cors115: fatal: /var/cache/shine/... |
142 | 141 | # 0 1 2 3... |
143 | ||
144 | words = line.split() | |
142 | words = line.split() | |
145 | 143 | # Set return code for nodename of worker |
146 | 144 | if self.MODE == 'pdsh': |
147 | 145 | if len(words) == 4 and words[2] == "command" and \ |
156 | 154 | self.worker._on_node_rc(words[1][:-1], errno.ENOENT) |
157 | 155 | |
158 | 156 | except Exception, exc: |
159 | print >> sys.stderr, exc | |
160 | raise EngineClientError() | |
157 | raise EngineClientError("Pdsh parser error: %s" % exc) | |
161 | 158 | else: |
162 | 159 | # split pdsh reply "nodename: msg" |
163 | 160 | nodename, msg = line.split(': ', 1) |
0 | 0 | # |
1 | # Copyright CEA/DAM/DIF (2011-2015) | |
1 | # Copyright CEA/DAM/DIF (2011-2016) | |
2 | 2 | # Contributor: Stephane THIELL <sthiell@stanford.edu> |
3 | 3 | # |
4 | 4 | # This file is part of the ClusterShell library. |
33 | 33 | ClusterShell v2 tree propagation worker |
34 | 34 | """ |
35 | 35 | |
36 | import base64 | |
36 | 37 | import logging |
37 | 38 | import os |
38 | 39 | from os.path import basename, dirname, isfile, normpath |
66 | 67 | """ |
67 | 68 | Called to indicate that a worker has data to read. |
68 | 69 | """ |
69 | self.logger.debug("MetaWorkerEventHandler: ev_read (%s)", | |
70 | worker.current_sname) | |
71 | 70 | self.metaworker._on_node_msgline(worker.current_node, |
72 | 71 | worker.current_msg, |
73 | 72 | 'stdout') |
87 | 86 | metaworker = self.metaworker |
88 | 87 | metaworker.current_node = node |
89 | 88 | metaworker.current_sname = sname |
90 | metaworker.eh.ev_written(metaworker, node, sname, size) | |
89 | if metaworker.eh: | |
90 | metaworker.eh.ev_written(metaworker, node, sname, size) | |
91 | 91 | |
92 | 92 | def ev_hup(self, worker): |
93 | 93 | """ |
115 | 115 | """ |
116 | 116 | self.logger.debug("MetaWorkerEventHandler: ev_close") |
117 | 117 | self.metaworker._check_fini() |
118 | ##print >>sys.stderr, "ev_close?" | |
119 | 118 | #self._completed += 1 |
120 | 119 | #if self._completed >= self.grpcount: |
121 | # #print >>sys.stderr, "ev_close!" | |
122 | 120 | # metaworker = self.metaworker |
123 | 121 | # metaworker.eh.ev_close(metaworker) |
124 | 122 | |
128 | 126 | ClusterShell tree worker Class. |
129 | 127 | |
130 | 128 | """ |
131 | UNTAR_CMD_FMT = 'tar -xf - -C "%s"' | |
129 | # copy and rcopy tar command formats | |
130 | # the choice of single or double quotes is essential | |
131 | UNTAR_CMD_FMT = "tar -xf - -C '%s'" | |
132 | TAR_CMD_FMT = "tar -cf - -C '%s' " \ | |
133 | "--transform \"s,^\\([^/]*\\)[/]*,\\1.$(hostname -s)/,\" " \ | |
134 | "'%s' | base64 -w 65536" | |
132 | 135 | |
133 | 136 | def __init__(self, nodes, handler, timeout, **kwargs): |
134 | 137 | """ |
143 | 146 | """ |
144 | 147 | DistantWorker.__init__(self, handler) |
145 | 148 | |
149 | self.logger = logging.getLogger(__name__) | |
146 | 150 | self.workers = [] |
147 | 151 | self.nodes = NodeSet(nodes) |
148 | 152 | self.timeout = timeout |
151 | 155 | self.dest = kwargs.get('dest') |
152 | 156 | autoclose = kwargs.get('autoclose', False) |
153 | 157 | self.stderr = kwargs.get('stderr', False) |
158 | self.logger.debug("stderr=%s", self.stderr) | |
154 | 159 | self.remote = kwargs.get('remote', True) |
160 | self.preserve = kwargs.get('preserve', None) | |
161 | self.reverse = kwargs.get('reverse', False) | |
162 | self._rcopy_bufs = {} | |
163 | self._rcopy_tars = {} | |
155 | 164 | self._close_count = 0 |
156 | 165 | self._start_count = 0 |
157 | 166 | self._child_count = 0 |
158 | 167 | self._target_count = 0 |
159 | 168 | self._has_timeout = False |
160 | self.logger = logging.getLogger(__name__) | |
161 | 169 | |
162 | 170 | if self.command is None and self.source is None: |
163 | 171 | raise ValueError("missing command or source parameter in " |
164 | 172 | "WorkerTree constructor") |
165 | 173 | |
174 | # rcopy enforces separate stderr to handle tar error messages | |
175 | # because stdout is used for data transfer | |
176 | if self.source and self.reverse: | |
177 | self.stderr = True | |
178 | ||
166 | 179 | # build gateway invocation command |
167 | 180 | invoke_gw_args = [] |
168 | for envname in ('PYTHONPATH', \ | |
169 | 'CLUSTERSHELL_GW_LOG_DIR', \ | |
170 | 'CLUSTERSHELL_GW_LOG_LEVEL'): | |
181 | for envname in ('PYTHONPATH', | |
182 | 'CLUSTERSHELL_GW_LOG_DIR', | |
183 | 'CLUSTERSHELL_GW_LOG_LEVEL', | |
184 | 'CLUSTERSHELL_GW_B64_LINE_LENGTH'): | |
171 | 185 | envval = os.getenv(envname) |
172 | 186 | if envval: |
173 | 187 | invoke_gw_args.append("%s=%s" % (envname, envval)) |
183 | 197 | self.router = None |
184 | 198 | |
185 | 199 | self.upchannel = None |
200 | ||
186 | 201 | self.metahandler = MetaWorkerEventHandler(self) |
187 | 202 | |
188 | 203 | # gateway -> active targets selection |
211 | 226 | # Prepare copy params if source is defined |
212 | 227 | destdir = None |
213 | 228 | if self.source: |
214 | self.logger.debug("copy self.dest=%s", self.dest) | |
215 | # Special processing to determine best arcname and destdir for tar. | |
216 | # The only case that we don't support is when source is a file and | |
217 | # dest is a dir without a finishing / (in that case we cannot | |
218 | # determine remotely whether it is a file or a directory). | |
219 | if isfile(self.source): | |
220 | # dest is not normalized here | |
221 | arcname = basename(self.dest) or basename(normpath(self.source)) | |
222 | destdir = dirname(self.dest) | |
229 | if self.reverse: | |
230 | self.logger.debug("rcopy source=%s, dest=%s", self.source, | |
231 | self.dest) | |
232 | # dest is a directory | |
233 | destdir = self.dest | |
223 | 234 | else: |
224 | arcname = basename(normpath(self.source)) | |
225 | destdir = os.path.normpath(self.dest) | |
226 | self.logger.debug("copy arcname=%s destdir=%s", arcname, destdir) | |
235 | self.logger.debug("copy source=%s, dest=%s", self.source, | |
236 | self.dest) | |
237 | # Special processing to determine best arcname and destdir for | |
238 | # tar. The only case that we don't support is when source is a | |
239 | # file and dest is a dir without a finishing / (in that case we | |
240 | # cannot determine remotely whether it is a file or a | |
241 | # directory). | |
242 | if isfile(self.source): | |
243 | # dest is not normalized here | |
244 | arcname = basename(self.dest) or \ | |
245 | basename(normpath(self.source)) | |
246 | destdir = dirname(self.dest) | |
247 | else: | |
248 | arcname = basename(normpath(self.source)) | |
249 | destdir = os.path.normpath(self.dest) | |
250 | self.logger.debug("copy arcname=%s destdir=%s", arcname, | |
251 | destdir) | |
227 | 252 | |
228 | 253 | # And launch stuffs |
229 | 254 | next_hops = self._distribute(self.task.info("fanout"), nodes.copy()) |
238 | 263 | self._target_count += len(targets) |
239 | 264 | if self.remote: |
240 | 265 | if self.source: |
241 | self.logger.debug('_launch remote untar (destdir=%s)', | |
242 | destdir) | |
243 | self.command = self.UNTAR_CMD_FMT % destdir | |
244 | worker = self.task.shell(self.command, | |
245 | nodes=targets, | |
246 | timeout=self.timeout, | |
247 | handler=self.metahandler, | |
248 | stderr=self.stderr, | |
249 | tree=False) | |
266 | # Note: specific case where targets are not in the topology; | |
267 | # self.source is never used on remote gateways, | |
268 | # so we try a direct copy/rcopy: | |
269 | self.logger.debug('_launch copy r=%s source=%s dest=%s', | |
270 | self.reverse, self.source, self.dest) | |
271 | worker = self.task.copy(self.source, self.dest, targets, | |
272 | handler=self.metahandler, | |
273 | stderr=self.stderr, | |
274 | timeout=self.timeout, | |
275 | preserve=self.preserve, | |
276 | reverse=self.reverse, | |
277 | tree=False) | |
250 | 278 | else: |
251 | 279 | worker = self.task.shell(self.command, |
252 | 280 | nodes=targets, |
270 | 298 | self.logger.debug("trying gateway %s to reach %s", gw, targets) |
271 | 299 | if self.source: |
272 | 300 | self._copy_remote(self.source, destdir, targets, gw, |
273 | self.timeout) | |
301 | self.timeout, self.reverse) | |
274 | 302 | else: |
275 | 303 | self._execute_remote(self.command, targets, gw, |
276 | 304 | self.timeout) |
277 | 305 | |
278 | 306 | # Copy mode: send tar data after above workers have been initialized |
279 | if self.source: | |
307 | if self.source and not self.reverse: | |
280 | 308 | try: |
281 | 309 | # create temporary tar file with all source files |
282 | 310 | tmptar = tempfile.TemporaryFile() |
305 | 333 | distribution[gw] = dstset |
306 | 334 | return distribution |
307 | 335 | |
308 | def _copy_remote(self, source, dest, targets, gateway, timeout): | |
336 | def _copy_remote(self, source, dest, targets, gateway, timeout, reverse): | |
309 | 337 | """run a remote copy in tree mode (using gateway)""" |
310 | self.logger.debug("_copy_remote gateway=%s source=%s dest=%s", | |
311 | gateway, source, dest) | |
338 | self.logger.debug("_copy_remote gateway=%s source=%s dest=%s " | |
339 | "reverse=%s", gateway, source, dest, reverse) | |
312 | 340 | |
313 | 341 | self._target_count += len(targets) |
314 | 342 | |
315 | 343 | self.gwtargets[gateway] = targets.copy() |
316 | 344 | |
317 | cmd = self.UNTAR_CMD_FMT % dest | |
345 | # tar commands are built here and launched on targets | |
346 | if reverse: | |
347 | # these replace calls escape embedded single quotes (') in the single-quoted arguments | |
348 | srcdir = dirname(source).replace("'", '\'\"\'\"\'') | |
349 | srcbase = basename(normpath(self.source)).replace("'", '\'\"\'\"\'') | |
350 | cmd = self.TAR_CMD_FMT % (srcdir, srcbase) | |
351 | else: | |
352 | cmd = self.UNTAR_CMD_FMT % dest.replace("'", '\'\"\'\"\'') | |
353 | ||
354 | self.logger.debug('_copy_remote: tar cmd: %s', cmd) | |
318 | 355 | |
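# Quoting sketch (illustrative): with dest = /tmp/it's here, the replace
# above yields:  tar -xf - -C '/tmp/it'"'"'s here'
# i.e. each embedded ' becomes '"'"' so the shell closes the quote, emits
# a literal single quote, then reopens the quote.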
319 | 356 | pchan = self.task._pchannel(gateway, self) |
320 | 357 | pchan.shell(nodes=targets, command=cmd, worker=self, timeout=timeout, |
343 | 380 | return [] |
344 | 381 | |
345 | 382 | def _on_remote_node_msgline(self, node, msg, sname, gateway): |
346 | DistantWorker._on_node_msgline(self, node, msg, sname) | |
383 | """remote msg received""" | |
384 | if not self.source or not self.reverse or sname != 'stdout': | |
385 | DistantWorker._on_node_msgline(self, node, msg, sname) | |
386 | return | |
387 | ||
388 | # rcopy only: we expect base64 encoded tar content on stdout | |
389 | encoded = self._rcopy_bufs.setdefault(node, '') + msg | |
390 | if node not in self._rcopy_tars: | |
391 | self._rcopy_tars[node] = tempfile.TemporaryFile() | |
392 | ||
393 | # partial base64 decoding requires a multiple of 4 characters | |
394 | encoded_sz = (len(encoded) // 4) * 4 | |
395 | # write decoded binary msg to node temporary tarfile | |
396 | self._rcopy_tars[node].write(base64.b64decode(encoded[0:encoded_sz])) | |
397 | # keep trailing encoded chars for next time | |
398 | self._rcopy_bufs[node] = encoded[encoded_sz:] | |
347 | 399 | |
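A standalone sketch of the chunked base64 decoding performed above: only complete 4-character groups can be decoded, so the remainder is buffered until the next message arrives (data is illustrative):

import base64

buf = ''
for msg in ('aGVsbG8gd2', '9ybGQ='):            # encoded stream, split arbitrarily
    encoded = buf + msg
    usable = (len(encoded) // 4) * 4            # largest multiple of 4 chars
    print base64.b64decode(encoded[:usable])    # prints 'hello ' then 'world'
    buf = encoded[usable:]                      # keep the rest for next round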
348 | 400 | def _on_remote_node_rc(self, node, rc, gateway): |
401 | """remote rc received""" | |
349 | 402 | DistantWorker._on_node_rc(self, node, rc) |
350 | 403 | self.logger.debug("_on_remote_node_rc %s %s via gw %s", node, |
351 | 404 | self._close_count, gateway) |
405 | ||
406 | # finalize rcopy: extract tar data | |
407 | if self.source and self.reverse: | |
408 | for node, buf in self._rcopy_bufs.iteritems(): | |
409 | tarfileobj = self._rcopy_tars[node] | |
410 | if len(buf) > 0: | |
411 | self.logger.debug("flushing node %s buf %d bytes", node, | |
412 | len(buf)) | |
413 | tarfileobj.write(buf) | |
414 | tarfileobj.flush() | |
415 | tarfileobj.seek(0) | |
416 | try: | |
417 | tmptar = tarfile.open(fileobj=tarfileobj) | |
418 | try: | |
419 | self.logger.debug("%s extracting %d members in dest %s", | |
420 | node, len(tmptar.getmembers()), | |
421 | self.dest) | |
422 | tmptar.extractall(path=self.dest) | |
423 | except IOError, ex: | |
424 | self._on_remote_node_msgline(node, ex, 'stderr', | |
425 | gateway) | |
426 | # note: try-except-finally not supported before python 2.5 | |
427 | finally: | |
428 | tmptar.close() | |
429 | self._rcopy_bufs = {} | |
430 | self._rcopy_tars = {} | |
431 | ||
352 | 432 | self.gwtargets[gateway].remove(node) |
353 | 433 | self._close_count += 1 |
354 | 434 | self._check_fini(gateway) |
355 | 435 | |
356 | 436 | def _on_remote_node_timeout(self, node, gateway): |
437 | """remote node timeout received""" | |
357 | 438 | DistantWorker._on_node_timeout(self, node) |
358 | 439 | self.logger.debug("_on_remote_node_timeout %s via gw %s", node, gateway) |
359 | 440 | self._close_count += 1 |
391 | 472 | if gateway: |
392 | 473 | targets = self.gwtargets[gateway] |
393 | 474 | if not targets: |
475 | # no more active targets for this gateway | |
394 | 476 | self.logger.debug("WorkerTree._check_fini %s call pchannel_" |
395 | 477 | "release for gw %s", self, gateway) |
396 | 478 | self.task._pchannel_release(gateway, self) |
479 | del self.gwtargets[gateway] | |
397 | 480 | |
398 | 481 | def write(self, buf): |
399 | 482 | """Write to worker clients.""" |
404 | 487 | worker.write(buf) |
405 | 488 | except OSError, exc: |
406 | 489 | osexc = exc |
490 | ||
407 | 491 | for gateway, targets in self.gwtargets.items(): |
492 | assert len(targets) > 0 | |
408 | 493 | self.task._pchannel(gateway, self).write(nodes=targets, |
409 | 494 | buf=buf, |
410 | 495 | worker=self) |
420 | 505 | for worker in self.workers: |
421 | 506 | worker.set_write_eof() |
422 | 507 | for gateway, targets in self.gwtargets.items(): |
508 | assert len(targets) > 0 | |
423 | 509 | self.task._pchannel(gateway, self).set_write_eof(nodes=targets, |
424 | 510 | worker=self) |
425 | 511 |
658 | 658 | def error(self): |
659 | 659 | """Read worker error buffer.""" |
660 | 660 | return self.read(sname='stderr') |
661 | ||
662 | def _on_start(self, key): | |
663 | """Called on command start.""" | |
664 | if not self.started: | |
665 | self.started = True | |
666 | if self.eh: | |
667 | self.eh.ev_start(self) | |
668 | ||
669 | if self.eh: | |
670 | self.eh.ev_pickup(self) | |
671 | ||
672 | def _on_rc(self, key, rc): | |
673 | """Command return code received.""" | |
674 | self.current_rc = rc | |
675 | ||
676 | self.task._rc_set(self, key, rc) | |
677 | ||
678 | if self.eh: | |
679 | self.eh.ev_hup(self) | |
680 | ||
681 | def _on_written(self, key, bytes_count, sname): | |
682 | """Notification of bytes written.""" | |
683 | # set node and stream name (compat only) | |
684 | self.current_sname = sname | |
685 | ||
686 | # generate event - for ev_written, also check for new signature (1.7) | |
687 | # NOTE: add DeprecationWarning in 1.8 for old ev_written signature | |
688 | if self.eh and len(inspect.getargspec(self.eh.ev_written)[0]) == 5: | |
689 | self.eh.ev_written(self, key, sname, bytes_count) |
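A sketch of an event handler implementing the new (1.7) ev_written signature detected above through inspect.getargspec(), that is five arguments including self; names are illustrative:

from ClusterShell.Event import EventHandler

class WriteHandler(EventHandler):
    def ev_written(self, worker, node, sname, bytes_count):
        print '%s: wrote %d bytes to %s' % (node, bytes_count, sname)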
0 | 0 | # |
1 | # Copyright CEA/DAM/DIF (2007-2015) | |
1 | # Copyright CEA/DAM/DIF (2007-2016) | |
2 | 2 | # Contributor: Stephane THIELL <sthiell@stanford.edu> |
3 | 3 | # |
4 | 4 | # This file is part of the ClusterShell library. |
47 | 47 | - ClusterShell.Task |
48 | 48 | """ |
49 | 49 | |
50 | __version__ = '1.7' | |
50 | __version__ = '1.7.2' | |
51 | 51 | __version_info__ = tuple([ int(_n) for _n in __version__.split('.')]) |
52 | __date__ = '2015/11/10' | |
52 | __date__ = '2016/06/18' | |
53 | 53 | __author__ = 'Stephane Thiell <sthiell@stanford.edu>' |
54 | 54 | __url__ = 'http://clustershell.readthedocs.org/' |
0 | 0 | #!/usr/bin/env python |
1 | 1 | # |
2 | # Copyright CEA/DAM/DIF (2008-2015) | |
2 | # Copyright CEA/DAM/DIF (2008-2016) | |
3 | 3 | # Contributor: Stephane THIELL <sthiell@stanford.edu> |
4 | 4 | # |
5 | 5 | # This file is part of the ClusterShell library. |
48 | 48 | # User, in-prefix config install (rpmbuild or pip as user) |
49 | 49 | CFGDIR = 'etc/clustershell' |
50 | 50 | |
51 | VERSION='1.7' | |
51 | VERSION='1.7.2' | |
52 | 52 | |
53 | 53 | setup(name='ClusterShell', |
54 | 54 | version=VERSION, |
70 | 70 | 'scripts/clush', |
71 | 71 | 'scripts/nodeset'], |
72 | 72 | author='Stephane Thiell', |
73 | author_email='stephane.thiell@cea.fr', | |
73 | author_email='sthiell@stanford.edu', | |
74 | 74 | license='CeCILL-C (French equivalent to LGPLv2+)', |
75 | 75 | url='http://clustershell.sourceforge.net/', |
76 | 76 | download_url='http://sourceforge.net/projects/clustershell/files/' |
31 | 31 | self._clubak_t([], "]o[o]: bar\n", outfmt % "]o[o]") |
32 | 32 | self._clubak_t([], "foo:\n", "---------------\nfoo\n---------------\n\n") |
33 | 33 | self._clubak_t([], "foo: \n", "---------------\nfoo\n---------------\n \n") |
34 | # nD | |
35 | self._clubak_t([], "n1c1: bar\n", outfmt % "n1c1") | |
36 | # Ticket #286 | |
37 | self._clubak_t([], "n1c01: bar\n", outfmt % "n1c01") | |
38 | self._clubak_t([], "n01c01: bar\n", outfmt % "n01c01") | |
39 | self._clubak_t([], "n001c01: bar\nn001c02: bar\n", outfmt % "n001c01" + outfmt % "n001c02") | |
34 | 40 | |
35 | 41 | def test_001_verbosity(self): |
36 | 42 | """test clubak (-q/-v/-d)""" |
57 | 63 | self._clubak_t(["-b"], "]o[o]: bar\n", outfmt % "]o[o]") |
58 | 64 | self._clubak_t(["-b"], "foo:\n", "---------------\nfoo\n---------------\n\n") |
59 | 65 | self._clubak_t(["-b"], "foo: \n", "---------------\nfoo\n---------------\n \n") |
66 | # nD | |
67 | self._clubak_t(["-b"], "n1c1: bar\n", outfmt % "n1c1") | |
68 | # Ticket #286 | |
69 | self._clubak_t(["-b"], "n1c01: bar\n", outfmt % "n1c01") | |
70 | self._clubak_t(["-b"], "n001c01: bar\n", outfmt % "n001c01") | |
71 | self._clubak_t(["-b"], "n001c01: bar\nn001c02: bar\n", outfmt % "n001c[01-02] (2)") | |
60 | 72 | |
61 | 73 | def test_003_L(self): |
62 | 74 | """test clubak (line mode -L)""" |
64 | 76 | self._clubak_t(["-L", "-S", ": "], "foo: bar\n", "foo: bar\n") |
65 | 77 | self._clubak_t(["-bL"], "foo: bar\n", "foo: bar\n") |
66 | 78 | self._clubak_t(["-bL", "-S", ": "], "foo: bar\n", "foo: bar\n") |
79 | # nD | |
80 | self._clubak_t(["-bL", "-S", ": "], "n1c01: bar\n", "n1c01: bar\n") | |
67 | 81 | |
68 | 82 | def test_004_N(self): |
69 | 83 | """test clubak (no header -N)""" |
109 | 123 | self._clubak_t(["-b", "--interpret-keys=always"], "foo[1-3]: bar\n", outfmt % "foo[1-3] (3)") |
110 | 124 | self._clubak_t(["-b", "--interpret-keys=auto"], "[]: bar\n", outfmt % "[]") |
111 | 125 | self._clubak_t(["-b", "--interpret-keys=never"], "[]: bar\n", outfmt % "[]") |
112 | self._clubak_t(["-b", "--interpret-keys=always"], "[]: bar\n", '', 1, "Parse error: empty node name: \"[]\"\n") | |
126 | self._clubak_t(["-b", "--interpret-keys=always"], "[]: bar\n", '', 1, "Parse error: bad range: \"empty range\"\n") | |
113 | 127 | |
114 | 128 | def test_008_color(self): |
115 | 129 | """test clubak (--color)""" |
22 | 22 | from ClusterShell.CLI.Clush import main |
23 | 23 | from ClusterShell.NodeSet import NodeSet |
24 | 24 | from ClusterShell.Task import task_cleanup |
25 | ||
26 | from ClusterShell.Worker.EngineClient import EngineClientNotSupportedError | |
25 | 27 | |
26 | 28 | |
27 | 29 | class CLIClushTest_A(unittest.TestCase): |
456 | 458 | re.compile(r'%s: foo\nclush: 0/1\r.*' |
457 | 459 | % HOSTNAME)) |
458 | 460 | |
461 | def test_032_worker_pdsh(self): | |
462 | """test clush (worker pdsh)""" | |
463 | # Warning: when launched from jenkins (not a tty), this is the same as: | |
464 | # echo -n | clush --worker=pdsh; we need --nostdin because the pdsh | |
465 | # worker doesn't support write | |
466 | self._clush_t(["-w", HOSTNAME, "--worker=pdsh", "--nostdin", | |
467 | "echo foo"], | |
468 | None, "%s: foo\n" % HOSTNAME, 0) | |
469 | # write not supported by pdsh worker | |
470 | self.assertRaises(EngineClientNotSupportedError, self._clush_t, | |
471 | ["-w", HOSTNAME, "-R", "pdsh", "cat"], "bar", None, 1) | |
472 | ||
473 | def test_033_worker_pdsh_tty(self): | |
474 | """test clush (worker pdsh) [tty]""" | |
475 | setattr(ClusterShell.CLI.Clush, '_f_user_interaction', True) | |
476 | try: | |
477 | self._clush_t(["-w", HOSTNAME, "--worker=pdsh", "echo foo"], | |
478 | None, "%s: foo\n" % HOSTNAME, 0) | |
479 | finally: | |
480 | delattr(ClusterShell.CLI.Clush, '_f_user_interaction') | |
481 | ||
482 | def test_034_pick(self): | |
483 | """test clush --pick""" | |
484 | self._clush_t(["-w", "%s,localhost" % HOSTNAME, "--pick", "1", | |
485 | "echo foo"], None, | |
486 | re.compile(r"^(localhost|%s): foo\n$" % HOSTNAME)) | |
487 | self._clush_t(["-w", "%s,localhost" % HOSTNAME, "--pick", "2", | |
488 | "echo foo"], None, | |
489 | re.compile(r"^((localhost|%s): foo\n){2}$" % HOSTNAME)) | |
459 | 490 | |
460 | 491 | |
461 | 492 | class CLIClushTest_B_StdinFailure(unittest.TestCase): |
9 | 9 | import shutil |
10 | 10 | import sys |
11 | 11 | import tempfile |
12 | from textwrap import dedent | |
12 | 13 | import unittest |
13 | 14 | |
14 | 15 | sys.path.insert(0, '../lib') |
78 | 79 | """test CLI.Config.ClushConfig (default)""" |
79 | 80 | |
80 | 81 | f = tempfile.NamedTemporaryFile(prefix='testclushconfig') |
81 | f.write(""" | |
82 | [Main] | |
83 | fanout: 42 | |
84 | connect_timeout: 14 | |
85 | command_timeout: 0 | |
86 | history_size: 100 | |
87 | color: auto | |
88 | verbosity: 1 | |
89 | #ssh_user: root | |
90 | #ssh_path: /usr/bin/ssh | |
91 | #ssh_options: -oStrictHostKeyChecking=no | |
92 | """) | |
93 | ||
82 | f.write(dedent(""" | |
83 | [Main] | |
84 | fanout: 42 | |
85 | connect_timeout: 14 | |
86 | command_timeout: 0 | |
87 | history_size: 100 | |
88 | color: auto | |
89 | verbosity: 1 | |
90 | #ssh_user: root | |
91 | #ssh_path: /usr/bin/ssh | |
92 | #ssh_options: -oStrictHostKeyChecking=no""")) | |
94 | 93 | f.flush() |
95 | 94 | parser = OptionParser("dummy") |
96 | 95 | parser.install_config_options() |
113 | 112 | self.assertEqual(config.ssh_path, None) |
114 | 113 | self.assertEqual(config.ssh_options, None) |
115 | 114 | f.close() |
116 | ||
115 | ||
117 | 116 | def testClushConfigFull(self): |
118 | 117 | """test CLI.Config.ClushConfig (full)""" |
119 | 118 | |
150 | 149 | self.assertEqual(config.ssh_path, "/usr/bin/ssh") |
151 | 150 | self.assertEqual(config.ssh_options, "-oStrictHostKeyChecking=no") |
152 | 151 | f.close() |
153 | ||
152 | ||
154 | 153 | def testClushConfigError(self): |
155 | 154 | """test CLI.Config.ClushConfig (error)""" |
156 | 155 | |
215 | 214 | soft, hard = resource.getrlimit(resource.RLIMIT_NOFILE) |
216 | 215 | hard2 = min(32768, hard) |
217 | 216 | f = tempfile.NamedTemporaryFile(prefix='testclushconfig') |
218 | f.write(""" | |
219 | [Main] | |
220 | fanout: 42 | |
221 | connect_timeout: 14 | |
222 | command_timeout: 0 | |
223 | history_size: 100 | |
224 | color: auto | |
225 | fd_max: %d | |
226 | verbosity: 1 | |
227 | """ % hard2) | |
228 | ||
217 | f.write(dedent(""" | |
218 | [Main] | |
219 | fanout: 42 | |
220 | connect_timeout: 14 | |
221 | command_timeout: 0 | |
222 | history_size: 100 | |
223 | color: auto | |
224 | fd_max: %d | |
225 | verbosity: 1 | |
226 | """ % hard2)) | |
229 | 227 | f.flush() |
230 | 228 | parser = OptionParser("dummy") |
231 | 229 | parser.install_config_options() |
245 | 243 | soft, hard = resource.getrlimit(resource.RLIMIT_NOFILE) |
246 | 244 | self.assertEqual(soft, hard2) |
247 | 245 | f.close() |
248 | ||
246 | ||
247 | def testClushConfigSetRlimitValueError(self): | |
248 | """test CLI.Config.ClushConfig (setrlimit ValueError)""" | |
249 | soft, hard = resource.getrlimit(resource.RLIMIT_NOFILE) | |
250 | f = tempfile.NamedTemporaryFile(prefix='testclushconfig') | |
251 | f.write(dedent(""" | |
252 | [Main] | |
253 | fanout: 42 | |
254 | connect_timeout: 14 | |
255 | command_timeout: 0 | |
256 | history_size: 100 | |
257 | color: auto | |
258 | # Use wrong fd_max value to generate ValueError | |
259 | fd_max: -1 | |
260 | verbosity: 1""")) | |
261 | f.flush() | |
262 | parser = OptionParser("dummy") | |
263 | parser.install_config_options() | |
264 | parser.install_display_options(verbose_options=True) | |
265 | parser.install_connector_options() | |
266 | options, _ = parser.parse_args([]) | |
267 | config = ClushConfig(options, filename=f.name) | |
268 | f.close() | |
269 | display = Display(options, config) | |
270 | ||
271 | class TestException(Exception): pass | |
272 | ||
273 | def mock_vprint_err(level, message): | |
274 | if message.startswith('Warning: Failed to set max open files'): | |
275 | raise TestException() | |
276 | ||
277 | display.vprint_err = mock_vprint_err | |
278 | self.assertRaises(TestException, set_fdlimit, config.fd_max, display) | |
279 | ||
280 | soft2, _ = resource.getrlimit(resource.RLIMIT_NOFILE) | |
281 | self.assertEqual(soft, soft2) | |
282 | ||
249 | 283 | def testClushConfigDefaultWithOptions(self): |
250 | 284 | """test CLI.Config.ClushConfig (default with options)""" |
251 | 285 | |
252 | 286 | f = tempfile.NamedTemporaryFile(prefix='testclushconfig') |
253 | f.write(""" | |
254 | [Main] | |
255 | fanout: 42 | |
256 | connect_timeout: 14 | |
257 | command_timeout: 0 | |
258 | history_size: 100 | |
259 | color: auto | |
260 | verbosity: 1 | |
261 | #ssh_user: root | |
262 | #ssh_path: /usr/bin/ssh | |
263 | #ssh_options: -oStrictHostKeyChecking=no | |
264 | """) | |
265 | ||
287 | f.write(dedent(""" | |
288 | [Main] | |
289 | fanout: 42 | |
290 | connect_timeout: 14 | |
291 | command_timeout: 0 | |
292 | history_size: 100 | |
293 | color: auto | |
294 | verbosity: 1""")) | |
266 | 295 | f.flush() |
267 | 296 | parser = OptionParser("dummy") |
268 | 297 | parser.install_config_options() |
19 | 19 | from ClusterShell.NodeSet import NodeSet, set_std_group_resolver |
20 | 20 | |
21 | 21 | from ClusterShell.NodeUtils import GroupResolverConfig |
22 | ||
23 | 22 | |
24 | 23 | |
25 | 24 | def makeTestFile(text): |
28 | 28 | """Unit test class for testing CLI/Nodeset.py""" |
29 | 29 | |
30 | 30 | def _battery_count(self, args): |
31 | self._nodeset_t(args + ["--count", ""], None, "0\n") | |
31 | 32 | self._nodeset_t(args + ["--count", "foo"], None, "1\n") |
32 | 33 | self._nodeset_t(args + ["--count", "foo", "bar"], None, "2\n") |
33 | 34 | self._nodeset_t(args + ["--count", "foo", "foo"], None, "1\n") |
43 | 44 | self._nodeset_t(args + ["--count", "foo[395-442]", "foo", "foo[0-200,245-394]"], None, "400\n") |
44 | 45 | self._nodeset_t(args + ["--count", "foo[395-442]", "bar3,bar24", "foo[1-200,245-394]"], None, "400\n") |
45 | 46 | # from stdin |
47 | self._nodeset_t(args + ["--count"], "\n", "0\n") | |
46 | 48 | self._nodeset_t(args + ["--count"], "foo\n", "1\n") |
47 | 49 | self._nodeset_t(args + ["--count"], "foo\nbar\n", "2\n") |
48 | 50 | self._nodeset_t(args + ["--count"], "foo\nfoo\n", "1\n") |
96 | 98 | self._nodeset_t(["--count", "--intersection", "bar3,bar24", "-i", "foo[1-200,245-394]"], "foo[395-442]\n", "0\n") |
97 | 99 | |
98 | 100 | def _battery_fold(self, args): |
101 | self._nodeset_t(args + ["--fold", ""], None, "\n") | |
99 | 102 | self._nodeset_t(args + ["--fold", "foo"], None, "foo\n") |
100 | 103 | self._nodeset_t(args + ["--fold", "foo", "bar"], None, "bar,foo\n") |
101 | 104 | self._nodeset_t(args + ["--fold", "foo", "foo"], None, "foo\n") |
111 | 114 | self._nodeset_t(args + ["--fold", "foo[395-442]", "foo", "foo[0-200,245-394]"], None, "foo,foo[0-200,245-442]\n") |
112 | 115 | self._nodeset_t(args + ["--fold", "foo[395-442]", "bar3,bar24", "foo[1-200,245-394]"], None, "bar[3,24],foo[1-200,245-442]\n") |
113 | 116 | # stdin |
117 | self._nodeset_t(args + ["--fold"], "\n", "\n") | |
114 | 118 | self._nodeset_t(args + ["--fold"], "foo\n", "foo\n") |
115 | 119 | self._nodeset_t(args + ["--fold"], "foo\nbar\n", "bar,foo\n") |
116 | 120 | self._nodeset_t(args + ["--fold"], "foo\nfoo\n", "foo\n") |
158 | 162 | |
159 | 163 | def test_006_expand(self): |
160 | 164 | """test nodeset --expand""" |
165 | self._nodeset_t(["--expand", ""], None, "\n") | |
161 | 166 | self._nodeset_t(["--expand", "foo"], None, "foo\n") |
162 | 167 | self._nodeset_t(["--expand", "foo", "bar"], None, "bar foo\n") |
163 | 168 | self._nodeset_t(["--expand", "foo", "foo"], None, "foo\n") |
169 | 174 | |
170 | 175 | def test_007_expand_stdin(self): |
171 | 176 | """test nodeset --expand (stdin)""" |
177 | self._nodeset_t(["--expand"], "\n", "\n") | |
172 | 178 | self._nodeset_t(["--expand"], "foo\n", "foo\n") |
173 | 179 | self._nodeset_t(["--expand"], "foo\nbar\n", "bar foo\n") |
174 | 180 | self._nodeset_t(["--expand"], "foo\nfoo\n", "foo\n") |
404 | 410 | self._nodeset_t(["--expand", "-O", "/path/%s/", "foo[2]"], None, "/path/foo2/\n") |
405 | 411 | self._nodeset_t(["--expand", "-O", "%s-ib0", "foo[1-4]"], None, "foo1-ib0 foo2-ib0 foo3-ib0 foo4-ib0\n") |
406 | 412 | self._nodeset_t(["--expand", "-O", "%s-ib0", "-S", ":", "foo[1-4]"], None, "foo1-ib0:foo2-ib0:foo3-ib0:foo4-ib0\n") |
413 | self._nodeset_t(["--fold", "-O", "%s-ipmi", "foo", "bar"], None, "bar-ipmi,foo-ipmi\n") | |
407 | 414 | self._nodeset_t(["--fold", "-O", "%s-ib0", "foo1", "foo2"], None, "foo[1-2]-ib0\n") |
415 | self._nodeset_t(["--fold", "-O", "%s-ib0", "foo1", "foo2", "bar1", "bar2"], None, "bar[1-2]-ib0,foo[1-2]-ib0\n") | |
416 | self._nodeset_t(["--fold", "-O", "%s-ib0", "--autostep=auto", "foo[1-9/2]"], None, "foo[1-9/2]-ib0\n") | |
417 | self._nodeset_t(["--fold", "-O", "%s-ib0", "--autostep=6", "foo[1-9/2]"], None, "foo[1,3,5,7,9]-ib0\n") | |
418 | self._nodeset_t(["--fold", "-O", "%s-ib0", "--autostep=5", "foo[1-9/2]"], None, "foo[1-9/2]-ib0\n") | |
408 | 419 | self._nodeset_t(["--count", "-O", "result-%s", "foo1", "foo2"], None, "result-2\n") |
409 | 420 | self._nodeset_t(["--contiguous", "-O", "%s-ipmi", "-f", "foo[2-3,7]", "bar9"], None, "bar9-ipmi\nfoo[2-3]-ipmi\nfoo7-ipmi\n") |
410 | 421 | self._nodeset_t(["--split=2", "-O", "%s-ib", "-e", "foo[2-9]"], None, "foo2-ib foo3-ib foo4-ib foo5-ib\nfoo6-ib foo7-ib foo8-ib foo9-ib\n") |
523 | 534 | |
524 | 535 | self._nodeset_t(["--axis=2","-f"], '\n'.join(ndnodes) + '\n', |
525 | 536 | ','.join(exp_result) + '\n') |
537 | ||
538 | def test_025_pick(self): | |
539 | """test nodeset --pick""" | |
540 | for num in range(1, 100): | |
541 | self._nodeset_t(["--count", "--pick", str(num), "foo[1-100]"], | |
542 | None, "%s\n" % num) | |
543 | self._nodeset_t(["--count", "--pick", str(num), "-R", "1-100"], | |
544 | None, "%s\n" % num) | |
545 | ||
526 | 546 | |
527 | 547 | |
528 | 548 | class CLINodesetGroupResolverTest1(CLINodesetTestBase): |
0 | 0 | #!/usr/bin/env python |
1 | 1 | # ClusterShell.CLI.OptionParser test suite |
2 | # Written by S. Thiell 2010-09-25 | |
2 | # Written by S. Thiell | |
3 | 3 | |
4 | 4 | |
5 | 5 | """Unit test for CLI.OptionParser""" |
50 | 50 | parser.install_nodes_options() |
51 | 51 | parser.install_display_options(separator_option=True, dshbak_compat=True) |
52 | 52 | options, _ = parser.parse_args([]) |
53 | ||
54 | ||
55 | if __name__ == '__main__': | |
56 | suites = [unittest.TestLoader().loadTestsFromTestCase(CLIOptionParserTest)] | |
57 | unittest.TextTestRunner(verbosity=2).run(unittest.TestSuite(suites)) |
0 | 0 | #!/usr/bin/env python |
1 | 1 | # ClusterShell test suite |
2 | # Written by S. Thiell 2010-02-19 | |
2 | # Written by S. Thiell | |
3 | 3 | |
4 | 4 | |
5 | 5 | """Unit test for ClusterShell common library misusages""" |
50 | 50 | worker = task.shell("/bin/echo itsme") |
51 | 51 | self.assertRaises(WorkerError, task.schedule, worker) |
52 | 52 | task.abort() |
53 | ||
54 | ||
55 | if __name__ == '__main__': | |
56 | suite = unittest.TestLoader().loadTestsFromTestCase(MisusageTest) | |
57 | unittest.TextTestRunner(verbosity=2).run(suite) | |
58 |
84 | 84 | cnt += 1 |
85 | 85 | self.assertEqual(cnt, 5) |
86 | 86 | self.assertEqual(len(list(iter(tree.items()))), 5) |
87 | ||
87 | ||
88 | 88 | # test walk() iterator (iterate by msg and give the list of |
89 | 89 | # associated keys) |
90 | 90 | cnt = 0 |
287 | 287 | [(['item1'], 'message0\nmessage3\nmessage5'), |
288 | 288 | (['item3'], 'message2'), |
289 | 289 | (['item2'], 'message6')]) |
290 |
32 | 32 | |
33 | 33 | def testBadRangeUsages(self): |
34 | 34 | """test NodeSet parse errors in range""" |
35 | self._testNS("", NodeSetParseError) | |
36 | 35 | self._testNS("nova[]", NodeSetParseRangeError) |
37 | 36 | self._testNS("nova[-]", NodeSetParseRangeError) |
38 | 37 | self._testNS("nova[A]", NodeSetParseRangeError) |
109 | 108 | self._testNS("x[1-30]p4-9]", NodeSetParseError) |
110 | 109 | self._testNS("xazer][1-30]p[4-9]", NodeSetParseError) |
111 | 110 | self._testNS("xa[[zer[1-30]p[4-9]", NodeSetParseRangeError) |
111 | # entirely numeric hostnames are not allowed | |
112 | self._testNS("[0-10]", NodeSetParseError) | |
113 | self._testNS("0[0-10]", NodeSetParseError) | |
114 | self._testNS("[0-10]0", NodeSetParseError) | |
115 | self._testNS("0[0-10]0", NodeSetParseError) | |
112 | 116 | |
113 | 117 | def testTypeSanityCheck(self): |
114 | 118 | """test NodeSet input type sanity check""" |
190 | 190 | """test NodeSet.fromall() with no resolver""" |
191 | 191 | self.assertRaises(NodeSetExternalError, NodeSet.fromall, |
192 | 192 | resolver=RESOLVER_NOGROUP) |
193 | ||
194 | # Also test with a nonfunctional resolver (#263) | |
195 | res = GroupResolver() | |
196 | self.assertRaises(NodeSetExternalError, NodeSet.fromall, resolver=res) | |
193 | 197 | |
194 | 198 | def testGroupsNoResolver(self): |
195 | 199 | """test NodeSet.groups() with no resolver""" |
23 | 23 | self.assertEqual(str(nodeset), nodename) |
24 | 24 | self.assertEqual(list(nodeset), [nodename]) |
25 | 25 | self.assertEqual(len(nodeset), 1) |
26 | ||
27 | def testEmptyNode(self): | |
28 | """test NodeSet with empty node""" | |
29 | # empty strings and any strip()able chars are OK | |
30 | for arg in (None, " ", "\n", "\t", " " * 100): | |
31 | nodeset = NodeSet(arg) | |
32 | self.assertEqual(str(nodeset), "") | |
33 | self.assertEqual(len(nodeset), 0) | |
26 | 34 | |
27 | 35 | def testUnnumberedNode(self): |
28 | 36 | """test NodeSet with unnumbered node""" |
363 | 371 | self.assertEqual(str(nodeset), "x[010-011]y[010-011]z[010-011]-ipmi") |
364 | 372 | self.assertEqual(len(nodeset), 2*2*2) |
365 | 373 | |
374 | # #284 - hostname labels starting with digits (RFC 1123) | |
375 | nodeset = NodeSet("0[3-9/2]abc") | |
376 | self.assertEqual(str(nodeset), "[03,05,07,09]abc") | |
377 | nodeset = NodeSet("0[3-9]abc") | |
378 | self.assertEqual(str(nodeset), "[03-09]abc") | |
379 | nodeset = NodeSet("[3,5,7,9]0abc") | |
380 | self.assertEqual(str(nodeset), "[30,50,70,90]abc") | |
381 | nodeset = NodeSet("[3-9]0abc") | |
382 | self.assertEqual(str(nodeset), "[30,40,50,60,70,80,90]abc") | |
383 | nodeset = NodeSet("3abc0[1]0") | |
384 | self.assertEqual(str(nodeset), "3abc010") | |
385 | nodeset = NodeSet("3abc16[1-4]56d") | |
386 | self.assertEqual(str(nodeset), "3abc[16156,16256,16356,16456]d") | |
387 | nodeset = NodeSet("0[3,6,9]1abc16[1-4]56d") | |
388 | self.assertEqual(str(nodeset), "[031,061,091]abc[16156,16256,16356,16456]d") | |
389 | nodeset = NodeSet("0123[0-100]L6") | |
390 | self.assertEqual(str(nodeset), "[01230-123100]L6") | |
391 | nodeset = NodeSet("0123[000-100]L6") | |
392 | self.assertEqual(str(nodeset), "[0123000-0123100]L6") | |
393 | ||
366 | 394 | def testCommaSeparated(self): |
367 | 395 | """test NodeSet comma separated to ranges (folding)""" |
368 | 396 | nodeset = NodeSet("cluster115,cluster116,cluster117,cluster130," |
419 | 447 | def testStringUpdatesFromEmptyNodeSet(self): |
420 | 448 | """test NodeSet string-based NodeSet.update() from empty nodeset""" |
421 | 449 | nodeset = NodeSet() |
450 | self.assertEqual(str(nodeset), "") | |
451 | nodeset.update("") | |
452 | self.assertEqual(str(nodeset), "") | |
453 | nodeset.update(" ") | |
422 | 454 | self.assertEqual(str(nodeset), "") |
423 | 455 | nodeset.update("cluster115") |
424 | 456 | self.assertEqual(str(nodeset), "cluster115") |
730 | 762 | # result |
731 | 763 | self.assertEqual(str(inodeset), "green,red[342-403]") |
732 | 764 | self.assertEqual(len(inodeset), 63) |
765 | ||
766 | inodeset = nodeset.difference("") | |
767 | self.assertEqual(str(inodeset), str(nodeset)) | |
768 | self.assertEqual(inodeset, nodeset) | |
733 | 769 | |
734 | 770 | def testDifferenceUpdate(self): |
735 | 771 | """test NodeSet.difference_update()""" |
1401 | 1437 | 'roma58-ipmi', 'roma59-ipmi', |
1402 | 1438 | 'roma60-ipmi', 'roma61-ipmi', 'tigrou3']) |
1403 | 1439 | self.assertEqual(list(ns1), [str(ns) for ns in ns1.nsiter()]) |
1440 | # Ticket #286 - broken nsiter() in 1.7 with nD + 0-padding | |
1441 | ns1 = NodeSet("n0c01") | |
1442 | self.assertEqual(list(ns1), ['n0c01']) | |
1443 | self.assertEqual(list(ns1), [str(ns) for ns in ns1.nsiter()]) | |
1444 | ns1 = NodeSet("n0c01,n1c01") | |
1445 | self.assertEqual(list(ns1), ['n0c01', 'n1c01']) | |
1446 | self.assertEqual(list(ns1), [str(ns) for ns in ns1.nsiter()]) | |
1404 | 1447 | |
1405 | 1448 | def test_contiguous(self): |
1406 | 1449 | """test NodeSet.contiguous() iterator""" |
2499 | 2542 | self._assertNode(nodeset, "node1") |
2500 | 2543 | # not sure about that, can it work if PYTHONIOENCODING is set? |
2501 | 2544 | self.assertRaises(UnicodeEncodeError, NodeSet, u"\u0ad0[000-042]") |
2545 | ||
2546 | def test_nd_fold_padding(self): | |
2547 | """test NodeSet nD heuristic folding with padding""" | |
2548 | # Ticket #286 - this case was not broken in 1.7 (baseline)
2549 | n1 = NodeSet("n1c01,n1c02,n1c03,n1c04,n1c05,n1c06,n1c07,n1c08,n1c09,n2c01,n2c02,n2c03,n2c04,n2c05,n2c06,n2c07,n2c08,n2c09,n3c01,n3c02,n3c03,n3c04,n3c05,n3c06,n3c07,n3c08,n3c09,n4c01,n4c02,n4c03,n4c04,n4c05,n4c06,n4c07") | |
2550 | self.assertEqual(str(n1), "n[1-3]c[01-09],n4c[01-07]") | |
2551 | self.assertEqual(len(n1), 34) | |
2552 | # Ticket #286 - broken in 1.7 - trigger RangeSetND._fold_multivariate_expand full expand | |
2553 | n1 = NodeSet("n1c01,n1c02,n1c03,n1c04,n1c05,n1c06,n1c07,n1c08,n1c09,n2c01,n2c02,n2c03,n2c04,n2c05,n2c06,n2c07,n2c08,n2c09,n3c01,n3c02,n3c03,n3c04,n3c05,n3c06,n3c07,n3c08,n3c09,n4c01,n4c02,n4c03,n4c04,n4c05,n4c06,n4c07,n4c08,n4c09") | |
2554 | self.assertEqual(str(n1), "n[1-4]c[01-09]") | |
2555 | self.assertEqual(len(n1), 36) |
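For quick reference, the 1.7.2 NodeSet behaviors exercised above can be reproduced with the minimal sketch below (not part of the test suite); the expected outputs are taken from the assertions above.

from ClusterShell.NodeSet import NodeSet

print(NodeSet(""))                         # empty string accepted -> "" (ticket #294)
print(NodeSet("0[3-9/2]abc"))              # leading-digit labels fold -> [03,05,07,09]abc (ticket #284)
print(NodeSet("n1c01,n1c02,n2c01,n2c02"))  # nD folding keeps 0-padding -> n[1-2]c[01-02] (ticket #286)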
0 | 0 | #!/usr/bin/env python |
1 | 1 | # ClusterShell.NodeSet.RangeSet error handling test suite |
2 | # Written by S. Thiell 2008-09-28 | |
2 | # Written by S. Thiell | |
3 | 3 | |
4 | 4 | |
5 | 5 | """Unit test for RangeSet errors""" |
26 | 26 | except: |
27 | 27 | raise |
28 | 28 | self.assert_(0, "error not detected/no exception raised") |
29 | ||
30 | 29 | |
31 | 30 | def testBadUsages(self): |
32 | 31 | """test parse errors""" |
44 | 43 | self._testRS("4-2/-2", RangeSetParseError) |
45 | 44 | self._testRS("004-002", RangeSetParseError) |
46 | 45 | self._testRS("3-59/2,102a", RangeSetParseError) |
47 | ||
48 | ||
49 | ||
50 | ||
51 | if __name__ == '__main__': | |
52 | suite = unittest.TestLoader().loadTestsFromTestCase(RangeSetErrorTest) | |
53 | unittest.TextTestRunner(verbosity=2).run(suite) |
458 | 458 | self.assertEqual(str(rn1), "02-06; 006-009,411\n01-02; 003-004\n") |
459 | 459 | self.assertEqual(len(rn1), 29) |
460 | 460 | self.assertEqual(rn1.pads(), (2, 3)) |
461 | # Note: mixed padding within the same axis is NOT officially supported by
462 | # ClusterShell; the assertions below only track current behavior (MAY CHANGE!)
463 | rn1 = RangeSetND([['01-02', '003'], ['01-02', '0101'], ['02-06', '006-009,411']]) | |
464 | # here 0101 padding is changed to 101 | |
465 | self.assertEqual(str(rn1), '02-06; 006-009,411\n01-02; 003,101\n') | |
466 | self.assertEqual(len(rn1), 29) | |
467 | self.assertEqual(rn1.pads(), (2, 3)) | |
468 | rn1 = RangeSetND([['01-02', '0003'], ['01-02', '004'], ['02-06', '006-009,411']]) | |
469 | # here 004 padding is changed to 0004 | |
470 | self.assertEqual(str(rn1), '02-06; 006-009,411\n01-02; 0003-0004\n') | |
471 | self.assertEqual(len(rn1), 29) | |
472 | self.assertEqual(rn1.pads(), (2, 4)) # pads() returns max padding length by axis | |
461 | 473 | |
462 | 474 | def test_mutability_1(self): |
463 | 475 | rs0 = RangeSet("2-5") |
252 | 252 | r1.difference_update(r2) |
253 | 253 | self.assertEqual(str(r1), "16-34/2") |
254 | 254 | self.assertEqual(len(r1), 10) |
255 | ||
255 | ||
256 | 256 | # case 3 diff right |
257 | 257 | r1 = RangeSet("4-34/2", autostep=3) |
258 | 258 | r2 = RangeSet("28-52/2", autostep=3) |
259 | 259 | r1.difference_update(r2) |
260 | 260 | self.assertEqual(str(r1), "4-26/2") |
261 | 261 | self.assertEqual(len(r1), 12) |
262 | ||
262 | ||
263 | 263 | # case 4 diff with ranges split |
264 | 264 | r1 = RangeSet("4-34/2", autostep=3) |
265 | 265 | r2 = RangeSet("12-18/2", autostep=3) |
661 | 661 | r1.clear() |
662 | 662 | self.assertEqual(len(r1), 0) |
663 | 663 | self.assertEqual(str(r1), "") |
664 | ||
664 | ||
665 | 665 | def testConstructorIterate(self): |
666 | 666 | """test RangeSet(iterable) constructor""" |
667 | 667 | # from list |
1036 | 1036 | self.assertEqual(str(r1), "112,114-117,119,121,130,132,134,136,138-141,144,147-148") |
1037 | 1037 | r1.autostep = 5 |
1038 | 1038 | self.assertEqual(str(r1), "112,114-117,119,121,130-138/2,139-141,144,147-148") |
1039 | ||
1039 | ||
1040 | 1040 | r1 = RangeSet("1,3-4,6,8") |
1041 | 1041 | self.assertEqual(str(r1), "1,3-4,6,8") |
1042 | 1042 | r1 = RangeSet("1,3-4,6,8", autostep=4) |
1093 | 1093 | self.assertEqual(r0.dim(), 0) |
1094 | 1094 | r1 = RangeSet("1-10,15-20") |
1095 | 1095 | self.assertEqual(r1.dim(), 1) |
1096 | ||
1097 | ||
1098 | if __name__ == '__main__': | |
1099 | suite = unittest.TestLoader().loadTestsFromTestCase(RangeSetTest) | |
1100 | unittest.TextTestRunner(verbosity=2).run(suite) | |
1101 |
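As a side note, the autostep behavior exercised above can be summarized with this small sketch (illustrative only, using the public RangeSet API):

from ClusterShell.RangeSet import RangeSet

r = RangeSet("4-34/2")       # autostep is off by default: output lists individual values
print(r)                     # -> 4,6,8,...,34 (no '/step' syntax)
r.autostep = 3               # fold any run of at least 3 evenly spaced values
print(r)                     # -> 4-34/2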
199 | 199 | self.assertRaises(OSError, os.close, rfd1) |
200 | 200 | os.close(wfd1) |
201 | 201 | |
202 | def test_006_worker_abort_on_written(self): | |
203 | """test StreamWorker abort on ev_written""" | |
204 | ||
205 | # This test creates a writable StreamWorker that will abort after the | |
206 | # first write, to check whether ev_written is generated in the right | |
207 | # place. | |
208 | ||
209 | class TestH(EventHandler): | |
210 | def __init__(self, testcase, rfd): | |
211 | self.testcase = testcase | |
212 | self.rfd = rfd | |
213 | self.check_written = 0 | |
214 | ||
215 | def ev_written(self, worker, node, sname, size): | |
216 | self.check_written += 1 | |
217 | self.testcase.assertEqual(os.read(self.rfd, 1024), "initial") | |
218 | worker.abort() | |
219 | ||
220 | rfd, wfd = os.pipe() | |
221 | ||
222 | hdlr = TestH(self, rfd) | |
223 | worker = StreamWorker(handler=hdlr) | |
224 | ||
225 | worker.set_writer("test", wfd) # closefd=True | |
226 | worker.write("initial", "test") | |
227 | ||
228 | self.run_worker(worker) | |
229 | self.assertEqual(hdlr.check_written, 1) | |
230 | os.close(rfd) | |
231 | ||
232 | def test_007_worker_abort_on_written_eof(self): | |
233 | """test StreamWorker abort on ev_written (with EOF)""" | |
234 | ||
235 | # This test is similar to test_006 above, but also calls
236 | # set_write_eof() after write().
237 | ||
238 | class TestH(EventHandler): | |
239 | def __init__(self, testcase, rfd): | |
240 | self.testcase = testcase | |
241 | self.rfd = rfd | |
242 | self.check_written = 0 | |
243 | ||
244 | def ev_written(self, worker, node, sname, size): | |
245 | self.check_written += 1 | |
246 | self.testcase.assertEqual(os.read(self.rfd, 1024), "initial") | |
247 | worker.abort() | |
248 | ||
249 | rfd, wfd = os.pipe() | |
250 | ||
251 | hdlr = TestH(self, rfd) | |
252 | worker = StreamWorker(handler=hdlr) | |
253 | ||
254 | worker.set_writer("test", wfd) # closefd=True | |
255 | worker.write("initial", "test") | |
256 | worker.set_write_eof() | |
257 | ||
258 | self.run_worker(worker) | |
259 | self.assertEqual(hdlr.check_written, 1) | |
260 | os.close(rfd) | |
261 | ||
262 | def test_008_broken_pipe_on_write(self): | |
263 | """test StreamWorker with broken pipe on write()""" | |
264 | ||
265 | # This test creates a writable StreamWorker that will close the read | |
266 | # side of the pipe just after the first write to generate a broken | |
267 | # pipe error. | |
268 | ||
269 | class TestH(EventHandler): | |
270 | def __init__(self, testcase, rfd): | |
271 | self.testcase = testcase | |
272 | self.rfd = rfd | |
273 | self.check_hup = 0 | |
274 | self.check_written = 0 | |
275 | ||
276 | def ev_hup(self, worker): | |
277 | self.check_hup += 1 | |
278 | ||
279 | def ev_written(self, worker, node, sname, size): | |
280 | self.check_written += 1 | |
281 | self.testcase.assertEqual(os.read(self.rfd, 1024), "initial") | |
282 | # close the reader, which will stop the StreamWorker
283 | os.close(self.rfd) | |
284 | # The following write call used to raise broken pipe before | |
285 | # version 1.7.2. | |
286 | worker.write("final") | |
287 | ||
288 | rfd, wfd = os.pipe() | |
289 | ||
290 | hdlr = TestH(self, rfd) | |
291 | worker = StreamWorker(handler=hdlr) | |
292 | ||
293 | worker.set_writer("test", wfd) # closefd=True | |
294 | worker.write("initial", "test") | |
295 | ||
296 | self.run_worker(worker) | |
297 | self.assertEqual(hdlr.check_hup, 1) | |
298 | self.assertEqual(hdlr.check_written, 1) |
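Outside the unittest harness, the writable StreamWorker pattern tested above can be sketched roughly as follows (illustrative only; it assumes StreamWorker lives in ClusterShell.Worker.Worker and is scheduled on the current task instead of the suite's run_worker() helper):

import os
from ClusterShell.Task import task_self
from ClusterShell.Event import EventHandler
from ClusterShell.Worker.Worker import StreamWorker

class WriteHandler(EventHandler):
    def ev_written(self, worker, node, sname, size):
        # called once the content of the "test" stream has been written
        print("wrote %d bytes on stream %s" % (size, sname))

rfd, wfd = os.pipe()
worker = StreamWorker(handler=WriteHandler())
worker.set_writer("test", wfd)   # closefd=True: wfd is closed by the worker
worker.write("initial", "test")
worker.set_write_eof()

task = task_self()
task.schedule(worker)
task.run()
print(os.read(rfd, 1024))        # -> initial
os.close(rfd)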
5 | 5 | """Unit test for ClusterShell Task (event-based mode)""" |
6 | 6 | |
7 | 7 | import copy |
8 | import socket | |
8 | 9 | import sys |
10 | import thread | |
9 | 11 | import unittest |
10 | 12 | |
11 | 13 | sys.path.insert(0, '../lib') |
15 | 17 | from ClusterShell.NodeSet import NodeSet |
16 | 18 | from ClusterShell.Task import * |
17 | 19 | from ClusterShell.Event import EventHandler |
18 | ||
19 | import socket | |
20 | import thread | |
21 | 20 | |
22 | 21 | |
23 | 22 | class TestHandler(EventHandler): |
149 | 148 | |
150 | 149 | eh.do_asserts_timeout() |
151 | 150 | |
151 | def test_popen_specific_behaviour(self): | |
152 | """test current_node is None for WorkerPopen events""" | |
153 | ||
154 | class WorkerPopenEH(TestHandler): | |
155 | def __init__(self, testcase): | |
156 | TestHandler.__init__(self) | |
157 | self.testcase = testcase | |
158 | ||
159 | def ev_start(self, worker): | |
160 | TestHandler.ev_start(self, worker) | |
161 | self.testcase.assertEqual(worker.current_node, None) | |
162 | ||
163 | def ev_read(self, worker): | |
164 | TestHandler.ev_read(self, worker) | |
165 | self.testcase.assertEqual(worker.current_node, None) | |
166 | ||
167 | def ev_error(self, worker): | |
168 | TestHandler.ev_error(self, worker) | |
169 | self.testcase.assertEqual(worker.current_node, None) | |
170 | ||
171 | def ev_written(self, worker, node, sname, size): | |
172 | TestHandler.ev_written(self, worker, node, sname, size) | |
173 | self.testcase.assertEqual(worker.current_node, None) | |
174 | ||
175 | def ev_pickup(self, worker): | |
176 | TestHandler.ev_pickup(self, worker) | |
177 | self.testcase.assertEqual(worker.current_node, None) | |
178 | ||
179 | def ev_hup(self, worker): | |
180 | TestHandler.ev_hup(self, worker) | |
181 | self.testcase.assertEqual(worker.current_node, None) | |
182 | ||
183 | def ev_close(self, worker): | |
184 | TestHandler.ev_close(self, worker) | |
185 | self.testcase.assertEqual(worker.current_node, None) | |
186 | ||
187 | task = task_self() | |
188 | eh = WorkerPopenEH(self) | |
189 | ||
190 | worker = task.shell("cat", handler=eh) | |
191 | content = "abcdefghijklmnopqrstuvwxyz\n" | |
192 | worker.write(content) | |
193 | worker.set_write_eof() | |
194 | ||
195 | self.assertNotEqual(worker, None) | |
196 | task.run() | |
197 | eh.do_asserts_read_write_notimeout() | |
198 | ||
152 | 199 | class TInFlyAdder(EventHandler): |
153 | 200 | """Test handler that schedules new commands in-fly""" |
154 | 201 | def ev_read(self, worker): |
190 | 237 | eh = AbortOnReadHandler() |
191 | 238 | for i in range(10): |
192 | 239 | worker = task.shell("echo ok; sleep 1", handler=eh) |
240 | self.assert_(worker is not None) | |
193 | 241 | worker.write("OK\n") |
194 | self.assert_(worker is not None) | |
195 | 242 | task.resume() |
196 | 243 | finally: |
197 | 244 | task.set_info("fanout", fanout) |
143 | 143 | # only stderr should have been buffered at task level |
144 | 144 | self.assertEqual(len(list(task.iter_buffers())), 0) |
145 | 145 | self.assertEqual(len(list(task.iter_errors())), 1) |
146 |
0 | 0 | #!/usr/bin/env python |
1 | 1 | # ClusterShell test suite |
2 | # Written by S. Thiell 2009-12-19 | |
2 | # Written by S. Thiell | |
3 | 3 | |
4 | 4 | |
5 | 5 | """Unit test for ClusterShell inter-Task msg""" |
7 | 7 | import pickle |
8 | 8 | import sys |
9 | 9 | import threading |
10 | import time | |
10 | 11 | import unittest |
11 | 12 | |
12 | 13 | sys.path.insert(0, '../lib') |
22 | 23 | |
23 | 24 | def testPortMsg1(self): |
24 | 25 | """test port msg from main thread to task""" |
25 | ||
26 | ||
26 | 27 | TaskPortTest.got_msg = False |
27 | 28 | |
28 | 29 | # create task in new thread |
45 | 46 | self.assert_(TaskPortTest.got_msg) |
46 | 47 | |
47 | 48 | def testPortRemove(self): |
48 | """test port remove [private as of 1.2]""" | |
49 | ||
50 | task = Task() | |
49 | """test remove_port()""" | |
51 | 50 | |
52 | 51 | class PortHandler(EventHandler): |
53 | 52 | def ev_msg(self, port, msg): |
54 | 53 | pass |
55 | 54 | |
55 | task = Task() # new thread | |
56 | 56 | port = task.port(handler=PortHandler(), autoclose=True) |
57 | 57 | task.resume() |
58 | task._remove_port(port) | |
58 | task.remove_port(port) | |
59 | 59 | task_wait() |
60 | 60 | |
61 | def testPortClosed(self): | |
62 | """test port msg on closed port""" | |
63 | # test sending a message to an already-closed ("stillborn") port
64 | self.port_msg_result = None | |
61 | 65 | |
62 | if __name__ == '__main__': | |
63 | suite = unittest.TestLoader().loadTestsFromTestCase(TaskPortTest) | |
64 | unittest.TextTestRunner(verbosity=2).run(suite) | |
66 | # thread will wait a bit and send a port message | |
67 | def test_thread_start(port, test): | |
68 | time.sleep(0.5) | |
69 | test.port_msg_result = port.msg('foobar') | |
65 | 70 | |
71 | class TestHandler(EventHandler): | |
72 | pass | |
73 | ||
74 | task = task_self() | |
75 | test_handler = TestHandler() | |
76 | task.timer(0.2, handler=test_handler, autoclose=False) | |
77 | port = task.port(handler=test_handler, autoclose=True) | |
78 | thread = threading.Thread(None, test_thread_start, args=(port, self)) | |
79 | thread.setDaemon(True) | |
80 | thread.start() | |
81 | task.resume() | |
82 | task.abort(kill=True) # will remove_port() | |
83 | thread.join() | |
84 | self.assertEqual(self.port_msg_result, False) # test vs. None and True |
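A rough standalone sketch of the inter-thread port messaging pattern tested above (illustrative only; Task() creates a task bound to a new thread, and the handler aborts it once the message is handled so that task_wait() returns):

from ClusterShell.Task import Task, task_self, task_wait
from ClusterShell.Event import EventHandler

class PortHandler(EventHandler):
    def ev_msg(self, port, msg):
        print("port received: %s" % msg)
        task_self().abort()      # stop the task thread after handling the message

task = Task()                                          # task running in its own thread
port = task.port(handler=PortHandler(), autoclose=False)
task.resume()
port.msg("hello")    # deliver "hello" to the task thread; returns False if the port is closed
task_wait()          # wait for all task threads to terminate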
0 | 0 | #!/usr/bin/env python |
1 | 1 | # ClusterShell task resource consumption/limits test suite |
2 | # Written by S. Thiell 2010-10-19 | |
2 | # Written by S. Thiell | |
3 | 3 | |
4 | 4 | |
5 | 5 | """Unit test for ClusterShell Task (resource limits)""" |
85 | 85 | def testRemotePdshStderr(self): |
86 | 86 | """test resource usage with WorkerPdsh(stderr=True)""" |
87 | 87 | self._testRemotePdsh(True) |
88 | ||
89 | if __name__ == '__main__': | |
90 | suite = unittest.TestLoader().loadTestsFromTestCase(TaskRLimitsTest) | |
91 | unittest.TextTestRunner(verbosity=2).run(suite) | |
92 |
120 | 120 | time.sleep(1) # for pretty display, because unhandled exception |
121 | 121 | # traceback may be sent to stderr after the join() |
122 | 122 | self.assertFalse(task.running()) |
123 |
0 | 0 | #!/usr/bin/env python |
1 | 1 | # ClusterShell test suite |
2 | # Written by S. Thiell 2010-01-16 | |
2 | # Written by S. Thiell | |
3 | 3 | |
4 | 4 | |
5 | 5 | """Unit test for ClusterShell in multithreaded environments""" |
74 | 74 | time.sleep(1) |
75 | 75 | task_wait() |
76 | 76 | self.assert_(self.resumed or suspended == False) |
77 | ||
78 | ||
79 | if __name__ == '__main__': | |
80 | suite = unittest.TestLoader().loadTestsFromTestCase(TaskThreadSuspendTest) | |
81 | unittest.TextTestRunner(verbosity=2).run(suite) | |
82 |
0 | 0 | #!/usr/bin/env python |
1 | 1 | # ClusterShell (local) test suite |
2 | # Written by S. Thiell 2009-02-09 | |
2 | # Written by S. Thiell | |
3 | 3 | |
4 | 4 | |
5 | 5 | """Unit test for ClusterShell Task/Worker timeout support""" |
6 | 6 | |
7 | 7 | import copy |
8 | import socket | |
8 | 9 | import sys |
10 | import thread | |
9 | 11 | import unittest |
10 | 12 | |
11 | 13 | sys.path.insert(0, '../lib') |
15 | 17 | from ClusterShell.NodeSet import NodeSet |
16 | 18 | from ClusterShell.Task import * |
17 | 19 | |
18 | import socket | |
19 | ||
20 | import thread | |
21 | ||
22 | 20 | |
23 | 21 | class TaskTimeoutTest(unittest.TestCase): |
24 | ||
22 | ||
25 | 23 | def testWorkersTimeoutBuffers(self): |
26 | 24 | """test worker buffers with timeout""" |
27 | 25 | task = task_self() |
39 | 37 | self.assertEqual(buf, """some buffer |
40 | 38 | here...""") |
41 | 39 | self.assertEqual(test, 0, "task.iter_buffers() did not work") |
42 | ||
43 | ||
44 | ||
45 | ||
46 | if __name__ == '__main__': | |
47 | suite = unittest.TestLoader().loadTestsFromTestCase(TaskTimeoutTest) | |
48 | unittest.TextTestRunner(verbosity=2).run(suite) | |
49 |
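The behavior checked above, that output buffered before a timeout remains available, can be sketched as follows (illustrative only; the command and timeout value are arbitrary):

from ClusterShell.Task import task_self

task = task_self()
task.shell("echo some buffer; echo here...; sleep 10", timeout=2)
task.resume()
for buf, keys in task.iter_buffers():
    print(buf)       # output captured before the timeout is still available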
85 | 85 | class TRepeaterTimerChecker(EventHandler): |
86 | 86 | def __init__(self): |
87 | 87 | self.count = 0 |
88 | ||
88 | ||
89 | 89 | def ev_timer(self, timer): |
90 | 90 | self.count += 1 |
91 | 91 | timer.set_nextfire(0.2) |
280 | 280 | task.resume() |
281 | 281 | # test timer did fire one time |
282 | 282 | self.assertEqual(test_eh.timer_count, 5) |
283 | ||
283 | ||
284 | 284 | class TEventHandlerTimerOtherInvalidate(EventHandler): |
285 | 285 | """timer operations event handler simulator""" |
286 | 286 | def __init__(self, test): |
414 | 414 | # run task |
415 | 415 | task.resume() |
416 | 416 | self.assertEqual(test_handler.count, 0) |
417 | ||
417 | ||
418 | 418 | def testAutocloseWithTwoTimers(self): |
419 | 419 | """test timer autoclose (two timers)""" |
420 | 420 | task = task_self() |
11 | 11 | import ClusterShell.Task |
12 | 12 | from ClusterShell.Worker.Tree import WorkerTree |
13 | 13 | |
14 | from TLib import HOSTNAME, make_temp_file | |
14 | from TLib import HOSTNAME, make_temp_dir, make_temp_file | |
15 | 15 | |
16 | 16 | # live logging with nosetests --nologcapture |
17 | 17 | logging.basicConfig(level=logging.DEBUG) |
22 | 22 | |
23 | 23 | TEST_INST = None |
24 | 24 | |
25 | def _copy_remote(self, source, dest, targets, gateway, timeout): | |
25 | def _copy_remote(self, source, dest, targets, gateway, timeout, reverse): | |
26 | 26 | """run a remote copy in tree mode (using gateway)""" |
27 | self.TEST_INST.assertEqual(source, self.TEST_INST.tfile.name) | |
28 | # check that dest is our tfile.name dirname | |
29 | self.TEST_INST.assertEqual(dest, dirname(self.TEST_INST.tfile.name)) | |
30 | self.TEST_INST.assertEqual(targets, NodeSet("n60")) | |
27 | if reverse: | |
28 | self.TEST_INST.assertEqual(source, self.TEST_INST.tfile.name) | |
29 | self.TEST_INST.assertEqual(dest, self.TEST_INST.tdir) | |
30 | self.TEST_INST.assertEqual(targets, NodeSet("n60")) | |
31 | else: | |
32 | self.TEST_INST.assertEqual(source, self.TEST_INST.tfile.name) | |
33 | # check that dest is our tfile.name dirname | |
34 | self.TEST_INST.assertEqual(dest, dirname(self.TEST_INST.tfile.name)) | |
35 | self.TEST_INST.assertEqual(targets, NodeSet("n60")) | |
31 | 36 | self.TEST_INST.test_ok = True |
32 | 37 | |
33 | 38 | def write(self, buf): |
68 | 73 | task_self().resume() |
69 | 74 | self.assertTrue(self.test_ok) |
70 | 75 | |
76 | def test_rcopy(self): | |
77 | """test file rcopy setup in tree mode (1 gateway)""" | |
78 | self.test_ok = False | |
79 | self.tfile = make_temp_file("dummy-src") | |
80 | self.tdir = make_temp_dir() | |
81 | task_self().rcopy(self.tfile.name, self.tdir, "n60") | |
82 | task_self().resume() | |
83 | self.assertTrue(self.test_ok) |
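The tree-mode reverse copy setup checked above maps onto the regular Task API; a minimal usage sketch (illustrative only, node names and paths are placeholders):

from ClusterShell.Task import task_self

task = task_self()
# forward copy: push a local file to the target nodes
task.copy("/etc/motd", "/tmp/motd", "n[1-4]")
# reverse copy: gather a remote file from every node into a local directory
task.rcopy("/var/log/messages", "/tmp/logs", "n[1-4]")
task.resume()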
313 | 313 | """test gateway channel message badly encoded payload""" |
314 | 314 | self._check_channel_err( |
315 | 315 | '<message msgid="14" type="CFG" gateway="n1">bar</message>', |
316 | 'Incorrect padding') | |
316 | 'Message CFG has an invalid payload') | |
317 | 317 | |
318 | 318 | def test_channel_basic_abort(self): |
319 | 319 | """test gateway channel aborted while opened""" |
189 | 189 | |
190 | 190 | ns_all = NodeSet('admin2,nodes[2-3,20-29]') |
191 | 191 | ns_tree = NodeSet() |
192 | for nodegroup in parser.tree('admin2'): | |
192 | tree = parser.tree('admin2') | |
193 | self.assertEqual(tree.inner_node_count(), 3) | |
194 | self.assertEqual(tree.leaf_node_count(), 10) | |
195 | for nodegroup in tree: | |
193 | 196 | ns_tree.add(nodegroup.nodeset) |
194 | 197 | self.assertEqual(str(ns_all), str(ns_tree)) |
195 | 198 | |
303 | 306 | |
304 | 307 | ns_all = NodeSet('admin,proxy,STA[0-1],STB[0-3],nodes[0-10]') |
305 | 308 | ns_tree = NodeSet() |
306 | for nodegroup in parser.tree('admin'): | |
309 | tree = parser.tree('admin') | |
310 | self.assertEqual(tree.inner_node_count(), 8) | |
311 | self.assertEqual(tree.leaf_node_count(), 11) | |
312 | for nodegroup in tree: | |
307 | 313 | ns_tree.add(nodegroup.nodeset) |
308 | 314 | self.assertEqual(str(ns_all), str(ns_tree)) |
309 | 315 | |
324 | 330 | |
325 | 331 | ns_all = NodeSet('admin,nodes[0-159]') |
326 | 332 | ns_tree = NodeSet() |
327 | for nodegroup in parser.tree('admin'): | |
333 | tree = parser.tree('admin') | |
334 | self.assertEqual(tree.inner_node_count(), 151) | |
335 | self.assertEqual(tree.leaf_node_count(), 10) | |
336 | for nodegroup in tree: | |
328 | 337 | ns_tree.add(nodegroup.nodeset) |
329 | 338 | self.assertEqual(str(ns_all), str(ns_tree)) |
330 | 339 | |
342 | 351 | |
343 | 352 | ns_all = NodeSet('admin,ST[0-4],STA[0-49],nodes[0-10000]') |
344 | 353 | ns_tree = NodeSet() |
345 | for nodegroup in parser.tree('admin'): | |
354 | tree = parser.tree('admin') | |
355 | self.assertEqual(tree.inner_node_count(), 56) | |
356 | self.assertEqual(tree.leaf_node_count(), 10001) | |
357 | for nodegroup in tree: | |
346 | 358 | ns_tree.add(nodegroup.nodeset) |
347 | 359 | self.assertEqual(str(ns_all), str(ns_tree)) |
348 | 360 | |
439 | 451 | g = TopologyGraph() |
440 | 452 | # XXX: Actually if g is not empty other things will be printed out... |
441 | 453 | self.assertEquals(str(g), '<TopologyGraph>\n') |
442 |
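The inner_node_count()/leaf_node_count() accessors used above can be exercised against a topology file with a sketch like this (illustrative only; the TopologyParser.load() call, the file path and the root node name are assumptions):

from ClusterShell.Topology import TopologyParser

parser = TopologyParser()
parser.load("/etc/clustershell/topology.conf")   # hypothetical topology file
tree = parser.tree("admin")                      # propagation tree rooted at "admin"
print(tree.inner_node_count())                   # root + gateway nodes
print(tree.leaf_node_count())                    # target (leaf) nodes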
0 | #!/usr/bin/python | |
1 | ||
2 | import timeit | |
3 | ||
4 | setup = ''' | |
5 | import random | |
6 | ||
7 | from ClusterShell.NodeSet import NodeSet | |
8 | ||
9 | a = list(NodeSet("node[0000-1000]")) | |
10 | ||
11 | random.shuffle(a) | |
12 | ''' | |
13 | ||
14 | print min(timeit.Timer('ns=NodeSet.fromlist(a)', setup=setup).repeat(3, 100)) | |
15 | print min(timeit.Timer('ns=NodeSet._fromlist1(a)', setup=setup).repeat(3, 100)) |