Import upstream version 1.1.7+git20210228.1.9fad18d
Debian Janitor
2 years ago
0 | MANIFEST | |
1 | build | |
2 | dist | |
3 | *.log | |
4 | *.pyc | |
5 | .idea | |
6 | *.iml | |
7 | .venv | |
8 | .coverage | |
9 | coverage.xml | |
10 | .tox | |
11 | conf/*.conf | |
12 | storage | |
13 | _trial_temp | |
14 | htmlcov | |
15 | *.swp | |
16 | .eggs/ | |
17 | *.egg-info/ | |
18 | lib/twisted/plugins/dropin.cache |
0 | # http://travis-ci.org/#!/graphite-project/carbon | |
1 | dist: xenial | |
2 | language: python | |
3 | python: 2.7 | |
4 | ||
5 | matrix: | |
6 | include: | |
7 | - python: pypy | |
8 | env: | |
9 | - TOXENV=pypy | |
10 | - python: 3.5 | |
11 | env: | |
12 | - TOXENV=py35 | |
13 | - python: 3.6 | |
14 | env: | |
15 | - TOXENV=py36 | |
16 | - python: 3.7 | |
17 | env: | |
18 | - TOXENV=py37 | |
19 | - python: 3.8 | |
20 | env: | |
21 | - TOXENV=py38-pyhash | |
22 | - python: 3.8 | |
23 | env: | |
24 | - TOXENV=py38 | |
25 | - python: 3.8 | |
26 | env: | |
27 | - TOXENV=lint | |
28 | - python: 2.7 | |
29 | arch: s390x | |
30 | env: | |
31 | - TOXENV=py27 | |
32 | - python: 2.7 | |
33 | arch: s390x | |
34 | env: | |
35 | - TOXENV=lint | |
36 | - python: 3.5 | |
37 | arch: s390x | |
38 | env: | |
39 | - TOXENV=py35 | |
40 | - python: 3.6 | |
41 | arch: s390x | |
42 | env: | |
43 | - TOXENV=py36 | |
44 | - python: 3.7 | |
45 | arch: s390x | |
46 | env: | |
47 | - TOXENV=py37 | |
48 | - python: 3.8 | |
49 | arch: s390x | |
50 | env: | |
51 | - TOXENV=py38 | |
52 | - python: 3.8 | |
53 | arch: s390x | |
54 | env: | |
55 | - TOXENV=lint | |
56 | ||
57 | env: | |
58 | - TOXENV=py27 | |
59 | - TOXENV=py27-pyhash | |
60 | - TOXENV=lint | |
61 | ||
62 | install: | |
63 | - if [[ $(uname -m) == 's390x' ]]; then sudo rm -rf $HOME/.cache/pip; fi | |
64 | - if echo "$TOXENV" | grep -q 'pyhash' ; then sudo apt-get -q install -y libboost-python-dev; fi | |
65 | - if echo "$TOXENV" | grep -q '^py2' ; then pip install --upgrade pip virtualenv; fi | |
66 | - pip install tox | |
67 | ||
68 | script: | |
69 | - tox -e $TOXENV | |
70 | ||
71 | after_success: | |
72 | - pip install codecov | |
73 | - codecov |
0 | Metadata-Version: 1.1 | |
1 | Name: carbon | |
2 | Version: 1.2.0 | |
3 | Summary: Backend data caching and persistence daemon for Graphite | |
4 | Home-page: http://graphiteapp.org/ | |
5 | Author: Chris Davis | |
6 | Author-email: chrismd@gmail.com | |
7 | License: Apache Software License 2.0 | |
8 | Description: # Carbon | |
9 | ||
10 | [![Codacy Badge](https://api.codacy.com/project/badge/Grade/85221cd3bb6e49d7bbd6fed376a88264)](https://www.codacy.com/app/graphite-project/carbon?utm_source=github.com&utm_medium=referral&utm_content=graphite-project/carbon&utm_campaign=badger) | |
11 | [![Build Status](https://secure.travis-ci.org/graphite-project/carbon.png?branch=master)](http://travis-ci.org/graphite-project/carbon) | |
12 | [![FOSSA Status](https://app.fossa.io/api/projects/git%2Bhttps%3A%2F%2Fgithub.com%2Fgraphite-project%2Fcarbon.svg?type=shield)](https://app.fossa.io/projects/git%2Bhttps%3A%2F%2Fgithub.com%2Fgraphite-project%2Fcarbon?ref=badge_shield) | |
13 | [![codecov](https://codecov.io/gh/graphite-project/carbon/branch/master/graph/badge.svg)](https://codecov.io/gh/graphite-project/carbon) | |
14 | ||
15 | ## Overview | |
16 | ||
17 | Carbon is one of three components within the Graphite project: | |
18 | ||
19 | 1. [Graphite-Web](https://github.com/graphite-project/graphite-web), a Django-based web application that renders graphs and dashboards | |
20 | 2. The Carbon metric processing daemons | |
21 | 3. The [Whisper](https://github.com/graphite-project/whisper) time-series database library | |
22 | ||
23 | ![Graphite Components](https://github.com/graphite-project/graphite-web/raw/master/webapp/content/img/overview.png "Graphite Components") | |
24 | ||
25 | Carbon is responsible for receiving metrics over the network, caching them in memory for "hot queries" from the Graphite-Web application, and persisting them to disk using the Whisper time-series library. | |
26 | ||
27 | ## Installation, Configuration and Usage | |
28 | ||
29 | Please refer to the instructions at [readthedocs](http://graphite.readthedocs.org/). | |
30 | ||
31 | ## License | |
32 | ||
33 | Carbon is licensed under version 2.0 of the Apache License. See the [LICENSE](https://github.com/graphite-project/carbon/blob/master/LICENSE) file for details. | |
34 | ||
35 | Platform: UNKNOWN | |
36 | Classifier: Intended Audience :: Developers | |
37 | Classifier: Natural Language :: English | |
38 | Classifier: License :: OSI Approved :: Apache Software License | |
39 | Classifier: Programming Language :: Python | |
40 | Classifier: Programming Language :: Python :: 2 | |
41 | Classifier: Programming Language :: Python :: 2.7 | |
42 | Classifier: Programming Language :: Python :: 3 | |
43 | Classifier: Programming Language :: Python :: 3.5 | |
44 | Classifier: Programming Language :: Python :: 3.6 | |
45 | Classifier: Programming Language :: Python :: 3.7 | |
46 | Classifier: Programming Language :: Python :: 3.8 | |
47 | Classifier: Programming Language :: Python :: Implementation :: CPython | |
48 | Classifier: Programming Language :: Python :: Implementation :: PyPy |
0 | #!/usr/bin/env python | |
1 | """Copyright 2009 Chris Davis | |
2 | ||
3 | Licensed under the Apache License, Version 2.0 (the "License"); | |
4 | you may not use this file except in compliance with the License. | |
5 | You may obtain a copy of the License at | |
6 | ||
7 | http://www.apache.org/licenses/LICENSE-2.0 | |
8 | ||
9 | Unless required by applicable law or agreed to in writing, software | |
10 | distributed under the License is distributed on an "AS IS" BASIS, | |
11 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | |
12 | See the License for the specific language governing permissions and | |
13 | limitations under the License.""" | |
14 | ||
15 | import sys | |
16 | from os.path import dirname, join, abspath, exists | |
17 | from optparse import OptionParser | |
18 | ||
# Figure out where we're installed: bin/ is where this script lives, and
# conf/ (holding relay-rules.conf) is a sibling of bin/ under the root.
BIN_DIR = dirname(abspath(__file__))
ROOT_DIR = dirname(BIN_DIR)
CONF_DIR = join(ROOT_DIR, 'conf')
default_relayrules = join(CONF_DIR, 'relay-rules.conf')

# Make sure that carbon's 'lib' dir is in the $PYTHONPATH if we're running from
# source.
LIB_DIR = join(ROOT_DIR, 'lib')
sys.path.insert(0, LIB_DIR)

# Prefer the epoll reactor (Linux only); silently fall back to Twisted's
# default reactor where epoll is unavailable.
try:
    from twisted.internet import epollreactor
    epollreactor.install()
except ImportError:
    pass
35 | ||
36 | from twisted.internet import stdio, reactor, defer # noqa | |
37 | from twisted.protocols.basic import LineReceiver # noqa | |
38 | from carbon.routers import ConsistentHashingRouter, RelayRulesRouter # noqa | |
39 | from carbon.client import CarbonClientManager # noqa | |
40 | from carbon import log, events # noqa | |
41 | ||
42 | ||
# Command-line interface: destinations are given as host:port[:instance].
option_parser = OptionParser(usage="%prog [options] <host:port:instance> <host:port:instance> ...")
option_parser.add_option('--debug', action='store_true', help="Log debug info to stdout")
option_parser.add_option('--keyfunc', help="Use a custom key function (path/to/module.py:myFunc)")
option_parser.add_option('--replication', type='int', default=1, help='Replication factor')
option_parser.add_option(
    '--routing', default='consistent-hashing',
    help='Routing method: "consistent-hashing" (default) or "relay"')
option_parser.add_option(
    '--diverse-replicas', action='store_true', help="Spread replicas across diff. servers")
option_parser.add_option(
    '--relayrules', default=default_relayrules, help='relay-rules.conf file to use for relay routing')

options, args = option_parser.parse_args()

# At least one destination argument is required.
if not args:
    print('At least one host:port destination required\n')
    option_parser.print_usage()
    raise SystemExit(1)

# Validate --routing up front so the router construction below cannot fall
# through with 'router' unbound.
if options.routing not in ('consistent-hashing', 'relay'):
    print("Invalid --routing value, must be one of:")
    print(" consistent-hashing")
    print(" relay")
    raise SystemExit(1)

# Parse each "host:port[:instance]" argument into a (host, port, instance)
# tuple; instance defaults to None when omitted.
destinations = []
for arg in args:
    parts = arg.split(':', 2)
    host = parts[0]
    port = int(parts[1])
    if len(parts) > 2:
        instance = parts[2]
    else:
        instance = None
    destinations.append((host, port, instance))

if options.debug:
    log.logToStdout()
    log.setDebugEnabled(True)
    defer.setDebugging(True)

# Build the router; for 'relay' routing the rules file must exist.
if options.routing == 'consistent-hashing':
    router = ConsistentHashingRouter(options.replication, diverse_replicas=options.diverse_replicas)
elif options.routing == 'relay':
    if exists(options.relayrules):
        router = RelayRulesRouter(options.relayrules)
    else:
        print("relay rules file %s does not exist" % options.relayrules)
        raise SystemExit(1)

client_manager = CarbonClientManager(router)
reactor.callWhenRunning(client_manager.startService)

if options.keyfunc:
    router.setKeyFunctionFromModule(options.keyfunc)

# Kick off the first connection attempt to every destination; the shutdown
# sequence is chained off this DeferredList below.
firstConnectAttempts = [client_manager.startClient(dest) for dest in destinations]
firstConnectsAttempted = defer.DeferredList(firstConnectAttempts)
101 | ||
102 | ||
class StdinMetricsReader(LineReceiver):
    """Line-oriented protocol that parses plaintext metrics
    ("name value timestamp") from stdin and hands each datapoint to the
    module-level client manager."""

    # NOTE(review): under Python 3, Twisted's LineReceiver works with bytes
    # and expects a bytes delimiter (b'\n'); this str delimiter is fine on
    # Python 2 — confirm against the Twisted version in use.
    delimiter = '\n'

    def lineReceived(self, line):
        # log.msg("[DEBUG] lineReceived(): %s" % line)
        try:
            (metric, value, timestamp) = line.split()
            datapoint = (float(timestamp), float(value))
            # NaN != NaN, so this assertion drops NaN values. Note a failed
            # assert raises AssertionError, which is NOT caught by the
            # ValueError handler below — presumably intentional; verify.
            assert datapoint[1] == datapoint[1]  # filter out NaNs
            client_manager.sendDatapoint(metric, datapoint)
        except ValueError:
            # Wrong field count, or a non-numeric value/timestamp.
            log.err(None, 'Dropping invalid line: %s' % line)

    def connectionLost(self, reason):
        # EOF on stdin; the DeferredList callback chain drives shutdown.
        log.msg('stdin disconnected')
118 | ||
def startShutdown(results):
    """Callback fired once the initial connection attempts have resolved:
    stop every client, then run shutdown() with the stop results."""
    log.msg("startShutdown(%s)" % str(results))
    client_manager.stopAllClients().addCallback(shutdown)
123 | ||
# NOTE(review): the shutdown sequence is chained directly off the initial
# connection attempts — presumably stopAllClients() flushes any queued
# datapoints before its Deferred fires; confirm against CarbonClientManager.
firstConnectsAttempted.addCallback(startShutdown)


# Wire stdin into the metrics reader protocol.
stdio.StandardIO(StdinMetricsReader())

# Overall process exit status; set to 1 by shutdown() on any client failure.
exitCode = 0
130 | ||
131 | ||
def shutdown(results):
    """DeferredList callback: record any client failure in the process exit
    status, then stop the reactor if it is still running."""
    global exitCode
    if any(not ok for ok, _ in results):
        exitCode = 1
    if reactor.running:
        reactor.stop()
140 | ||
141 | ||
# Run the event loop; shutdown() stops it, then exit with the recorded status.
reactor.run()
raise SystemExit(exitCode)
40 | 40 | # 'prod.applications.apache.all.requests'. |
41 | 41 | # |
42 | 42 | # Note that any time this file is modified, it will be re-read automatically. |
43 | # | |
44 | # The following aggregation methods are available as defined in lib/carbon/aggregator/rules.py | |
45 | # | |
46 | # 'sum': sum, | |
47 | # 'avg': avg, | |
48 | # 'min': min, | |
49 | # 'max': max, | |
50 | # 'p50': percentile(0.50), | |
51 | # 'p75': percentile(0.75), | |
52 | # 'p80': percentile(0.80), | |
53 | # 'p90': percentile(0.90), | |
54 | # 'p95': percentile(0.95), | |
55 | # 'p99': percentile(0.99), | |
56 | # 'p999': percentile(0.999), | |
57 | # 'count': count, | |
58 | # |
60 | 60 | # AMQP_METRIC_NAME_IN_BODY = False |
61 | 61 | |
62 | 62 | # NOTE: you cannot run both a cache and a relay on the same server |
63 | # with the default configuration, you have to specify a distinict | |
63 | # with the default configuration, you have to specify distinct | |
64 | 64 | # interfaces and ports for the listeners. |
65 | 65 | |
66 | 66 | [relay] |
164 | 164 | # disk in that order. |
165 | 165 | # |
166 | 166 | # timesorted - All metrics in the list will be looked at and sorted according |
167 | # to the timestamp of there datapoints. The metric that were the least recently | |
167 | # to the timestamp of their datapoints. The metrics that were least recently | |
168 | 168 | # written will be written first. This is a hybrid strategy between max and
169 | 169 | # sorted which is particularly adapted to sets of metrics with non-uniform |
170 | 170 | # resolutions. |
175 | 175 | # updated metrics may only ever be persisted to disk at daemon shutdown if |
176 | 176 | # there are a large number of metrics which receive very frequent updates OR if |
177 | 177 | # disk i/o is very slow. |
178 | # | |
179 | # bucketmax (experimental) - As 'max' but uses a different algorithm to | |
180 | # determine the metric with the most datapoints. | |
181 | # Should perform better than 'max' on most loads, could perform worse if the | |
182 | # number of metrics is very small while the number of datapoints per metric is | |
183 | # very high. | |
178 | 184 | # |
179 | 185 | # naive - Metrics will be flushed from the cache to disk in an unordered |
180 | 186 | # fashion. This strategy may be desirable in situations where the storage for |
271 | 277 | # CARBON_METRIC_PREFIX = carbon |
272 | 278 | # CARBON_METRIC_INTERVAL = 60 |
273 | 279 | |
274 | # Enable AMQP if you want to receve metrics using an amqp broker | |
280 | # Enable AMQP if you want to receive metrics using an amqp broker | |
275 | 281 | # ENABLE_AMQP = False |
276 | 282 | |
277 | 283 | # Verbose means a line will be logged for every metric received |
320 | 326 | |
321 | 327 | # Tag support, when enabled carbon will make HTTP calls to graphite-web to update the tag index |
322 | 328 | # ENABLE_TAGS = True |
329 | ||
330 | # Skip tagging when metrics do not contain tags | |
331 | # SKIP_TAGS_FOR_NONTAGGED = True | |
323 | 332 | |
324 | 333 | # Tag update interval, this specifies how frequently updates to existing series will trigger |
325 | 334 | # an update to the tag index, the default setting is once every 100 updates |
351 | 360 | # In order to turn off logging of successful connections for the line |
352 | 361 | # receiver, set this to False |
353 | 362 | # LOG_LISTENER_CONN_SUCCESS = True |
363 | # | |
364 | # To turn off logging of lost connections for the line receiver set this to | |
365 | # False | |
366 | # LOG_LISTENER_CONN_LOST = False | |
354 | 367 | |
355 | 368 | [relay] |
356 | 369 | LINE_RECEIVER_INTERFACE = 0.0.0.0 |
437 | 450 | |
438 | 451 | # This allows to have multiple connections per destinations, this will |
439 | 452 | # pool all the replicas of a single host in the same queue and distribute |
440 | # points accross these replicas instead of replicating them. | |
453 | # points across these replicas instead of replicating them. | |
441 | 454 | # The following example will balance the load between :0 and :1. |
442 | 455 | ## DESTINATIONS = foo:2001:0, foo:2001:1 |
443 | 456 | ## RELAY_METHOD = rules |
516 | 529 | # In order to turn off logging of successful connections for the line |
517 | 530 | # receiver, set this to False |
518 | 531 | # LOG_LISTENER_CONN_SUCCESS = True |
532 | # | |
533 | # To turn off logging of lost connections for the line receiver set this to | |
534 | # False | |
535 | # LOG_LISTENER_CONN_LOST = False | |
519 | 536 | |
520 | 537 | # If you're connecting from the relay to a destination that's over the |
521 | 538 | # internet or similarly iffy connection, a backlog can develop because |
529 | 546 | MIN_RESET_STAT_FLOW=1000 |
530 | 547 | |
531 | 548 | # When the ratio of stats being sent in a reporting interval is far |
532 | # enough from 1.0, we will disconnect the socket and reconnecto to | |
549 | # enough from 1.0, we will disconnect the socket and reconnect to | |
533 | 550 | # clear out queued stats. The default ratio of 0.9 indicates that 10% |
534 | 551 | # of stats aren't being delivered within one CARBON_METRIC_INTERVAL |
535 | 552 | # (default of 60 seconds), which can lead to a queue backup. Under |
652 | 669 | # In order to turn off logging of successful connections for the line |
653 | 670 | # receiver, set this to False |
654 | 671 | # LOG_LISTENER_CONN_SUCCESS = True |
672 | # | |
673 | # To turn off logging of lost connections for the line receiver set this to | |
674 | # False | |
675 | # LOG_LISTENER_CONN_LOST = False | |
655 | 676 | |
656 | 677 | # In order to turn off logging of metrics with no corresponding |
657 | 678 | # aggregation rules receiver, set this to False |
23 | 23 | [sum] |
24 | 24 | pattern = \.count$ |
25 | 25 | xFilesFactor = 0 |
26 | # for monotonically increasing counters | |
26 | 27 | aggregationMethod = max |
28 | # for counters that reset every interval (statsd-style) | |
29 | #aggregationMethod = sum | |
27 | 30 | |
28 | 31 | [default_average] |
29 | 32 | pattern = .* |
#!/bin/bash
# chkconfig: - 25 75
# description: carbon-aggregator
# processname: carbon-aggregator

# Which carbon daemon this init script manages and where graphite lives.
# GRAPHITE_DIR must be assigned *before* it is interpolated into PYTHONPATH;
# the original script exported an empty lib path here.
CARBON_DAEMON="aggregator"
GRAPHITE_DIR="/opt/graphite"

export PYTHONPATH="$GRAPHITE_DIR/lib:$PYTHONPATH"

# Source function library.
if [ -e /lib/lsb/init-functions ]; then
  . /lib/lsb/init-functions
fi;

# Collect instance names: a header like [aggregator:b] yields "b"; a bare
# [aggregator] yields "aggregator" (mapped to instance "a" below).
INSTANCES=`grep "^\[${CARBON_DAEMON}" ${GRAPHITE_DIR}/conf/carbon.conf | cut -d \[ -f 2 | cut -d \] -f 1 | cut -d : -f 2`

function die {
  echo $1
  exit 1
}

start(){
  cd $GRAPHITE_DIR;

  for INSTANCE in ${INSTANCES}; do
    if [ "${INSTANCE}" == "${CARBON_DAEMON}" ]; then
      INSTANCE="a";
    fi;
    log_action_begin_msg "Starting carbon-${CARBON_DAEMON}:${INSTANCE}..."
    bin/carbon-${CARBON_DAEMON}.py --instance=${INSTANCE} start;

    if [ $? -eq 0 ]; then
      log_success_msg
    else
      log_failure_msg
    fi;
    echo ""
  done;
}

stop(){
  cd $GRAPHITE_DIR

  for INSTANCE in ${INSTANCES}; do
    if [ "${INSTANCE}" == "${CARBON_DAEMON}" ]; then
      INSTANCE="a";
    fi;
    log_action_begin_msg "Stopping carbon-${CARBON_DAEMON}:${INSTANCE}..."
    bin/carbon-${CARBON_DAEMON}.py --instance=${INSTANCE} stop
    # Capture the stop command's status now; the original tested $? after
    # the force-kill `if`, which reflects that test, not the stop itself.
    RC=$?

    # Give the daemon a few seconds; if it is still alive, escalate to SIGKILL.
    if [ `sleep 3; /usr/bin/pgrep -f "carbon-${CARBON_DAEMON}.py --instance=${INSTANCE}" | /usr/bin/wc -l` -gt 0 ]; then
      echo "Carbon did not stop yet. Sleeping longer, then force killing it...";
      sleep 20;
      /usr/bin/pkill -9 -f "carbon-${CARBON_DAEMON}.py --instance=${INSTANCE}";
    fi;

    if [ $RC -eq 0 ]; then
      log_success_msg
    else
      log_failure_msg
    fi;
    echo ""
  done;
}

status(){
  cd $GRAPHITE_DIR;

  for INSTANCE in ${INSTANCES}; do
    if [ "${INSTANCE}" == "${CARBON_DAEMON}" ]; then
      INSTANCE="a";
    fi;
    bin/carbon-${CARBON_DAEMON}.py --instance=${INSTANCE} status;

    if [ $? -eq 0 ]; then
      log_success_msg
    else
      log_failure_msg
    fi;
    echo ""
  done;
}

case "$1" in
  start)
    start
    ;;
  stop)
    stop
    ;;
  status)
    status
    ;;
  restart|reload)
    stop
    start
    ;;
  *)
    echo $"Usage: $0 {start|stop|restart|status}"
    exit 1
esac
#!/bin/bash
# chkconfig: - 25 75
# description: carbon-cache
# processname: carbon-cache

# Which carbon daemon this init script manages and where graphite lives.
# GRAPHITE_DIR must be assigned *before* it is interpolated into PYTHONPATH;
# the original script exported an empty lib path here.
CARBON_DAEMON="cache"
GRAPHITE_DIR="/opt/graphite"

export PYTHONPATH="$GRAPHITE_DIR/lib:$PYTHONPATH"

# Source function library.
if [ -e /lib/lsb/init-functions ]; then
  . /lib/lsb/init-functions
fi;

# Collect instance names: a header like [cache:b] yields "b"; a bare
# [cache] yields "cache" (mapped to instance "a" below).
INSTANCES=`grep "^\[${CARBON_DAEMON}" ${GRAPHITE_DIR}/conf/carbon.conf | cut -d \[ -f 2 | cut -d \] -f 1 | cut -d : -f 2`

function die {
  echo $1
  exit 1
}

start(){
  cd $GRAPHITE_DIR;

  for INSTANCE in ${INSTANCES}; do
    if [ "${INSTANCE}" == "${CARBON_DAEMON}" ]; then
      INSTANCE="a";
    fi;
    log_action_begin_msg "Starting carbon-${CARBON_DAEMON}:${INSTANCE}..."
    bin/carbon-${CARBON_DAEMON}.py --instance=${INSTANCE} start;

    if [ $? -eq 0 ]; then
      log_success_msg
    else
      log_failure_msg
    fi;
    echo ""
  done;
}

stop(){
  cd $GRAPHITE_DIR

  for INSTANCE in ${INSTANCES}; do
    if [ "${INSTANCE}" == "${CARBON_DAEMON}" ]; then
      INSTANCE="a";
    fi;
    log_action_begin_msg "Stopping carbon-${CARBON_DAEMON}:${INSTANCE}..."
    bin/carbon-${CARBON_DAEMON}.py --instance=${INSTANCE} stop
    # Capture the stop command's status now; the original tested $? after
    # the force-kill `if`, which reflects that test, not the stop itself.
    RC=$?

    # Give the daemon a few seconds; if it is still alive, escalate to SIGKILL.
    if [ `sleep 3; /usr/bin/pgrep -f "carbon-${CARBON_DAEMON}.py --instance=${INSTANCE}" | /usr/bin/wc -l` -gt 0 ]; then
      echo "Carbon did not stop yet. Sleeping longer, then force killing it...";
      sleep 20;
      /usr/bin/pkill -9 -f "carbon-${CARBON_DAEMON}.py --instance=${INSTANCE}";
    fi;

    if [ $RC -eq 0 ]; then
      log_success_msg
    else
      log_failure_msg
    fi;
    echo ""
  done;
}

status(){
  cd $GRAPHITE_DIR;

  for INSTANCE in ${INSTANCES}; do
    if [ "${INSTANCE}" == "${CARBON_DAEMON}" ]; then
      INSTANCE="a";
    fi;
    bin/carbon-${CARBON_DAEMON}.py --instance=${INSTANCE} status;

    if [ $? -eq 0 ]; then
      log_success_msg
    else
      log_failure_msg
    fi;
    echo ""
  done;
}

case "$1" in
  start)
    start
    ;;
  stop)
    stop
    ;;
  status)
    status
    ;;
  restart|reload)
    stop
    start
    ;;
  *)
    echo $"Usage: $0 {start|stop|restart|status}"
    exit 1
esac
101 |
#!/bin/bash
# chkconfig: - 25 75
# description: carbon-relay
# processname: carbon-relay

# Which carbon daemon this init script manages and where graphite lives.
# GRAPHITE_DIR must be assigned *before* it is interpolated into PYTHONPATH;
# the original script exported an empty lib path here.
CARBON_DAEMON="relay"
GRAPHITE_DIR="/opt/graphite"

export PYTHONPATH="$GRAPHITE_DIR/lib:$PYTHONPATH"

# Source function library.
if [ -e /lib/lsb/init-functions ]; then
  . /lib/lsb/init-functions
fi;

# Collect instance names: a header like [relay:b] yields "b"; a bare
# [relay] yields "relay" (mapped to instance "a" below).
INSTANCES=`grep "^\[${CARBON_DAEMON}" ${GRAPHITE_DIR}/conf/carbon.conf | cut -d \[ -f 2 | cut -d \] -f 1 | cut -d : -f 2`

function die {
  echo $1
  exit 1
}

start(){
  cd $GRAPHITE_DIR;

  for INSTANCE in ${INSTANCES}; do
    if [ "${INSTANCE}" == "${CARBON_DAEMON}" ]; then
      INSTANCE="a";
    fi;
    log_action_begin_msg "Starting carbon-${CARBON_DAEMON}:${INSTANCE}..."
    bin/carbon-${CARBON_DAEMON}.py --instance=${INSTANCE} start;

    if [ $? -eq 0 ]; then
      log_success_msg
    else
      log_failure_msg
    fi;
    echo ""
  done;
}

stop(){
  cd $GRAPHITE_DIR

  for INSTANCE in ${INSTANCES}; do
    if [ "${INSTANCE}" == "${CARBON_DAEMON}" ]; then
      INSTANCE="a";
    fi;
    # Use log_action_begin_msg for consistency with the cache/aggregator
    # init scripts (the original used a plain echo here).
    log_action_begin_msg "Stopping carbon-${CARBON_DAEMON}:${INSTANCE}..."
    bin/carbon-${CARBON_DAEMON}.py --instance=${INSTANCE} stop
    # Capture the stop command's status now; the original tested $? after
    # the force-kill `if`, which reflects that test, not the stop itself.
    RC=$?

    # Give the daemon a few seconds; if it is still alive, escalate to SIGKILL.
    if [ `sleep 3; /usr/bin/pgrep -f "carbon-${CARBON_DAEMON}.py --instance=${INSTANCE}" | /usr/bin/wc -l` -gt 0 ]; then
      echo "Carbon did not stop yet. Sleeping longer, then force killing it...";
      sleep 20;
      /usr/bin/pkill -9 -f "carbon-${CARBON_DAEMON}.py --instance=${INSTANCE}";
    fi;

    if [ $RC -eq 0 ]; then
      log_success_msg
    else
      log_failure_msg
    fi;
    echo ""
  done;
}

status(){
  cd $GRAPHITE_DIR;

  for INSTANCE in ${INSTANCES}; do
    if [ "${INSTANCE}" == "${CARBON_DAEMON}" ]; then
      INSTANCE="a";
    fi;
    bin/carbon-${CARBON_DAEMON}.py --instance=${INSTANCE} status;

    if [ $? -eq 0 ]; then
      log_success_msg
    else
      log_failure_msg
    fi;
    echo ""
  done;
}

case "$1" in
  start)
    start
    ;;
  stop)
    stop
    ;;
  status)
    status
    ;;
  restart|reload)
    stop
    start
    ;;
  *)
    echo $"Usage: $0 {start|stop|restart|status}"
    exit 1
esac
101 |
#!/bin/bash

# Make each carbon init script executable and register it with the
# Debian-style runlevel system.
INIT_SCRIPTS="carbon-cache carbon-relay carbon-aggregator";
for s in $INIT_SCRIPTS; do
  /bin/chmod +x /etc/init.d/${s};
  /usr/sbin/update-rc.d ${s} defaults
done;

# Seed any missing configuration file from its shipped .example copy,
# leaving pre-existing configs untouched.
GRAPHITE_PATH=/opt/graphite
CONFFILES="carbon.conf relay-rules.conf storage-schemas.conf storage-aggregation.conf"
for i in $CONFFILES; do
  [ -e ${GRAPHITE_PATH}/conf/$i ] && continue
  /bin/echo "No pre-existing $i - creating from example."
  /bin/cp ${GRAPHITE_PATH}/conf/$i.example ${GRAPHITE_PATH}/conf/$i;
done;
# Make each carbon init script executable and, where chkconfig is
# available (RPM-based systems), register it as a service.
INIT_SCRIPTS="carbon-cache carbon-relay carbon-aggregator";
for s in $INIT_SCRIPTS; do
  /bin/chmod +x /etc/init.d/${s};

  if [ -x /sbin/chkconfig ]; then
    /sbin/chkconfig --add ${s};
  fi;
done;

# Seed any missing configuration file from its shipped .example copy,
# leaving pre-existing configs untouched.
GRAPHITE_PATH=/opt/graphite
CONFFILES="carbon.conf relay-rules.conf storage-schemas.conf storage-aggregation.conf"
for i in $CONFFILES; do
  [ -e ${GRAPHITE_PATH}/conf/$i ] && continue
  /bin/echo "No pre-existing $i - creating from example."
  /bin/cp ${GRAPHITE_PATH}/conf/$i.example ${GRAPHITE_PATH}/conf/$i;
done;
0 | #!/usr/bin/python | |
1 | """Copyright 2008 Orbitz WorldWide | |
2 | ||
3 | Licensed under the Apache License, Version 2.0 (the "License"); | |
4 | you may not use this file except in compliance with the License. | |
5 | You may obtain a copy of the License at | |
6 | ||
7 | http://www.apache.org/licenses/LICENSE-2.0 | |
8 | ||
9 | Unless required by applicable law or agreed to in writing, software | |
10 | distributed under the License is distributed on an "AS IS" BASIS, | |
11 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | |
12 | See the License for the specific language governing permissions and | |
13 | limitations under the License.""" | |
14 | ||
15 | import re | |
16 | import sys | |
17 | import time | |
18 | import socket | |
19 | import platform | |
20 | import subprocess | |
21 | ||
22 | CARBON_SERVER = '127.0.0.1' | |
23 | CARBON_PORT = 2003 | |
24 | DELAY = 60 | |
25 | ||
def get_loadavg():
    """Return the 1-, 5- and 15-minute load averages as a list of 3 strings.

    On Linux the values are read from /proc/loadavg; elsewhere they are
    parsed from the tail of `uptime`'s output.
    For more details, "man proc" and "man uptime".
    """
    if platform.system() == "Linux":
        # Use a context manager so the file handle is closed (the original
        # leaked it), and take the first three whitespace-separated fields.
        with open('/proc/loadavg') as loadavg_file:
            return loadavg_file.read().split()[:3]
    command = "uptime"
    process = subprocess.Popen(command, stdout=subprocess.PIPE, shell=True)
    stdout = process.communicate()[0].strip()
    # On Python 3, Popen yields bytes; decode before regex-splitting
    # (re.split with a str pattern on bytes raises TypeError).
    if not isinstance(stdout, str):
        stdout = stdout.decode('utf-8', 'replace')
    # Split on whitespace and commas; the load averages are the last three.
    output = re.split(r"[\s,]+", stdout)
    return output[-3:]
40 | ||
def run(sock, delay):
    """Send all three load averages to `sock` in carbon's plaintext
    protocol every `delay` seconds, forever."""
    while True:
        now = int(time.time())
        lines = []
        # We're gonna report all three loadavg values
        loadavg = get_loadavg()
        lines.append("system.loadavg_1min %s %d" % (loadavg[0], now))
        lines.append("system.loadavg_5min %s %d" % (loadavg[1], now))
        lines.append("system.loadavg_15min %s %d" % (loadavg[2], now))
        message = '\n'.join(lines) + '\n'  # all lines must end in a newline
        print("sending message")
        print('-' * 80)
        print(message)
        # socket.sendall() requires bytes on Python 3; str would raise
        # TypeError (encoding is a no-op round-trip on Python 2).
        sock.sendall(message.encode('utf-8'))
        time.sleep(delay)
57 | ||
def main():
    """Parse the optional delay argument, connect to carbon's plaintext
    port, and stream load-average metrics until interrupted."""
    delay = DELAY
    if len(sys.argv) > 1:
        candidate = sys.argv[1]
        if not candidate.isdigit():
            sys.stderr.write("Ignoring non-integer argument. Using default: %ss\n" % delay)
        else:
            delay = int(candidate)

    sock = socket.socket()
    try:
        sock.connect((CARBON_SERVER, CARBON_PORT))
    except socket.error:
        raise SystemExit("Couldn't connect to %(server)s on port %(port)d, is carbon-cache.py running?" % { 'server':CARBON_SERVER, 'port':CARBON_PORT })

    try:
        run(sock, delay)
    except KeyboardInterrupt:
        sys.stderr.write("\nExiting on CTRL-c\n")
        sys.exit(0)


if __name__ == "__main__":
    main()
0 | #!/usr/bin/python | |
1 | """Copyright 2013 Bryan Irvine | |
2 | ||
3 | Licensed under the Apache License, Version 2.0 (the "License"); | |
4 | you may not use this file except in compliance with the License. | |
5 | You may obtain a copy of the License at | |
6 | ||
7 | http://www.apache.org/licenses/LICENSE-2.0 | |
8 | ||
9 | Unless required by applicable law or agreed to in writing, software | |
10 | distributed under the License is distributed on an "AS IS" BASIS, | |
11 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | |
12 | See the License for the specific language governing permissions and | |
13 | limitations under the License.""" | |
14 | ||
15 | import re | |
16 | import sys | |
17 | import time | |
18 | import socket | |
19 | import platform | |
20 | import subprocess | |
21 | import pickle | |
22 | import struct | |
23 | ||
24 | CARBON_SERVER = '127.0.0.1' | |
25 | CARBON_PICKLE_PORT = 2004 | |
26 | DELAY = 60 | |
27 | ||
def get_loadavg():
    """Return the 1-, 5- and 15-minute load averages as a list of 3 strings.

    On Linux the values are read from /proc/loadavg; elsewhere they are
    parsed from the tail of `uptime`'s output.
    For more details, "man proc" and "man uptime".
    """
    if platform.system() == "Linux":
        # Use a context manager so the file handle is closed (the original
        # leaked it), and take the first three whitespace-separated fields.
        with open('/proc/loadavg') as loadavg_file:
            return loadavg_file.read().split()[:3]
    command = "uptime"
    process = subprocess.Popen(command, stdout=subprocess.PIPE, shell=True)
    stdout = process.communicate()[0].strip()
    # On Python 3, Popen yields bytes; decode before regex-splitting
    # (re.split with a str pattern on bytes raises TypeError).
    if not isinstance(stdout, str):
        stdout = stdout.decode('utf-8', 'replace')
    # Split on whitespace and commas; the load averages are the last three.
    output = re.split(r"[\s,]+", stdout)
    return output[-3:]
42 | ||
def run(sock, delay):
    """Send all three load averages to `sock` in carbon's length-prefixed
    pickle protocol every `delay` seconds, forever."""
    while True:
        now = int(time.time())
        # We're gonna report all three loadavg values
        loadavg = get_loadavg()
        tuples = [
            ('system.loadavg_1min', (now, loadavg[0])),
            ('system.loadavg_5min', (now, loadavg[1])),
            ('system.loadavg_15min', (now, loadavg[2])),
        ]
        # The plaintext rendering below is only for console display.
        lines = [
            "system.loadavg_1min %s %d" % (loadavg[0], now),
            "system.loadavg_5min %s %d" % (loadavg[1], now),
            "system.loadavg_15min %s %d" % (loadavg[2], now),
        ]
        message = '\n'.join(lines) + '\n'  # all lines must end in a newline
        print("sending message")
        print('-' * 80)
        print(message)
        # Carbon's pickle receiver expects a 4-byte big-endian length header
        # followed by the pickled list of (metric, (timestamp, value)) pairs.
        package = pickle.dumps(tuples, 1)
        size = struct.pack('!L', len(package))
        sock.sendall(size)
        sock.sendall(package)
        time.sleep(delay)
66 | ||
def main():
    """Parse the optional delay argument, connect to carbon's pickle
    port, and stream load-average metrics until interrupted."""
    delay = DELAY
    if len(sys.argv) > 1:
        candidate = sys.argv[1]
        if not candidate.isdigit():
            sys.stderr.write("Ignoring non-integer argument. Using default: %ss\n" % delay)
        else:
            delay = int(candidate)

    sock = socket.socket()
    try:
        sock.connect((CARBON_SERVER, CARBON_PICKLE_PORT))
    except socket.error:
        raise SystemExit("Couldn't connect to %(server)s on port %(port)d, is carbon-cache.py running?" % { 'server':CARBON_SERVER, 'port':CARBON_PICKLE_PORT })

    try:
        run(sock, delay)
    except KeyboardInterrupt:
        sys.stderr.write("\nExiting on CTRL-c\n")
        sys.exit(0)


if __name__ == "__main__":
    main()
0 | #!/usr/bin/python | |
1 | # -*- coding: utf-8 -*- | |
2 | """ Copyright 2013 Bryan Irvine | |
3 | Copyright 2017 The Graphite Project | |
4 | ||
5 | Licensed under the Apache License, Version 2.0 (the "License"); | |
6 | you may not use this file except in compliance with the License. | |
7 | You may obtain a copy of the License at | |
8 | ||
9 | http://www.apache.org/licenses/LICENSE-2.0 | |
10 | ||
11 | Unless required by applicable law or agreed to in writing, software | |
12 | distributed under the License is distributed on an "AS IS" BASIS, | |
13 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | |
14 | See the License for the specific language governing permissions and | |
15 | limitations under the License.""" | |
16 | ||
17 | # Import the precompiled protobuffer. It can be recompiled with: | |
18 | # $ protoc --python_out=. carbon.proto | |
19 | from carbon.carbon_pb2 import Payload | |
20 | ||
21 | import os | |
22 | import sys | |
23 | import time | |
24 | import socket | |
25 | import struct | |
26 | ||
27 | CARBON_SERVER = '127.0.0.1' | |
28 | CARBON_PROTOBUF_PORT = 2005 | |
29 | DELAY = 60 | |
30 | ||
31 | ||
def run(sock, delay):
    """Make the client go go go"""
    while True:
        # Epoch, timestamp in seconds since 1970
        now = int(time.time())

        # Initialize the protobuf payload
        payload_pb = Payload()

        # One Metric per load-average window, each carrying a single Point.
        for label, load in zip(['1min', '5min', '15min'], os.getloadavg()):
            metric_pb = payload_pb.metrics.add()
            metric_pb.metric = 'system.loadavg_' + label
            point_pb = metric_pb.points.add()
            point_pb.timestamp = now
            point_pb.value = load

        print("sending message")
        print(('-' * 80))
        print(payload_pb)

        package = payload_pb.SerializeToString()

        # The message must be prepended with its size
        sock.sendall(struct.pack('!L', len(package)))

        # Then send the actual payload
        sock.sendall(package)

        time.sleep(delay)
63 | ||
64 | ||
def main():
    """Wrap it all up together"""
    delay = DELAY
    args = sys.argv[1:]
    if args:
        # A numeric first argument overrides the default reporting delay.
        if args[0].isdigit():
            delay = int(args[0])
        else:
            sys.stderr.write(
                "Ignoring non-integer argument. Using default: %ss\n"
                % delay)

    sock = socket.socket()
    try:
        sock.connect((CARBON_SERVER, CARBON_PROTOBUF_PORT))
    except socket.error:
        raise SystemExit("Couldn't connect to %(server)s on port %(port)d, "
                         "is carbon-cache.py running?" %
                         {'server': CARBON_SERVER,
                          'port': CARBON_PROTOBUF_PORT})

    try:
        run(sock, delay)
    except KeyboardInterrupt:
        sys.stderr.write("\nExiting on CTRL-c\n")
        sys.exit(0)


if __name__ == "__main__":
    main()
84 | 84 | |
85 | 85 | @property |
86 | 86 | def size(self): |
87 | return sum([len(buf.values) for buf in self.interval_buffers.values()]) | |
87 | return sum(len(buf.values) for buf in self.interval_buffers.values()) | |
88 | 88 | |
89 | 89 | |
90 | 90 | class IntervalBuffer: |
55 | 55 | |
56 | 56 | def choose_item(self): |
57 | 57 | raise NotImplementedError() |
58 | ||
59 | def store(self, metric): | |
60 | pass | |
58 | 61 | |
59 | 62 | |
60 | 63 | class NaiveStrategy(DrainStrategy): |
143 | 146 | |
144 | 147 | def choose_item(self): |
145 | 148 | return next(self.queue) |
149 | ||
150 | ||
class BucketMaxStrategy(DrainStrategy):
  """
  Same as 'max' strategy but sorts on insertion into buckets instead of at
  pop().

  ``buckets[i]`` holds the metrics that currently have ``i + 1`` cached
  datapoints, so the last non-empty bucket always contains the metrics
  with the most datapoints.
  """
  def __init__(self, cache):
    # One list per datapoint count; index i holds metrics with i+1 points.
    self.buckets = list()
    super(BucketMaxStrategy, self).__init__(cache)

  def choose_item(self):
    """Return the metric with the most cached datapoints, or None if empty."""
    try:
      # Largest buckets are empty, remove them.
      while len(self.buckets[-1]) == 0:
        self.buckets.pop()
      # Return the metric with the most datapoints. If more than one
      # metric has the most datapoints, the first seen is returned.
      return self.buckets[-1].pop(0)
    except (KeyError, IndexError):  # buckets are empty
      return None

  def store(self, metric):
    """Move `metric` into the bucket matching its current datapoint count."""
    nr_points = len(self.cache[metric])

    # No bucket of this size exists, create it
    while nr_points > len(self.buckets):
      self.buckets.append(list())

    # Remove existing metrics from its bucket: a metric that now has
    # nr_points datapoints previously lived in bucket nr_points - 2
    # (i.e. the nr_points - 1 bucket).
    if nr_points > 1:
      self.buckets[nr_points - 2].remove(metric)

    self.buckets[nr_points - 1].append(metric)
146 | 184 | |
147 | 185 | |
148 | 186 | class _MetricCache(defaultdict): |
216 | 254 | else: |
217 | 255 | self.size += 1 |
218 | 256 | self[metric][timestamp] = value |
257 | if self.strategy: | |
258 | self.strategy.store(metric) | |
219 | 259 | else: |
220 | 260 | # Updating a duplicate does not increase the cache size |
221 | 261 | self[metric][timestamp] = value |
231 | 271 | |
232 | 272 | # Initialize a singleton cache instance |
233 | 273 | # TODO: use plugins. |
234 | write_strategy = None | |
274 | write_strategy = DrainStrategy | |
235 | 275 | if settings.CACHE_WRITE_STRATEGY == 'naive': |
236 | 276 | write_strategy = NaiveStrategy |
237 | 277 | if settings.CACHE_WRITE_STRATEGY == 'max': |
242 | 282 | write_strategy = TimeSortedStrategy |
243 | 283 | if settings.CACHE_WRITE_STRATEGY == 'random': |
244 | 284 | write_strategy = RandomStrategy |
285 | if settings.CACHE_WRITE_STRATEGY == 'bucketmax': | |
286 | write_strategy = BucketMaxStrategy | |
245 | 287 | |
246 | 288 | _Cache = _MetricCache(write_strategy) |
247 | 289 | return _Cache |
// Schema for carbon's protobuf listener.
// Regenerate the Python bindings with:
//   protoc --python_out=. carbon.proto

syntax = "proto3";
package carbon;

// A single datapoint: unix timestamp (seconds) and its value.
message Point {
  uint32 timestamp = 1;
  double value = 2;
}

// A named metric with its datapoints.
message Metric {
  string metric = 1;
  repeated Point points = 2;
}

// Top-level wire message: a batch of metrics.
message Payload {
  repeated Metric metrics = 1;
}
502 | 502 | """ |
503 | 503 | |
504 | 504 | def __init__(self): |
505 | # This queue isn't explicitely bounded but will implicitely be. It receives | |
505 | # This queue isn't explicitly bounded but will implicitly be. It receives | |
506 | 506 | # only metrics when no destinations are available, and as soon as we detect |
507 | 507 | # that we don't have any destination we pause the producer: this mean that |
508 | 508 | # it will contain only a few seconds of metrics. |
530 | 530 | def __init__(self, router): |
531 | 531 | if settings.DESTINATION_POOL_REPLICAS: |
532 | 532 | # If we decide to open multiple TCP connection to a replica, we probably |
533 | # want to try to also load-balance accross hosts. In this case we need | |
533 | # want to try to also load-balance across hosts. In this case we need | |
534 | 534 | # to make sure rfc3484 doesn't get in the way. |
535 | 535 | if setUpRandomResolver: |
536 | 536 | setUpRandomResolver(reactor) |
77 | 77 | BIND_PATTERNS=['#'], |
78 | 78 | GRAPHITE_URL='http://127.0.0.1:80', |
79 | 79 | ENABLE_TAGS=True, |
80 | SKIP_TAGS_FOR_NONTAGGED=True, | |
80 | 81 | TAG_UPDATE_INTERVAL=100, |
81 | 82 | TAG_BATCH_SIZE=100, |
82 | 83 | TAG_QUEUE_SIZE=10000, |
114 | 115 | TCP_KEEPINTVL=30, |
115 | 116 | TCP_KEEPCNT=2, |
116 | 117 | USE_RATIO_RESET=False, |
118 | LOG_LISTENER_CONN_LOST=False, | |
117 | 119 | LOG_LISTENER_CONN_SUCCESS=True, |
118 | 120 | LOG_AGGREGATOR_MISSES=True, |
119 | 121 | AGGREGATION_RULES='aggregation-rules.conf', |
276 | 278 | print("Error: missing required config %s" % storage_schemas) |
277 | 279 | sys.exit(1) |
278 | 280 | |
279 | if settings.CACHE_WRITE_STRATEGY not in ('timesorted', 'sorted', 'max', 'naive'): | |
281 | if settings.CACHE_WRITE_STRATEGY not in ('timesorted', 'sorted', 'max', | |
282 | 'bucketmax', 'naive'): | |
280 | 283 | log.err("%s is not a valid value for CACHE_WRITE_STRATEGY, defaulting to %s" % |
281 | 284 | (settings.CACHE_WRITE_STRATEGY, defaults['CACHE_WRITE_STRATEGY'])) |
282 | 285 | else: |
604 | 607 | raise CarbonConfigException("Either ROOT_DIR or GRAPHITE_ROOT " |
605 | 608 | "needs to be provided.") |
606 | 609 | |
607 | # Default config directory to root-relative, unless overriden by the | |
610 | # Default config directory to root-relative, unless overridden by the | |
608 | 611 | # 'GRAPHITE_CONF_DIR' environment variable. |
609 | 612 | settings.setdefault("CONF_DIR", |
610 | 613 | os.environ.get("GRAPHITE_CONF_DIR", |
616 | 619 | # file. |
617 | 620 | settings["CONF_DIR"] = dirname(normpath(options["config"])) |
618 | 621 | |
619 | # Storage directory can be overriden by the 'GRAPHITE_STORAGE_DIR' | |
622 | # Storage directory can be overridden by the 'GRAPHITE_STORAGE_DIR' | |
620 | 623 | # environment variable. It defaults to a path relative to GRAPHITE_ROOT |
621 | 624 | # for backwards compatibility though. |
622 | 625 | settings.setdefault("STORAGE_DIR", |
88 | 88 | self.ring_len = len(self.ring) |
89 | 89 | |
90 | 90 | def get_node(self, key): |
91 | assert self.ring | |
92 | 91 | position = self.compute_ring_position(key) |
93 | 92 | search_entry = (position, ()) |
94 | 93 | index = bisect.bisect_left(self.ring, search_entry) % self.ring_len |
125 | 125 | record = aggregator_record |
126 | 126 | record('allocatedBuffers', len(BufferManager)) |
127 | 127 | record('bufferedDatapoints', |
128 | sum([b.size for b in BufferManager.buffers.values()])) | |
128 | sum(b.size for b in BufferManager.buffers.values())) | |
129 | 129 | record('aggregateDatapointsSent', myStats.get('aggregateDatapointsSent', 0)) |
130 | 130 | |
131 | 131 | # relay metrics |
151 | 151 | record('whitelistRejects', myStats.get('whitelistRejects', 0)) |
152 | 152 | record('cpuUsage', getCpuUsage()) |
153 | 153 | |
154 | # And here preserve count of messages received in the prior periiod | |
154 | # And here preserve count of messages received in the prior period | |
155 | 155 | myPriorStats['metricsReceived'] = myStats.get('metricsReceived', 0) |
156 | 156 | prior_stats.clear() |
157 | 157 | prior_stats.update(myPriorStats) |
154 | 154 | "%s connection with %s closed cleanly" % (self.__class__.__name__, self.peerName)) |
155 | 155 | |
156 | 156 | else: |
157 | log.listener( | |
158 | "%s connection with %s lost: %s" % (self.__class__.__name__, self.peerName, reason.value)) | |
157 | if settings.LOG_LISTENER_CONN_LOST: | |
158 | log.listener( | |
159 | "%s connection with %s lost: %s" % (self.__class__.__name__, self.peerName, reason.value)) | |
159 | 160 | |
160 | 161 | state.connectedMetricReceiverProtocols.remove(self) |
161 | 162 | checkIfAcceptingConnections() |
135 | 135 | try: |
136 | 136 | if xFilesFactor is not None: |
137 | 137 | xFilesFactor = float(xFilesFactor) |
138 | assert 0 <= xFilesFactor <= 1 | |
138 | if not 0 <= xFilesFactor <= 1: | |
139 | raise AssertionError("xFilesFactor value out of [0,1] bounds") | |
139 | 140 | if aggregationMethod is not None: |
140 | 141 | if state.database is not None: |
141 | assert aggregationMethod in state.database.aggregationMethods | |
142 | if aggregationMethod not in state.database.aggregationMethods: | |
143 | raise AssertionError("aggregationMethod not found in state.database.aggregationMethods") | |
142 | 144 | except ValueError: |
143 | 145 | log.msg("Invalid schemas found in %s." % section) |
144 | 146 | continue |
0 | import timeit | |
1 | import time | |
2 | ||
3 | from carbon.aggregator.processor import AggregationProcessor, RuleManager | |
4 | from carbon.aggregator.buffers import BufferManager | |
5 | from carbon.tests.util import print_stats | |
6 | from carbon.conf import settings | |
7 | from carbon import state | |
8 | ||
9 | METRIC = 'prod.applications.foo.1.requests' | |
10 | METRIC_AGGR = 'prod.applications.foo.all.requests' | |
11 | FREQUENCY = 1000 | |
12 | ||
13 | ||
def bench_aggregator_noop():
    """Benchmark the aggregation processor with no rules loaded."""
    RuleManager.clear()
    _bench_aggregator("noop")
17 | ||
18 | ||
def bench_aggregator_sum():
    """Benchmark with a single 'sum' rule that matches the test metric."""
    RuleManager.clear()
    RuleManager.rules = [
        RuleManager.parse_definition(
            ('<env>.applications.<app>.all.requests (%d) =' % FREQUENCY) +
            'sum <env>.applications.<app>.*.requests'),
    ]
    _bench_aggregator("sum")
27 | ||
28 | ||
def bench_aggregator_fake():
    """Benchmark with a rule whose pattern does not match the test metric."""
    RuleManager.clear()
    RuleManager.rules = [
        RuleManager.parse_definition('foo (60) = sum bar'),
    ]
    _bench_aggregator("fake")
35 | ||
36 | ||
def _bench_aggregator(name):
    """Time AggregationProcessor.process() for increasing datapoint counts."""
    print("== %s ==" % name)
    max_intervals = settings['MAX_AGGREGATION_INTERVALS']
    # Start far enough in the past that all configured intervals are covered.
    now = time.time() - (max_intervals * FREQUENCY)

    buf = None
    for n in [1, 1000, 10000, 100000, 1000000, 10000000]:
        processor = AggregationProcessor()
        processor.process(METRIC, (now, 1))

        def _process():
            # timeit calls this with no arguments, so the per-call counter
            # is kept as a function attribute (_process.i) rather than a
            # local variable.
            processor.process(METRIC, (now + _process.i, 1))
            if (_process.i % FREQUENCY) == 0 and buf is not None:
                buf.compute_values()
            _process.i += 1
        _process.i = 0

        # Lazily fetch the aggregation buffer once, on the first iteration.
        if buf is None:
            buf = BufferManager.get_buffer(METRIC_AGGR, 1, None)

        t = timeit.timeit(_process, number=n)
        buf.close()
        print_stats(n, t)
    print("")
61 | ||
62 | ||
def main():
    """Run all aggregator benchmarks with carbon state stubbed out."""
    settings.LOG_AGGREGATOR_MISSES = False

    class _Fake(object):
        # Minimal stub standing in for carbon.state.events and
        # carbon.state.instrumentation so the processor can run standalone.
        def metricGenerated(self, metric, datapoint):
            pass

        def increment(self, metric):
            pass

    state.events = _Fake()
    state.instrumentation = _Fake()
    # Warm up once so import/setup costs don't skew the first real run.
    _bench_aggregator("warmup")
    bench_aggregator_noop()
    bench_aggregator_sum()
    bench_aggregator_fake()


if __name__ == '__main__':
    main()
0 | import timeit | |
1 | ||
2 | from carbon.cache import _MetricCache, DrainStrategy, \ | |
3 | NaiveStrategy, MaxStrategy, RandomStrategy, SortedStrategy, TimeSortedStrategy | |
4 | ||
5 | ||
# Shared state mutated and re-created by the timeit callables below.
metric_cache = _MetricCache(DrainStrategy)
count = 0
# Drain strategies under benchmark, keyed by their CACHE_WRITE_STRATEGY name.
strategies = {
    'naive': NaiveStrategy,
    'max': MaxStrategy,
    'random': RandomStrategy,
    'sorted': SortedStrategy,
    'timesorted': TimeSortedStrategy,
}
15 | ||
16 | ||
def command_store_foo():
    """Store one more datapoint under a single fixed metric name."""
    global count
    count = count + 1
    return metric_cache.store('foo', (count, 1.0))
21 | ||
22 | ||
def command_store_foo_n():
    """Store one datapoint under a new, unique metric name each call."""
    global count
    count = count + 1
    return metric_cache.store("foo.%d" % count, (count, 1.0))
27 | ||
28 | ||
def command_drain():
    """Drain the cache completely and return its final size."""
    while metric_cache:
        metric_cache.drain_metric()
    return metric_cache.size
33 | ||
34 | ||
def print_stats(n, t):
    """Print elapsed time ``t`` (seconds) for ``n`` datapoints, scaled to
    the most readable unit (usecs, msecs or secs)."""
    usec = t * 1e6
    msec = usec / 1000
    if usec < 1000:
        print(" datapoints: %-10d usecs: %d" % (n, int(usec)))
    elif msec < 1000:
        print(" datapoints: %-10d msecs: %d" % (n, int(msec)))
    else:
        print(" datapoints: %-10d secs: %3g" % (n, msec / 1000))
46 | ||
47 | ||
if __name__ == '__main__':
    # Benchmark 1: repeated stores to a single metric key.
    print("Benchmarking single metric MetricCache store...")
    for n in [1000, 10000, 100000, 1000000]:
        count = 0
        metric_cache = _MetricCache(DrainStrategy)
        t = timeit.timeit(command_store_foo, number=n)
        print_stats(n, t)

    # Benchmark 2: every store creates a brand new metric key.
    print("Benchmarking unique metric MetricCache store...")
    for n in [1000, 10000, 100000, 1000000]:
        count = 0
        metric_cache = _MetricCache(DrainStrategy)
        t = timeit.timeit(command_store_foo_n, number=n)
        print_stats(n, t)

    # Benchmark 3: drain a pre-filled single-metric cache with each strategy.
    print("Benchmarking single metric MetricCache drain...")
    for name, strategy in sorted(strategies.items()):
        print("CACHE_WRITE_STRATEGY: %s" % name)
        for n in [1000, 10000, 100000, 1000000]:
            count = 0
            metric_cache = _MetricCache(strategy)
            timeit.timeit(command_store_foo, number=n)
            t = timeit.timeit(command_drain, number=1)
            print_stats(n, t)

    # Benchmark 4: drain a cache of n distinct metrics with each strategy.
    print("Benchmarking unique metric MetricCache drain...")
    for name, strategy in sorted(strategies.items()):
        print("CACHE_WRITE_STRATEGY: %s" % name)
        for n in [1000, 10000, 100000, 1000000]:
            # remove me when strategy is fast
            if (name == 'max' and n > 10000) or (name == 'random' and n > 10000):
                print(" datapoints: %-10d [skipped]" % n)
                continue
            count = 0
            metric_cache = _MetricCache(strategy)
            timeit.timeit(command_store_foo_n, number=n)
            t = timeit.timeit(command_drain, number=1)
            print_stats(n, t)
0 | import timeit | |
1 | ||
2 | from carbon.routers import DatapointRouter | |
3 | from test_routers import createSettings | |
4 | from six.moves import xrange | |
5 | ||
6 | ||
7 | REPLICATION_FACTORS = [1, 4] | |
8 | DIVERSE_REPLICAS = [True, False] | |
9 | N_DESTINATIONS = [1, 16, 32, 48] | |
10 | HASH_TYPES = [None, 'carbon_ch', 'fnv1a_ch', 'mmh3_ch'] | |
11 | ||
12 | ||
def print_stats(r, t):
    """Print one result line for router ``r``: its plugin name, parameter
    summary and datapoint count, plus elapsed time ``t`` (seconds) scaled
    to usecs/msecs/secs."""
    usec = t * 1e6
    msec = usec / 1000
    if usec < 1000:
        suffix = " usecs: %d" % int(usec)
    elif msec < 1000:
        suffix = " msecs: %d" % int(msec)
    else:
        suffix = " secs: %3g" % (msec / 1000)
    print(" %s %s datapoints: %d" % (r.plugin_name, r.__id, r.__count) + suffix)
25 | ||
26 | ||
def generateDestinations(n):
    """Yield ``n`` synthetic (host, port, instance) destination tuples,
    spreading the instances over ten host names."""
    for i in xrange(n):
        yield ('carbon%d' % (i % 10), 2000 + i, i)
33 | ||
34 | ||
def benchmark(router_class):
    """Benchmark ``router_class`` across the full grid of hash types,
    replication factors, replica diversity flags and destination counts."""
    for hash_type in HASH_TYPES:
        for replication_factor in REPLICATION_FACTORS:
            for diverse_replicas in DIVERSE_REPLICAS:
                for n_destinations in N_DESTINATIONS:
                    _benchmark(
                        router_class, replication_factor,
                        diverse_replicas, n_destinations, hash_type
                    )
44 | ||
45 | ||
def _benchmark(router_class, replication_factor, diverse_replicas, n_destinations, hash_type):
    """Build one router with the given settings and time its lookups."""
    destinations = list(generateDestinations(n_destinations))
    settings = createSettings()
    settings['REPLICATION_FACTOR'] = replication_factor
    settings['DIVERSE_REPLICAS'] = diverse_replicas
    settings['DESTINATIONS'] = destinations
    settings['ROUTER_HASH_TYPE'] = hash_type

    router = router_class(settings)
    router.__count = 0  # Ugly hack for timeit !
    # Human-readable parameter summary, printed by print_stats().
    router.__id = (
        ' replication_factor: %d' % replication_factor +
        ' diverse_replicas: %d' % diverse_replicas +
        ' n_destinations: %-5d' % n_destinations +
        ' hash_type: %s' % hash_type)
    # Register each destination with the router and mirror it into the
    # settings as a "host:port:instance" string.
    settings.DESTINATIONS = []
    for destination in destinations:
        router.addDestination(destination)
        settings.DESTINATIONS.append(
            '%s:%s:%s' % (
                destination[0], destination[1], destination[2]))
    benchmark_router(router)
68 | ||
69 | ||
def benchmark_router(router):
    """Time 100k getDestinations() lookups against ``router``."""

    def router_getDestinations():
        router.__count += 1
        # Use a different metric name on every call.
        dst = list(router.getDestinations('foo.%d' % router.__count))
        assert(len(dst) != 0)

    n = 100000
    t = timeit.timeit(router_getDestinations, number=n)
    print_stats(router, t)
80 | ||
81 | ||
def main():
    """Benchmark every registered DatapointRouter plugin."""
    for router_class in DatapointRouter.plugins.values():
        # Skip 'rules' because it's hard to mock.
        if router_class.plugin_name == 'rules':
            continue
        benchmark(router_class)


if __name__ == '__main__':
    main()
0 | # Aggregation methods for whisper files. Entries are scanned in order, | |
1 | # and first match wins. This file is scanned for changes every 60 seconds | |
2 | # | |
3 | # [name] | |
4 | # pattern = <regex> | |
5 | # xFilesFactor = <float between 0 and 1> | |
6 | # aggregationMethod = <average|sum|last|max|min> | |
7 | # | |
8 | # name: Arbitrary unique name for the rule | |
9 | # pattern: Regex pattern to match against the metric name | |
10 | # xFilesFactor: Ratio of valid data points required for aggregation to the next retention to occur | |
11 | # aggregationMethod: function to apply to data points for aggregation | |
12 | # | |
13 | [min] | |
14 | pattern = \.min$ | |
15 | xFilesFactor = 0.1 | |
16 | aggregationMethod = min | |
17 | ||
18 | [max] | |
19 | pattern = \.max$ | |
20 | xFilesFactor = 0.1 | |
21 | aggregationMethod = max | |
22 | ||
23 | [sum] | |
24 | pattern = \.count$ | |
25 | xFilesFactor = 0 | |
26 | aggregationMethod = sum | |
27 | ||
28 | [default_average] | |
29 | pattern = .* | |
30 | xFilesFactor = 0.5 | |
31 | aggregationMethod = average |
0 | [carbon] | |
1 | pattern = ^carbon\. | |
2 | retentions = 60:90d | |
3 | ||
4 | [default_1min_for_1day] | |
5 | pattern = .* | |
6 | retentions = 60s:1d |
0 | from mock import call, Mock, patch | |
1 | from unittest import TestCase | |
2 | ||
3 | from twisted.internet.task import LoopingCall | |
4 | ||
5 | from carbon import instrumentation | |
6 | from carbon.aggregator.buffers import BufferManager, IntervalBuffer, MetricBuffer | |
7 | from carbon.tests.util import TestSettings | |
8 | ||
9 | ||
class AggregationBufferManagerTest(TestCase):
    """Tests for BufferManager: buffer creation, caching and clearing."""

    def tearDown(self):
        # Drop all buffers so state doesn't leak between tests.
        BufferManager.clear()

    @patch("carbon.aggregator.buffers.MetricBuffer")
    def test_get_nonexistent_buffer_creates_new(self, metric_buffer_mock):
        BufferManager.get_buffer("carbon.foo")
        metric_buffer_mock.assert_called_once_with("carbon.foo")

    @patch("carbon.aggregator.buffers.MetricBuffer", new=Mock())
    def test_get_nonexistent_buffer_creates_and_saves_it(self):
        # Asking twice for the same name must return the identical object.
        new_buffer = BufferManager.get_buffer("carbon.foo")
        existing_buffer = BufferManager.get_buffer("carbon.foo")
        self.assertTrue(new_buffer is existing_buffer)

    @patch("carbon.aggregator.buffers.MetricBuffer", new=Mock(spec=MetricBuffer))
    def test_clear_closes_buffers(self):
        metric_buffer_mock = BufferManager.get_buffer("carbon.foo")
        BufferManager.clear()
        metric_buffer_mock.close.assert_called_once_with()
30 | ||
31 | ||
32 | class AggregationMetricBufferTest(TestCase): | |
33 | def setUp(self): | |
34 | self.new_metric_buffer = MetricBuffer("carbon.foo") | |
35 | ||
36 | with patch("carbon.aggregator.buffers.LoopingCall", new=Mock()): | |
37 | self.metric_buffer = MetricBuffer("carbon.foo.bar") | |
38 | self.metric_buffer.configure_aggregation(60, sum) | |
39 | ||
40 | def tearDown(self): | |
41 | instrumentation.stats.clear() | |
42 | ||
43 | def test_new_buffer_is_unconfigured(self): | |
44 | self.assertFalse(self.new_metric_buffer.configured) | |
45 | ||
46 | @patch("carbon.aggregator.buffers.LoopingCall", new=Mock()) | |
47 | def test_configure_buffer_marks_configured(self): | |
48 | self.new_metric_buffer.configure_aggregation(60, sum) | |
49 | self.assertTrue(self.new_metric_buffer.configured) | |
50 | ||
51 | @patch("carbon.aggregator.buffers.LoopingCall", spec=LoopingCall) | |
52 | def test_configure_buffer_creates_looping_call(self, looping_call_mock): | |
53 | self.new_metric_buffer.configure_aggregation(60, sum) | |
54 | looping_call_mock.assert_called_once_with(self.new_metric_buffer.compute_value) | |
55 | ||
56 | @patch("carbon.aggregator.buffers.LoopingCall", spec=LoopingCall) | |
57 | def test_configure_buffer_starts_looping_call(self, looping_call_mock): | |
58 | self.new_metric_buffer.configure_aggregation(60, sum) | |
59 | looping_call_mock.return_value.start.assert_called_once_with(60, now=False) | |
60 | ||
61 | @patch("carbon.aggregator.buffers.LoopingCall", spec=LoopingCall) | |
62 | def test_configure_buffer_uses_freq_if_less_than_writeback_freq(self, looping_call_mock): | |
63 | settings = TestSettings() | |
64 | settings['WRITE_BACK_FREQUENCY'] = 300 | |
65 | with patch('carbon.aggregator.buffers.settings', new=settings): | |
66 | self.new_metric_buffer.configure_aggregation(60, sum) | |
67 | looping_call_mock.return_value.start.assert_called_once_with(60, now=False) | |
68 | ||
69 | @patch("carbon.aggregator.buffers.LoopingCall", spec=LoopingCall) | |
70 | def test_configure_buffer_uses_writeback_freq_if_less_than_freq(self, looping_call_mock): | |
71 | settings = TestSettings() | |
72 | settings['WRITE_BACK_FREQUENCY'] = 30 | |
73 | with patch('carbon.aggregator.buffers.settings', new=settings): | |
74 | self.new_metric_buffer.configure_aggregation(60, sum) | |
75 | looping_call_mock.return_value.start.assert_called_once_with(30, now=False) | |
76 | ||
77 | @patch("carbon.aggregator.buffers.IntervalBuffer", new=Mock()) | |
78 | def test_input_rounds_down_to_interval(self): | |
79 | # Interval of 60 | |
80 | self.metric_buffer.input((125, 1.0)) | |
81 | self.assertTrue(120 in self.metric_buffer.interval_buffers) | |
82 | ||
83 | @patch("carbon.aggregator.buffers.IntervalBuffer", spec=IntervalBuffer) | |
84 | def test_input_passes_datapoint_to_interval_buffer(self, interval_buffer_mock): | |
85 | self.metric_buffer.input((120, 1.0)) | |
86 | interval_buffer_mock.return_value.input.assert_called_once_with((120, 1.0)) | |
87 | ||
88 | @patch("time.time", new=Mock(return_value=600)) | |
89 | @patch("carbon.state.events.metricGenerated") | |
90 | def test_compute_value_flushes_active_buffer(self, metric_generated_mock): | |
91 | self.metric_buffer.input((600, 1.0)) | |
92 | self.metric_buffer.compute_value() | |
93 | metric_generated_mock.assert_called_once_with("carbon.foo.bar", (600, 1.0)) | |
94 | ||
95 | @patch("time.time", new=Mock(return_value=600)) | |
96 | @patch("carbon.state.events.metricGenerated") | |
97 | def test_compute_value_uses_interval_for_flushed_datapoint(self, metric_generated_mock): | |
98 | self.metric_buffer.input((630, 1.0)) | |
99 | self.metric_buffer.compute_value() | |
100 | metric_generated_mock.assert_called_once_with("carbon.foo.bar", (600, 1.0)) | |
101 | ||
102 | @patch("time.time", new=Mock(return_value=600)) | |
103 | @patch("carbon.state.events.metricGenerated", new=Mock()) | |
104 | def test_compute_value_marks_buffer_inactive(self): | |
105 | interval_buffer = IntervalBuffer(600) | |
106 | interval_buffer.input((600, 1.0)) | |
107 | self.metric_buffer.interval_buffers[600] = interval_buffer | |
108 | ||
109 | with patch.object(IntervalBuffer, 'mark_inactive') as mark_inactive_mock: | |
110 | self.metric_buffer.compute_value() | |
111 | mark_inactive_mock.assert_called_once_with() | |
112 | ||
113 | @patch("time.time", new=Mock(return_value=600)) | |
114 | @patch("carbon.state.events.metricGenerated", new=Mock()) | |
115 | def test_compute_value_computes_aggregate(self): | |
116 | interval_buffer = IntervalBuffer(600) | |
117 | interval_buffer.input((600, 1.0)) | |
118 | interval_buffer.input((601, 2.0)) | |
119 | interval_buffer.input((602, 3.0)) | |
120 | self.metric_buffer.interval_buffers[600] = interval_buffer | |
121 | ||
122 | with patch.object(self.metric_buffer, 'aggregation_func') as aggregation_func_mock: | |
123 | self.metric_buffer.compute_value() | |
124 | aggregation_func_mock.assert_called_once_with([1.0, 2.0, 3.0]) | |
125 | ||
126 | @patch("time.time", new=Mock(return_value=600)) | |
127 | @patch("carbon.state.events.metricGenerated") | |
128 | def test_compute_value_skips_inactive_buffers(self, metric_generated_mock): | |
129 | interval_buffer = IntervalBuffer(600) | |
130 | interval_buffer.input((600, 1.0)) | |
131 | interval_buffer.mark_inactive() | |
132 | self.metric_buffer.interval_buffers[600] = interval_buffer | |
133 | ||
134 | self.metric_buffer.compute_value() | |
135 | self.assertFalse(metric_generated_mock.called) | |
136 | ||
137 | @patch("carbon.state.events.metricGenerated") | |
138 | def test_compute_value_can_flush_interval_multiple_times(self, metric_generated_mock): | |
139 | interval_buffer = IntervalBuffer(600) | |
140 | interval_buffer.input((600, 1.0)) | |
141 | interval_buffer.input((601, 2.0)) | |
142 | interval_buffer.input((602, 3.0)) | |
143 | self.metric_buffer.interval_buffers[600] = interval_buffer | |
144 | ||
145 | with patch("time.time") as time_mock: | |
146 | time_mock.return_value = 600 | |
147 | self.metric_buffer.compute_value() | |
148 | calls = [call("carbon.foo.bar", (600, 6.0))] | |
149 | # say WRITE_BACK_FREQUENCY is 30, we flush again if another point came in | |
150 | time_mock.return_value = 630 | |
151 | interval_buffer.input((604, 4.0)) | |
152 | self.metric_buffer.compute_value() | |
153 | calls.append(call("carbon.foo.bar", (600, 10.0))) | |
154 | ||
155 | metric_generated_mock.assert_has_calls(calls) | |
156 | ||
157 | @patch("carbon.state.events.metricGenerated") | |
158 | def test_compute_value_doesnt_flush_unchanged_interval_many_times(self, metric_generated_mock): | |
159 | interval_buffer = IntervalBuffer(600) | |
160 | interval_buffer.input((600, 1.0)) | |
161 | self.metric_buffer.interval_buffers[600] = interval_buffer | |
162 | ||
163 | with patch("time.time") as time_mock: | |
164 | time_mock.return_value = 600 | |
165 | self.metric_buffer.compute_value() | |
166 | calls = [call("carbon.foo.bar", (600, 1.0))] | |
167 | # say WRITE_BACK_FREQUENCY is 30, we flush again but no point came in | |
168 | time_mock.return_value = 630 | |
169 | self.metric_buffer.compute_value() | |
170 | ||
171 | metric_generated_mock.assert_has_calls(calls) | |
172 | ||
173 | def test_compute_value_deletes_expired_buffers(self): | |
174 | from carbon.conf import settings | |
175 | current_interval = 600 + 60 * settings['MAX_AGGREGATION_INTERVALS'] | |
176 | ||
177 | interval_buffer = IntervalBuffer(600) | |
178 | interval_buffer.input((600, 1.0)) | |
179 | interval_buffer.mark_inactive() | |
180 | self.metric_buffer.interval_buffers[600] = interval_buffer | |
181 | ||
182 | # 2nd interval for current time | |
183 | interval_buffer = IntervalBuffer(current_interval) | |
184 | interval_buffer.input((current_interval, 1.0)) | |
185 | interval_buffer.mark_inactive() | |
186 | self.metric_buffer.interval_buffers[current_interval] = interval_buffer | |
187 | ||
188 | with patch("time.time", new=Mock(return_value=current_interval + 60)): | |
189 | self.metric_buffer.compute_value() | |
190 | self.assertFalse(600 in self.metric_buffer.interval_buffers) | |
191 | ||
192 | def test_compute_value_closes_metric_if_last_buffer_deleted(self): | |
193 | from carbon.conf import settings | |
194 | current_interval = 600 + 60 * settings['MAX_AGGREGATION_INTERVALS'] | |
195 | ||
196 | interval_buffer = IntervalBuffer(600) | |
197 | interval_buffer.input((600, 1.0)) | |
198 | interval_buffer.mark_inactive() | |
199 | self.metric_buffer.interval_buffers[600] = interval_buffer | |
200 | BufferManager.buffers['carbon.foo.bar'] = self.metric_buffer | |
201 | ||
202 | with patch("time.time", new=Mock(return_value=current_interval + 60)): | |
203 | with patch.object(MetricBuffer, 'close') as close_mock: | |
204 | self.metric_buffer.compute_value() | |
205 | close_mock.assert_called_once_with() | |
206 | ||
207 | def test_compute_value_unregisters_metric_if_last_buffer_deleted(self): | |
208 | from carbon.conf import settings | |
209 | current_interval = 600 + 60 * settings['MAX_AGGREGATION_INTERVALS'] | |
210 | ||
211 | interval_buffer = IntervalBuffer(600) | |
212 | interval_buffer.input((600, 1.0)) | |
213 | interval_buffer.mark_inactive() | |
214 | self.metric_buffer.interval_buffers[600] = interval_buffer | |
215 | BufferManager.buffers['carbon.foo.bar'] = self.metric_buffer | |
216 | ||
217 | with patch("time.time", new=Mock(return_value=current_interval + 60)): | |
218 | self.metric_buffer.compute_value() | |
219 | self.assertFalse('carbon.foo.bar' in BufferManager.buffers) | |
220 | ||
    def test_close_stops_looping_call(self):
        # NOTE(review): patching MetricBuffer.close and then calling close()
        # makes this assertion tautological -- the mock is always called and
        # the looping call is never actually exercised. Consider asserting on
        # the underlying task's stop() instead; confirm against MetricBuffer.
        with patch.object(MetricBuffer, 'close') as close_mock:
            self.metric_buffer.close()
            close_mock.assert_called_once_with()
0 | import unittest | |
1 | ||
2 | from carbon.aggregator.rules import AGGREGATION_METHODS | |
3 | ||
4 | PERCENTILE_METHODS = ['p999', 'p99', 'p95', 'p90', 'p80', 'p75', 'p50'] | |
5 | VALUES = [4, 8, 15, 16, 23, 42] | |
6 | ||
7 | ||
def almost_equal(a, b):
    """Return True when *a* and *b* differ by less than 1e-10."""
    return abs(b - a) < 1e-10
10 | ||
11 | ||
class AggregationMethodTest(unittest.TestCase):
    """Sanity checks for the percentile aggregation methods."""

    def test_percentile_simple(self):
        # A single-value series yields that value for every percentile.
        for method in PERCENTILE_METHODS:
            aggregate = AGGREGATION_METHODS[method]
            self.assertTrue(almost_equal(aggregate([1]), 1))

    def test_percentile_order(self):
        # Input ordering must not affect the computed percentile.
        for method in PERCENTILE_METHODS:
            aggregate = AGGREGATION_METHODS[method]
            self.assertTrue(almost_equal(aggregate([1, 2, 3, 4, 5]),
                                         aggregate([3, 2, 1, 4, 5])))

    def test_percentile_values(self):
        # Expected percentiles of VALUES = [4, 8, 15, 16, 23, 42].
        expected = {
            'p999': 41.905,
            'p99': 41.05,
            'p95': 37.25,
            'p90': 32.5,
            'p80': 23,
            'p75': 21.25,
            'p50': 15.5,
        }
        for method, result in expected.items():
            self.assertTrue(almost_equal(AGGREGATION_METHODS[method](VALUES), result))
0 | from mock import patch | |
1 | from unittest import TestCase | |
2 | ||
3 | from carbon import instrumentation | |
4 | from carbon.pipeline import Processor | |
5 | from carbon.aggregator.buffers import BufferManager | |
6 | from carbon.aggregator.rules import AggregationRule, RuleManager | |
7 | from carbon.aggregator.processor import AggregationProcessor | |
8 | ||
9 | ||
class AggregationProcessorTest(TestCase):
    """Behavioural tests for the 'aggregate' pipeline processor."""

    def setUp(self):
        self.sample_aggregation_rule = AggregationRule(r'^carbon.foo', r'carbon.foo.sum', 'sum', 1)
        self.sample_overwriting_aggregation_rule = \
            AggregationRule(r'^carbon.foo', r'carbon.foo', 'sum', 1)
        self.processor = AggregationProcessor()

    def tearDown(self):
        # Reset all module-level state touched by the processor.
        instrumentation.stats.clear()
        BufferManager.clear()
        RuleManager.clear()

    def _process(self):
        # Drain the processor generator for one datapoint and return the
        # (metric, datapoint) pairs that passed through.
        return list(self.processor.process('carbon.foo', (0, 0)))

    def test_registers_plugin(self):
        self.assertIn('aggregate', Processor.plugins)

    def test_process_increments_datapoints_metric(self):
        self._process()
        self.assertEqual(1, instrumentation.stats['datapointsReceived'])

    def test_unaggregated_metrics_pass_through_when_no_rules(self):
        self.assertEqual([('carbon.foo', (0, 0))], self._process())

    def test_unaggregated_metrics_pass_through(self):
        RuleManager.rules = [self.sample_aggregation_rule]
        self.assertEqual([('carbon.foo', (0, 0))], self._process())

    def test_aggregation_rule_checked(self):
        RuleManager.rules = [self.sample_aggregation_rule]
        with patch.object(self.sample_aggregation_rule, 'get_aggregate_metric'):
            self._process()
            self.sample_aggregation_rule.get_aggregate_metric.assert_called_once_with('carbon.foo')

    def test_new_buffer_configured(self):
        RuleManager.rules = [self.sample_aggregation_rule]
        self._process()
        values_buffer = BufferManager.get_buffer('carbon.foo.sum')

        self.assertTrue(values_buffer.configured)
        self.assertEqual(1, values_buffer.aggregation_frequency)
        self.assertEqual(sum, values_buffer.aggregation_func)

    def test_buffer_receives_value(self):
        RuleManager.rules = [self.sample_aggregation_rule]
        self._process()
        values_buffer = BufferManager.get_buffer('carbon.foo.sum')

        self.assertEqual([0], values_buffer.interval_buffers[0].values)

    def test_metric_not_passed_through_when_aggregate_overwrites(self):
        RuleManager.rules = [self.sample_overwriting_aggregation_rule]
        self.assertEqual([], self._process())
0 | import unittest | |
1 | from carbon.aggregator.rules import AggregationRule | |
2 | ||
3 | ||
class AggregationRuleTest(unittest.TestCase):

    def test_inclusive_regexes(self):
        """
        Test case for https://github.com/graphite-project/carbon/pull/120

        Consider the two rules:

            aggregated.hist.p99 (10) = avg hosts.*.hist.p99
            aggregated.hist.p999 (10) = avg hosts.*.hist.p999

        Before that patch, the second rule behaved as expected but the first
        also matched the p999 metrics, making aggregated.hist.p99 equivalent
        to avgSeries(hosts.*.hist.p99,hosts.*.hist.p999).
        """
        def make_rule(suffix):
            # Each rule aggregates one percentile suffix with avg every 10s.
            return AggregationRule('hosts.*.hist.' + suffix,
                                   'aggregated.hist.' + suffix, 'avg', 10)

        rule99 = make_rule('p99')
        rule999 = make_rule('p999')

        # The p99 rule matches only p99, never the longer p999 name.
        self.assertEqual('aggregated.hist.p99',
                         rule99.get_aggregate_metric('hosts.abc.hist.p99'))
        self.assertIsNone(rule99.get_aggregate_metric('hosts.abc.hist.p999'))

        # And vice versa for the p999 rule.
        self.assertIsNone(rule999.get_aggregate_metric('hosts.abc.hist.p99'))
        self.assertEqual('aggregated.hist.p999',
                         rule999.get_aggregate_metric('hosts.abc.hist.p999'))
0 | import time | |
1 | from unittest import TestCase | |
2 | from mock import Mock, PropertyMock, patch | |
3 | from carbon.cache import ( | |
4 | MetricCache, _MetricCache, DrainStrategy, MaxStrategy, RandomStrategy, SortedStrategy, | |
5 | TimeSortedStrategy | |
6 | ) | |
7 | ||
8 | ||
class MetricCacheTest(TestCase):
    """Tests for _MetricCache storage, draining, and fullness events."""

    def setUp(self):
        # Unbounded cache by default so individual tests control fullness.
        settings = {
            'MAX_CACHE_SIZE': float('inf'),
            'CACHE_SIZE_LOW_WATERMARK': float('inf')
        }
        self._settings_patch = patch.dict('carbon.conf.settings', settings)
        self._settings_patch.start()
        self.strategy_mock = Mock(spec=DrainStrategy)
        self.metric_cache = _MetricCache(self.strategy_mock)

    def tearDown(self):
        self._settings_patch.stop()

    def test_constructor(self):
        settings = {
            'CACHE_WRITE_STRATEGY': 'max',
        }
        settings_patch = patch.dict('carbon.conf.settings', settings)
        settings_patch.start()
        # BUG FIX: this patch was previously never stopped, leaking
        # CACHE_WRITE_STRATEGY into whatever test ran after this one.
        self.addCleanup(settings_patch.stop)
        cache = MetricCache()
        self.assertNotEqual(cache, None)
        self.assertTrue(isinstance(cache.strategy, MaxStrategy))

    def test_cache_is_a_dict(self):
        self.assertTrue(issubclass(_MetricCache, dict))

    def test_initial_size(self):
        self.assertEqual(0, self.metric_cache.size)

    def test_store_new_metric(self):
        self.metric_cache.store('foo', (123456, 1.0))
        self.assertEqual(1, self.metric_cache.size)
        self.assertEqual([(123456, 1.0)], list(self.metric_cache['foo'].items()))

    def test_store_multiple_datapoints(self):
        self.metric_cache.store('foo', (123456, 1.0))
        self.metric_cache.store('foo', (123457, 2.0))
        self.assertEqual(2, self.metric_cache.size)
        result = self.metric_cache['foo'].items()
        self.assertTrue((123456, 1.0) in result)
        self.assertTrue((123457, 2.0) in result)

    def test_store_duplicate_timestamp(self):
        # The later value for the same timestamp wins.
        self.metric_cache.store('foo', (123456, 1.0))
        self.metric_cache.store('foo', (123456, 2.0))
        self.assertEqual(1, self.metric_cache.size)
        self.assertEqual([(123456, 2.0)], list(self.metric_cache['foo'].items()))

    def test_store_checks_fullness(self):
        is_full_mock = PropertyMock()
        with patch.object(_MetricCache, 'is_full', is_full_mock):
            with patch('carbon.cache.events'):
                metric_cache = _MetricCache()
                metric_cache.store('foo', (123456, 1.0))
                self.assertEqual(1, is_full_mock.call_count)

    def test_store_on_full_triggers_events(self):
        is_full_mock = PropertyMock(return_value=True)
        with patch.object(_MetricCache, 'is_full', is_full_mock):
            with patch('carbon.cache.events') as events_mock:
                self.metric_cache.store('foo', (123456, 1.0))
                events_mock.cacheFull.assert_called_with()

    def test_pop_multiple_datapoints(self):
        self.metric_cache.store('foo', (123456, 1.0))
        self.metric_cache.store('foo', (123457, 2.0))
        result = self.metric_cache.pop('foo')
        self.assertTrue((123456, 1.0) in result)
        self.assertTrue((123457, 2.0) in result)

    def test_pop_reduces_size(self):
        self.metric_cache.store('foo', (123456, 1.0))
        self.metric_cache.store('foo', (123457, 2.0))
        self.metric_cache.pop('foo')
        self.assertEqual(0, self.metric_cache.size)

    def test_pop_triggers_space_check(self):
        with patch.object(self.metric_cache, '_check_available_space') as check_space_mock:
            self.metric_cache.store('foo', (123456, 1.0))
            self.metric_cache.pop('foo')
            self.assertEqual(1, check_space_mock.call_count)

    def test_pop_triggers_space_event(self):
        with patch('carbon.state.cacheTooFull', new=Mock(return_value=True)):
            with patch('carbon.cache.events') as events_mock:
                self.metric_cache.store('foo', (123456, 1.0))
                self.metric_cache.pop('foo')
                events_mock.cacheSpaceAvailable.assert_called_with()

    def test_pop_returns_sorted_timestamps(self):
        self.metric_cache.store('foo', (123457, 2.0))
        self.metric_cache.store('foo', (123458, 3.0))
        self.metric_cache.store('foo', (123456, 1.0))
        result = self.metric_cache.pop('foo')
        expected = [(123456, 1.0), (123457, 2.0), (123458, 3.0)]
        self.assertEqual(expected, result)

    def test_pop_raises_on_missing(self):
        self.assertRaises(KeyError, self.metric_cache.pop, 'foo')

    def test_get_datapoints(self):
        self.metric_cache.store('foo', (123456, 1.0))
        self.assertEqual([(123456, 1.0)], self.metric_cache.get_datapoints('foo'))

    def test_get_datapoints_doesnt_pop(self):
        self.metric_cache.store('foo', (123456, 1.0))
        self.assertEqual([(123456, 1.0)], self.metric_cache.get_datapoints('foo'))
        self.assertEqual(1, self.metric_cache.size)
        self.assertEqual([(123456, 1.0)], self.metric_cache.get_datapoints('foo'))

    def test_get_datapoints_returns_empty_on_missing(self):
        self.assertEqual([], self.metric_cache.get_datapoints('foo'))

    def test_get_datapoints_returns_sorted_timestamps(self):
        self.metric_cache.store('foo', (123457, 2.0))
        self.metric_cache.store('foo', (123458, 3.0))
        self.metric_cache.store('foo', (123456, 1.0))
        result = self.metric_cache.get_datapoints('foo')
        expected = [(123456, 1.0), (123457, 2.0), (123458, 3.0)]
        self.assertEqual(expected, result)

    def test_drain_metric_respects_strategy(self):
        self.metric_cache.store('foo', (123456, 1.0))
        self.metric_cache.store('bar', (123456, 1.0))
        self.metric_cache.store('baz', (123456, 1.0))
        self.strategy_mock.return_value.choose_item.side_effect = ['bar', 'baz', 'foo']
        self.assertEqual('bar', self.metric_cache.drain_metric()[0])
        self.assertEqual('baz', self.metric_cache.drain_metric()[0])
        self.assertEqual('foo', self.metric_cache.drain_metric()[0])

    def test_drain_metric_works_without_strategy(self):
        metric_cache = _MetricCache()  # No strategy

        metric_cache.store('foo', (123456, 1.0))
        self.assertEqual('foo', metric_cache.drain_metric()[0])

    def test_is_full_short_circuits_on_inf(self):
        with patch.object(self.metric_cache, 'size') as size_mock:
            self.metric_cache.is_full
            size_mock.assert_not_called()

    def test_is_full(self):
        # BUG FIX: the old version mutated self._settings_patch.values and
        # called start() a second time on the already-active setUp patcher;
        # tearDown() then stopped it only once, leaking MAX_CACHE_SIZE=2.0
        # into carbon.conf.settings. Use an independent, properly-scoped
        # patch.dict instead.
        with patch.dict('carbon.conf.settings', {'MAX_CACHE_SIZE': 2.0}):
            with patch('carbon.cache.events'):
                self.assertFalse(self.metric_cache.is_full)
                self.metric_cache.store('foo', (123456, 1.0))
                self.assertFalse(self.metric_cache.is_full)
                self.metric_cache.store('foo', (123457, 1.0))
                self.assertTrue(self.metric_cache.is_full)

    def test_counts_one_datapoint(self):
        self.metric_cache.store('foo', (123456, 1.0))
        self.assertEqual([('foo', 1)], self.metric_cache.counts)

    def test_counts_two_datapoints(self):
        self.metric_cache.store('foo', (123456, 1.0))
        self.metric_cache.store('foo', (123457, 2.0))
        self.assertEqual([('foo', 2)], self.metric_cache.counts)

    def test_counts_multiple_datapoints(self):
        self.metric_cache.store('foo', (123456, 1.0))
        self.metric_cache.store('foo', (123457, 2.0))
        self.metric_cache.store('bar', (123458, 3.0))
        self.assertTrue(('foo', 2) in self.metric_cache.counts)
        self.assertTrue(('bar', 1) in self.metric_cache.counts)
176 | ||
177 | ||
class DrainStrategyTest(TestCase):
    """Tests for the cache drain-ordering strategies."""

    def setUp(self):
        self.metric_cache = _MetricCache()

    def test_max_strategy(self):
        # MaxStrategy always picks the metric with the most datapoints.
        self.metric_cache.store('foo', (123456, 1.0))
        self.metric_cache.store('foo', (123457, 2.0))
        self.metric_cache.store('foo', (123458, 3.0))
        self.metric_cache.store('bar', (123459, 4.0))
        self.metric_cache.store('bar', (123460, 5.0))
        self.metric_cache.store('baz', (123461, 6.0))

        max_strategy = MaxStrategy(self.metric_cache)
        # foo has 3
        self.assertEqual('foo', max_strategy.choose_item())
        # add 2 more 'bar' for 4 total
        self.metric_cache.store('bar', (123462, 8.0))
        self.metric_cache.store('bar', (123463, 9.0))
        self.assertEqual('bar', max_strategy.choose_item())

        self.metric_cache.pop('foo')
        self.metric_cache.pop('bar')
        self.assertEqual('baz', max_strategy.choose_item())

    def test_sorted_strategy_static_cache(self):
        self.metric_cache.store('foo', (123456, 1.0))
        self.metric_cache.store('foo', (123457, 2.0))
        self.metric_cache.store('foo', (123458, 3.0))
        self.metric_cache.store('bar', (123459, 4.0))
        self.metric_cache.store('bar', (123460, 5.0))
        self.metric_cache.store('baz', (123461, 6.0))

        sorted_strategy = SortedStrategy(self.metric_cache)
        # In order from most to least
        self.assertEqual('foo', sorted_strategy.choose_item())
        self.assertEqual('bar', sorted_strategy.choose_item())
        self.assertEqual('baz', sorted_strategy.choose_item())

    def test_sorted_strategy_changing_sizes(self):
        self.metric_cache.store('foo', (123456, 1.0))
        self.metric_cache.store('foo', (123457, 2.0))
        self.metric_cache.store('foo', (123458, 3.0))
        self.metric_cache.store('bar', (123459, 4.0))
        self.metric_cache.store('bar', (123460, 5.0))
        self.metric_cache.store('baz', (123461, 6.0))

        sorted_strategy = SortedStrategy(self.metric_cache)
        # In order from most to least foo, bar, baz
        self.assertEqual('foo', sorted_strategy.choose_item())

        # 'baz' gets 2 more, now greater than 'bar'
        self.metric_cache.store('baz', (123461, 6.0))
        self.metric_cache.store('baz', (123461, 6.0))
        # But 'bar' is popped anyway, because sort has already happened
        self.assertEqual('bar', sorted_strategy.choose_item())
        self.assertEqual('baz', sorted_strategy.choose_item())

        # Sort happens again
        self.assertEqual('foo', sorted_strategy.choose_item())
        self.assertEqual('bar', sorted_strategy.choose_item())
        self.assertEqual('baz', sorted_strategy.choose_item())

    def test_time_sorted_strategy(self):
        self.metric_cache.store('foo', (123456, 1.0))
        self.metric_cache.store('foo', (123457, 2.0))
        self.metric_cache.store('foo', (123458, 3.0))
        self.metric_cache.store('bar', (123459, 4.0))
        self.metric_cache.store('bar', (123460, 5.0))
        self.metric_cache.store('baz', (123461, 6.0))

        time_sorted_strategy = TimeSortedStrategy(self.metric_cache)
        # In order: foo, bar, baz
        self.assertEqual('foo', time_sorted_strategy.choose_item())

        # 'baz' gets older points.
        self.metric_cache.store('baz', (123450, 6.0))
        self.metric_cache.store('baz', (123451, 6.0))
        # But 'bar' is popped anyway, because sort has already happened
        self.assertEqual('bar', time_sorted_strategy.choose_item())
        self.assertEqual('baz', time_sorted_strategy.choose_item())

        # Sort happens again
        self.assertEqual('baz', time_sorted_strategy.choose_item())
        self.assertEqual('foo', time_sorted_strategy.choose_item())
        self.assertEqual('bar', time_sorted_strategy.choose_item())

    def test_time_sorted_strategy_min_lag(self):
        settings = {
            'MIN_TIMESTAMP_LAG': 5,
        }
        settings_patch = patch.dict('carbon.conf.settings', settings)
        settings_patch.start()
        # BUG FIX: this patch was never stopped, leaking MIN_TIMESTAMP_LAG
        # into every test that runs after this one.
        self.addCleanup(settings_patch.stop)

        now = time.time()
        self.metric_cache.store('old', (now - 10, 1.0))
        self.metric_cache.store('new', (now, 2.0))

        time_sorted_strategy = TimeSortedStrategy(self.metric_cache)
        self.assertEqual('old', time_sorted_strategy.choose_item())
        self.metric_cache.pop('old')
        self.assertEqual(None, time_sorted_strategy.choose_item())
279 | ||
280 | ||
class RandomStrategyTest(TestCase):
    """RandomStrategy must only ever hand back metrics present in the cache."""

    def setUp(self):
        self.metric_cache = _MetricCache()

    def test_random_strategy(self):
        for name, point in [('foo', (123456, 1.0)),
                            ('bar', (123457, 2.0)),
                            ('baz', (123458, 3.0))]:
            self.metric_cache.store(name, point)

        strategy = RandomStrategy(self.metric_cache)
        # Drain all three entries; every pick must be a live cache key.
        for _ in range(3):
            choice = strategy.choose_item()
            self.assertIn(choice, self.metric_cache)
            self.metric_cache.pop(choice)
0 | import carbon.client as carbon_client | |
1 | from carbon.client import ( | |
2 | CarbonPickleClientFactory, CarbonPickleClientProtocol, CarbonLineClientProtocol, | |
3 | CarbonClientManager, RelayProcessor | |
4 | ) | |
5 | from carbon.routers import DatapointRouter | |
6 | from carbon.tests.util import TestSettings | |
7 | import carbon.service # NOQA | |
8 | ||
9 | from twisted.internet import reactor | |
10 | from twisted.internet.defer import Deferred | |
11 | from twisted.internet.base import DelayedCall | |
12 | from twisted.internet.task import deferLater | |
13 | from twisted.trial.unittest import TestCase | |
14 | from twisted.test.proto_helpers import StringTransport | |
15 | ||
16 | from mock import Mock, patch | |
17 | from pickle import loads as pickle_loads | |
18 | from struct import unpack, calcsize | |
19 | ||
20 | ||
# Pickle frames on the wire are length-prefixed with a 4-byte big-endian
# unsigned int; these constants describe that framing.
INT32_FORMAT = '!I'
INT32_SIZE = calcsize(INT32_FORMAT)
23 | ||
24 | ||
def decode_sent(data):
    """Strip the 4-byte length prefix from *data* and unpickle the payload."""
    (payload_length,) = unpack(INT32_FORMAT, data[:INT32_SIZE])
    payload = data[INT32_SIZE:INT32_SIZE + payload_length]
    return pickle_loads(payload)
28 | ||
29 | ||
class BroadcastRouter(DatapointRouter):
    """Test double that fans every datapoint out to all destinations."""

    def __init__(self, destinations=()):
        # Copy into a fresh set per instance; an immutable default also
        # avoids the shared-mutable-default pitfall of `destinations=[]`.
        self.destinations = set(destinations)

    def addDestination(self, destination):
        # BUG FIX: self.destinations is a set, which has no append();
        # the previous `.append(...)` raised AttributeError whenever this
        # method was exercised.
        self.destinations.add(destination)

    def removeDestination(self, destination):
        self.destinations.discard(destination)

    def getDestinations(self, key):
        # Every destination receives every key.
        for destination in self.destinations:
            yield destination
43 | ||
44 | ||
class ConnectedCarbonClientProtocolTest(TestCase):
    def setUp(self):
        self.router_mock = Mock(spec=DatapointRouter)
        carbon_client.settings = TestSettings()  # reset to defaults
        factory = CarbonPickleClientFactory(('127.0.0.1', 2003, 'a'), self.router_mock)
        self.protocol = factory.buildProtocol(('127.0.0.1', 2003))
        self.transport = StringTransport()
        self.protocol.makeConnection(self.transport)

    def test_send_datapoint(self):
        datapoint = ('foo.bar', (1000000000, 1.0))

        def assert_sent():
            # The framed wire payload should unpickle back to what was sent.
            self.assertEqual([datapoint], decode_sent(self.transport.value()))

        self.protocol.sendDatapoint(*datapoint)
        return deferLater(reactor, 0.1, assert_sent)
63 | ||
64 | ||
class CarbonLineClientProtocolTest(TestCase):
    def setUp(self):
        self.protocol = CarbonLineClientProtocol()
        self.protocol.sendLine = Mock()

    def test_send_datapoints(self):
        # (datapoint, expected serialized line) pairs covering int, float,
        # precision-truncated float, and large-exponent values.
        cases = [
            (('foo.bar', (1000000000, 1.0)), b'foo.bar 1 1000000000'),
            (('foo.bar', (1000000000, 1.1)), b'foo.bar 1.1 1000000000'),
            (('foo.bar', (1000000000, 1.123456789123)), b'foo.bar 1.1234567891 1000000000'),
            (('foo.bar', (1000000000, 1)), b'foo.bar 1 1000000000'),
            (('foo.bar', (1000000000, 1.498566361088E12)), b'foo.bar 1498566361088 1000000000'),
        ]

        for expected_count, (datapoint, expected_line) in enumerate(cases, start=1):
            self.protocol._sendDatapointsNow([datapoint])
            self.assertEqual(self.protocol.sendLine.call_count, expected_count)
            self.protocol.sendLine.assert_called_with(expected_line)
86 | ||
87 | ||
class CarbonClientFactoryTest(TestCase):
    """Tests for CarbonPickleClientFactory send scheduling and queueing."""

    def setUp(self):
        self.router_mock = Mock(spec=DatapointRouter)
        self.protocol_mock = Mock(spec=CarbonPickleClientProtocol)
        self.protocol_patch = patch(
            'carbon.client.CarbonPickleClientProtocol', new=Mock(return_value=self.protocol_mock))
        self.protocol_patch.start()
        carbon_client.settings = TestSettings()
        self.factory = CarbonPickleClientFactory(('127.0.0.1', 2003, 'a'), self.router_mock)
        self.connected_factory = CarbonPickleClientFactory(('127.0.0.1', 2003, 'a'), self.router_mock)
        self.connected_factory.buildProtocol(None)
        self.connected_factory.started = True

    def tearDown(self):
        # Cancel any send that is still scheduled so the reactor stays clean.
        if self.factory.deferSendPending and self.factory.deferSendPending.active():
            self.factory.deferSendPending.cancel()
        self.protocol_patch.stop()

    def test_schedule_send_schedules_call_to_send_queued(self):
        self.factory.scheduleSend()
        self.assertIsInstance(self.factory.deferSendPending, DelayedCall)
        self.assertTrue(self.factory.deferSendPending.active())

    def test_schedule_send_ignores_already_scheduled(self):
        """A second scheduleSend() must not reset the pending timer."""
        self.factory.scheduleSend()
        expected_fire_time = self.factory.deferSendPending.getTime()
        self.factory.scheduleSend()
        # BUG FIX: assertTrue(a, b) treats b as a failure message and always
        # passed on any truthy a; the intent is to compare the fire times.
        self.assertEqual(expected_fire_time, self.factory.deferSendPending.getTime())

    def test_send_queued_should_noop_if_not_connected(self):
        self.factory.scheduleSend()
        self.assertFalse(self.protocol_mock.sendQueued.called)

    def test_send_queued_should_call_protocol_send_queued(self):
        self.connected_factory.sendQueued()
        self.protocol_mock.sendQueued.assert_called_once_with()
124 | ||
125 | ||
class CarbonClientManagerTest(TestCase):
    timeout = 1.0

    # Destination tuple shared by every test: (host, port, instance).
    DEST = ('127.0.0.1', 2003, 'a')

    def setUp(self):
        self.router_mock = Mock(spec=DatapointRouter)
        self.factory_mock = Mock(spec=CarbonPickleClientFactory)
        self.client_mgr = CarbonClientManager(self.router_mock)
        self.client_mgr.createFactory = lambda dest: self.factory_mock(dest, self.router_mock)

    def test_start_service_installs_sig_ignore(self):
        from signal import SIGHUP, SIG_IGN

        with patch('signal.signal', new=Mock()) as signal_mock:
            self.client_mgr.startService()
        signal_mock.assert_called_once_with(SIGHUP, SIG_IGN)

    def test_start_service_starts_factory_connect(self):
        factory_mock = Mock(spec=CarbonPickleClientFactory)
        factory_mock.started = False
        self.client_mgr.client_factories[self.DEST] = factory_mock
        self.client_mgr.startService()
        factory_mock.startConnecting.assert_called_once_with()

    def test_stop_service_waits_for_clients_to_disconnect(self):
        self.client_mgr.startService()
        self.client_mgr.startClient(self.DEST)

        disconnect_deferred = Deferred()
        reactor.callLater(0.1, disconnect_deferred.callback, 0)
        self.factory_mock.return_value.disconnect.return_value = disconnect_deferred
        return self.client_mgr.stopService()

    def test_start_client_instantiates_client_factory(self):
        self.client_mgr.startClient(self.DEST)
        self.factory_mock.assert_called_once_with(self.DEST, self.router_mock)

    def test_start_client_ignores_duplicate(self):
        self.client_mgr.startClient(self.DEST)
        self.client_mgr.startClient(self.DEST)
        self.factory_mock.assert_called_once_with(self.DEST, self.router_mock)

    def test_start_client_starts_factory_if_running(self):
        self.client_mgr.startService()
        self.client_mgr.startClient(self.DEST)
        self.factory_mock.return_value.startConnecting.assert_called_once_with()

    def test_start_client_adds_destination_to_router(self):
        self.client_mgr.startClient(self.DEST)
        self.router_mock.addDestination.assert_called_once_with(self.DEST)

    def test_stop_client_removes_destination_from_router(self):
        self.client_mgr.startClient(self.DEST)
        self.client_mgr.stopClient(self.DEST)
        self.router_mock.removeDestination.assert_called_once_with(self.DEST)
186 | ||
187 | ||
class RelayProcessorTest(TestCase):
    timeout = 1.0

    def setUp(self):
        carbon_client.settings = TestSettings()  # reset to defaults
        self.client_mgr_mock = Mock(spec=CarbonClientManager)
        self.client_mgr_patch = patch(
            'carbon.state.client_manager', new=self.client_mgr_mock)
        self.client_mgr_patch.start()

    def tearDown(self):
        self.client_mgr_patch.stop()

    def _relay(self, normalized):
        # Route one tagged datapoint through a RelayProcessor configured
        # with the requested TAG_RELAY_NORMALIZED setting.
        carbon_client.settings.TAG_RELAY_NORMALIZED = normalized
        RelayProcessor().process('my.metric;foo=a;bar=b', (0.0, 0.0))

    def test_relay_normalized(self):
        # Tags are re-ordered into the normalized (sorted) form.
        self._relay(True)
        self.client_mgr_mock.sendDatapoint.assert_called_once_with('my.metric;bar=b;foo=a', (0.0, 0.0))

    def test_relay_unnormalized(self):
        # Tags pass through untouched.
        self._relay(False)
        self.client_mgr_mock.sendDatapoint.assert_called_once_with('my.metric;foo=a;bar=b', (0.0, 0.0))
0 | import os | |
1 | from tempfile import mkdtemp, mkstemp | |
2 | from shutil import rmtree | |
3 | from os import makedirs | |
4 | from os.path import dirname, join | |
5 | from unittest import TestCase | |
6 | from carbon.conf import get_default_parser, parse_options, read_config | |
7 | from carbon.exceptions import CarbonConfigException | |
8 | ||
9 | ||
class FakeParser(object):
    """Minimal optparse-parser stand-in that records which methods ran."""

    def __init__(self):
        # Names of methods invoked on this instance, in call order.
        self.called = []

    def parse_args(self, args):
        """Return a dummy options object and the args untouched."""
        return object(), args

    def print_usage(self):
        self.called.append("print_usage")
20 | ||
21 | ||
class FakeOptions(object):
    """Attribute bag that is also readable and writable via item access."""

    def __init__(self, **kwargs):
        vars(self).update(kwargs)

    def __getitem__(self, name):
        return vars(self)[name]

    def __setitem__(self, name, value):
        vars(self)[name] = value
32 | ||
33 | ||
class DefaultParserTest(TestCase):

    def test_default_parser(self):
        """Check default parser settings."""
        parser = get_default_parser()
        # Every standard option exists; all default to None except --instance.
        expected_defaults = [
            ("--debug", "debug", None),
            ("--profile", "profile", None),
            ("--pidfile", "pidfile", None),
            ("--config", "config", None),
            ("--logdir", "logdir", None),
            ("--instance", "instance", "a"),
        ]
        for flag, key, default in expected_defaults:
            self.assertTrue(parser.has_option(flag))
            self.assertEqual(default, parser.defaults[key])
51 | ||
52 | ||
class ParseOptionsTest(TestCase):

    def test_no_args_prints_usage_and_exit(self):
        """With no arguments, usage is printed and SystemExit is raised."""
        parser = FakeParser()
        with self.assertRaises(SystemExit):
            parse_options(parser, ())
        self.assertEqual(["print_usage"], parser.called)

    def test_no_valid_args_prints_usage_and_exit(self):
        """An argument that is not a valid command also prints usage and
        raises SystemExit."""
        parser = FakeParser()
        with self.assertRaises(SystemExit):
            parse_options(parser, ("bazinga!",))
        self.assertEqual(["print_usage"], parser.called)

    def test_valid_args(self):
        """A valid command is returned along with the parsed options."""
        parser = FakeParser()
        options, args = parser.parse_args(("start",))
        self.assertEqual(("start",), args)
81 | ||
82 | ||
class ReadConfigTest(TestCase):
    """Tests for read_config() default-setting and precedence rules."""

    def makeFile(self, content=None, basename=None, dirname=None):
        """
        Create a temporary file with content

        Deletes the file after tests
        """
        if basename is not None:
            path = join(dirname, basename)
        else:
            fd, path = mkstemp(dir=dirname)
            os.close(fd)
        self.addCleanup(os.unlink, path)

        if content is not None:
            with open(path, "w") as f:
                f.write(content)

        return path

    def readConfig(self, content="[foo]", instance=None, pidfile=None,
                   logdir=None, **defaults):
        """
        Helper: write *content* to a temporary config file and run
        read_config() for program 'carbon-foo' against it.

        The config file path is saved on self.config_path for tests that
        need it.  Extra keyword arguments (ROOT_DIR, STORAGE_DIR, ...)
        are passed through to read_config() as setting defaults.
        """
        self.config_path = self.makeFile(content=content)
        return read_config(
            "carbon-foo",
            FakeOptions(config=self.config_path, instance=instance,
                        pidfile=pidfile, logdir=logdir),
            **defaults)

    def test_root_dir_is_required(self):
        """
        At minimum, the caller must provide a 'ROOT_DIR' setting.
        """
        try:
            read_config("carbon-foo", FakeOptions(config=None))
        except CarbonConfigException as e:
            self.assertEqual("Either ROOT_DIR or GRAPHITE_ROOT "
                             "needs to be provided.", str(e))
        else:
            self.fail("Did not raise exception.")

    def test_config_is_not_required(self):
        """
        If the '--config' option is not provided, it defaults to
        ROOT_DIR/conf/carbon.conf.
        """
        root_dir = mkdtemp()
        self.addCleanup(rmtree, root_dir)
        conf_dir = join(root_dir, "conf")
        makedirs(conf_dir)
        self.makeFile(content="[foo]",
                      basename="carbon.conf",
                      dirname=conf_dir)
        options = FakeOptions(config=None, instance=None,
                              pidfile=None, logdir=None)
        read_config("carbon-foo", options, ROOT_DIR=root_dir)
        self.assertEqual(join(root_dir, "conf", "carbon.conf"),
                         options["config"])

    def test_config_dir_from_environment(self):
        """
        If the 'GRAPHITE_CONFIG_DIR' variable is set in the environment, then
        'CONFIG_DIR' will be set to that directory.
        """
        root_dir = mkdtemp()
        self.addCleanup(rmtree, root_dir)
        conf_dir = join(root_dir, "configs", "production")
        makedirs(conf_dir)
        self.makeFile(content="[foo]",
                      basename="carbon.conf",
                      dirname=conf_dir)
        # Restore (or remove) the environment variable afterwards so the
        # test does not leak state into other tests.
        orig_value = os.environ.get("GRAPHITE_CONF_DIR", None)
        if orig_value is not None:
            self.addCleanup(os.environ.__setitem__,
                            "GRAPHITE_CONF_DIR",
                            orig_value)
        else:
            self.addCleanup(os.environ.__delitem__, "GRAPHITE_CONF_DIR")
        os.environ["GRAPHITE_CONF_DIR"] = conf_dir
        settings = read_config("carbon-foo",
                               FakeOptions(config=None, instance=None,
                                           pidfile=None, logdir=None),
                               ROOT_DIR=root_dir)
        self.assertEqual(conf_dir, settings.CONF_DIR)

    def test_conf_dir_defaults_to_config_dirname(self):
        """
        The 'CONF_DIR' setting defaults to the parent directory of the
        provided configuration file.
        """
        settings = self.readConfig(ROOT_DIR="foo")
        self.assertEqual(dirname(self.config_path), settings.CONF_DIR)

    def test_storage_dir_relative_to_root_dir(self):
        """
        The 'STORAGE_DIR' setting defaults to the 'storage' directory relative
        to the 'ROOT_DIR' setting.
        """
        settings = self.readConfig(ROOT_DIR="foo")
        self.assertEqual(join("foo", "storage"), settings.STORAGE_DIR)

    def test_log_dir_relative_to_storage_dir(self):
        """
        The 'LOG_DIR' setting defaults to a program-specific directory relative
        to the 'STORAGE_DIR' setting.
        """
        settings = self.readConfig(ROOT_DIR="foo")
        self.assertEqual(join("foo", "storage", "log", "carbon-foo"),
                         settings.LOG_DIR)

    def test_log_dir_relative_to_provided_storage_dir(self):
        """
        Providing a different 'STORAGE_DIR' in defaults overrides the default
        of being relative to 'ROOT_DIR'.
        """
        settings = self.readConfig(ROOT_DIR="foo", STORAGE_DIR="bar")
        self.assertEqual(join("bar", "log", "carbon-foo"),
                         settings.LOG_DIR)

    def test_log_dir_for_instance_relative_to_storage_dir(self):
        """
        The 'LOG_DIR' setting defaults to a program-specific directory relative
        to the 'STORAGE_DIR' setting. In the case of an instance, the instance
        name is appended to the directory.
        """
        settings = self.readConfig(instance="x", ROOT_DIR="foo")
        self.assertEqual(join("foo", "storage", "log",
                              "carbon-foo", "carbon-foo-x"),
                         settings.LOG_DIR)

    def test_log_dir_for_instance_relative_to_provided_storage_dir(self):
        """
        Providing a different 'STORAGE_DIR' in defaults overrides the default
        of being relative to 'ROOT_DIR'. In the case of an instance, the
        instance name is appended to the directory.
        """
        settings = self.readConfig(instance="x",
                                   ROOT_DIR="foo", STORAGE_DIR="bar")
        self.assertEqual(join("bar", "log", "carbon-foo", "carbon-foo-x"),
                         settings.LOG_DIR)

    def test_pidfile_relative_to_storage_dir(self):
        """
        The 'pidfile' setting defaults to a program-specific filename relative
        to the 'STORAGE_DIR' setting.
        """
        settings = self.readConfig(ROOT_DIR="foo")
        self.assertEqual(join("foo", "storage", "carbon-foo.pid"),
                         settings.pidfile)

    def test_pidfile_in_options_has_precedence(self):
        """
        The 'pidfile' option from command line overrides the default setting.
        """
        settings = self.readConfig(pidfile="foo.pid", ROOT_DIR="foo")
        self.assertEqual("foo.pid", settings.pidfile)

    def test_pidfile_for_instance_in_options_has_precedence(self):
        """
        The 'pidfile' option from command line overrides the default setting
        for the instance, if one is specified.
        """
        settings = self.readConfig(instance="x", pidfile="foo.pid",
                                   ROOT_DIR="foo")
        self.assertEqual("foo.pid", settings.pidfile)

    def test_storage_dir_as_provided(self):
        """
        Providing a 'STORAGE_DIR' in defaults overrides the root-relative
        default.
        """
        settings = self.readConfig(ROOT_DIR="foo", STORAGE_DIR="bar")
        self.assertEqual("bar", settings.STORAGE_DIR)

    def test_log_dir_as_provided(self):
        """
        Providing a 'LOG_DIR' in defaults overrides the storage-relative
        default.
        """
        settings = self.readConfig(ROOT_DIR="foo", STORAGE_DIR="bar",
                                   LOG_DIR='baz')
        self.assertEqual("baz", settings.LOG_DIR)

    def test_log_dir_from_options(self):
        """
        Providing a 'LOG_DIR' in the command line overrides the
        storage-relative default.
        """
        settings = self.readConfig(logdir="baz", ROOT_DIR="foo")
        self.assertEqual("baz", settings.LOG_DIR)

    def test_log_dir_for_instance_from_options(self):
        """
        Providing a 'LOG_DIR' in the command line overrides the
        storage-relative default for the instance.
        """
        settings = self.readConfig(instance="x", logdir="baz", ROOT_DIR="foo")
        self.assertEqual("baz", settings.LOG_DIR)

    def test_storage_dir_from_config(self):
        """
        Providing a 'STORAGE_DIR' in the configuration file overrides the
        root-relative default.
        """
        settings = self.readConfig(content="[foo]\nSTORAGE_DIR = bar",
                                   ROOT_DIR="foo")
        self.assertEqual("bar", settings.STORAGE_DIR)

    def test_log_dir_from_config(self):
        """
        Providing a 'LOG_DIR' in the configuration file overrides the
        storage-relative default.
        """
        settings = self.readConfig(content="[foo]\nLOG_DIR = baz",
                                   ROOT_DIR="foo")
        self.assertEqual("baz", settings.LOG_DIR)

    def test_log_dir_from_instance_config(self):
        """
        Providing a 'LOG_DIR' for the specific instance in the configuration
        file overrides the storage-relative default. The actual value will have
        the instance name appended to it and ends with a forward slash.
        """
        settings = self.readConfig(
            content=("[foo]\nLOG_DIR = baz\n"
                     "[foo:x]\nLOG_DIR = boo"),
            instance="x", ROOT_DIR="foo")
        self.assertEqual("boo/carbon-foo-x", settings.LOG_DIR)

    def test_pid_dir_depends_on_storage_dir(self):
        """
        Tests 'STORAGE_DIR' dependency 'PID_DIR'
        """
        settings = self.readConfig(content="[foo]\nSTORAGE_DIR = bar",
                                   ROOT_DIR="foo")
        self.assertEqual("bar", settings.PID_DIR)

    def test_log_dir_depends_on_storage_dir(self):
        """
        Tests 'STORAGE_DIR' dependency 'LOG_DIR'
        """
        settings = self.readConfig(content="[foo]\nSTORAGE_DIR = bar",
                                   ROOT_DIR="foo")
        self.assertEqual(join("bar", "log", "carbon-foo"), settings.LOG_DIR)

    def test_local_data_dir_depends_on_storage_dir(self):
        """
        Tests 'STORAGE_DIR' dependency 'LOCAL_DATA_DIR'
        """
        settings = self.readConfig(content="[foo]\nSTORAGE_DIR = bar",
                                   ROOT_DIR="foo")
        self.assertEqual(join("bar", "whisper"), settings.LOCAL_DATA_DIR)

    def test_whitelists_dir_depends_on_storage_dir(self):
        """
        Tests 'STORAGE_DIR' dependency 'WHITELISTS_DIR'
        """
        settings = self.readConfig(content="[foo]\nSTORAGE_DIR = bar",
                                   ROOT_DIR="foo")
        self.assertEqual(join("bar", "lists"), settings.WHITELISTS_DIR)
0 | import os | |
1 | from unittest import TestCase | |
2 | from mock import patch | |
3 | from os.path import exists | |
4 | import shutil | |
5 | ||
6 | from carbon.tests.util import TestSettings | |
7 | from carbon.database import WhisperDatabase, CeresDatabase | |
8 | ||
9 | ||
class WhisperDatabaseTest(TestCase):
    """Filesystem-path mapping and tagged-metric migration tests for the
    whisper database backend."""

    def setUp(self):
        # Force os.path.sep to "/" so the literal expected paths below
        # hold regardless of the host platform.
        self._sep_patch = patch.object(os.path, 'sep', "/")
        self._sep_patch.start()

    def tearDown(self):
        self._sep_patch.stop()

    def test_getFilesystemPath(self):
        # An untagged metric maps each dot-separated component to a
        # directory under LOCAL_DATA_DIR, with a .wsp file at the leaf.
        settings = TestSettings()
        settings['LOCAL_DATA_DIR'] = '/tmp/'
        database = WhisperDatabase(settings)
        result = database.getFilesystemPath('stats.example.counts')
        self.assertEqual(result, '/tmp/stats/example/counts.wsp')  # nosec

    def test_getTaggedFilesystemPath(self):
        metric = 'stats.example.counts;tag1=value1'

        settings = TestSettings()
        settings['LOCAL_DATA_DIR'] = '/tmp/'
        settings['TAG_HASH_FILENAMES'] = False
        database = WhisperDatabase(settings)

        # With hashing disabled the tagged metric keeps a readable name
        # (dots encoded as _DOT_) under a pair of directories derived
        # from the leading characters of the metric hash (872/252).
        result = database.getFilesystemPath(metric)
        self.assertEqual(
            result, '/tmp/_tagged/872/252/stats_DOT_example_DOT_counts;tag1=value1.wsp')  # nosec

        result = database.exists(metric)
        self.assertEqual(result, False)

    def test_getTaggedFilesystemPathHashed(self):
        metric = 'stats.example.counts;tag1=value1'

        settings = TestSettings()
        settings['LOCAL_DATA_DIR'] = '/tmp/'
        settings['TAG_HASH_FILENAMES'] = True
        database = WhisperDatabase(settings)

        # With hashing enabled the filename is the full metric hash.
        result = database.getFilesystemPath(metric)
        self.assertEqual(
            result,
            '/tmp/_tagged/872/252/' +  # nosec
            '872252dcead671982862f82a3b440f02aa8f525dd6d0f2921de0dc2b3e874ad0.wsp')

        result = database.exists(metric)
        self.assertEqual(result, False)

    def test_migrateTaggedFilesystemPathHashed(self):
        metric = 'stats.example.counts;tag1=value1'

        settings = TestSettings()
        settings['LOCAL_DATA_DIR'] = '/tmp/'
        settings['TAG_HASH_FILENAMES'] = False
        database = WhisperDatabase(settings)

        result = database.exists(metric)
        self.assertEqual(result, False)

        # Create the metric under its old (unhashed) filename first.
        old_path = database.getFilesystemPath(metric)
        self.assertEqual(
            old_path, '/tmp/_tagged/872/252/stats_DOT_example_DOT_counts;tag1=value1.wsp')  # nosec

        self.assertEqual(exists(old_path), False)

        result = database.create(metric, [(60, 60)], 0.5, 'average')

        self.assertEqual(exists(old_path), True)

        result = database.exists(metric)
        self.assertEqual(result, True)

        # Re-open the database with hashed filenames enabled.
        settings['TAG_HASH_FILENAMES'] = True
        database = WhisperDatabase(settings)

        hashed_path = database.getFilesystemPath(metric)
        self.assertEqual(
            hashed_path,
            '/tmp/_tagged/872/252/' +  # nosec
            '872252dcead671982862f82a3b440f02aa8f525dd6d0f2921de0dc2b3e874ad0.wsp')

        self.assertEqual(exists(hashed_path), False)

        # exists() is expected to migrate the old-style file to the
        # hashed path as a side effect (verified by the assertions below).
        result = database.exists(metric)
        self.assertEqual(result, True)

        self.assertEqual(exists(old_path), False)
        self.assertEqual(exists(hashed_path), True)

        os.remove(hashed_path)
100 | ||
101 | ||
class CeresDatabaseTest(TestCase):
    """Filesystem-path mapping and tagged-metric migration tests for the
    ceres database backend."""

    def setUp(self):
        # Force os.path.sep to "/" so the literal expected paths below
        # hold regardless of the host platform.
        self._sep_patch = patch.object(os.path, 'sep', "/")
        self._sep_patch.start()

    def tearDown(self):
        self._sep_patch.stop()

    def test_getFilesystemPath(self):
        # An untagged metric maps each dot-separated component to a
        # directory under LOCAL_DATA_DIR (no file extension for ceres).
        settings = TestSettings()
        settings['LOCAL_DATA_DIR'] = '/tmp/'
        database = CeresDatabase(settings)
        result = database.getFilesystemPath('stats.example.counts')
        self.assertEqual(result, '/tmp/stats/example/counts')  # nosec

    def test_getTaggedFilesystemPath(self):
        metric = 'stats.example.counts;tag1=value1'

        settings = TestSettings()
        settings['LOCAL_DATA_DIR'] = '/tmp/'
        settings['TAG_HASH_FILENAMES'] = False
        database = CeresDatabase(settings)

        # With hashing disabled the tagged metric keeps a readable name
        # (dots encoded as _DOT_) under a pair of directories derived
        # from the leading characters of the metric hash (872/252).
        result = database.getFilesystemPath(metric)
        self.assertEqual(
            result, '/tmp/_tagged/872/252/stats_DOT_example_DOT_counts;tag1=value1')  # nosec

        result = database.exists(metric)
        self.assertEqual(result, False)

    def test_getTaggedFilesystemPathHashed(self):
        metric = 'stats.example.counts;tag1=value1'

        settings = TestSettings()
        settings['LOCAL_DATA_DIR'] = '/tmp/'
        settings['TAG_HASH_FILENAMES'] = True
        database = CeresDatabase(settings)

        # With hashing enabled the leaf name is the full metric hash.
        result = database.getFilesystemPath(metric)
        self.assertEqual(
            result,
            '/tmp/_tagged/872/252/' +  # nosec
            '872252dcead671982862f82a3b440f02aa8f525dd6d0f2921de0dc2b3e874ad0')

        result = database.exists(metric)
        self.assertEqual(result, False)

    def test_migrateTaggedFilesystemPathHashed(self):
        metric = 'stats.example.counts;tag1=value1'

        settings = TestSettings()
        settings['LOCAL_DATA_DIR'] = '/tmp/'
        settings['TAG_HASH_FILENAMES'] = False
        database = CeresDatabase(settings)

        result = database.exists(metric)
        self.assertEqual(result, False)

        # Create the metric under its old (unhashed) path first.
        old_path = database.getFilesystemPath(metric)
        self.assertEqual(
            old_path, '/tmp/_tagged/872/252/stats_DOT_example_DOT_counts;tag1=value1')  # nosec

        self.assertEqual(exists(old_path), False)

        result = database.create(metric, [(60, 60)], 0.5, 'average')

        self.assertEqual(exists(old_path), True)

        result = database.exists(metric)
        self.assertEqual(result, True)

        # Re-open the database with hashed filenames enabled.
        settings['TAG_HASH_FILENAMES'] = True
        database = CeresDatabase(settings)

        hashed_path = database.getFilesystemPath(metric)
        self.assertEqual(
            hashed_path,
            '/tmp/_tagged/872/252/' +  # nosec
            '872252dcead671982862f82a3b440f02aa8f525dd6d0f2921de0dc2b3e874ad0')

        self.assertEqual(exists(hashed_path), False)

        # exists() is expected to migrate the old entry to the hashed
        # path as a side effect (verified by the assertions below).
        result = database.exists(metric)
        self.assertEqual(result, True)

        self.assertEqual(exists(old_path), False)
        self.assertEqual(exists(hashed_path), True)

        # Ceres stores a metric as a directory tree, hence rmtree
        # rather than os.remove.
        shutil.rmtree(hashed_path)
0 | import unittest | |
1 | from carbon.hashing import ConsistentHashRing | |
2 | ||
3 | ||
class HashIntegrityTest(unittest.TestCase):
    """Positional-integrity and lookup tests for the default hash ring."""

    def _assert_no_positional_collisions(self, node_count):
        """Build a ring of *node_count* nodes and verify that every ring
        position is unique (no positional collisions)."""
        ring = ConsistentHashRing([])
        for n in range(node_count):
            ring.add_node(("192.168.10.%s" % str(10 + n), "%s" % str(10 + n)))
        positions = [entry[0] for entry in ring.ring]
        self.assertEqual(len(positions), len(set(positions)))

    # The per-count tests below were previously eight copy-pasted bodies;
    # they now delegate to the shared helper.  Names (including the
    # original "itegrity" spelling) are kept for test-discovery stability.

    def test_2_node_positional_itegrity(self):
        """Make a cluster, verify we don't have positional collisions"""
        self._assert_no_positional_collisions(2)

    def test_3_node_positional_itegrity(self):
        """Make a cluster, verify we don't have positional collisions"""
        self._assert_no_positional_collisions(3)

    def test_4_node_positional_itegrity(self):
        """Make a cluster, verify we don't have positional collisions"""
        self._assert_no_positional_collisions(4)

    def test_5_node_positional_itegrity(self):
        """Make a cluster, verify we don't have positional collisions"""
        self._assert_no_positional_collisions(5)

    def test_6_node_positional_itegrity(self):
        """Make a cluster, verify we don't have positional collisions"""
        self._assert_no_positional_collisions(6)

    def test_7_node_positional_itegrity(self):
        """Make a cluster, verify we don't have positional collisions"""
        self._assert_no_positional_collisions(7)

    def test_8_node_positional_itegrity(self):
        """Make a cluster, verify we don't have positional collisions"""
        self._assert_no_positional_collisions(8)

    def test_9_node_positional_itegrity(self):
        """Make a cluster, verify we don't have positional collisions"""
        self._assert_no_positional_collisions(9)

    def test_10_get_node(self):
        """Trigger bisect on identical first key, see: issues/766"""
        ring = ConsistentHashRing([], replica_count=1)
        ring.add_node(("1", "1"))
        n = ring.get_node("('1', '1'):0")
        self.assertEqual(('1', '1'), n)

    def test_11_get_nodes(self):
        """Trigger bisect on identical first key, see: issues/766"""
        ring = ConsistentHashRing([], replica_count=1)
        ring.add_node(("1", "1"))
        n = ring.get_nodes("('1', '1'):0")
        self.assertEqual([('1', '1')], list(n))
91 | ||
92 | ||
class FNVHashIntegrityTest(unittest.TestCase):
    """Positional-integrity tests for rings using the fnv1a_ch hash."""

    def _assert_no_positional_collisions(self, node_count):
        """Build an fnv1a_ch ring of *node_count* nodes and verify that
        every ring position is unique (no positional collisions)."""
        ring = ConsistentHashRing([], hash_type='fnv1a_ch')
        for n in range(node_count):
            ring.add_node(("192.168.10.%s" % str(10 + n), "%s" % str(10 + n)))
        positions = [entry[0] for entry in ring.ring]
        self.assertEqual(len(positions), len(set(positions)))

    # The per-count tests below were previously eight copy-pasted bodies;
    # they now delegate to the shared helper.  Names (including the
    # original "itegrity" spelling) are kept for test-discovery stability.

    def test_2_node_positional_itegrity(self):
        """Make a cluster, verify we don't have positional collisions"""
        self._assert_no_positional_collisions(2)

    def test_3_node_positional_itegrity(self):
        """Make a cluster, verify we don't have positional collisions"""
        self._assert_no_positional_collisions(3)

    def test_4_node_positional_itegrity(self):
        """Make a cluster, verify we don't have positional collisions"""
        self._assert_no_positional_collisions(4)

    def test_5_node_positional_itegrity(self):
        """Make a cluster, verify we don't have positional collisions"""
        self._assert_no_positional_collisions(5)

    def test_6_node_positional_itegrity(self):
        """Make a cluster, verify we don't have positional collisions"""
        self._assert_no_positional_collisions(6)

    def test_7_node_positional_itegrity(self):
        """Make a cluster, verify we don't have positional collisions"""
        self._assert_no_positional_collisions(7)

    def test_8_node_positional_itegrity(self):
        """Make a cluster, verify we don't have positional collisions"""
        self._assert_no_positional_collisions(8)

    def test_9_node_positional_itegrity(self):
        """Make a cluster, verify we don't have positional collisions"""
        self._assert_no_positional_collisions(9)
166 | ||
167 | ||
class ConsistentHashRingTestFNV1A(unittest.TestCase):
    """Spot-check fnv1a_ch ring positions and node lookups against
    known-good values."""

    def _make_ring(self):
        # Three hosts with fixed instance ids so the expected values below
        # are stable.
        hosts = [("127.0.0.1", "ba603c36342304ed77953f84ac4d357b"),
                 ("127.0.0.2", "5dd63865534f84899c6e5594dba6749a"),
                 ("127.0.0.3", "866a18b81f2dc4649517a1df13e26f28")]
        return ConsistentHashRing(hosts, hash_type='fnv1a_ch')

    def test_chr_compute_ring_position_fnv1a(self):
        hashring = self._make_ring()
        expected_positions = [
            ('hosts.worker1.cpu', 59573),
            ('hosts.worker1.load', 57163),
            ('hosts.worker2.cpu', 35749),
            ('hosts.worker2.network', 43584),
            ('hosts.worker3.cpu', 12600),
            ('hosts.worker3.irq', 10052),
        ]
        for metric, position in expected_positions:
            self.assertEqual(hashring.compute_ring_position(metric), position)

    def test_chr_get_node_fnv1a(self):
        hashring = self._make_ring()
        expected_nodes = [
            ('hosts.worker1.cpu',
             ('127.0.0.1', 'ba603c36342304ed77953f84ac4d357b')),
            ('hosts.worker2.cpu',
             ('127.0.0.3', '866a18b81f2dc4649517a1df13e26f28')),
            ('stats.checkout.cluster.padamski-wro.api.v1.payment-initialize.count',
             ('127.0.0.3', '866a18b81f2dc4649517a1df13e26f28')),
        ]
        for metric, node in expected_nodes:
            self.assertEqual(hashring.get_node(metric), node)
0 | # -*- coding: utf-8 -*- | |
1 | import os | |
2 | import sys | |
3 | from unittest import TestCase | |
4 | ||
5 | from mock import mock_open, patch, call | |
6 | ||
7 | from carbon.instrumentation import getMemUsage | |
8 | ||
9 | ||
class TestInstrumentation(TestCase):

    def test_getMemUsage(self):
        """getMemUsage() multiplies the second statm field (resident
        pages, 2 in the fake data) by the system page size, and closes
        the file it opened."""
        # The builtin open lives in a different module on Python 2 vs 3.
        open_target = ('builtins.open' if sys.version_info[0] >= 3
                       else '__builtin__.open')

        with patch(open_target, mock_open(read_data='1 2 1 1 0 1 0')) as mocked:
            page_size = os.sysconf('SC_PAGESIZE')
            usage = getMemUsage()
            # The file must have been used as a context manager, i.e.
            # properly closed via __exit__.
            mocked.assert_has_calls([call().__exit__(None, None, None)],
                                    any_order=True)
            self.assertEqual(usage, 2 * page_size)
0 | # -*- coding: utf-8 -*- | |
1 | ||
2 | from unittest import TestCase | |
3 | from os import path | |
4 | from twisted.python.log import addObserver, removeObserver | |
5 | from carbon import log | |
6 | ||
7 | try: | |
8 | from tempfile import TemporaryDirectory | |
9 | except ImportError: | |
10 | from backports.tempfile import TemporaryDirectory | |
11 | ||
12 | ||
class CarbonLogFileTest(TestCase):

    def test_write_to_logfile(self):
        """A message sent to the 'creates' log ends up in creates.log in
        the configured log directory, with the unicode payload intact."""
        with TemporaryDirectory() as tmpdir:
            o = log.CarbonLogObserver()
            o.log_to_dir(tmpdir)
            addObserver(o)
            log.creates('😀😀😀😀 test !!!!')
            removeObserver(o)

            with open(path.join(tmpdir, 'creates.log')) as logfile:
                read_line = logfile.readline()
                # assertRegexpMatches was deprecated in Python 3.2 and
                # removed in 3.12; prefer assertRegex, falling back only
                # on Python 2 where it does not exist.
                assert_regex = getattr(self, 'assertRegex',
                                       self.assertRegexpMatches)
                assert_regex(read_line, '.*😀😀😀😀 test !!!!')
0 | from unittest import TestCase | |
1 | from mock import MagicMock, patch | |
2 | ||
3 | from carbon.pipeline import Processor, run_pipeline | |
4 | ||
5 | ||
class ProcessorTest(TestCase):
    """Tests for the Processor plugin registry and run_pipeline()."""

    def test_processor_registers(self):
        # Merely defining a subclass with a plugin_name registers it.
        class DummyProcessor(Processor):
            plugin_name = "dummy_processor"

        self.assertIn("dummy_processor", Processor.plugins)
        # Clean the registry up so other tests see pristine state.
        del Processor.plugins["dummy_processor"]

    def test_run_pipeline_empty_processors(self):
        # An explicitly empty processor list yields None.
        self.assertIsNone(run_pipeline("carbon.metric", (0, 0), []))

    def test_run_pipeline(self):
        mock_processor = MagicMock(Processor)
        run_pipeline("carbon.metric", (0, 0), [mock_processor])
        mock_processor.process.assert_called_once_with("carbon.metric", (0, 0))

    def test_run_pipeline_no_processors_uses_state(self):
        # With no processors argument, the global pipeline state is used.
        mock_processor = MagicMock(Processor)

        import carbon.pipeline
        with patch.object(carbon.pipeline.state, 'pipeline_processors',
                          [mock_processor]):
            run_pipeline("carbon.metric", (0, 0))
        mock_processor.process.assert_called_once_with("carbon.metric", (0, 0))
0 | import carbon.client as carbon_client | |
1 | from carbon.routers import DatapointRouter | |
2 | from carbon.tests.util import TestSettings | |
3 | import carbon.service # NOQA | |
4 | ||
5 | from carbon.carbon_pb2 import Payload | |
6 | from carbon.protobuf import CarbonProtobufClientFactory | |
7 | ||
8 | from twisted.internet import reactor | |
9 | # from twisted.internet.defer import Deferred | |
10 | # from twisted.internet.base import DelayedCall | |
11 | from twisted.internet.task import deferLater | |
12 | from twisted.trial.unittest import TestCase | |
13 | from twisted.test.proto_helpers import StringTransport | |
14 | ||
15 | from mock import Mock | |
16 | from struct import unpack, calcsize | |
17 | ||
18 | ||
# Network byte order, unsigned 32-bit length prefix framing each payload.
INT32_FORMAT = '!I'
INT32_SIZE = calcsize(INT32_FORMAT)
21 | ||
22 | ||
def decode_sent(data):
    """Decode one length-prefixed protobuf Payload from *data*.

    Returns a list of (metric_name, (timestamp, value)) tuples in the
    order they appear in the payload.
    """
    (pb_size,) = unpack(INT32_FORMAT, data[:INT32_SIZE])
    payload_pb = Payload.FromString(data[INT32_SIZE:INT32_SIZE + pb_size])
    return [
        (metric_pb.metric, (point_pb.timestamp, point_pb.value))
        for metric_pb in payload_pb.metrics
        for point_pb in metric_pb.points
    ]
34 | ||
35 | ||
class ConnectedCarbonClientProtocolTest(TestCase):
    """End-to-end check that a connected protobuf client protocol serializes
    datapoints onto its transport."""

    def setUp(self):
        self.router_mock = Mock(spec=DatapointRouter)
        carbon_client.settings = TestSettings()  # reset to defaults
        factory = CarbonProtobufClientFactory(('127.0.0.1', 2003, 'a'), self.router_mock)
        self.protocol = factory.buildProtocol(('127.0.0.1', 2003))
        # StringTransport records writes in memory instead of using a socket.
        self.transport = StringTransport()
        self.protocol.makeConnection(self.transport)

    def test_send_datapoint(self):
        def assert_sent():
            sent_data = self.transport.value()
            sent_datapoints = decode_sent(sent_data)
            self.assertEqual([datapoint], sent_datapoints)

        datapoint = ('foo.bar', (1000000000, 1.0))
        self.protocol.sendDatapoint(*datapoint)
        # The client buffers before flushing; give it a beat, and return the
        # Deferred so trial waits for the assertion to run.
        return deferLater(reactor, 0.1, assert_sent)
0 | # -*- coding: utf-8 -*- | |
1 | from carbon.protocols import MetricReceiver, MetricLineReceiver, \ | |
2 | MetricDatagramReceiver, MetricPickleReceiver | |
3 | from carbon import events | |
4 | from unittest import TestCase | |
5 | from mock import Mock, patch, call | |
6 | from carbon.cache import _MetricCache | |
7 | ||
8 | import os.path | |
9 | import pickle | |
10 | import re | |
11 | import time | |
12 | ||
13 | ||
class TestMetricReceiversHandler(TestCase):
    """Check that every expected receiver plugin registers and builds."""

    def test_build(self):
        expected_plugins = ['line', 'udp', 'pickle', 'protobuf']

        # amqp not supported with py3
        try:
            import carbon.amqp_listener  # NOQA
            expected_plugins.append('amqp')
        except ImportError:
            pass

        # Can't always test manhole because 'cryptography' can
        # be a pain to install and we don't want to make the CI
        # flaky because of that.
        try:
            import carbon.manhole  # NOQA
            expected_plugins.append('manhole')
        except ImportError:
            pass

        plugins = sorted(MetricReceiver.plugins.keys())
        # Sort once at comparison time (the original sorted the list twice);
        # also assertEquals is a deprecated alias of assertEqual.
        self.assertEqual(sorted(expected_plugins), plugins)

        class _FakeService(object):
            # Minimal stand-in for a service parent; addService is all
            # that build() requires.
            def addService(self, service):
                pass
        fake_service = _FakeService()

        # Every registered plugin must be buildable against a parent service.
        for plugin_name, plugin_class in MetricReceiver.plugins.items():
            plugin_class.build(fake_service)
46 | ||
class TestMetricReceiver(TestCase):
    """Unit tests for MetricReceiver.metricReceived: validation, timestamp
    normalisation, and allow/deny-list filtering."""

    def setUp(self):
        self.receiver = MetricReceiver()

        # Intercept events.metricReceived so tests can assert on dispatch.
        self.event_mock = Mock()
        self._event_patch = patch.object(events, 'metricReceived',
                                         self.event_mock)
        self._event_patch.start()

        # Fixed clock for tests that depend on "now".
        self.time_mock = Mock()
        self.time_mock.return_value = 123456

    def tearDown(self):
        self._event_patch.stop()

    def test_valid_metricReceived(self):
        """ Valid metric should call events.metricReceived """
        metric = ('carbon.foo', (1, 2))
        self.receiver.metricReceived(*metric)
        events.metricReceived.assert_called_once_with(*metric)

    def test_min_timestamp_metricReceived(self):
        """ Should round the timestamp down to whole interval """
        settings = {'MIN_TIMESTAMP_RESOLUTION': 10}
        metric = ('carbon.foo', (1005, 0))
        with patch.dict('carbon.conf.settings', settings):
            self.receiver.metricReceived(*metric)
            # 1005 floored to the 10-second boundary -> 1000
            events.metricReceived.assert_called_once_with('carbon.foo',
                                                          (1000, 0))

    def test_nan_metricReceived(self):
        """ NaN value should not call events.metricReceived """
        metric = ('carbon.foo', (1, float('NaN')))
        self.receiver.metricReceived(*metric)
        events.metricReceived.assert_not_called()

    def test_notime_metricReceived(self):
        """ metric with timestamp -1 Should call events.metricReceived with
        current (mocked) time """
        with patch.object(time, 'time', self.time_mock):
            metric = ('carbon.foo', (-1, 2))
            self.receiver.metricReceived(*metric)
            events.metricReceived.assert_called_once_with('carbon.foo',
                                                          (time.time(), 2))

    def test_allowlist_metricReceived(self):
        """ metrics which don't match should be dropped """
        regexes = [re.compile(r'.*\.is\.allowed\..*'),
                   re.compile(r'^жопа\.驢\.γάιδαρος$')]

        metrics = [('this.metric.is.allowed.a', (1, 2)),
                   ('this.metric.is.not_allowed.a', (3, 4)),
                   ('osioł.الاغ.नितंब$', (5, 6)),
                   ('жопа.驢.γάιδαρος', (7, 8))]

        with patch('carbon.regexlist.WhiteList.regex_list', regexes):
            for m in metrics:
                self.receiver.metricReceived(*m)

        # Only the metrics matching the allow-list regexes pass through.
        events.metricReceived.assert_has_calls([call(*metrics[0]),
                                                call(*metrics[3])])

    def test_disallowlist_metricReceived(self):
        """ metrics which match should be dropped """
        regexes = [re.compile(r'.*\.invalid\.metric\..*'),
                   re.compile('^osioł.الاغ.नितंब$')]

        metrics = [('some.invalid.metric.a', (1, 2)),
                   ('a.valid.metric.b', (3, 4)),
                   ('osioł.الاغ.नितंब', (5, 6)),
                   ('жопа.驢.γάιδαρος', (7, 8))]

        with patch('carbon.regexlist.BlackList.regex_list', regexes):
            for m in metrics:
                self.receiver.metricReceived(*m)

        # Metrics matching the deny-list regexes are dropped.
        events.metricReceived.assert_has_calls([call(*metrics[1]),
                                                call(*metrics[3])])
125 | ||
126 | ||
class TestMetricLineReceiver(TestCase):
    """Tests for the plain-text line protocol receiver."""

    def setUp(self):
        self.receiver = MetricLineReceiver()
        self.receiver.peerName = 'localhost'

        # Replace the base-class handler so we can observe what got parsed.
        self._receiver_mock = Mock()
        self._receiver_patch = patch.object(
            MetricReceiver, 'metricReceived', self._receiver_mock)
        self._receiver_patch.start()

    def tearDown(self):
        self._receiver_patch.stop()

    def test_invalid_line_received(self):
        """ Metric with no timestamp and value should not call metricReceived """
        garbage = b'\xd0\xb6' * 401
        self.receiver.lineReceived(garbage)
        MetricReceiver.metricReceived.assert_not_called()

    def test_valid_line_received(self):
        """ Should call metricReceived with str object """
        line = b'\xd0\xb6\xd0\xbe\xd0\xbf\xd0\xb0 42 -1'
        self.receiver.lineReceived(line)
        MetricReceiver.metricReceived.assert_called_once_with(
            'жопа', (-1, 42))

    def test_get_peer_name(self):
        """ getPeerName without transport info should return 'peer' """
        self.assertEqual('peer', self.receiver.getPeerName())
156 | ||
class TestMetricDatagramReceiver(TestCase):
    """Tests for the UDP datagram receiver."""

    def setUp(self):
        self.receiver = MetricDatagramReceiver()
        self.receiver.peerName = 'localhost'
        self.addr = ('127.0.0.1', 9999)

        # Observe what the base class would have dispatched.
        self._receiver_mock = Mock()
        self._receiver_patch = patch.object(
            MetricReceiver, 'metricReceived', self._receiver_mock)
        self._receiver_patch.start()

    def tearDown(self):
        self._receiver_patch.stop()

    def test_valid_datagramReceived(self):
        """ metricReceived should be called with valid metric """
        self.receiver.datagramReceived(b'carbon.foo 1 2', self.addr)
        MetricReceiver.metricReceived.assert_called_once_with(
            'carbon.foo', (2, 1))

    def test_invalid_datagramReceived(self):
        """ metricReceived should not be called with missing timestamp """
        for datagram in (b'carbon.foo 1', b'c' * 401):
            self.receiver.datagramReceived(datagram, self.addr)
        MetricReceiver.metricReceived.assert_not_called()

    def test_utf8_datagramReceived(self):
        """ metricReceived should be called with UTF-8 metricname """
        self.receiver.datagramReceived(
            b'\xd0\xb6\xd0\xbe\xd0\xbf\xd0\xb0 42 -1', self.addr)
        MetricReceiver.metricReceived.assert_called_once_with(
            'жопа', (-1, 42))

    def test_multiple_datagramReceived(self):
        """ metricReceived should only be called with valid lines """
        self.receiver.datagramReceived(
            b'lines 1 2\nare 3 4\nnot\nvalid 5 6\n', self.addr)
        MetricReceiver.metricReceived.assert_has_calls([
            call('lines', (2, 1)),
            call('are', (4, 3)),
            call('valid', (6, 5))])
202 | ||
203 | ||
class TestMetricPickleReceiver(TestCase):
    """Tests for the pickle protocol receiver."""

    def setUp(self):
        self.receiver = MetricPickleReceiver()

        # Use the plain pickle module instead of the configured unpickler.
        self.receiver.unpickler = pickle
        self.receiver.peerName = 'localhost'

        self._receiver_mock = Mock()
        self._receiver_patch = patch.object(MetricReceiver, 'metricReceived',
                                            self._receiver_mock)
        self._receiver_patch.start()

    def tearDown(self):
        self._receiver_patch.stop()

    @patch('carbon.protocols.get_unpickler')
    def test_pickler_configured_on_connect(self, get_unpickler_mock):
        """ connectionMade should configure a pickler """
        from twisted.internet.address import IPv4Address
        address = IPv4Address('TCP', 'localhost', 2004)
        self.receiver.transport = Mock()
        self.receiver.transport.getPeer = Mock(return_value=address)
        self.receiver.connectionMade()
        get_unpickler_mock.assert_called_once_with(insecure=False)

    def test_string_received(self):
        """ Valid received metrics should call metricReceived """
        metrics = [('foo.bar', (1, 1.5)),
                   ('bar.foo', (2, 2.5))]
        self.receiver.stringReceived(pickle.dumps(metrics))
        MetricReceiver.metricReceived.assert_has_calls(
            [call('foo.bar', (1, 1.5)), call('bar.foo', (2, 2.5))])

    def test_invalid_pickle(self):
        """ Invalid formatted pickle should not call metricReceived """
        # IndexError
        self.receiver.stringReceived(b"1")
        # ValueError
        self.receiver.stringReceived(b"i")
        # ImportError
        self.receiver.stringReceived(b"iii")
        # BUG FIX: '.not_called()' was a silent no-op (Mock auto-creates
        # unknown attributes); assert_not_called() actually asserts.
        MetricReceiver.metricReceived.assert_not_called()

    def test_decode_pickle(self):
        """ Missing timestamp/value should not call metricReceived """
        metrics = [('foo.bar', 1)]
        self.receiver.stringReceived(pickle.dumps(metrics))
        # BUG FIX: was the no-op '.not_called()'.
        MetricReceiver.metricReceived.assert_not_called()

    def test_invalid_types(self):
        """ Timestamp/value in wrong type should not call metricReceived """
        metrics = [('foo.bar', ('a', 'b'))]
        self.receiver.stringReceived(pickle.dumps(metrics))
        # BUG FIX: was the no-op '.not_called()'.
        MetricReceiver.metricReceived.assert_not_called()

    def test_py2_unicode_to_string_conversion(self):
        """ Metricname in python2 unicode type should be transformed to str """
        metrics = [(u'foo.bar.中文', (1, 2))]
        self.receiver.stringReceived(pickle.dumps(metrics))
        MetricReceiver.metricReceived.assert_called_once_with(
            'foo.bar.中文', (1, 2))
        # assert_called_once does not verify type
        args, _ = MetricReceiver.metricReceived.call_args
        self.assertIsInstance(args[0], str)
269 | ||
270 | ||
class TestCacheManagementHandler(TestCase):
    """Tests for the carbon-cache query protocol handler."""

    def setUp(self):
        test_directory = os.path.dirname(os.path.realpath(__file__))
        settings = {
            'CONF_DIR': os.path.join(test_directory, 'data', 'conf-directory'),
            'USE_INSECURE_PICKLER': False
        }
        self._settings_patch = patch.dict('carbon.conf.settings', settings)
        self._settings_patch.start()

        from carbon.protocols import CacheManagementHandler
        self.handler = CacheManagementHandler()

        # Fresh cache per test so stored datapoints don't leak across tests.
        self.cache = _MetricCache()

        def _get_cache():
            return self.cache

        self._metriccache_patch = patch('carbon.protocols.MetricCache', _get_cache)
        self._metriccache_patch.start()

        self.handler.unpickler = pickle
        self.handler.peerAddr = 'localhost:7002'

        # Capture responses the handler would have written to the wire.
        self.send_string_mock = Mock(side_effect=self._save_response)
        self._send_string_patch = patch.object(self.handler, 'sendString', self.send_string_mock)
        self._send_string_patch.start()

    def tearDown(self):
        self._settings_patch.stop()
        self._send_string_patch.stop()
        self._metriccache_patch.stop()
        del self.cache

    def _save_response(self, arg):
        """Unpickle and stash the handler's reply for assertions."""
        self.response = None
        if arg:
            raw_response = arg
            self.response = pickle.loads(raw_response)

    def send_request(self, request_type, **kwargs):
        """Feed a pickled request dict of the given type into the handler."""
        request = {}
        request['type'] = request_type
        request.update(kwargs)
        self.handler.stringReceived(pickle.dumps(request))

    @patch('carbon.protocols.get_unpickler')
    def test_pickler_configured_on_connect(self, get_unpickler_mock):
        from twisted.internet.address import IPv4Address
        address = IPv4Address('TCP', 'localhost', 7002)
        self.handler.transport = Mock()
        self.handler.transport.getPeer = Mock(return_value=address)
        self.handler.connectionMade()
        get_unpickler_mock.assert_called_once_with(insecure=False)

    def test_invalid_request_type_returns_error(self):
        self.send_request('foo')

        self.assertIn('error', self.response)

    def test_cache_query_returns_response_dict(self):
        self.send_request('cache-query', metric='carbon.foo')
        self.assertIsInstance(self.response, dict)

    def test_cache_query_response_has_datapoints(self):
        self.send_request('cache-query', metric='carbon.foo')
        self.assertIn('datapoints', self.response)

    def test_cache_query_returns_empty_if_no_match(self):
        self.send_request('cache-query', metric='carbon.foo')
        # assertEquals is a deprecated alias of assertEqual (removed in 3.12).
        self.assertEqual({'datapoints': []}, self.response)

    def test_cache_query_returns_cached_datapoints_if_matches(self):
        self.cache.store('carbon.foo', (600, 1.0))
        self.send_request('cache-query', metric='carbon.foo')
        self.assertEqual([(600, 1.0)], self.response['datapoints'])

    def test_cache_bulk_query_returns_response_dict(self):
        self.send_request('cache-query-bulk', metrics=[])
        self.assertIsInstance(self.response, dict)

    def test_cache_bulk_query_response_has_datapointsByMetric(self):
        self.send_request('cache-query-bulk', metrics=[])
        self.assertIn('datapointsByMetric', self.response)

    def test_cache_bulk_query_response_returns_empty_if_no_match(self):
        self.send_request('cache-query-bulk', metrics=[])
        self.assertEqual({'datapointsByMetric': {}}, self.response)

    def test_cache_bulk_query_response(self):
        self.cache.store('carbon.foo', (600, 1.0))
        self.cache.store('carbon.bar', (600, 2.0))

        expected_response = {'carbon.foo': [(600, 1.0)], 'carbon.bar': [(600, 2.0)]}
        self.send_request('cache-query-bulk', metrics=['carbon.foo', 'carbon.bar'])
        self.assertEqual({'datapointsByMetric': expected_response}, self.response)
0 | from unittest import TestCase | |
1 | from carbon.util import parseRetentionDef | |
2 | ||
3 | ||
class TestParseRetentionDef(TestCase):
    """Tests for carbon.util.parseRetentionDef."""

    def test_valid_retentions(self):
        """Well-formed 'precision:retention' strings parse to second counts."""
        cases = (
            ('60:10', (60, 10)),
            ('10:60', (10, 60)),
            ('10s:10h', (10, 3600)),
        )
        for definition, expected in cases:
            self.assertEqual(parseRetentionDef(definition), expected)

    def test_invalid_retentions(self):
        """Malformed definitions raise ValueError with a descriptive message;
        tuples in the table mark inputs that must parse cleanly instead."""
        cases = (
            # From getUnitString
            ('10x:10', ValueError("Invalid unit 'x'")),
            ('60:10x', ValueError("Invalid unit 'x'")),

            # From parseRetentionDef
            ('10X:10', ValueError("Invalid precision specification '10X'")),
            ('10:10$', ValueError("Invalid retention specification '10$'")),
            ('60:10', (60, 10)),
        )
        for definition, expected in cases:
            try:
                parsed = parseRetentionDef(definition)
            except expected.__class__ as exc:
                # Both the message and the exact exception class must match.
                self.assertEqual(str(expected), str(exc))
                self.assertEqual(expected.__class__, exc.__class__)
            else:
                # No exception raised: expected a plain parsed result.
                self.assertEqual(parsed, expected)
0 | from mock import Mock, mock_open, patch | |
1 | from unittest import TestCase | |
2 | from carbon.pipeline import Processor | |
3 | from carbon.rewrite import PRE, RewriteProcessor, RewriteRule, RewriteRuleManager | |
4 | ||
5 | ||
class RewriteProcessorTest(TestCase):
    """Tests for the 'rewrite' pipeline processor."""

    def tearDown(self):
        # Drop any rules installed by a test.
        RewriteRuleManager.clear()

    def test_registers_plugin(self):
        self.assertIn('rewrite', Processor.plugins)

    def test_applies_rule(self):
        rule = Mock(spec=RewriteRule)
        RewriteRuleManager.rulesets[PRE] = [rule]
        list(RewriteProcessor(PRE).process('carbon.foo', (0, 0)))
        rule.apply.assert_called_once_with('carbon.foo')

    def test_applies_rule_and_returns_metric(self):
        rule = Mock(spec=RewriteRule)
        rule.apply.return_value = 'carbon.foo.bar'
        RewriteRuleManager.rulesets[PRE] = [rule]
        output = list(RewriteProcessor(PRE).process('carbon.foo', (0, 0)))
        self.assertEqual(output[0], ('carbon.foo.bar', (0, 0)))

    def test_passes_through_with_no_rules(self):
        output = list(RewriteProcessor(PRE).process('carbon.foo', (0, 0)))
        self.assertEqual(output[0], ('carbon.foo', (0, 0)))
29 | ||
30 | ||
class TestRewriteRule(TestCase):
    """Tests for a single RewriteRule."""

    def setUp(self):
        self.sample_rule = RewriteRule('^carbon[.]foo[.]', 'carbon_foo.')

    def test_instantiation_compiles_pattern(self):
        # A compiled regex exposes sub(); a raw pattern string would not.
        self.assertTrue(hasattr(self.sample_rule.regex, 'sub'))

    def test_apply_substitutes(self):
        self.assertEqual('carbon_foo.bar',
                         self.sample_rule.apply('carbon.foo.bar'))
41 | ||
42 | ||
class TestRewriteRuleManager(TestCase):
    """Tests for RewriteRuleManager: rule-file parsing, mtime-based reload
    caching, and tolerance of broken or commented config lines."""

    def setUp(self):
        # One section, two valid rules.
        self.sample_config = """
[pre]
^carbon.foo = carbon.foo.bar
^carbon.bar = carbon.bar.baz
"""
        # Two sections ([pre] and [post]).
        self.sample_multi_config = """
[pre]
^carbon.foo = carbon.foo.bar
^carbon.bar = carbon.bar.baz

[post]
^carbon.baz = carbon.foo.bar
"""
        # Second rule has an unbalanced '(' and must be skipped.
        self.broken_pattern_config = """
[pre]
^carbon.foo = carbon.foo.bar
^carbon.(bar = carbon.bar.baz
"""
        # Second rule is commented out and must be ignored.
        self.commented_config = """
[pre]
^carbon.foo = carbon.foo.bar
#^carbon.bar = carbon.bar.baz
"""

    def tearDown(self):
        # Reset class-level state so tests don't leak into each other.
        RewriteRuleManager.rules_file = None
        RewriteRuleManager.rules_last_read = 0.0
        RewriteRuleManager.clear()

    def test_looping_call_reads_rules(self):
        # The periodic reload task must be bound to read_rules.
        self.assertEqual(RewriteRuleManager.read_rules, RewriteRuleManager.read_task.f)

    def test_request_for_nonexistent_rules_returns_iterable(self):
        try:
            iter(RewriteRuleManager.rules('foo'))
        except TypeError:
            self.fail("RewriteRuleManager.rules() returned a non-iterable type")

    def test_read_from_starts_task(self):
        with patch.object(RewriteRuleManager, 'read_rules'):
            with patch.object(RewriteRuleManager.read_task, 'start') as task_start_mock:
                RewriteRuleManager.read_from('foo.conf')
                self.assertEqual(1, task_start_mock.call_count)

    def test_read_records_mtime(self):
        import carbon.rewrite
        RewriteRuleManager.rules_file = 'foo.conf'

        # create=True because carbon.rewrite has no module-level 'open'.
        with patch.object(carbon.rewrite, 'open', mock_open(), create=True):
            with patch.object(carbon.rewrite, 'exists', Mock(return_value=True)):
                with patch.object(carbon.rewrite, 'getmtime', Mock(return_value=1234)):
                    RewriteRuleManager.read_rules()
        self.assertEqual(1234, RewriteRuleManager.rules_last_read)

    def test_read_clears_if_no_file(self):
        import carbon.rewrite
        RewriteRuleManager.rules_file = 'foo.conf'

        # A vanished rules file must wipe the in-memory rules.
        with patch.object(carbon.rewrite, 'exists', Mock(return_value=False)):
            with patch.object(RewriteRuleManager, 'clear') as clear_mock:
                RewriteRuleManager.read_rules()
        clear_mock.assert_called_once_with()

    def test_rules_unchanged_if_mtime_unchanged(self):
        import carbon.rewrite
        mtime = 1234
        rulesets = {'pre': [Mock(RewriteRule)]}
        RewriteRuleManager.rules_last_read = mtime
        RewriteRuleManager.rulesets.update(rulesets)
        RewriteRuleManager.rules_file = 'foo.conf'

        with patch.object(carbon.rewrite, 'exists', Mock(return_value=True)):
            with patch.object(carbon.rewrite, 'getmtime', Mock(return_value=mtime)):
                RewriteRuleManager.read_rules()

        self.assertEqual(rulesets, RewriteRuleManager.rulesets)

    def test_read_doesnt_open_file_if_mtime_unchanged(self):
        import carbon.rewrite
        mtime = 1234
        RewriteRuleManager.rules_last_read = mtime
        RewriteRuleManager.rules_file = 'foo.conf'

        with patch.object(carbon.rewrite, 'open', mock_open(), create=True) as open_mock:
            with patch.object(carbon.rewrite, 'exists', Mock(return_value=True)):
                with patch.object(carbon.rewrite, 'getmtime', Mock(return_value=1234)):
                    RewriteRuleManager.read_rules()
        self.assertFalse(open_mock.called)

    def test_read_opens_file_if_mtime_newer(self):
        import carbon.rewrite
        RewriteRuleManager.rules_last_read = 1234
        RewriteRuleManager.rules_file = 'foo.conf'

        with patch.object(carbon.rewrite, 'open', mock_open(), create=True) as open_mock:
            with patch.object(carbon.rewrite, 'exists', Mock(return_value=True)):
                with patch.object(carbon.rewrite, 'getmtime', Mock(return_value=5678)):
                    RewriteRuleManager.read_rules()
        self.assertTrue(open_mock.called)

    def test_section_parsed_into_ruleset(self):
        import carbon.rewrite

        # read_rules iterates the 'file', so an iterator of lines suffices.
        open_mock = Mock(return_value=iter(self.sample_config.splitlines()))
        RewriteRuleManager.rules_file = 'foo.conf'

        with patch.object(carbon.rewrite, 'open', open_mock, create=True):
            with patch.object(carbon.rewrite, 'exists', Mock(return_value=True)):
                with patch.object(carbon.rewrite, 'getmtime', Mock(return_value=1234)):
                    RewriteRuleManager.read_rules()
        self.assertTrue('pre' in RewriteRuleManager.rulesets)

    def test_multiple_section_parsed_into_ruleset(self):
        import carbon.rewrite

        open_mock = Mock(return_value=iter(self.sample_multi_config.splitlines()))
        RewriteRuleManager.rules_file = 'foo.conf'

        with patch.object(carbon.rewrite, 'open', open_mock, create=True):
            with patch.object(carbon.rewrite, 'exists', Mock(return_value=True)):
                with patch.object(carbon.rewrite, 'getmtime', Mock(return_value=1234)):
                    RewriteRuleManager.read_rules()
        self.assertTrue('pre' in RewriteRuleManager.rulesets)
        self.assertTrue('post' in RewriteRuleManager.rulesets)

    def test_rules_parsed(self):
        import carbon.rewrite

        open_mock = Mock(return_value=iter(self.sample_config.splitlines()))
        RewriteRuleManager.rules_file = 'foo.conf'

        with patch.object(carbon.rewrite, 'open', open_mock, create=True):
            with patch.object(carbon.rewrite, 'exists', Mock(return_value=True)):
                with patch.object(carbon.rewrite, 'getmtime', Mock(return_value=1234)):
                    RewriteRuleManager.read_rules()
        self.assertEqual(2, len(RewriteRuleManager.rules('pre')))

    def test_broken_patterns_ignored(self):
        import carbon.rewrite

        open_mock = Mock(return_value=iter(self.broken_pattern_config.splitlines()))
        RewriteRuleManager.rules_file = 'foo.conf'

        with patch.object(carbon.rewrite, 'open', open_mock, create=True):
            with patch.object(carbon.rewrite, 'exists', Mock(return_value=True)):
                with patch.object(carbon.rewrite, 'getmtime', Mock(return_value=1234)):
                    RewriteRuleManager.read_rules()
        # Only the one syntactically valid rule should survive.
        self.assertEqual(1, len(RewriteRuleManager.rules('pre')))

    def test_comments_ignored(self):
        import carbon.rewrite

        open_mock = Mock(return_value=iter(self.commented_config.splitlines()))
        RewriteRuleManager.rules_file = 'foo.conf'

        with patch.object(carbon.rewrite, 'open', open_mock, create=True):
            with patch.object(carbon.rewrite, 'exists', Mock(return_value=True)):
                with patch.object(carbon.rewrite, 'getmtime', Mock(return_value=1234)):
                    RewriteRuleManager.read_rules()
        self.assertEqual(1, len(RewriteRuleManager.rules('pre')))
0 | import os | |
1 | import unittest | |
2 | ||
3 | from carbon import routers | |
4 | from carbon.util import parseDestinations | |
5 | from carbon.tests import util | |
6 | ||
7 | ||
# Sample destinations in 'host:port:instance' form: two hosts ('foo', 'bar')
# with three instances (a, b, c) each.
DESTINATIONS = (
    'foo:124:a',
    'foo:125:b',
    'foo:126:c',
    'bar:423:a',
    'bar:424:b',
    'bar:425:c',
)
16 | ||
17 | ||
def createSettings():
    """Build a TestSettings object configured for router construction.

    Returns settings with two-way diverse replication across the sample
    DESTINATIONS and the bundled relay-rules.conf.
    """
    settings = util.TestSettings()
    # BUG FIX: a stray trailing comma previously made this the tuple
    # (True,) rather than the boolean True. Both are truthy, so routing
    # behaved the same, but the tuple was clearly unintended.
    settings['DIVERSE_REPLICAS'] = True
    settings['REPLICATION_FACTOR'] = 2
    settings['DESTINATIONS'] = DESTINATIONS
    settings['relay-rules'] = os.path.join(
        os.path.dirname(__file__), 'relay-rules.conf')
    settings['aggregation-rules'] = None
    return settings
27 | ||
28 | ||
def parseDestination(destination):
    """Parse a single 'host:port:instance' string into a destination tuple."""
    parsed = parseDestinations([destination])
    return parsed[0]
31 | ||
32 | ||
class TestRelayRulesRouter(unittest.TestCase):
    """Sanity check for the relay-rules based router."""

    def testBasic(self):
        router = routers.RelayRulesRouter(createSettings())
        for destination in DESTINATIONS:
            router.addDestination(parseDestination(destination))
        # assertEquals is a deprecated alias of assertEqual (removed in 3.12).
        self.assertEqual(len(list(router.getDestinations('foo.bar'))), 1)
39 | ||
40 | ||
class TestOtherRouters(unittest.TestCase):
    """Every registered router plugin should honour REPLICATION_FACTOR."""

    def testBasic(self):
        settings = createSettings()
        for plugin in routers.DatapointRouter.plugins:
            # Test everything except 'rules' which is special
            if plugin == 'rules':
                continue

            router = routers.DatapointRouter.plugins[plugin](settings)
            # No destinations added yet -> nothing to route to.
            # (assertEquals is a deprecated alias; use assertEqual.)
            self.assertEqual(len(list(router.getDestinations('foo.bar'))), 0)

            for destination in DESTINATIONS:
                router.addDestination(parseDestination(destination))
            self.assertEqual(len(list(router.getDestinations('foo.bar'))),
                             settings['REPLICATION_FACTOR'])
0 | from mock import Mock, patch | |
1 | from unittest import TestCase | |
2 | ||
3 | from carbon import events, state | |
4 | from carbon.pipeline import Processor, run_pipeline, run_pipeline_generated | |
5 | from carbon.service import CarbonRootService, setupPipeline | |
6 | from carbon.tests.util import TestSettings | |
7 | ||
8 | ||
class TestSetupPipeline(TestCase):
    """Tests that setupPipeline instantiates the requested processors and
    wires the pipeline into carbon's global event handlers."""

    def setUp(self):
        self.settings = TestSettings()
        self.root_service_mock = Mock(CarbonRootService)
        # setupPipeline schedules a 'pipeline ready' callback on the reactor;
        # patch callWhenRunning so no real reactor is needed.
        self.call_when_running_patch = patch('twisted.internet.reactor.callWhenRunning')
        self.call_when_running_mock = self.call_when_running_patch.start()

    def tearDown(self):
        self.call_when_running_patch.stop()
        # Undo the global side effects of setupPipeline.
        state.pipeline_processors = []
        events.metricReceived.handlers = []
        events.metricGenerated.handlers = []

    def test_run_pipeline_chained_to_metric_received(self):
        setupPipeline([], self.root_service_mock, self.settings)
        self.assertTrue(run_pipeline in events.metricReceived.handlers)

    def test_run_pipeline_chained_to_metric_generated(self):
        setupPipeline([], self.root_service_mock, self.settings)
        self.assertTrue(run_pipeline_generated in events.metricGenerated.handlers)

    @patch('carbon.service.setupAggregatorProcessor')
    def test_aggregate_processor_set_up(self, setup_mock):
        setupPipeline(['aggregate'], self.root_service_mock, self.settings)
        setup_mock.assert_called_once_with(self.root_service_mock, self.settings)

    @patch('carbon.service.setupRewriterProcessor')
    def test_rewrite_processor_set_up(self, setup_mock):
        setupPipeline(['rewrite:pre'], self.root_service_mock, self.settings)
        setup_mock.assert_called_once_with(self.root_service_mock, self.settings)

    @patch('carbon.service.setupRelayProcessor')
    def test_relay_processor_set_up(self, setup_mock):
        setupPipeline(['relay'], self.root_service_mock, self.settings)
        setup_mock.assert_called_once_with(self.root_service_mock, self.settings)

    @patch('carbon.service.setupWriterProcessor')
    def test_write_processor_set_up(self, setup_mock):
        setupPipeline(['write'], self.root_service_mock, self.settings)
        setup_mock.assert_called_once_with(self.root_service_mock, self.settings)

    def test_unknown_processor_raises_value_error(self):
        self.assertRaises(
            ValueError, setupPipeline, ['foo'], self.root_service_mock, self.settings)

    @patch('carbon.service.setupRewriterProcessor', new=Mock())
    def test_parses_processor_args(self):
        # XXX Patch doesnt work on this import directly
        rewrite_mock = Mock()
        Processor.plugins['rewrite'] = rewrite_mock
        setupPipeline(['rewrite:pre'], self.root_service_mock, self.settings)
        # The ':pre' suffix must be passed to the plugin's constructor.
        rewrite_mock.assert_called_once_with('pre')

    def test_schedules_pipeline_ready(self):
        setupPipeline([], self.root_service_mock, self.settings)
        self.assertTrue(self.call_when_running_mock.called)
0 | import os | |
1 | from unittest import TestCase | |
2 | from mock import patch | |
3 | ||
4 | from carbon.tests.util import TestSettings | |
5 | from carbon.database import WhisperDatabase | |
6 | ||
7 | ||
8 | # class NoConfigSchemaLoadingTest(TestCase): | |
9 | ||
10 | # def setUp(self): | |
11 | # settings = { | |
12 | # 'CONF_DIR': '', | |
13 | # } | |
14 | # self._settings_patch = patch.dict('carbon.conf.settings', settings) | |
15 | # self._settings_patch.start() | |
16 | ||
17 | # def tearDown(self): | |
18 | # self._settings_patch.stop() | |
19 | ||
20 | # def test_loadAggregationSchemas_load_default_schema(self): | |
21 | # from carbon.storage import loadAggregationSchemas, defaultAggregation | |
22 | # schema_list = loadAggregationSchemas() | |
23 | # self.assertEquals(len(schema_list), 1) | |
24 | # schema = schema_list[0] | |
25 | # self.assertEquals(schema, defaultAggregation) | |
26 | ||
27 | # def test_loadStorageSchemas_raise_CarbonConfigException(self): | |
28 | # from carbon.storage import loadStorageSchemas | |
29 | # from carbon.exceptions import CarbonConfigException | |
30 | # with self.assertRaises(CarbonConfigException): | |
31 | # loadStorageSchemas() | |
32 | ||
33 | ||
class ExistingConfigSchemaLoadingTest(TestCase):
    """Schema loading tests run against the checked-in conf-directory fixtures."""

    def setUp(self):
        test_directory = os.path.dirname(os.path.realpath(__file__))
        settings = TestSettings()
        settings['CONF_DIR'] = os.path.join(test_directory, 'data', 'conf-directory')
        settings['LOCAL_DATA_DIR'] = ''
        self._settings_patch = patch('carbon.conf.settings', settings)
        self._settings_patch.start()
        self._database_patch = patch('carbon.state.database', new=WhisperDatabase(settings))
        self._database_patch.start()

    def tearDown(self):
        self._database_patch.stop()
        self._settings_patch.stop()

    def test_loadStorageSchemas_return_schemas(self):
        from carbon.storage import loadStorageSchemas, PatternSchema, Archive
        schema_list = loadStorageSchemas()
        # assertEqual: assertEquals is a deprecated alias (removed in Python 3.12)
        self.assertEqual(len(schema_list), 3)
        expected = [
            PatternSchema('carbon', r'^carbon\.', [Archive.fromString('60:90d')]),
            PatternSchema('default_1min_for_1day', '.*', [Archive.fromString('60s:1d')])
        ]
        # The last entry is the implicit default schema; compare the rest.
        for schema, expected_schema in zip(schema_list[:-1], expected):
            self.assertEqual(schema.name, expected_schema.name)
            self.assertEqual(schema.pattern, expected_schema.pattern)
            for (archive, expected_archive) in zip(schema.archives, expected_schema.archives):
                self.assertEqual(archive.getTuple(), expected_archive.getTuple())

    def test_loadStorageSchemas_return_the_default_schema_last(self):
        from carbon.storage import loadStorageSchemas, defaultSchema
        schema_list = loadStorageSchemas()
        last_schema = schema_list[-1]
        self.assertEqual(last_schema.name, defaultSchema.name)
        self.assertEqual(last_schema.archives, defaultSchema.archives)

    def test_loadAggregationSchemas_return_schemas(self):
        from carbon.storage import loadAggregationSchemas, PatternSchema
        schema_list = loadAggregationSchemas()
        self.assertEqual(len(schema_list), 5)
        expected = [
            PatternSchema('min', r'\.min$', (0.1, 'min')),
            PatternSchema('max', r'\.max$', (0.1, 'max')),
            PatternSchema('sum', r'\.count$', (0, 'sum')),
            PatternSchema('default_average', '.*', (0.5, 'average'))
        ]
        for schema, expected_schema in zip(schema_list[:-1], expected):
            self.assertEqual(schema.name, expected_schema.name)
            self.assertEqual(schema.pattern, expected_schema.pattern)
            self.assertEqual(schema.archives, expected_schema.archives)

    def test_loadAggregationSchema_return_the_default_schema_last(self):
        from carbon.storage import loadAggregationSchemas, defaultAggregation
        schema_list = loadAggregationSchemas()
        last_schema = schema_list[-1]
        self.assertEqual(last_schema, defaultAggregation)
0 | import socket | |
1 | ||
2 | from unittest import TestCase | |
3 | ||
4 | from carbon.util import parseDestinations | |
5 | from carbon.util import enableTcpKeepAlive | |
6 | from carbon.util import TaggedSeries | |
7 | ||
8 | ||
class UtilTest(TestCase):
    """Tests for helpers in carbon.util."""

    def test_enable_tcp_keep_alive(self):
        s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        # Close the socket even when the test fails; the original leaked the fd.
        self.addCleanup(s.close)

        class _Transport():
            def getHandle(self):
                return s

            def setTcpKeepAlive(self, value):
                s.setsockopt(socket.SOL_TCP, socket.SO_KEEPALIVE, value)

        enableTcpKeepAlive(_Transport(), True, None)
        # assertEqual: assertEquals is a deprecated alias (removed in Python 3.12)
        self.assertEqual(s.getsockopt(socket.SOL_TCP, socket.SO_KEEPALIVE), 1)

    def test_sanitizing_name_as_tag_value(self):
        test_cases = [
            {
                'original': "my~.test.abc",
                'expected': "my~.test.abc",
            }, {
                'original': "a.b.c",
                'expected': "a.b.c",
            }, {
                'original': "~~a~~.~~~b~~~.~~~c~~~",
                'expected': "a~~.~~~b~~~.~~~c~~~",
            }, {
                'original': "a.b.c~",
                'expected': "a.b.c~",
            }, {
                'original': "~a.b.c",
                'expected': "a.b.c",
            }, {
                'original': "~a~",
                'expected': "a~",
            }, {
                'original': "~~~",
                'raises': True,
            }, {
                'original': "~",
                'raises': True,
            },
        ]

        for test_case in test_cases:
            if test_case.get('raises', False):
                self.assertRaises(
                    Exception,
                    TaggedSeries.sanitize_name_as_tag_value,
                    test_case['original'],
                )
            else:
                result = TaggedSeries.sanitize_name_as_tag_value(test_case['original'])
                self.assertEqual(result, test_case['expected'])

    def test_validate_tag_key_and_value(self):
        # assert that it raises exception when sanitized name is still not valid
        with self.assertRaises(Exception):
            # sanitized name is going to be '', which is not a valid tag value
            TaggedSeries.sanitize_name_as_tag_value('~~~~')

        with self.assertRaises(Exception):
            # given tag value is invalid because it has length 0
            TaggedSeries.validateTagAndValue('metric.name;tag=')

        with self.assertRaises(Exception):
            # given tag key is invalid because it has length 0
            TaggedSeries.validateTagAndValue('metric.name;=value')

        with self.assertRaises(Exception):
            # given tag is missing =
            TaggedSeries.validateTagAndValue('metric.name;tagvalue')

        with self.assertRaises(Exception):
            # given tag value is invalid because it starts with ~
            TaggedSeries.validateTagAndValue('metric.name;tag=~value')

        with self.assertRaises(Exception):
            # given tag key is invalid because it contains !
            TaggedSeries.validateTagAndValue('metric.name;ta!g=value')
90 | ||
91 | # Destinations have the form: | |
92 | # <host> ::= <string without colons> | "[" <string> "]" | |
93 | # <port> ::= <number> | |
94 | # <instance> ::= <string> | |
95 | # <destination> ::= <host> ":" <port> | <host> ":" <port> ":" <instance> | |
96 | ||
class ParseDestinationsTest(TestCase):
    """Tests for carbon.util.parseDestinations (see grammar comment above)."""

    def test_valid_dest_unbracketed(self):
        # Tests valid destinations in the unbracketed form of <host>.
        dests = [
            "127.0.0.1:1234:alpha",  # Full IPv4 address
            "127.1:1234:beta",  # 'Short' IPv4 address
            "localhost:987:epsilon",  # Relative domain name
            "foo.bar.baz.uk.:890:sigma"  # Absolute domain name
        ]

        expected = [
            ("127.0.0.1", 1234, "alpha"),
            ("127.1", 1234, "beta"),
            ("localhost", 987, "epsilon"),
            ("foo.bar.baz.uk.", 890, "sigma")
        ]

        actual = parseDestinations(dests)
        # assertEqual: assertEquals is a deprecated alias (removed in Python 3.12)
        self.assertEqual(len(expected), len(actual))

        for exp, act in zip(expected, actual):
            self.assertEqual(exp, act)

    def test_valid_dest_bracketed(self):
        # Tests valid destinations in the bracketed form of <host>.
        dests = [
            "[fe80:dead:beef:cafe:0007:0007:0007:0001]:123:gamma",  # Full IPv6 address
            "[fe80:1234::7]:456:theta",  # Compact IPv6 address
            "[::]:1:o",  # Very compact IPv6 address
            "[ffff::127.0.0.1]:789:omicron"  # IPv6 mapped IPv4 address
        ]

        expected = [
            ("fe80:dead:beef:cafe:0007:0007:0007:0001", 123, "gamma"),
            ("fe80:1234::7", 456, "theta"),
            ("::", 1, "o"),
            ("ffff::127.0.0.1", 789, "omicron"),
        ]

        actual = parseDestinations(dests)
        self.assertEqual(len(expected), len(actual))

        for exp, act in zip(expected, actual):
            self.assertEqual(exp, act)

    def test_valid_dest_without_instance(self):
        # Tests destinations without instance specified.
        dests = [
            "1.2.3.4:5678",
            "[::1]:2",
            "stats.example.co.uk:8125",
            "[127.0.0.1]:78",  # Odd use of the bracket feature, but why not?
            "[why.not.this.com]:89",
        ]

        expected = [
            ("1.2.3.4", 5678, None),
            ("::1", 2, None),
            ("stats.example.co.uk", 8125, None),
            ("127.0.0.1", 78, None),
            ("why.not.this.com", 89, None)
        ]

        actual = parseDestinations(dests)
        self.assertEqual(len(expected), len(actual))

        for exp, act in zip(expected, actual):
            self.assertEqual(exp, act)

    def test_wrong_dest(self):
        # Some cases of invalid input, e.g. invalid/missing port.
        dests = [
            "1.2.3.4",  # No port
            "1.2.3.4:huh",  # Invalid port (must be int)
            "[fe80::3285:a9ff:fe91:e287]",  # No port
            "[ffff::1.2.3.4]:notaport"  # Invalid port
        ]

        for dest in dests:
            # assertRaises replaces the hand-rolled try/except/AssertionError
            # and reports which input was wrongly accepted.
            with self.assertRaises(ValueError, msg="Invalid input was accepted: %r" % dest):
                parseDestinations([dest])
0 | from carbon.conf import Settings | |
1 | ||
2 | ||
class TestSettings(Settings):
    """Settings stub for tests: never touches configuration files on disk."""

    def readFrom(*args, **kwargs):
        """No-op override; tests assign settings keys directly instead."""
        return None
166 | 166 | |
167 | 167 | # Yes this is duplicated in whisper. Yes, duplication is bad. |
168 | 168 | # But the code is needed in both places and we do not want to create |
169 | # a dependency on whisper especiaily as carbon moves toward being a more | |
169 | # a dependency on whisper especially as carbon moves toward being a more | |
170 | 170 | # generic storage service that can use various backends. |
171 | 171 | UnitMultipliers = { |
172 | 172 | 's': 1, |
231 | 231 | mod = sys.modules[module] |
232 | 232 | if name not in cls.PICKLE_SAFE[module]: |
233 | 233 | raise pickle.UnpicklingError('Attempting to unpickle unsafe class %s' % name) |
234 | return getattr(mod, name) | |
234 | return getattr(mod, name) # skipcq: PTC-W0034 | |
235 | 235 | |
236 | 236 | @classmethod |
237 | 237 | def loads(cls, pickle_string): |
253 | 253 | mod = sys.modules[module] |
254 | 254 | if name not in self.PICKLE_SAFE[module]: |
255 | 255 | raise pickle.UnpicklingError('Attempting to unpickle unsafe class %s' % name) |
256 | return getattr(mod, name) | |
256 | return getattr(mod, name) # skipcq: PTC-W0034 | |
257 | 257 | |
258 | 258 | @classmethod |
259 | 259 | def loads(cls, pickle_string): |
423 | 423 | sanitized = name.lstrip('~') |
424 | 424 | |
425 | 425 | if len(sanitized) == 0: |
426 | raise Exception('Cannot use metric name %s as tag value, results in emptry string' % (name)) | |
426 | raise Exception('Cannot use metric name %s as tag value, results in an empty string' % (name)) | |
427 | 427 | |
428 | 428 | return sanitized |
429 | 429 |
35 | 35 | AGGREGATION_SCHEMAS = loadAggregationSchemas() |
36 | 36 | |
37 | 37 | |
38 | # Inititalize token buckets so that we can enforce rate limits on creates and | |
38 | # Initialize token buckets so that we can enforce rate limits on creates and | |
39 | 39 | # updates if the config wants them. |
40 | 40 | CREATE_BUCKET = None |
41 | 41 | UPDATE_BUCKET = None |
107 | 107 | # file then we'll just drop the metric on the ground and move on to the next |
108 | 108 | # metric. |
109 | 109 | # XXX This behavior should probably be configurable to no tdrop metrics |
110 | # when rate limitng unless our cache is too big or some other legit | |
110 | # when rate limiting unless our cache is too big or some other legit | |
111 | 111 | # reason. |
112 | 112 | instrumentation.increment('droppedCreates') |
113 | 113 | continue |
140 | 140 | try: |
141 | 141 | state.database.create(metric, archiveConfig, xFilesFactor, aggregationMethod) |
142 | 142 | if settings.ENABLE_TAGS: |
143 | tagQueue.add(metric) | |
143 | if not settings.SKIP_TAGS_FOR_NONTAGGED or ';' in metric: | |
144 | tagQueue.add(metric) | |
144 | 145 | instrumentation.increment('creates') |
145 | 146 | except Exception as e: |
146 | 147 | log.err() |
158 | 159 | try: |
159 | 160 | t1 = time.time() |
160 | 161 | # If we have duplicated points, always pick the last. update_many() |
161 | # has no guaranted behavior for that, and in fact the current implementation | |
162 | # has no guaranteed behavior for that, and in fact the current implementation | |
162 | 163 | # will keep the first point in the list. |
163 | 164 | datapoints = dict(datapoints).items() |
164 | 165 | state.database.write(metric, datapoints) |
165 | 166 | if settings.ENABLE_TAGS: |
166 | tagQueue.update(metric) | |
167 | if not settings.SKIP_TAGS_FOR_NONTAGGED or ';' in metric: | |
168 | tagQueue.update(metric) | |
167 | 169 | updateTime = time.time() - t1 |
168 | 170 | except Exception as e: |
169 | 171 | log.err() |
0 | Twisted>=13.2.0 | |
1 | git+https://github.com/graphite-project/whisper.git#egg=whisper | |
2 | txAMQP | |
3 | cachetools | |
4 | urllib3 |
0 | 0 | [bdist_rpm] |
1 | 1 | requires = python-twisted |
2 | whisper | |
2 | whisper | |
3 | post-install = distro/redhat/misc/postinstall | |
3 | 4 | |
4 | post-install = distro/redhat/misc/postinstall | |
5 | [install] | |
6 | prefix = /opt/graphite | |
7 | install-lib = %(prefix)s/lib | |
8 |
78 | 78 | try: |
79 | 79 | setup( |
80 | 80 | name='carbon', |
81 | version='1.1.7', | |
81 | version='1.2.0', | |
82 | 82 | url='http://graphiteapp.org/', |
83 | 83 | author='Chris Davis', |
84 | 84 | author_email='chrismd@gmail.com', |
0 | coverage | |
1 | mock | |
2 | mocker | |
3 | nose | |
4 | protobuf | |
5 | mmh3 | |
6 | pyOpenSSL | |
7 | git+https://github.com/graphite-project/ceres.git#egg=ceres | |
8 | backports.tempfile |
0 | [tox] | |
1 | envlist = | |
2 | py{27,35,36,37,38,py}{,-pyhash}, | |
3 | lint, | |
4 | benchmark | |
5 | ||
6 | [testenv] | |
7 | setenv = | |
8 | GRAPHITE_NO_PREFIX=true | |
9 | PYTHONPATH={toxinidir}/lib | |
10 | commands = | |
11 | coverage run --branch --source=lib,bin --omit=lib/carbon/tests/* "{envbindir}/trial" carbon | |
12 | coverage xml | |
13 | coverage report | |
14 | deps = | |
15 | -rrequirements.txt | |
16 | -rtests-requirements.txt | |
17 | pyhash: pyhash | |
18 | ||
19 | [testenv:lint] | |
20 | deps = | |
21 | flake8 | |
22 | six | |
23 | commands = | |
24 | flake8 {toxinidir}/lib {toxinidir}/bin | |
25 | ||
26 | [testenv:benchmark] | |
27 | voting = False | |
28 | commands = | |
29 | python {toxinidir}/lib/carbon/tests/benchmark_cache.py | |
30 | python {toxinidir}/lib/carbon/tests/benchmark_routers.py | |
31 | ||
32 | [flake8] | |
33 | max-line-length=100 | |
34 | ignore=E111,E114,E121,W504 |