Imported Upstream version 0.9.12
Jonas Genannt
10 years ago
0 | 0 | Metadata-Version: 1.0 |
1 | 1 | Name: carbon |
2 | Version: 0.9.10 | |
2 | Version: 0.9.12 | |
3 | 3 | Summary: Backend data caching and persistence daemon for Graphite |
4 | Home-page: https://launchpad.net/graphite | |
4 | Home-page: http://graphite-project.github.com | |
5 | 5 | Author: Chris Davis |
6 | 6 | Author-email: chrismd@gmail.com |
7 | 7 | License: Apache Software License 2.0 |
13 | 13 | limitations under the License.""" |
14 | 14 | |
15 | 15 | import sys |
16 | from os.path import dirname, join, abspath | |
16 | import os.path | |
17 | 17 | |
18 | 18 | # Figure out where we're installed |
19 | BIN_DIR = dirname(abspath(__file__)) | |
20 | ROOT_DIR = dirname(BIN_DIR) | |
19 | BIN_DIR = os.path.dirname(os.path.abspath(__file__)) | |
20 | ROOT_DIR = os.path.dirname(BIN_DIR) | |
21 | 21 | |
22 | 22 | # Make sure that carbon's 'lib' dir is in the $PYTHONPATH if we're running from |
23 | 23 | # source. |
24 | LIB_DIR = join(ROOT_DIR, 'lib') | |
24 | LIB_DIR = os.path.join(ROOT_DIR, "lib") | |
25 | 25 | sys.path.insert(0, LIB_DIR) |
26 | 26 | |
27 | 27 | from carbon.util import run_twistd_plugin |
28 | from carbon.exceptions import CarbonConfigException | |
28 | 29 | |
29 | run_twistd_plugin(__file__) | |
30 | try: | |
31 | run_twistd_plugin(__file__) | |
32 | except CarbonConfigException, exc: | |
33 | raise SystemExit(str(exc)) |
13 | 13 | limitations under the License.""" |
14 | 14 | |
15 | 15 | import sys |
16 | from os.path import dirname, join, abspath | |
16 | import os.path | |
17 | 17 | |
18 | 18 | # Figure out where we're installed |
19 | BIN_DIR = dirname(abspath(__file__)) | |
20 | ROOT_DIR = dirname(BIN_DIR) | |
19 | BIN_DIR = os.path.dirname(os.path.abspath(__file__)) | |
20 | ROOT_DIR = os.path.dirname(BIN_DIR) | |
21 | 21 | |
22 | 22 | # Make sure that carbon's 'lib' dir is in the $PYTHONPATH if we're running from |
23 | 23 | # source. |
24 | LIB_DIR = join(ROOT_DIR, 'lib') | |
24 | LIB_DIR = os.path.join(ROOT_DIR, "lib") | |
25 | 25 | sys.path.insert(0, LIB_DIR) |
26 | 26 | |
27 | 27 | from carbon.util import run_twistd_plugin |
28 | from carbon.exceptions import CarbonConfigException | |
28 | 29 | |
29 | run_twistd_plugin(__file__) | |
30 | try: | |
31 | run_twistd_plugin(__file__) | |
32 | except CarbonConfigException, exc: | |
33 | raise SystemExit(str(exc)) |
13 | 13 | limitations under the License.""" |
14 | 14 | |
15 | 15 | import sys |
16 | from os.path import dirname, join, abspath | |
16 | import os.path | |
17 | 17 | |
18 | 18 | # Figure out where we're installed |
19 | BIN_DIR = dirname(abspath(__file__)) | |
20 | ROOT_DIR = dirname(BIN_DIR) | |
19 | BIN_DIR = os.path.dirname(os.path.abspath(__file__)) | |
20 | ROOT_DIR = os.path.dirname(BIN_DIR) | |
21 | 21 | |
22 | 22 | # Make sure that carbon's 'lib' dir is in the $PYTHONPATH if we're running from |
23 | 23 | # source. |
24 | LIB_DIR = join(ROOT_DIR, 'lib') | |
24 | LIB_DIR = os.path.join(ROOT_DIR, "lib") | |
25 | 25 | sys.path.insert(0, LIB_DIR) |
26 | 26 | |
27 | 27 | from carbon.util import run_twistd_plugin |
28 | from carbon.exceptions import CarbonConfigException | |
28 | 29 | |
29 | run_twistd_plugin(__file__) | |
30 | try: | |
31 | run_twistd_plugin(__file__) | |
32 | except CarbonConfigException, exc: | |
33 | raise SystemExit(str(exc)) |
26 | 26 | |
27 | 27 | config_parser = ConfigParser() |
28 | 28 | if not config_parser.read(SCHEMAS_FILE): |
29 | print "Error: Couldn't read config file: %s" % SCHEMAS_FILE | |
30 | sys.exit(1) | |
29 | raise SystemExit("Error: Couldn't read config file: %s" % SCHEMAS_FILE) | |
31 | 30 | |
32 | 31 | errors_found = 0 |
33 | 32 | |
61 | 60 | print " OK" |
62 | 61 | |
63 | 62 | if errors_found: |
64 | ||
65 | print "Storage-schemas configuration '%s' failed validation" % SCHEMAS_FILE | |
66 | sys.exit(1) | |
63 | raise SystemExit( "Storage-schemas configuration '%s' failed validation" % SCHEMAS_FILE) | |
67 | 64 | |
68 | ||
69 | 65 | print "Storage-schemas configuration '%s' is valid" % SCHEMAS_FILE |
25 | 25 | # aggregate metric 'prod.applications.apache.all.requests' would be calculated |
26 | 26 | # by summing their values. |
27 | 27 | # |
28 | # Template components such as <env> will match everything up to the next dot. | |
29 | # To match metric multiple components including the dots, use <<metric>> in the | |
30 | # input template: | |
31 | # | |
32 | # <env>.applications.<app>.all.<app_metric> (60) = sum <env>.applications.<app>.*.<<app_metric>> | |
33 | # | |
28 | 34 | # Note that any time this file is modified, it will be re-read automatically. |
29 | 29 | # |
30 | 30 | #LOCAL_DATA_DIR = /opt/graphite/storage/whisper/ |
31 | 31 | |
32 | # Enable daily log rotation. If disabled, a kill -HUP can be used after a manual rotate | |
33 | ENABLE_LOGROTATION = True | |
34 | ||
32 | 35 | # Specify the user to drop privileges to |
33 | 36 | # If this is blank carbon runs as the user that invokes it |
34 | 37 | # This user must have write access to the local data directory |
35 | 38 | USER = |
39 | # | |
40 | # NOTE: The above settings must be set under [relay] and [aggregator] | |
41 | # to take effect for those daemons as well | |
36 | 42 | |
37 | 43 | # Limit the size of the cache to avoid swapping or becoming CPU bound. |
38 | 44 | # Sorts and serving cache queries gets more expensive as the cache grows. |
45 | 51 | # When the rate of required updates exceeds this, then carbon's caching will |
46 | 52 | # take effect and increase the overall throughput accordingly. |
47 | 53 | MAX_UPDATES_PER_SECOND = 500 |
54 | ||
55 | # If defined, this changes the MAX_UPDATES_PER_SECOND in Carbon when a | |
56 | # stop/shutdown is initiated. This helps when MAX_UPDATES_PER_SECOND is | |
57 | # relatively low and carbon has cached a lot of updates; it enables the carbon | |
58 | # daemon to shutdown more quickly. | |
59 | # MAX_UPDATES_PER_SECOND_ON_SHUTDOWN = 1000 | |
48 | 60 | |
49 | 61 | # Softly limits the number of whisper files that get created each minute. |
50 | 62 | # Setting this value low (like at 50) is a good way to ensure your graphite |
68 | 80 | PICKLE_RECEIVER_INTERFACE = 0.0.0.0 |
69 | 81 | PICKLE_RECEIVER_PORT = 2004 |
70 | 82 | |
83 | # Set to false to disable logging of successful connections | |
84 | LOG_LISTENER_CONNECTIONS = True | |
85 | ||
71 | 86 | # Per security concerns outlined in Bug #817247 the pickle receiver |
72 | 87 | # will use a more secure and slightly less efficient unpickler. |
73 | 88 | # Set this to True to revert to the old-fashioned insecure unpickler. |
82 | 97 | # data until the cache size falls below 95% MAX_CACHE_SIZE. |
83 | 98 | USE_FLOW_CONTROL = True |
84 | 99 | |
85 | # By default, carbon-cache will log every whisper update. This can be excessive and | |
100 | # By default, carbon-cache will log every whisper update and cache hit. This can be excessive and | |
86 | 101 | # degrade performance if logging on the same volume as the whisper data is stored. |
87 | 102 | LOG_UPDATES = False |
103 | LOG_CACHE_HITS = False | |
104 | LOG_CACHE_QUEUE_SORTS = True | |
105 | ||
106 | # The thread that writes metrics to disk can use on of the following strategies | |
107 | # determining the order in which metrics are removed from cache and flushed to | |
108 | # disk. The default option preserves the same behavior as has been historically | |
109 | # available in version 0.9.10. | |
110 | # | |
111 | # sorted - All metrics in the cache will be counted and an ordered list of | |
112 | # them will be sorted according to the number of datapoints in the cache at the | |
113 | # moment of the list's creation. Metrics will then be flushed from the cache to | |
114 | # disk in that order. | |
115 | # | |
116 | # max - The writer thread will always pop and flush the metric from cache | |
117 | # that has the most datapoints. This will give a strong flush preference to | |
118 | # frequently updated metrics and will also reduce random file-io. Infrequently | |
119 | # updated metrics may only ever be persisted to disk at daemon shutdown if | |
120 | # there are a large number of metrics which receive very frequent updates OR if | |
121 | # disk i/o is very slow. | |
122 | # | |
123 | # naive - Metrics will be flushed from the cache to disk in an unordered | |
124 | # fashion. This strategy may be desirable in situations where the storage for | |
125 | # whisper files is solid state, CPU resources are very limited or deference to | |
126 | # the OS's i/o scheduler is expected to compensate for the random write | |
127 | # pattern. | |
128 | # | |
129 | CACHE_WRITE_STRATEGY = sorted | |
88 | 130 | |
89 | 131 | # On some systems it is desirable for whisper to write synchronously. |
90 | 132 | # Set this option to True if you'd like to try this. Basically it will |
98 | 140 | # MAX_CREATES_PER_MINUTE but may have longer term performance implications |
99 | 141 | # depending on the underlying storage configuration. |
100 | 142 | # WHISPER_SPARSE_CREATE = False |
143 | ||
144 | # Only beneficial on linux filesystems that support the fallocate system call. | |
145 | # It maintains the benefits of contiguous reads/writes, but with a potentially | |
146 | # much faster creation speed, by allowing the kernel to handle the block | |
147 | # allocation and zero-ing. Enabling this option may allow a large increase of | |
148 | # MAX_CREATES_PER_MINUTE. If enabled on an OS or filesystem that is unsupported | |
149 | # this option will gracefully fallback to standard POSIX file access methods. | |
150 | WHISPER_FALLOCATE_CREATE = True | |
101 | 151 | |
102 | 152 | # Enabling this option will cause Whisper to lock each Whisper file it writes |
103 | 153 | # to with an exclusive lock (LOCK_EX, see: man 2 flock). This is useful when |
169 | 219 | PICKLE_RECEIVER_INTERFACE = 0.0.0.0 |
170 | 220 | PICKLE_RECEIVER_PORT = 2014 |
171 | 221 | |
172 | # To use consistent hashing instead of the user defined relay-rules.conf, | |
173 | # change this to: | |
174 | # RELAY_METHOD = consistent-hashing | |
222 | # Set to false to disable logging of successful connections | |
223 | LOG_LISTENER_CONNECTIONS = True | |
224 | ||
225 | # Carbon-relay has several options for metric routing controlled by RELAY_METHOD | |
226 | # | |
227 | # Use relay-rules.conf to route metrics to destinations based on pattern rules | |
228 | #RELAY_METHOD = rules | |
229 | # | |
230 | # Use consistent-hashing for even distribution of metrics between destinations | |
231 | #RELAY_METHOD = consistent-hashing | |
232 | # | |
233 | # Use consistent-hashing but take into account an aggregation-rules.conf shared | |
234 | # by downstream carbon-aggregator daemons. This will ensure that all metrics | |
235 | # that map to a given aggregation rule are sent to the same carbon-aggregator | |
236 | # instance. | |
237 | # Enable this for carbon-relays that send to a group of carbon-aggregators | |
238 | #RELAY_METHOD = aggregated-consistent-hashing | |
175 | 239 | RELAY_METHOD = rules |
176 | 240 | |
177 | # If you use consistent-hashing you may want to add redundancy | |
178 | # of your data by replicating every datapoint to more than | |
179 | # one machine. | |
241 | # If you use consistent-hashing you can add redundancy by replicating every | |
242 | # datapoint to more than one machine. | |
180 | 243 | REPLICATION_FACTOR = 1 |
181 | 244 | |
182 | 245 | # This is a list of carbon daemons we will send any relayed or |
227 | 290 | PICKLE_RECEIVER_INTERFACE = 0.0.0.0 |
228 | 291 | PICKLE_RECEIVER_PORT = 2024 |
229 | 292 | |
293 | # Set to false to disable logging of successful connections | |
294 | LOG_LISTENER_CONNECTIONS = True | |
295 | ||
296 | # If set true, metric received will be forwarded to DESTINATIONS in addition to | |
297 | # the output of the aggregation rules. If set false the carbon-aggregator will | |
298 | # only ever send the output of aggregation. | |
299 | FORWARD_ALL = True | |
300 | ||
230 | 301 | # This is a list of carbon daemons we will send any relayed or |
231 | 302 | # generated metrics to. The default provided would send to a single |
232 | 303 | # carbon-cache instance on the default port. However if you |
267 | 338 | # the past MAX_AGGREGATION_INTERVALS * intervalSize seconds. |
268 | 339 | MAX_AGGREGATION_INTERVALS = 5 |
269 | 340 | |
341 | # By default (WRITE_BACK_FREQUENCY = 0), carbon-aggregator will write back | |
342 | # aggregated data points once every rule.frequency seconds, on a per-rule basis. | |
343 | # Set this (WRITE_BACK_FREQUENCY = N) to write back all aggregated data points | |
344 | # every N seconds, independent of rule frequency. This is useful, for example, | |
345 | # to be able to query partially aggregated metrics from carbon-cache without | |
346 | # having to first wait rule.frequency seconds. | |
347 | # WRITE_BACK_FREQUENCY = 0 | |
348 | ||
270 | 349 | # Set this to True to enable whitelisting and blacklisting of metrics in |
271 | 350 | # CONF_DIR/whitelist and CONF_DIR/blacklist. If the whitelist is missing or |
272 | 351 | # empty, all metrics will pass through |
0 | #!/bin/bash | |
1 | # chkconfig: - 25 75 | |
2 | # description: carbon-aggregator | |
3 | # processname: carbon-aggregator | |
4 | ||
5 | export PYTHONPATH="$GRAPHITE_DIR/lib:$PYTHONPATH" | |
6 | ||
7 | # Source function library. | |
8 | if [ -e /etc/rc.d/init.d/functions ]; then | |
9 | . /etc/rc.d/init.d/functions; | |
10 | fi; | |
11 | ||
12 | CARBON_DAEMON="aggregator" | |
13 | GRAPHITE_DIR="/opt/graphite" | |
14 | INSTANCES=`grep "^\[${CARBON_DAEMON}" ${GRAPHITE_DIR}/conf/carbon.conf | cut -d \[ -f 2 | cut -d \] -f 1 | cut -d : -f 2` | |
15 | ||
16 | function die { | |
17 | echo $1 | |
18 | exit 1 | |
19 | } | |
20 | ||
21 | start(){ | |
22 | cd $GRAPHITE_DIR; | |
23 | ||
24 | for INSTANCE in ${INSTANCES}; do | |
25 | if [ "${INSTANCE}" == "${CARBON_DAEMON}" ]; then | |
26 | INSTANCE="a"; | |
27 | fi; | |
28 | echo "Starting carbon-${CARBON_DAEMON}:${INSTANCE}..." | |
29 | bin/carbon-${CARBON_DAEMON}.py --instance=${INSTANCE} start; | |
30 | ||
31 | if [ $? -eq 0 ]; then | |
32 | echo_success | |
33 | else | |
34 | echo_failure | |
35 | fi; | |
36 | echo "" | |
37 | done; | |
38 | } | |
39 | ||
40 | stop(){ | |
41 | cd $GRAPHITE_DIR | |
42 | ||
43 | for INSTANCE in ${INSTANCES}; do | |
44 | if [ "${INSTANCE}" == "${CARBON_DAEMON}" ]; then | |
45 | INSTANCE="a"; | |
46 | fi; | |
47 | echo "Stopping carbon-${CARBON_DAEMON}:${INSTANCE}..." | |
48 | bin/carbon-${CARBON_DAEMON}.py --instance=${INSTANCE} stop | |
49 | ||
50 | if [ `sleep 3; /usr/bin/pgrep -f "carbon-${CARBON_DAEMON}.py --instance=${INSTANCE}" | /usr/bin/wc -l` -gt 0 ]; then | |
51 | echo "Carbon did not stop yet. Sleeping longer, then force killing it..."; | |
52 | sleep 20; | |
53 | /usr/bin/pkill -9 -f "carbon-${CARBON_DAEMON}.py --instance=${INSTANCE}"; | |
54 | fi; | |
55 | ||
56 | if [ $? -eq 0 ]; then | |
57 | echo_success | |
58 | else | |
59 | echo_failure | |
60 | fi; | |
61 | echo "" | |
62 | done; | |
63 | } | |
64 | ||
65 | status(){ | |
66 | cd $GRAPHITE_DIR; | |
67 | ||
68 | for INSTANCE in ${INSTANCES}; do | |
69 | if [ "${INSTANCE}" == "${CARBON_DAEMON}" ]; then | |
70 | INSTANCE="a"; | |
71 | fi; | |
72 | bin/carbon-${CARBON_DAEMON}.py --instance=${INSTANCE} status; | |
73 | ||
74 | if [ $? -eq 0 ]; then | |
75 | echo_success | |
76 | else | |
77 | echo_failure | |
78 | fi; | |
79 | echo "" | |
80 | done; | |
81 | } | |
82 | ||
83 | case "$1" in | |
84 | start) | |
85 | start | |
86 | ;; | |
87 | stop) | |
88 | stop | |
89 | ;; | |
90 | status) | |
91 | status | |
92 | ;; | |
93 | restart|reload) | |
94 | stop | |
95 | start | |
96 | ;; | |
97 | *) | |
98 | echo $"Usage: $0 {start|stop|restart|status}" | |
99 | exit 1 | |
100 | esac | |
101 |
0 | #!/bin/bash | |
1 | # chkconfig: - 25 75 | |
2 | # description: carbon-cache | |
3 | # processname: carbon-cache | |
4 | ||
5 | export PYTHONPATH="$GRAPHITE_DIR/lib:$PYTHONPATH" | |
6 | ||
7 | # Source function library. | |
8 | if [ -e /etc/rc.d/init.d/functions ]; then | |
9 | . /etc/rc.d/init.d/functions; | |
10 | fi; | |
11 | ||
12 | CARBON_DAEMON="cache" | |
13 | GRAPHITE_DIR="/opt/graphite" | |
14 | INSTANCES=`grep "^\[${CARBON_DAEMON}" ${GRAPHITE_DIR}/conf/carbon.conf | cut -d \[ -f 2 | cut -d \] -f 1 | cut -d : -f 2` | |
15 | ||
16 | function die { | |
17 | echo $1 | |
18 | exit 1 | |
19 | } | |
20 | ||
21 | start(){ | |
22 | cd $GRAPHITE_DIR; | |
23 | ||
24 | for INSTANCE in ${INSTANCES}; do | |
25 | if [ "${INSTANCE}" == "${CARBON_DAEMON}" ]; then | |
26 | INSTANCE="a"; | |
27 | fi; | |
28 | echo "Starting carbon-${CARBON_DAEMON}:${INSTANCE}..." | |
29 | bin/carbon-${CARBON_DAEMON}.py --instance=${INSTANCE} start; | |
30 | ||
31 | if [ $? -eq 0 ]; then | |
32 | echo_success | |
33 | else | |
34 | echo_failure | |
35 | fi; | |
36 | echo "" | |
37 | done; | |
38 | } | |
39 | ||
40 | stop(){ | |
41 | cd $GRAPHITE_DIR | |
42 | ||
43 | for INSTANCE in ${INSTANCES}; do | |
44 | if [ "${INSTANCE}" == "${CARBON_DAEMON}" ]; then | |
45 | INSTANCE="a"; | |
46 | fi; | |
47 | echo "Stopping carbon-${CARBON_DAEMON}:${INSTANCE}..." | |
48 | bin/carbon-${CARBON_DAEMON}.py --instance=${INSTANCE} stop | |
49 | ||
50 | if [ `sleep 3; /usr/bin/pgrep -f "carbon-${CARBON_DAEMON}.py --instance=${INSTANCE}" | /usr/bin/wc -l` -gt 0 ]; then | |
51 | echo "Carbon did not stop yet. Sleeping longer, then force killing it..."; | |
52 | sleep 20; | |
53 | /usr/bin/pkill -9 -f "carbon-${CARBON_DAEMON}.py --instance=${INSTANCE}"; | |
54 | fi; | |
55 | ||
56 | if [ $? -eq 0 ]; then | |
57 | echo_success | |
58 | else | |
59 | echo_failure | |
60 | fi; | |
61 | echo "" | |
62 | done; | |
63 | } | |
64 | ||
65 | status(){ | |
66 | cd $GRAPHITE_DIR; | |
67 | ||
68 | for INSTANCE in ${INSTANCES}; do | |
69 | if [ "${INSTANCE}" == "${CARBON_DAEMON}" ]; then | |
70 | INSTANCE="a"; | |
71 | fi; | |
72 | bin/carbon-${CARBON_DAEMON}.py --instance=${INSTANCE} status; | |
73 | ||
74 | if [ $? -eq 0 ]; then | |
75 | echo_success | |
76 | else | |
77 | echo_failure | |
78 | fi; | |
79 | echo "" | |
80 | done; | |
81 | } | |
82 | ||
83 | case "$1" in | |
84 | start) | |
85 | start | |
86 | ;; | |
87 | stop) | |
88 | stop | |
89 | ;; | |
90 | status) | |
91 | status | |
92 | ;; | |
93 | restart|reload) | |
94 | stop | |
95 | start | |
96 | ;; | |
97 | *) | |
98 | echo $"Usage: $0 {start|stop|restart|status}" | |
99 | exit 1 | |
100 | esac | |
101 |
0 | #!/bin/bash | |
1 | # chkconfig: - 25 75 | |
2 | # description: carbon-relay | |
3 | # processname: carbon-relay | |
4 | ||
5 | export PYTHONPATH="$GRAPHITE_DIR/lib:$PYTHONPATH" | |
6 | ||
7 | # Source function library. | |
8 | if [ -e /etc/rc.d/init.d/functions ]; then | |
9 | . /etc/rc.d/init.d/functions; | |
10 | fi; | |
11 | ||
12 | CARBON_DAEMON="relay" | |
13 | GRAPHITE_DIR="/opt/graphite" | |
14 | INSTANCES=`grep "^\[${CARBON_DAEMON}" ${GRAPHITE_DIR}/conf/carbon.conf | cut -d \[ -f 2 | cut -d \] -f 1 | cut -d : -f 2` | |
15 | ||
16 | function die { | |
17 | echo $1 | |
18 | exit 1 | |
19 | } | |
20 | ||
21 | start(){ | |
22 | cd $GRAPHITE_DIR; | |
23 | ||
24 | for INSTANCE in ${INSTANCES}; do | |
25 | if [ "${INSTANCE}" == "${CARBON_DAEMON}" ]; then | |
26 | INSTANCE="a"; | |
27 | fi; | |
28 | echo "Starting carbon-${CARBON_DAEMON}:${INSTANCE}..." | |
29 | bin/carbon-${CARBON_DAEMON}.py --instance=${INSTANCE} start; | |
30 | ||
31 | if [ $? -eq 0 ]; then | |
32 | echo_success | |
33 | else | |
34 | echo_failure | |
35 | fi; | |
36 | echo "" | |
37 | done; | |
38 | } | |
39 | ||
40 | stop(){ | |
41 | cd $GRAPHITE_DIR | |
42 | ||
43 | for INSTANCE in ${INSTANCES}; do | |
44 | if [ "${INSTANCE}" == "${CARBON_DAEMON}" ]; then | |
45 | INSTANCE="a"; | |
46 | fi; | |
47 | echo "Stopping carbon-${CARBON_DAEMON}:${INSTANCE}..." | |
48 | bin/carbon-${CARBON_DAEMON}.py --instance=${INSTANCE} stop | |
49 | ||
50 | if [ `sleep 3; /usr/bin/pgrep -f "carbon-${CARBON_DAEMON}.py --instance=${INSTANCE}" | /usr/bin/wc -l` -gt 0 ]; then | |
51 | echo "Carbon did not stop yet. Sleeping longer, then force killing it..."; | |
52 | sleep 20; | |
53 | /usr/bin/pkill -9 -f "carbon-${CARBON_DAEMON}.py --instance=${INSTANCE}"; | |
54 | fi; | |
55 | ||
56 | if [ $? -eq 0 ]; then | |
57 | echo_success | |
58 | else | |
59 | echo_failure | |
60 | fi; | |
61 | echo "" | |
62 | done; | |
63 | } | |
64 | ||
65 | status(){ | |
66 | cd $GRAPHITE_DIR; | |
67 | ||
68 | for INSTANCE in ${INSTANCES}; do | |
69 | if [ "${INSTANCE}" == "${CARBON_DAEMON}" ]; then | |
70 | INSTANCE="a"; | |
71 | fi; | |
72 | bin/carbon-${CARBON_DAEMON}.py --instance=${INSTANCE} status; | |
73 | ||
74 | if [ $? -eq 0 ]; then | |
75 | echo_success | |
76 | else | |
77 | echo_failure | |
78 | fi; | |
79 | echo "" | |
80 | done; | |
81 | } | |
82 | ||
83 | case "$1" in | |
84 | start) | |
85 | start | |
86 | ;; | |
87 | stop) | |
88 | stop | |
89 | ;; | |
90 | status) | |
91 | status | |
92 | ;; | |
93 | restart|reload) | |
94 | stop | |
95 | start | |
96 | ;; | |
97 | *) | |
98 | echo $"Usage: $0 {start|stop|restart|status}" | |
99 | exit 1 | |
100 | esac | |
101 |
50 | 50 | self.aggregation_frequency = int(frequency) |
51 | 51 | self.aggregation_func = func |
52 | 52 | self.compute_task = LoopingCall(self.compute_value) |
53 | self.compute_task.start(frequency, now=False) | |
53 | self.compute_task.start(settings['WRITE_BACK_FREQUENCY'] or frequency, now=False) | |
54 | 54 | self.configured = True |
55 | 55 | |
56 | 56 | def compute_value(self): |
68 | 68 | |
69 | 69 | if buffer.interval < age_threshold: |
70 | 70 | del self.interval_buffers[buffer.interval] |
71 | if not self.interval_buffers: | |
72 | self.close() | |
73 | self.configured = False | |
74 | del BufferManager.buffers[self.metric_path] | |
71 | 75 | |
72 | 76 | def close(self): |
73 | 77 | if self.compute_task and self.compute_task.running: |
0 | 0 | from carbon.instrumentation import increment |
1 | 1 | from carbon.aggregator.rules import RuleManager |
2 | 2 | from carbon.aggregator.buffers import BufferManager |
3 | from carbon.conf import settings | |
3 | 4 | from carbon.rewrite import RewriteRuleManager |
4 | 5 | from carbon import events |
5 | 6 | |
30 | 31 | for rule in RewriteRuleManager.postRules: |
31 | 32 | metric = rule.apply(metric) |
32 | 33 | |
33 | if metric not in aggregate_metrics: | |
34 | if settings['FORWARD_ALL'] and metric not in aggregate_metrics: | |
34 | 35 | events.metricGenerated(metric, datapoint) |
138 | 138 | AGGREGATION_METHODS = { |
139 | 139 | 'sum' : sum, |
140 | 140 | 'avg' : avg, |
141 | 'min' : min, | |
142 | 'max' : max, | |
141 | 143 | } |
142 | 144 | |
143 | 145 | # Importable singleton |
34 | 34 | import socket |
35 | 35 | from optparse import OptionParser |
36 | 36 | |
37 | from twisted.internet.defer import inlineCallbacks | |
37 | from twisted.python.failure import Failure | |
38 | from twisted.internet.defer import deferredGenerator, waitForDeferred | |
38 | 39 | from twisted.internet import reactor |
39 | 40 | from twisted.internet.protocol import ReconnectingClientFactory |
40 | 41 | from txamqp.protocol import AMQClient |
61 | 62 | |
62 | 63 | consumer_tag = "graphite_consumer" |
63 | 64 | |
64 | @inlineCallbacks | |
65 | @deferredGenerator | |
65 | 66 | def connectionMade(self): |
66 | yield AMQClient.connectionMade(self) | |
67 | AMQClient.connectionMade(self) | |
67 | 68 | log.listener("New AMQP connection made") |
68 | yield self.setup() | |
69 | yield self.receive_loop() | |
70 | ||
71 | @inlineCallbacks | |
69 | self.setup() | |
70 | wfd = waitForDeferred(self.receive_loop()) | |
71 | yield wfd | |
72 | ||
73 | @deferredGenerator | |
72 | 74 | def setup(self): |
73 | 75 | exchange = self.factory.exchange_name |
74 | 76 | |
75 | yield self.authenticate(self.factory.username, self.factory.password) | |
76 | chan = yield self.channel(1) | |
77 | yield chan.channel_open() | |
77 | d = self.authenticate(self.factory.username, self.factory.password) | |
78 | wfd = waitForDeferred(d) | |
79 | yield wfd | |
80 | ||
81 | wfd = waitForDeferred(self.channel(1)) | |
82 | yield wfd | |
83 | chan = wfd.getResult() | |
84 | ||
85 | wfd = waitForDeferred(chan.channel_open()) | |
86 | yield wfd | |
78 | 87 | |
79 | 88 | # declare the exchange and queue |
80 | yield chan.exchange_declare(exchange=exchange, type="topic", | |
81 | durable=True, auto_delete=False) | |
89 | d = chan.exchange_declare(exchange=exchange, type="topic", | |
90 | durable=True, auto_delete=False) | |
91 | wfd = waitForDeferred(d) | |
92 | yield wfd | |
82 | 93 | |
83 | 94 | # we use a private queue to avoid conflicting with existing bindings |
84 | reply = yield chan.queue_declare(exclusive=True) | |
95 | wfd = waitForDeferred(chan.queue_declare(exclusive=True)) | |
96 | yield wfd | |
97 | reply = wfd.getResult() | |
85 | 98 | my_queue = reply.queue |
86 | 99 | |
87 | 100 | # bind each configured metric pattern |
88 | 101 | for bind_pattern in settings.BIND_PATTERNS: |
89 | 102 | log.listener("binding exchange '%s' to queue '%s' with pattern %s" \ |
90 | 103 | % (exchange, my_queue, bind_pattern)) |
91 | yield chan.queue_bind(exchange=exchange, queue=my_queue, | |
92 | routing_key=bind_pattern) | |
93 | ||
94 | yield chan.basic_consume(queue=my_queue, no_ack=True, | |
95 | consumer_tag=self.consumer_tag) | |
96 | @inlineCallbacks | |
104 | d = chan.queue_bind(exchange=exchange, queue=my_queue, | |
105 | routing_key=bind_pattern) | |
106 | wfd = waitForDeferred(d) | |
107 | yield wfd | |
108 | ||
109 | d = chan.basic_consume(queue=my_queue, no_ack=True, | |
110 | consumer_tag=self.consumer_tag) | |
111 | wfd = waitForDeferred(d) | |
112 | yield wfd | |
113 | ||
114 | @deferredGenerator | |
97 | 115 | def receive_loop(self): |
98 | queue = yield self.queue(self.consumer_tag) | |
116 | wfd = waitForDeferred(self.queue(self.consumer_tag)) | |
117 | yield wfd | |
118 | queue = wfd.getResult() | |
99 | 119 | |
100 | 120 | while True: |
101 | msg = yield queue.get() | |
121 | wfd = waitForDeferred(queue.get()) | |
122 | yield wfd | |
123 | msg = wfd.getResult() | |
102 | 124 | self.processMessage(msg) |
103 | 125 | |
104 | 126 | def processMessage(self, message): |
119 | 141 | else: |
120 | 142 | value, timestamp = line.split() |
121 | 143 | datapoint = ( float(timestamp), float(value) ) |
144 | if datapoint[1] != datapoint[1]: # filter out NaN values | |
145 | continue | |
122 | 146 | except ValueError: |
123 | 147 | log.listener("invalid message line: %s" % (line,)) |
124 | 148 | continue |
19 | 19 | import time |
20 | 20 | from optparse import OptionParser |
21 | 21 | |
22 | from twisted.internet.defer import inlineCallbacks | |
22 | from twisted.python.failure import Failure | |
23 | from twisted.internet.defer import deferredGenerator, waitForDeferred | |
23 | 24 | from twisted.internet import reactor, task |
24 | 25 | from twisted.internet.protocol import ClientCreator |
25 | 26 | from txamqp.protocol import AMQClient |
27 | 28 | from txamqp.content import Content |
28 | 29 | import txamqp.spec |
29 | 30 | |
30 | ||
31 | @inlineCallbacks | |
31 | @deferredGenerator | |
32 | 32 | def writeMetric(metric_path, value, timestamp, host, port, username, password, |
33 | 33 | vhost, exchange, spec=None, channel_number=1, ssl=False): |
34 | 34 | |
42 | 42 | vhost=vhost, spec=spec) |
43 | 43 | if ssl: |
44 | 44 | from twisted.internet.ssl import ClientContextFactory |
45 | conn = yield connector.connectSSL(host, port, ClientContextFactory()) | |
45 | wfd = waitForDeferred(connector.connectSSL(host, port, | |
46 | ClientContextFactory())) | |
47 | yield wfd | |
48 | conn = wfd.getResult() | |
46 | 49 | else: |
47 | conn = yield connector.connectTCP(host, port) | |
50 | wfd = waitForDeferred(connector.connectTCP(host, port)) | |
51 | yield wfd | |
52 | conn = wfd.getResult() | |
48 | 53 | |
49 | yield conn.authenticate(username, password) | |
50 | channel = yield conn.channel(channel_number) | |
51 | yield channel.channel_open() | |
54 | wfd = waitForDeferred(conn.authenticate(username, password)) | |
55 | yield wfd | |
52 | 56 | |
53 | yield channel.exchange_declare(exchange=exchange, type="topic", | |
54 | durable=True, auto_delete=False) | |
57 | wfd = waitForDeferred(conn.channel(channel_number)) | |
58 | yield wfd | |
59 | channel = wfd.getResult() | |
60 | ||
61 | wfd = waitForDeferred(channel.channel_open()) | |
62 | yield wfd | |
63 | ||
64 | wfd = waitForDeferred(channel.exchange_declare(exchange=exchange, | |
65 | type="topic", | |
66 | durable=True, | |
67 | auto_delete=False)) | |
68 | yield wfd | |
55 | 69 | |
56 | 70 | message = Content( "%f %d" % (value, timestamp) ) |
57 | 71 | message["delivery mode"] = 2 |
58 | 72 | |
59 | channel.basic_publish(exchange=exchange, content=message, routing_key=metric_path) | |
60 | yield channel.channel_close() | |
61 | ||
73 | channel.basic_publish(exchange=exchange, content=message, | |
74 | routing_key=metric_path) | |
75 | wfd = waitForDeferred(channel.channel_close()) | |
76 | yield wfd | |
62 | 77 | |
63 | 78 | def main(): |
64 | 79 | parser = OptionParser(usage="%prog [options] <metric> <value> [timestamp]") |
11 | 11 | See the License for the specific language governing permissions and |
12 | 12 | limitations under the License.""" |
13 | 13 | |
14 | from threading import Lock | |
14 | import time | |
15 | from collections import deque | |
15 | 16 | from carbon.conf import settings |
17 | try: | |
18 | from collections import defaultdict | |
19 | except: | |
20 | from util import defaultdict | |
16 | 21 | |
17 | 22 | |
18 | class MetricCache(dict): | |
19 | def __init__(self): | |
20 | self.size = 0 | |
21 | self.lock = Lock() | |
23 | class _MetricCache(defaultdict): | |
24 | def __init__(self, defaultfactory=deque, method="sorted"): | |
25 | self.method = method | |
26 | if self.method == "sorted": | |
27 | self.queue = self.gen_queue() | |
28 | else: | |
29 | self.queue = False | |
30 | super(_MetricCache, self).__init__(defaultfactory) | |
22 | 31 | |
23 | def __setitem__(self, key, value): | |
24 | raise TypeError("Use store() method instead!") | |
32 | def gen_queue(self): | |
33 | while True: | |
34 | t = time.time() | |
35 | queue = sorted(self.counts, key=lambda x: x[1]) | |
36 | if settings.LOG_CACHE_QUEUE_SORTS: | |
37 | log.debug("Sorted %d cache queues in %.6f seconds" % (len(queue), time.time() - t)) | |
38 | while queue: | |
39 | yield queue.pop()[0] | |
40 | ||
41 | @property | |
42 | def size(self): | |
43 | return reduce(lambda x, y: x + len(y), self.values(), 0) | |
25 | 44 | |
26 | 45 | def store(self, metric, datapoint): |
27 | try: | |
28 | self.lock.acquire() | |
29 | self.setdefault(metric, []).append(datapoint) | |
30 | self.size += 1 | |
31 | finally: | |
32 | self.lock.release() | |
33 | ||
46 | self[metric].append(datapoint) | |
34 | 47 | if self.isFull(): |
35 | 48 | log.msg("MetricCache is full: self.size=%d" % self.size) |
36 | 49 | state.events.cacheFull() |
37 | 50 | |
38 | 51 | def isFull(self): |
39 | return self.size >= settings.MAX_CACHE_SIZE | |
52 | # Short circuit this test if there is no max cache size, then we don't need | |
53 | # to do the someone expensive work of calculating the current size. | |
54 | return settings.MAX_CACHE_SIZE != float('inf') and self.size >= settings.MAX_CACHE_SIZE | |
40 | 55 | |
41 | def pop(self, metric): | |
42 | try: | |
43 | self.lock.acquire() | |
44 | datapoints = dict.pop(self, metric) | |
45 | self.size -= len(datapoints) | |
46 | return datapoints | |
47 | finally: | |
48 | self.lock.release() | |
56 | def pop(self, metric=None): | |
57 | if not self: | |
58 | raise KeyError(metric) | |
59 | elif not metric and self.method == "max": | |
60 | metric = max(self.items(), key=lambda x: len(x[1]))[0] | |
61 | elif not metric and self.method == "naive": | |
62 | return self.popitem() | |
63 | elif not metric and self.method == "sorted": | |
64 | metric = self.queue.next() | |
65 | datapoints = (metric, super(_MetricCache, self).pop(metric)) | |
66 | return datapoints | |
49 | 67 | |
68 | @property | |
50 | 69 | def counts(self): |
51 | try: | |
52 | self.lock.acquire() | |
53 | return [ (metric, len(datapoints)) for (metric, datapoints) in self.items() ] | |
54 | finally: | |
55 | self.lock.release() | |
70 | return [(metric, len(datapoints)) for (metric, datapoints) in self.items()] | |
56 | 71 | |
57 | 72 | |
58 | 73 | # Ghetto singleton |
59 | MetricCache = MetricCache() | |
74 | ||
75 | MetricCache = _MetricCache(method=settings.CACHE_WRITE_STRATEGY) | |
60 | 76 | |
61 | 77 | |
62 | 78 | # Avoid import circularities |
4 | 4 | from twisted.protocols.basic import Int32StringReceiver |
5 | 5 | from carbon.conf import settings |
6 | 6 | from carbon.util import pickle |
7 | from carbon import log, state, events, instrumentation | |
7 | from carbon import log, state, instrumentation | |
8 | 8 | |
9 | 9 | |
10 | 10 | SEND_QUEUE_LOW_WATERMARK = settings.MAX_QUEUE_SIZE * 0.8 |
71 | 71 | if (self.factory.queueFull.called and |
72 | 72 | queueSize < SEND_QUEUE_LOW_WATERMARK): |
73 | 73 | self.factory.queueHasSpace.callback(queueSize) |
74 | ||
75 | if (settings.USE_FLOW_CONTROL and | |
76 | state.metricReceiversPaused): | |
77 | log.clients('%s resuming paused clients' % self) | |
78 | events.resumeReceivingMetrics() | |
79 | 74 | |
80 | 75 | def __str__(self): |
81 | 76 | return 'CarbonClientProtocol(%s:%d:%s)' % (self.factory.destination) |
108 | 103 | self.queuedUntilConnected = 'destinations.%s.queuedUntilConnected' % self.destinationName |
109 | 104 | |
110 | 105 | def queueFullCallback(self, result): |
106 | state.events.cacheFull() | |
111 | 107 | log.clients('%s send queue is full (%d datapoints)' % (self, result)) |
112 | ||
108 | ||
113 | 109 | def queueSpaceCallback(self, result): |
114 | 110 | if self.queueFull.called: |
115 | 111 | log.clients('%s send queue has space available' % self.connectedProtocol) |
116 | 112 | self.queueFull = Deferred() |
117 | 113 | self.queueFull.addCallback(self.queueFullCallback) |
114 | state.events.cacheSpaceAvailable() | |
118 | 115 | self.queueHasSpace = Deferred() |
119 | 116 | self.queueHasSpace.addCallback(self.queueSpaceCallback) |
120 | 117 |
22 | 22 | |
23 | 23 | import whisper |
24 | 24 | from carbon import log |
25 | from carbon.exceptions import CarbonConfigException | |
25 | 26 | |
26 | 27 | from twisted.python import usage |
27 | 28 | |
41 | 42 | CACHE_QUERY_INTERFACE='0.0.0.0', |
42 | 43 | CACHE_QUERY_PORT=7002, |
43 | 44 | LOG_UPDATES=True, |
45 | LOG_CACHE_HITS=True, | |
46 | LOG_CACHE_QUEUE_SORTS=True, | |
44 | 47 | WHISPER_AUTOFLUSH=False, |
45 | 48 | WHISPER_SPARSE_CREATE=False, |
49 | WHISPER_FALLOCATE_CREATE=False, | |
46 | 50 | WHISPER_LOCK_WRITES=False, |
47 | 51 | MAX_DATAPOINTS_PER_MESSAGE=500, |
48 | 52 | MAX_AGGREGATION_INTERVALS=5, |
53 | FORWARD_ALL=False, | |
49 | 54 | MAX_QUEUE_SIZE=1000, |
50 | 55 | ENABLE_AMQP=False, |
51 | 56 | AMQP_VERBOSE=False, |
63 | 68 | USE_WHITELIST=False, |
64 | 69 | CARBON_METRIC_PREFIX='carbon', |
65 | 70 | CARBON_METRIC_INTERVAL=60, |
71 | CACHE_WRITE_STRATEGY='sorted', | |
72 | WRITE_BACK_FREQUENCY=None, | |
73 | ENABLE_LOGROTATION=True, | |
74 | LOG_LISTENER_CONNECTIONS=True, | |
66 | 75 | ) |
67 | 76 | |
68 | ||
69 | def _umask(value): | |
70 | return int(value, 8) | |
71 | 77 | |
72 | 78 | def _process_alive(pid): |
73 | 79 | if exists("/proc"): |
88 | 94 | _ordered_sections = [] |
89 | 95 | |
90 | 96 | def read(self, path): |
97 | # Verifies a file exists *and* is readable | |
98 | if not os.access(path, os.R_OK): | |
99 | raise CarbonConfigException("Error: Missing config file or wrong perms on %s" % path) | |
100 | ||
91 | 101 | result = ConfigParser.read(self, path) |
92 | ||
93 | 102 | sections = [] |
94 | 103 | for line in open(path): |
95 | 104 | line = line.strip() |
96 | 105 | |
97 | 106 | if line.startswith('[') and line.endswith(']'): |
98 | sections.append( line[1:-1] ) | |
107 | sections.append(line[1:-1]) | |
99 | 108 | |
100 | 109 | self._ordered_sections = sections |
101 | 110 | |
102 | 111 | return result |
103 | 112 | |
104 | 113 | def sections(self): |
105 | return list( self._ordered_sections ) # return a copy for safety | |
114 | return list(self._ordered_sections) # return a copy for safety | |
106 | 115 | |
107 | 116 | |
108 | 117 | class Settings(dict): |
115 | 124 | def readFrom(self, path, section): |
116 | 125 | parser = ConfigParser() |
117 | 126 | if not parser.read(path): |
118 | raise Exception("Failed to read config file %s" % path) | |
127 | raise CarbonConfigException("Failed to read config file %s" % path) | |
119 | 128 | |
120 | 129 | if not parser.has_section(section): |
121 | 130 | return |
122 | 131 | |
123 | for key,value in parser.items(section): | |
132 | for key, value in parser.items(section): | |
124 | 133 | key = key.upper() |
125 | 134 | |
126 | 135 | # Detect type from defaults dict |
127 | 136 | if key in defaults: |
128 | valueType = type( defaults[key] ) | |
137 | valueType = type(defaults[key]) | |
129 | 138 | else: |
130 | 139 | valueType = str |
131 | 140 | |
132 | 141 | if valueType is list: |
133 | value = [ v.strip() for v in value.split(',') ] | |
142 | value = [v.strip() for v in value.split(',')] | |
134 | 143 | |
135 | 144 | elif valueType is bool: |
136 | 145 | value = parser.getboolean(section, key) |
156 | 165 | |
157 | 166 | optFlags = [ |
158 | 167 | ["debug", "", "Run in debug mode."], |
159 | ] | |
168 | ] | |
160 | 169 | |
161 | 170 | optParameters = [ |
162 | 171 | ["config", "c", None, "Use the given config file."], |
164 | 173 | ["logdir", "", None, "Write logs to the given directory."], |
165 | 174 | ["whitelist", "", None, "List of metric patterns to allow."], |
166 | 175 | ["blacklist", "", None, "List of metric patterns to disallow."], |
167 | ] | |
176 | ] | |
168 | 177 | |
169 | 178 | def postOptions(self): |
170 | 179 | global settings |
207 | 216 | log.msg("Enabling Whisper autoflush") |
208 | 217 | whisper.AUTOFLUSH = True |
209 | 218 | |
219 | if settings.WHISPER_FALLOCATE_CREATE: | |
220 | if whisper.CAN_FALLOCATE: | |
221 | log.msg("Enabling Whisper fallocate support") | |
222 | else: | |
223 | log.err("WHISPER_FALLOCATE_CREATE is enabled but linking failed.") | |
224 | ||
210 | 225 | if settings.WHISPER_LOCK_WRITES: |
211 | 226 | if whisper.CAN_LOCK: |
212 | 227 | log.msg("Enabling Whisper file locking") |
214 | 229 | else: |
215 | 230 | log.err("WHISPER_LOCK_WRITES is enabled but import of fcntl module failed.") |
216 | 231 | |
232 | if settings.CACHE_WRITE_STRATEGY not in ('sorted', 'max', 'naive'): | |
233 | log.err("%s is not a valid value for CACHE_WRITE_STRATEGY, defaulting to %s" % | |
234 | (settings.CACHE_WRITE_STRATEGY, defaults['CACHE_WRITE_STRATEGY'])) | |
235 | else: | |
236 | log.msg("Using %s write strategy for cache" % | |
237 | settings.CACHE_WRITE_STRATEGY) | |
217 | 238 | if not "action" in self: |
218 | 239 | self["action"] = "start" |
219 | 240 | self.handleAction() |
350 | 371 | |
351 | 372 | optParameters = [ |
352 | 373 | ["rules", "", None, "Use the given relay rules file."], |
374 | ["aggregation-rules", "", None, "Use the given aggregation rules file."], | |
353 | 375 | ] + CarbonCacheOptions.optParameters |
354 | 376 | |
355 | 377 | def postOptions(self): |
358 | 380 | self["rules"] = join(settings["CONF_DIR"], "relay-rules.conf") |
359 | 381 | settings["relay-rules"] = self["rules"] |
360 | 382 | |
361 | if settings["RELAY_METHOD"] not in ("rules", "consistent-hashing"): | |
383 | if self["aggregation-rules"] is None: | |
384 | self["aggregation-rules"] = join(settings["CONF_DIR"], "aggregation-rules.conf") | |
385 | settings["aggregation-rules"] = self["aggregation-rules"] | |
386 | ||
387 | if settings["RELAY_METHOD"] not in ("rules", "consistent-hashing", "aggregated-consistent-hashing"): | |
362 | 388 | print ("In carbon.conf, RELAY_METHOD must be either 'rules' or " |
363 | "'consistent-hashing'. Invalid value: '%s'" % | |
389 | "'consistent-hashing' or 'aggregated-consistent-hashing'. Invalid value: '%s'" % | |
364 | 390 | settings.RELAY_METHOD) |
365 | 391 | sys.exit(1) |
366 | 392 | |
377 | 403 | parser.add_option( |
378 | 404 | "--pidfile", default=None, |
379 | 405 | help="Write pid to the given file") |
406 | parser.add_option( | |
407 | "--umask", default=None, | |
408 | help="Use the given umask when creating files") | |
380 | 409 | parser.add_option( |
381 | 410 | "--config", |
382 | 411 | default=None, |
454 | 483 | if graphite_root is None: |
455 | 484 | graphite_root = os.environ.get('GRAPHITE_ROOT') |
456 | 485 | if graphite_root is None: |
457 | raise ValueError("Either ROOT_DIR or GRAPHITE_ROOT " | |
486 | raise CarbonConfigException("Either ROOT_DIR or GRAPHITE_ROOT " | |
458 | 487 | "needs to be provided.") |
459 | 488 | |
460 | 489 | # Default config directory to root-relative, unless overriden by the |
491 | 520 | config = options["config"] |
492 | 521 | |
493 | 522 | if not exists(config): |
494 | raise ValueError("Error: missing required config %r" % config) | |
523 | raise CarbonConfigException("Error: missing required config %r" % config) | |
495 | 524 | |
496 | 525 | settings.readFrom(config, section) |
497 | 526 | settings.setdefault("instance", options["instance"]) |
508 | 537 | (program, options["instance"]))) |
509 | 538 | settings["LOG_DIR"] = (options["logdir"] or |
510 | 539 | join(settings["LOG_DIR"], |
511 | "%s-%s" % (program ,options["instance"]))) | |
540 | "%s-%s" % (program, options["instance"]))) | |
512 | 541 | else: |
513 | 542 | settings["pidfile"] = ( |
514 | 543 | options["pidfile"] or |
0 | class CarbonConfigException(Exception): | |
1 | """Raised when a carbon daemon is improperly configured""" |
2 | 2 | except ImportError: |
3 | 3 | from md5 import md5 |
4 | 4 | import bisect |
5 | from carbon.conf import settings | |
6 | 5 | |
7 | 6 | |
8 | 7 | class ConsistentHashRing: |
32 | 31 | |
33 | 32 | def get_node(self, key): |
34 | 33 | assert self.ring |
35 | position = self.compute_ring_position(key) | |
36 | search_entry = (position, None) | |
37 | index = bisect.bisect_left(self.ring, search_entry) % len(self.ring) | |
38 | entry = self.ring[index] | |
39 | return entry[1] | |
34 | node = None | |
35 | node_iter = self.get_nodes(key) | |
36 | node = node_iter.next() | |
37 | node_iter.close() | |
38 | return node | |
40 | 39 | |
41 | 40 | def get_nodes(self, key): |
42 | nodes = [] | |
41 | assert self.ring | |
42 | nodes = set() | |
43 | 43 | position = self.compute_ring_position(key) |
44 | 44 | search_entry = (position, None) |
45 | 45 | index = bisect.bisect_left(self.ring, search_entry) % len(self.ring) |
48 | 48 | next_entry = self.ring[index] |
49 | 49 | (position, next_node) = next_entry |
50 | 50 | if next_node not in nodes: |
51 | nodes.append(next_node) | |
51 | nodes.add(next_node) | |
52 | yield next_node | |
52 | 53 | |
53 | 54 | index = (index + 1) % len(self.ring) |
54 | ||
55 | return nodes |
77 | 77 | cacheQueries = myStats.get('cacheQueries', 0) |
78 | 78 | cacheOverflow = myStats.get('cache.overflow', 0) |
79 | 79 | |
80 | # Calculate cache-data-structure-derived metrics prior to storing anything | |
81 | # in the cache itself -- which would otherwise affect said metrics. | |
82 | cache_size = cache.MetricCache.size | |
83 | cache_queues = len(cache.MetricCache) | |
84 | record('cache.size', cache_size) | |
85 | record('cache.queues', cache_queues) | |
86 | ||
80 | 87 | if updateTimes: |
81 | 88 | avgUpdateTime = sum(updateTimes) / len(updateTimes) |
82 | 89 | record('avgUpdateTime', avgUpdateTime) |
90 | 97 | record('creates', creates) |
91 | 98 | record('errors', errors) |
92 | 99 | record('cache.queries', cacheQueries) |
93 | record('cache.queues', len(cache.MetricCache)) | |
94 | record('cache.size', cache.MetricCache.size) | |
95 | 100 | record('cache.overflow', cacheOverflow) |
96 | 101 | |
97 | 102 | # aggregator metrics |
105 | 110 | # relay metrics |
106 | 111 | else: |
107 | 112 | record = relay_record |
113 | prefix = 'destinations.' | |
114 | relay_stats = [(k,v) for (k,v) in myStats.items() if k.startswith(prefix)] | |
115 | for stat_name, stat_value in relay_stats: | |
116 | record(stat_name, stat_value) | |
108 | 117 | |
109 | 118 | # common metrics |
110 | 119 | record('metricsReceived', myStats.get('metricsReceived', 0)) |
0 | 0 | import time |
1 | from os.path import exists | |
1 | 2 | from sys import stdout, stderr |
2 | 3 | from zope.interface import implements |
3 | 4 | from twisted.python.log import startLoggingWithObserver, textFromEventDict, msg, err, ILogObserver |
4 | 5 | from twisted.python.syslog import SyslogObserver |
5 | 6 | from twisted.python.logfile import DailyLogFile |
6 | 7 | |
8 | ||
9 | class CarbonLogFile(DailyLogFile): | |
10 | """Overridden to support logrotate.d""" | |
11 | def __init__(self, *args, **kwargs): | |
12 | DailyLogFile.__init__(self, *args, **kwargs) | |
13 | # avoid circular dependencies | |
14 | from carbon.conf import settings | |
15 | self.enableRotation = settings.ENABLE_LOGROTATION | |
16 | ||
17 | def shouldRotate(self): | |
18 | if self.enableRotation: | |
19 | return DailyLogFile.shouldRotate(self) | |
20 | else: | |
21 | return False | |
22 | ||
23 | def write(self, data): | |
24 | if not self.enableRotation: | |
25 | if not exists(self.path): | |
26 | self.reopen() | |
27 | DailyLogFile.write(self, data) | |
28 | ||
29 | # Backport from twisted >= 10 | |
30 | def reopen(self): | |
31 | self.close() | |
32 | self._openFile() | |
33 | ||
34 | ||
7 | 35 | class CarbonLogObserver(object): |
8 | 36 | implements(ILogObserver) |
9 | 37 | |
10 | 38 | def log_to_dir(self, logdir): |
11 | 39 | self.logdir = logdir |
12 | self.console_logfile = DailyLogFile('console.log', logdir) | |
40 | self.console_logfile = CarbonLogFile('console.log', logdir) | |
13 | 41 | self.custom_logs = {} |
14 | 42 | self.observer = self.logdir_observer |
15 | 43 | |
16 | 44 | def log_to_syslog(self, prefix): |
17 | 45 | observer = SyslogObserver(prefix).emit |
46 | ||
18 | 47 | def syslog_observer(event): |
19 | 48 | event["system"] = event.get("type", "console") |
20 | 49 | observer(event) |
24 | 53 | return self.observer(event) |
25 | 54 | |
26 | 55 | def stdout_observer(self, event): |
27 | stdout.write( formatEvent(event, includeType=True) + '\n' ) | |
56 | stdout.write(formatEvent(event, includeType=True) + '\n') | |
28 | 57 | stdout.flush() |
29 | 58 | |
30 | 59 | def logdir_observer(self, event): |
32 | 61 | log_type = event.get('type') |
33 | 62 | |
34 | 63 | if log_type is not None and log_type not in self.custom_logs: |
35 | self.custom_logs[log_type] = DailyLogFile(log_type + '.log', self.logdir) | |
64 | self.custom_logs[log_type] = CarbonLogFile(log_type + '.log', self.logdir) | |
36 | 65 | |
37 | 66 | logfile = self.custom_logs.get(log_type, self.console_logfile) |
38 | 67 | logfile.write(message + '\n') |
40 | 69 | |
41 | 70 | # Default to stdout |
42 | 71 | observer = stdout_observer |
43 | ||
72 | ||
44 | 73 | |
45 | 74 | carbonLogObserver = CarbonLogObserver() |
46 | 75 | |
62 | 91 | |
63 | 92 | logToSyslog = carbonLogObserver.log_to_syslog |
64 | 93 | |
94 | ||
65 | 95 | def logToStdout(): |
66 | 96 | startLoggingWithObserver(carbonLogObserver) |
97 | ||
67 | 98 | |
68 | 99 | def cache(message, **context): |
69 | 100 | context['type'] = 'cache' |
70 | 101 | msg(message, **context) |
71 | 102 | |
103 | ||
72 | 104 | def clients(message, **context): |
73 | 105 | context['type'] = 'clients' |
74 | 106 | msg(message, **context) |
107 | ||
75 | 108 | |
76 | 109 | def creates(message, **context): |
77 | 110 | context['type'] = 'creates' |
78 | 111 | msg(message, **context) |
79 | 112 | |
113 | ||
80 | 114 | def updates(message, **context): |
81 | 115 | context['type'] = 'updates' |
82 | 116 | msg(message, **context) |
117 | ||
83 | 118 | |
84 | 119 | def listener(message, **context): |
85 | 120 | context['type'] = 'listener' |
86 | 121 | msg(message, **context) |
87 | 122 | |
123 | ||
88 | 124 | def relay(message, **context): |
89 | 125 | context['type'] = 'relay' |
90 | 126 | msg(message, **context) |
127 | ||
91 | 128 | |
92 | 129 | def aggregator(message, **context): |
93 | 130 | context['type'] = 'aggregator' |
94 | 131 | msg(message, **context) |
95 | 132 | |
133 | ||
96 | 134 | def query(message, **context): |
97 | 135 | context['type'] = 'query' |
98 | 136 | msg(message, **context) |
137 | ||
99 | 138 | |
100 | 139 | def debug(message, **context): |
101 | 140 | if debugEnabled: |
102 | 141 | msg(message, **context) |
103 | 142 | |
104 | 143 | debugEnabled = False |
144 | ||
145 | ||
105 | 146 | def setDebugEnabled(enabled): |
106 | 147 | global debugEnabled |
107 | 148 | debugEnabled = enabled |
1 | 1 | import whisper |
2 | 2 | from carbon import log |
3 | 3 | from carbon.storage import getFilesystemPath |
4 | ||
5 | 4 | |
6 | 5 | |
7 | 6 | def getMetadata(metric, key): |
0 | from twisted.internet import reactor | |
0 | import time | |
1 | ||
1 | 2 | from twisted.internet.protocol import DatagramProtocol |
2 | 3 | from twisted.internet.error import ConnectionDone |
3 | 4 | from twisted.protocols.basic import LineOnlyReceiver, Int32StringReceiver |
13 | 14 | """ |
14 | 15 | def connectionMade(self): |
15 | 16 | self.peerName = self.getPeerName() |
16 | log.listener("%s connection with %s established" % (self.__class__.__name__, self.peerName)) | |
17 | if settings.LOG_LISTENER_CONNECTIONS: | |
18 | log.listener("%s connection with %s established" % (self.__class__.__name__, self.peerName)) | |
17 | 19 | |
18 | 20 | if state.metricReceiversPaused: |
19 | 21 | self.pauseReceiving() |
37 | 39 | |
38 | 40 | def connectionLost(self, reason): |
39 | 41 | if reason.check(ConnectionDone): |
40 | log.listener("%s connection with %s closed cleanly" % (self.__class__.__name__, self.peerName)) | |
42 | if settings.LOG_LISTENER_CONNECTIONS: | |
43 | log.listener("%s connection with %s closed cleanly" % (self.__class__.__name__, self.peerName)) | |
41 | 44 | else: |
42 | 45 | log.listener("%s connection with %s lost: %s" % (self.__class__.__name__, self.peerName, reason.value)) |
43 | 46 | |
52 | 55 | if WhiteList and metric not in WhiteList: |
53 | 56 | instrumentation.increment('whitelistRejects') |
54 | 57 | return |
55 | if datapoint[1] == datapoint[1]: # filter out NaN values | |
56 | events.metricReceived(metric, datapoint) | |
58 | if datapoint[1] != datapoint[1]: # filter out NaN values | |
59 | return | |
60 | if int(datapoint[0]) == -1: # use current time if none given | |
61 | datapoint = (time.time(), datapoint[1]) | |
62 | ||
63 | events.metricReceived(metric, datapoint) | |
57 | 64 | |
58 | 65 | |
59 | 66 | class MetricLineReceiver(MetricReceiver, LineOnlyReceiver): |
62 | 69 | def lineReceived(self, line): |
63 | 70 | try: |
64 | 71 | metric, value, timestamp = line.strip().split() |
65 | datapoint = ( float(timestamp), float(value) ) | |
72 | datapoint = (float(timestamp), float(value)) | |
66 | 73 | except: |
67 | 74 | log.listener('invalid line received from client %s, ignoring' % self.peerName) |
68 | 75 | return |
75 | 82 | for line in data.splitlines(): |
76 | 83 | try: |
77 | 84 | metric, value, timestamp = line.strip().split() |
78 | datapoint = ( float(timestamp), float(value) ) | |
85 | datapoint = (float(timestamp), float(value)) | |
79 | 86 | |
80 | 87 | self.metricReceived(metric, datapoint) |
81 | 88 | except: |
96 | 103 | log.listener('invalid pickle received from %s, ignoring' % self.peerName) |
97 | 104 | return |
98 | 105 | |
99 | for (metric, datapoint) in datapoints: | |
106 | for raw in datapoints: | |
100 | 107 | try: |
101 | datapoint = ( float(datapoint[0]), float(datapoint[1]) ) #force proper types | |
108 | (metric, (value, timestamp)) = raw | |
109 | except Exception, e: | |
110 | log.listener('Error decoding pickle: %s' % e) | |
111 | try: | |
112 | datapoint = (float(value), float(timestamp)) # force proper types | |
102 | 113 | except: |
103 | 114 | continue |
104 | 115 | |
124 | 135 | metric = request['metric'] |
125 | 136 | datapoints = MetricCache.get(metric, []) |
126 | 137 | result = dict(datapoints=datapoints) |
127 | log.query('[%s] cache query for \"%s\" returned %d values' % (self.peerAddr, metric, len(datapoints))) | |
138 | if settings.LOG_CACHE_HITS is True: | |
139 | log.query('[%s] cache query for \"%s\" returned %d values' % (self.peerAddr, metric, len(datapoints))) | |
128 | 140 | instrumentation.increment('cacheQueries') |
129 | 141 | |
130 | 142 | elif request['type'] == 'get-metadata': |
37 | 37 | new_regex_list = [] |
38 | 38 | for line in open(self.list_file): |
39 | 39 | pattern = line.strip() |
40 | if line.startswith('#') or not line: | |
40 | if line.startswith('#') or not pattern: | |
41 | 41 | continue |
42 | 42 | try: |
43 | 43 | new_regex_list.append(re.compile(pattern)) |
0 | 0 | import re |
1 | 1 | from carbon.conf import OrderedConfigParser |
2 | 2 | from carbon.util import parseDestinations |
3 | from carbon.exceptions import CarbonConfigException | |
3 | 4 | |
4 | 5 | |
5 | 6 | class RelayRule: |
17 | 18 | parser = OrderedConfigParser() |
18 | 19 | |
19 | 20 | if not parser.read(path): |
20 | raise Exception("Could not read rules file %s" % path) | |
21 | raise CarbonConfigException("Could not read rules file %s" % path) | |
21 | 22 | |
22 | 23 | defaultRule = None |
23 | 24 | for section in parser.sections(): |
24 | 25 | if not parser.has_option(section, 'destinations'): |
25 | raise ValueError("Rules file %s section %s does not define a " | |
26 | raise CarbonConfigException("Rules file %s section %s does not define a " | |
26 | 27 | "'destinations' list" % (path, section)) |
27 | 28 | |
28 | 29 | destination_strings = parser.get(section, 'destinations').split(',') |
30 | 31 | |
31 | 32 | if parser.has_option(section, 'pattern'): |
32 | 33 | if parser.has_option(section, 'default'): |
33 | raise Exception("Section %s contains both 'pattern' and " | |
34 | raise CarbonConfigException("Section %s contains both 'pattern' and " | |
34 | 35 | "'default'. You must use one or the other." % section) |
35 | 36 | pattern = parser.get(section, 'pattern') |
36 | 37 | regex = re.compile(pattern, re.I) |
46 | 47 | if not parser.getboolean(section, 'default'): |
47 | 48 | continue # just ignore default = false |
48 | 49 | if defaultRule: |
49 | raise Exception("Only one default rule can be specified") | |
50 | raise CarbonConfigException("Only one default rule can be specified") | |
50 | 51 | defaultRule = RelayRule(condition=lambda metric: True, |
51 | 52 | destinations=destinations) |
52 | 53 | |
53 | 54 | if not defaultRule: |
54 | raise Exception("No default rule defined. You must specify exactly one " | |
55 | raise CarbonConfigException("No default rule defined. You must specify exactly one " | |
55 | 56 | "rule with 'default = true' instead of a pattern.") |
56 | 57 | |
57 | 58 | rules.append(defaultRule) |
42 | 42 | class ConsistentHashingRouter(DatapointRouter): |
43 | 43 | def __init__(self, replication_factor=1): |
44 | 44 | self.replication_factor = int(replication_factor) |
45 | self.instance_ports = {} # { (server, instance) : port } | |
45 | self.instance_ports = {} # { (server, instance) : port } | |
46 | 46 | self.ring = ConsistentHashRing([]) |
47 | 47 | |
48 | 48 | def addDestination(self, destination): |
49 | 49 | (server, port, instance) = destination |
50 | 50 | if (server, instance) in self.instance_ports: |
51 | 51 | raise Exception("destination instance (%s, %s) already configured" % (server, instance)) |
52 | self.instance_ports[ (server, instance) ] = port | |
53 | self.ring.add_node( (server, instance) ) | |
52 | self.instance_ports[(server, instance)] = port | |
53 | self.ring.add_node((server, instance)) | |
54 | 54 | |
55 | 55 | def removeDestination(self, destination): |
56 | 56 | (server, port, instance) = destination |
57 | 57 | if (server, instance) not in self.instance_ports: |
58 | 58 | raise Exception("destination instance (%s, %s) not configured" % (server, instance)) |
59 | del self.instance_ports[ (server, instance) ] | |
60 | self.ring.remove_node( (server, instance) ) | |
59 | del self.instance_ports[(server, instance)] | |
60 | self.ring.remove_node((server, instance)) | |
61 | 61 | |
62 | 62 | def getDestinations(self, metric): |
63 | 63 | key = self.getKey(metric) |
64 | 64 | |
65 | used_servers = set() | |
66 | for (server, instance) in self.ring.get_nodes(key): | |
67 | if server in used_servers: | |
68 | continue | |
69 | else: | |
70 | used_servers.add(server) | |
71 | port = self.instance_ports[ (server, instance) ] | |
72 | yield (server, port, instance) | |
73 | ||
74 | if len(used_servers) >= self.replication_factor: | |
65 | for count,node in enumerate(self.ring.get_nodes(key)): | |
66 | if count == self.replication_factor: | |
75 | 67 | return |
68 | (server, instance) = node | |
69 | port = self.instance_ports[ (server, instance) ] | |
70 | yield (server, port, instance) | |
76 | 71 | |
77 | 72 | def getKey(self, metric): |
78 | 73 | return metric |
87 | 82 | module = imp.load_module('keyfunc_module', module_file, module_path, description) |
88 | 83 | keyfunc = getattr(module, func_name) |
89 | 84 | self.setKeyFunction(keyfunc) |
85 | ||
86 | class AggregatedConsistentHashingRouter(DatapointRouter): | |
87 | def __init__(self, agg_rules_manager, replication_factor=1): | |
88 | self.hash_router = ConsistentHashingRouter(replication_factor) | |
89 | self.agg_rules_manager = agg_rules_manager | |
90 | ||
91 | def addDestination(self, destination): | |
92 | self.hash_router.addDestination(destination) | |
93 | ||
94 | def removeDestination(self, destination): | |
95 | self.hash_router.removeDestination(destination) | |
96 | ||
97 | def getDestinations(self, key): | |
98 | # resolve metric to aggregate forms | |
99 | resolved_metrics = [] | |
100 | for rule in self.agg_rules_manager.rules: | |
101 | aggregate_metric = rule.get_aggregate_metric(key) | |
102 | if aggregate_metric is None: | |
103 | continue | |
104 | else: | |
105 | resolved_metrics.append(aggregate_metric) | |
106 | ||
107 | # if the metric will not be aggregated, send it raw | |
108 | # (will pass through aggregation) | |
109 | if len(resolved_metrics) == 0: | |
110 | resolved_metrics.append(key) | |
111 | ||
112 | # get consistent hashing destinations based on aggregate forms | |
113 | destinations = set() | |
114 | for resolved_metric in resolved_metrics: | |
115 | for destination in self.hash_router.getDestinations(resolved_metric): | |
116 | destinations.add(destination) | |
117 | ||
118 | for destination in destinations: | |
119 | yield destination |
21 | 21 | # Attaching modules to the global state module simplifies import order hassles |
22 | 22 | from carbon import util, state, events, instrumentation |
23 | 23 | from carbon.log import carbonLogObserver |
24 | from carbon.exceptions import CarbonConfigException | |
24 | 25 | state.events = events |
25 | 26 | state.instrumentation = instrumentation |
26 | 27 | |
34 | 35 | parent.setComponent(ILogObserver, carbonLogObserver) |
35 | 36 | |
36 | 37 | |
37 | ||
38 | 38 | def createBaseService(config): |
39 | 39 | from carbon.conf import settings |
40 | 40 | from carbon.protocols import (MetricLineReceiver, MetricPickleReceiver, |
51 | 51 | amqp_port = settings.get("AMQP_PORT", 5672) |
52 | 52 | amqp_user = settings.get("AMQP_USER", "guest") |
53 | 53 | amqp_password = settings.get("AMQP_PASSWORD", "guest") |
54 | amqp_verbose = settings.get("AMQP_VERBOSE", False) | |
55 | amqp_vhost = settings.get("AMQP_VHOST", "/") | |
56 | amqp_spec = settings.get("AMQP_SPEC", None) | |
54 | amqp_verbose = settings.get("AMQP_VERBOSE", False) | |
55 | amqp_vhost = settings.get("AMQP_VHOST", "/") | |
56 | amqp_spec = settings.get("AMQP_SPEC", None) | |
57 | 57 | amqp_exchange_name = settings.get("AMQP_EXCHANGE", "graphite") |
58 | ||
59 | 58 | |
60 | 59 | for interface, port, protocol in ((settings.LINE_RECEIVER_INTERFACE, |
61 | 60 | settings.LINE_RECEIVER_PORT, |
93 | 92 | service.setServiceParent(root_service) |
94 | 93 | |
95 | 94 | if settings.USE_WHITELIST: |
96 | from carbon.regexlist import WhiteList,BlackList | |
95 | from carbon.regexlist import WhiteList, BlackList | |
97 | 96 | WhiteList.read_from(settings["whitelist"]) |
98 | 97 | BlackList.read_from(settings["blacklist"]) |
99 | 98 | |
159 | 158 | RewriteRuleManager.read_from(settings["rewrite-rules"]) |
160 | 159 | |
161 | 160 | if not settings.DESTINATIONS: |
162 | raise Exception("Required setting DESTINATIONS is missing from carbon.conf") | |
161 | raise CarbonConfigException("Required setting DESTINATIONS is missing from carbon.conf") | |
163 | 162 | |
164 | 163 | for destination in util.parseDestinations(settings.DESTINATIONS): |
165 | 164 | client_manager.startClient(destination) |
168 | 167 | |
169 | 168 | |
170 | 169 | def createRelayService(config): |
171 | from carbon.routers import RelayRulesRouter, ConsistentHashingRouter | |
170 | from carbon.routers import RelayRulesRouter, ConsistentHashingRouter, AggregatedConsistentHashingRouter | |
172 | 171 | from carbon.client import CarbonClientManager |
173 | 172 | from carbon.conf import settings |
174 | 173 | from carbon import events |
180 | 179 | router = RelayRulesRouter(settings["relay-rules"]) |
181 | 180 | elif settings.RELAY_METHOD == 'consistent-hashing': |
182 | 181 | router = ConsistentHashingRouter(settings.REPLICATION_FACTOR) |
182 | elif settings.RELAY_METHOD == 'aggregated-consistent-hashing': | |
183 | from carbon.aggregator.rules import RuleManager | |
184 | RuleManager.read_from(settings["aggregation-rules"]) | |
185 | router = AggregatedConsistentHashingRouter(RuleManager, settings.REPLICATION_FACTOR) | |
183 | 186 | |
184 | 187 | client_manager = CarbonClientManager(router) |
185 | 188 | client_manager.setServiceParent(root_service) |
188 | 191 | events.metricGenerated.addHandler(client_manager.sendDatapoint) |
189 | 192 | |
190 | 193 | if not settings.DESTINATIONS: |
191 | raise Exception("Required setting DESTINATIONS is missing from carbon.conf") | |
194 | raise CarbonConfigException("Required setting DESTINATIONS is missing from carbon.conf") | |
192 | 195 | |
193 | 196 | for destination in util.parseDestinations(settings.DESTINATIONS): |
194 | 197 | client_manager.startClient(destination) |
11 | 11 | See the License for the specific language governing permissions and |
12 | 12 | limitations under the License.""" |
13 | 13 | |
14 | import os, re | |
14 | import os | |
15 | import re | |
15 | 16 | import whisper |
16 | 17 | |
17 | from os.path import join, exists | |
18 | from os.path import join, exists, sep | |
18 | 19 | from carbon.conf import OrderedConfigParser, settings |
19 | 20 | from carbon.util import pickle |
20 | 21 | from carbon import log |
21 | ||
22 | from carbon.exceptions import CarbonConfigException | |
22 | 23 | |
23 | 24 | STORAGE_SCHEMAS_CONFIG = join(settings.CONF_DIR, 'storage-schemas.conf') |
24 | 25 | STORAGE_AGGREGATION_CONFIG = join(settings.CONF_DIR, 'storage-aggregation.conf') |
25 | 26 | STORAGE_LISTS_DIR = join(settings.CONF_DIR, 'lists') |
26 | 27 | |
28 | ||
27 | 29 | def getFilesystemPath(metric): |
28 | return join(settings.LOCAL_DATA_DIR, metric.replace('.','/')) + '.wsp' | |
30 | metric_path = metric.replace('.', sep).lstrip(sep) + '.wsp' | |
31 | return join(settings.LOCAL_DATA_DIR, metric_path) | |
29 | 32 | |
30 | 33 | |
31 | 34 | class Schema: |
33 | 36 | raise NotImplementedError() |
34 | 37 | |
35 | 38 | def matches(self, metric): |
36 | return bool( self.test(metric) ) | |
39 | return bool(self.test(metric)) | |
37 | 40 | |
38 | 41 | |
39 | 42 | class DefaultSchema(Schema): |
91 | 94 | |
92 | 95 | class Archive: |
93 | 96 | |
94 | def __init__(self,secondsPerPoint,points): | |
97 | def __init__(self, secondsPerPoint, points): | |
95 | 98 | self.secondsPerPoint = int(secondsPerPoint) |
96 | 99 | self.points = int(points) |
97 | 100 | |
98 | 101 | def __str__(self): |
99 | return "Archive = (Seconds per point: %d, Datapoints to save: %d)" % (self.secondsPerPoint, self.points) | |
102 | return "Archive = (Seconds per point: %d, Datapoints to save: %d)" % (self.secondsPerPoint, self.points) | |
100 | 103 | |
101 | 104 | def getTuple(self): |
102 | return (self.secondsPerPoint,self.points) | |
105 | return (self.secondsPerPoint, self.points) | |
103 | 106 | |
104 | 107 | @staticmethod |
105 | 108 | def fromString(retentionDef): |
113 | 116 | config.read(STORAGE_SCHEMAS_CONFIG) |
114 | 117 | |
115 | 118 | for section in config.sections(): |
116 | options = dict( config.items(section) ) | |
119 | options = dict(config.items(section)) | |
117 | 120 | matchAll = options.get('match-all') |
118 | 121 | pattern = options.get('pattern') |
119 | 122 | listName = options.get('list') |
120 | 123 | |
121 | 124 | retentions = options['retentions'].split(',') |
122 | archives = [ Archive.fromString(s) for s in retentions ] | |
123 | ||
125 | archives = [Archive.fromString(s) for s in retentions] | |
126 | ||
124 | 127 | if matchAll: |
125 | 128 | mySchema = DefaultSchema(section, archives) |
126 | 129 | |
129 | 132 | |
130 | 133 | elif listName: |
131 | 134 | mySchema = ListSchema(section, listName, archives) |
132 | ||
135 | ||
133 | 136 | archiveList = [a.getTuple() for a in archives] |
134 | 137 | |
135 | 138 | try: |
136 | 139 | whisper.validateArchiveList(archiveList) |
137 | 140 | schemaList.append(mySchema) |
138 | except InvalidConfiguration, e: | |
139 | log.msg("Invalid schemas found in %s: %s" % (section, e.message) ) | |
140 | ||
141 | except whisper.InvalidConfiguration, e: | |
142 | log.msg("Invalid schemas found in %s: %s" % (section, e)) | |
143 | ||
141 | 144 | schemaList.append(defaultSchema) |
142 | 145 | return schemaList |
143 | 146 | |
149 | 152 | |
150 | 153 | try: |
151 | 154 | config.read(STORAGE_AGGREGATION_CONFIG) |
152 | except IOError: | |
155 | except (IOError, CarbonConfigException): | |
153 | 156 | log.msg("%s not found, ignoring." % STORAGE_AGGREGATION_CONFIG) |
154 | 157 | |
155 | 158 | for section in config.sections(): |
156 | options = dict( config.items(section) ) | |
159 | options = dict(config.items(section)) | |
157 | 160 | matchAll = options.get('match-all') |
158 | 161 | pattern = options.get('pattern') |
159 | 162 | listName = options.get('list') |
168 | 171 | if aggregationMethod is not None: |
169 | 172 | assert aggregationMethod in whisper.aggregationMethods |
170 | 173 | except: |
171 | log.msg("Invalid schemas found in %s." % section ) | |
174 | log.msg("Invalid schemas found in %s." % section) | |
172 | 175 | continue |
173 | 176 | |
174 | 177 | archives = (xFilesFactor, aggregationMethod) |
187 | 190 | schemaList.append(defaultAggregation) |
188 | 191 | return schemaList |
189 | 192 | |
190 | defaultArchive = Archive(60, 60 * 24 * 7) #default retention for unclassified data (7 days of minutely data) | |
193 | defaultArchive = Archive(60, 60 * 24 * 7) # default retention for unclassified data (7 days of minutely data) | |
191 | 194 | defaultSchema = DefaultSchema('default', [defaultArchive]) |
192 | 195 | defaultAggregation = DefaultSchema('default', (None, None)) |
0 | import sys | |
0 | import copy | |
1 | 1 | import os |
2 | 2 | import pwd |
3 | ||
4 | from os.path import abspath, basename, dirname, join | |
3 | import sys | |
4 | ||
5 | from os.path import abspath, basename, dirname | |
5 | 6 | try: |
6 | 7 | from cStringIO import StringIO |
7 | 8 | except ImportError: |
13 | 14 | import pickle |
14 | 15 | USING_CPICKLE = False |
15 | 16 | |
17 | from time import sleep, time | |
16 | 18 | from twisted.python.util import initgroups |
17 | 19 | from twisted.scripts.twistd import runApp |
18 | 20 | from twisted.scripts._twistd_unix import daemonize |
19 | 21 | |
20 | 22 | |
21 | daemonize = daemonize # Backwards compatibility | |
23 | daemonize = daemonize # Backwards compatibility | |
22 | 24 | |
23 | 25 | |
24 | 26 | def dropprivs(user): |
69 | 71 | twistd_options.append("--profile") |
70 | 72 | if options.pidfile: |
71 | 73 | twistd_options.extend(["--pidfile", options.pidfile]) |
74 | if options.umask: | |
75 | twistd_options.extend(["--umask", options.umask]) | |
72 | 76 | |
73 | 77 | # Now for the plugin-specific options. |
74 | 78 | twistd_options.append(program) |
78 | 82 | |
79 | 83 | for option_name, option_value in vars(options).items(): |
80 | 84 | if (option_value is not None and |
81 | option_name not in ("debug", "profile", "pidfile")): | |
85 | option_name not in ("debug", "profile", "pidfile", "umask")): | |
82 | 86 | twistd_options.extend(["--%s" % option_name.replace("_", "-"), |
83 | 87 | option_value]) |
84 | 88 | |
104 | 108 | else: |
105 | 109 | raise ValueError("Invalid destination string \"%s\"" % dest_string) |
106 | 110 | |
107 | destinations.append( (server, int(port), instance) ) | |
111 | destinations.append((server, int(port), instance)) | |
108 | 112 | |
109 | 113 | return destinations |
110 | ||
111 | 114 | |
112 | 115 | |
113 | 116 | # This whole song & dance is due to pickle being insecure |
118 | 121 | if USING_CPICKLE: |
119 | 122 | class SafeUnpickler(object): |
120 | 123 | PICKLE_SAFE = { |
121 | 'copy_reg' : set(['_reconstructor']), | |
122 | '__builtin__' : set(['object']), | |
124 | 'copy_reg': set(['_reconstructor']), | |
125 | '__builtin__': set(['object']), | |
123 | 126 | } |
124 | 127 | |
125 | 128 | @classmethod |
141 | 144 | else: |
142 | 145 | class SafeUnpickler(pickle.Unpickler): |
143 | 146 | PICKLE_SAFE = { |
144 | 'copy_reg' : set(['_reconstructor']), | |
145 | '__builtin__' : set(['object']), | |
147 | 'copy_reg': set(['_reconstructor']), | |
148 | '__builtin__': set(['object']), | |
146 | 149 | } |
150 | ||
147 | 151 | def find_class(self, module, name): |
148 | 152 | if not module in self.PICKLE_SAFE: |
149 | 153 | raise pickle.UnpicklingError('Attempting to unpickle unsafe module %s' % module) |
152 | 156 | if not name in self.PICKLE_SAFE[module]: |
153 | 157 | raise pickle.UnpicklingError('Attempting to unpickle unsafe class %s' % name) |
154 | 158 | return getattr(mod, name) |
155 | ||
159 | ||
156 | 160 | @classmethod |
157 | 161 | def loads(cls, pickle_string): |
158 | 162 | return cls(StringIO(pickle_string)).load() |
159 | ||
163 | ||
160 | 164 | |
161 | 165 | def get_unpickler(insecure=False): |
162 | 166 | if insecure: |
163 | 167 | return pickle |
164 | 168 | else: |
165 | 169 | return SafeUnpickler |
170 | ||
171 | ||
class TokenBucket(object):
  '''This is a basic tokenbucket rate limiter implementation for use in
  enforcing various configurable rate limits'''

  def __init__(self, capacity, fill_rate):
    '''Capacity is the total number of tokens the bucket can hold, fill rate is
    the rate in tokens (or fractional tokens) to be added to the bucket per
    second.'''
    self.capacity = float(capacity)
    self._tokens = float(capacity)
    self.fill_rate = float(fill_rate)
    self.timestamp = time()

  def drain(self, cost, blocking=False):
    '''Given a number of tokens (or fractions) drain will return True and
    drain the number of tokens from the bucket if the capacity allows,
    otherwise we return false and leave the contents of the bucket.

    When blocking is True we sleep just long enough for the bucket to
    refill with the missing tokens, then drain and return True.'''
    if cost <= self.tokens:  # accessing .tokens also refills the bucket
      self._tokens -= cost
      return True
    else:
      if blocking:
        tokens_needed = cost - self._tokens
        seconds_per_token = 1 / self.fill_rate
        # BUGFIX: this used to be `seconds_per_token * self.fill_rate`, which
        # is identically 1.0 second no matter how many tokens were missing.
        seconds_left = seconds_per_token * tokens_needed
        time_to_sleep = self.timestamp + seconds_left - time()
        # Clamp: a negative sleep raises on some interpreters and means the
        # bucket has already refilled enough anyway.
        if time_to_sleep > 0:
          sleep(time_to_sleep)
        self._tokens -= cost
        return True
      return False

  @property
  def tokens(self):
    '''The tokens property will return the current number of tokens in the
    bucket.'''
    # Lazily refill based on elapsed time, capped at capacity.
    if self._tokens < self.capacity:
      now = time()
      delta = self.fill_rate * (now - self.timestamp)
      self._tokens = min(self.capacity, self._tokens + delta)
      self.timestamp = now
    return self._tokens
211 | ||
212 | ||
class defaultdict(dict):
  '''Pure-Python stand-in for collections.defaultdict: missing keys are
  populated on first access by calling default_factory.'''

  def __init__(self, default_factory=None, *a, **kw):
    # The factory must be callable (or None, meaning plain-dict behavior).
    if default_factory is not None and not hasattr(default_factory, '__call__'):
      raise TypeError('first argument must be callable')
    dict.__init__(self, *a, **kw)
    self.default_factory = default_factory

  def __getitem__(self, key):
    # Present keys behave exactly like a normal dict lookup; absent keys
    # are routed through the __missing__ protocol.
    if dict.__contains__(self, key):
      return dict.__getitem__(self, key)
    return self.__missing__(key)

  def __missing__(self, key):
    factory = self.default_factory
    if factory is None:
      raise KeyError(key)
    value = factory()
    self[key] = value
    return value

  def __reduce__(self):
    # Pickle support: recreate via (factory,) plus the item iterator.
    args = tuple() if self.default_factory is None else (self.default_factory,)
    return type(self), args, None, None, self.iteritems()

  def copy(self):
    return self.__copy__()

  def __copy__(self):
    # Shallow copy: same factory, same (shared) values.
    return type(self)(self.default_factory, self)

  def __deepcopy__(self, memo):
    return type(self)(self.default_factory, copy.deepcopy(self.items()))

  def __repr__(self):
    return 'defaultdict(%s, %s)' % (self.default_factory, dict.__repr__(self))
11 | 11 | See the License for the specific language governing permissions and |
12 | 12 | limitations under the License.""" |
13 | 13 | |
14 | ||
15 | 14 | import os |
16 | 15 | import time |
17 | from os.path import join, exists, dirname, basename | |
16 | from os.path import exists, dirname | |
18 | 17 | |
19 | 18 | import whisper |
20 | 19 | from carbon import state |
21 | 20 | from carbon.cache import MetricCache |
22 | from carbon.storage import getFilesystemPath, loadStorageSchemas, loadAggregationSchemas | |
21 | from carbon.storage import getFilesystemPath, loadStorageSchemas,\ | |
22 | loadAggregationSchemas | |
23 | 23 | from carbon.conf import settings |
24 | 24 | from carbon import log, events, instrumentation |
25 | from carbon.util import TokenBucket | |
25 | 26 | |
26 | 27 | from twisted.internet import reactor |
27 | 28 | from twisted.internet.task import LoopingCall |
28 | 29 | from twisted.application.service import Service |
29 | 30 | |
30 | 31 | |
31 | lastCreateInterval = 0 | |
32 | createCount = 0 | |
33 | schemas = loadStorageSchemas() | |
34 | agg_schemas = loadAggregationSchemas() | |
32 | SCHEMAS = loadStorageSchemas() | |
33 | AGGREGATION_SCHEMAS = loadAggregationSchemas() | |
35 | 34 | CACHE_SIZE_LOW_WATERMARK = settings.MAX_CACHE_SIZE * 0.95 |
36 | 35 | |
37 | 36 | |
37 | # Initialize token buckets so that we can enforce rate limits on creates and
38 | # updates if the config wants them.
39 | CREATE_BUCKET = None | |
40 | UPDATE_BUCKET = None | |
41 | if settings.MAX_CREATES_PER_MINUTE != float('inf'): | |
42 | capacity = settings.MAX_CREATES_PER_MINUTE | |
43 | fill_rate = float(settings.MAX_CREATES_PER_MINUTE) / 60 | |
44 | CREATE_BUCKET = TokenBucket(capacity, fill_rate) | |
45 | ||
46 | if settings.MAX_UPDATES_PER_SECOND != float('inf'): | |
47 | capacity = settings.MAX_UPDATES_PER_SECOND | |
48 | fill_rate = settings.MAX_UPDATES_PER_SECOND | |
49 | UPDATE_BUCKET = TokenBucket(capacity, fill_rate) | |
50 | ||
51 | ||
38 | 52 | def optimalWriteOrder(): |
39 | "Generates metrics with the most cached values first and applies a soft rate limit on new metrics" | |
40 | global lastCreateInterval | |
41 | global createCount | |
42 | metrics = MetricCache.counts() | |
43 | ||
44 | t = time.time() | |
45 | metrics.sort(key=lambda item: item[1], reverse=True) # by queue size, descending | |
46 | log.msg("Sorted %d cache queues in %.6f seconds" % (len(metrics), time.time() - t)) | |
47 | ||
48 | for metric, queueSize in metrics: | |
53 | """Generates metrics with the most cached values first and applies a soft | |
54 | rate limit on new metrics""" | |
55 | while MetricCache: | |
56 | (metric, datapoints) = MetricCache.pop() | |
49 | 57 | if state.cacheTooFull and MetricCache.size < CACHE_SIZE_LOW_WATERMARK: |
50 | 58 | events.cacheSpaceAvailable() |
51 | 59 | |
52 | 60 | dbFilePath = getFilesystemPath(metric) |
53 | 61 | dbFileExists = exists(dbFilePath) |
54 | 62 | |
55 | if not dbFileExists: | |
56 | createCount += 1 | |
57 | now = time.time() | |
58 | ||
59 | if now - lastCreateInterval >= 60: | |
60 | lastCreateInterval = now | |
61 | createCount = 1 | |
62 | ||
63 | elif createCount >= settings.MAX_CREATES_PER_MINUTE: | |
64 | # dropping queued up datapoints for new metrics prevents filling up the entire cache | |
65 | # when a bunch of new metrics are received. | |
66 | try: | |
67 | MetricCache.pop(metric) | |
68 | except KeyError: | |
69 | pass | |
70 | ||
71 | continue | |
72 | ||
73 | try: # metrics can momentarily disappear from the MetricCache due to the implementation of MetricCache.store() | |
74 | datapoints = MetricCache.pop(metric) | |
75 | except KeyError: | |
76 | log.msg("MetricCache contention, skipping %s update for now" % metric) | |
77 | continue # we simply move on to the next metric when this race condition occurs | |
63 | if not dbFileExists and CREATE_BUCKET: | |
64 | # If our tokenbucket has enough tokens available to create a new metric | |
65 | # file then yield the metric data to complete that operation. Otherwise | |
66 | # we'll just drop the metric on the ground and move on to the next | |
67 | # metric. | |
68 | # XXX This behavior should probably be configurable to not drop metrics
69 | # when rate limiting unless our cache is too big or some other legit
70 | # reason. | |
71 | if CREATE_BUCKET.drain(1): | |
72 | yield (metric, datapoints, dbFilePath, dbFileExists) | |
73 | continue | |
78 | 74 | |
79 | 75 | yield (metric, datapoints, dbFilePath, dbFileExists) |
80 | 76 | |
81 | 77 | |
82 | 78 | def writeCachedDataPoints(): |
83 | 79 | "Write datapoints until the MetricCache is completely empty" |
84 | updates = 0 | |
85 | lastSecond = 0 | |
86 | 80 | |
87 | 81 | while MetricCache: |
88 | 82 | dataWritten = False |
94 | 88 | archiveConfig = None |
95 | 89 | xFilesFactor, aggregationMethod = None, None |
96 | 90 | |
97 | for schema in schemas: | |
91 | for schema in SCHEMAS: | |
98 | 92 | if schema.matches(metric): |
99 | 93 | log.creates('new metric %s matched schema %s' % (metric, schema.name)) |
100 | 94 | archiveConfig = [archive.getTuple() for archive in schema.archives] |
101 | 95 | break |
102 | 96 | |
103 | for schema in agg_schemas: | |
97 | for schema in AGGREGATION_SCHEMAS: | |
104 | 98 | if schema.matches(metric): |
105 | 99 | log.creates('new metric %s matched aggregation schema %s' % (metric, schema.name)) |
106 | 100 | xFilesFactor, aggregationMethod = schema.archives |
110 | 104 | raise Exception("No storage schema matched the metric '%s', check your storage-schemas.conf file." % metric) |
111 | 105 | |
112 | 106 | dbDir = dirname(dbFilePath) |
113 | os.system("mkdir -p -m 755 '%s'" % dbDir) | |
114 | ||
115 | log.creates("creating database file %s (archive=%s xff=%s agg=%s)" % | |
107 | try: | |
108 | if not exists(dbDir): | |
109 | os.makedirs(dbDir, 0755) | |
110 | except OSError, e: | |
111 | log.err("%s" % e) | |
112 | log.creates("creating database file %s (archive=%s xff=%s agg=%s)" % | |
116 | 113 | (dbFilePath, archiveConfig, xFilesFactor, aggregationMethod)) |
117 | whisper.create(dbFilePath, archiveConfig, xFilesFactor, aggregationMethod, settings.WHISPER_SPARSE_CREATE) | |
118 | os.chmod(dbFilePath, 0755) | |
114 | whisper.create( | |
115 | dbFilePath, | |
116 | archiveConfig, | |
117 | xFilesFactor, | |
118 | aggregationMethod, | |
119 | settings.WHISPER_SPARSE_CREATE, | |
120 | settings.WHISPER_FALLOCATE_CREATE) | |
119 | 121 | instrumentation.increment('creates') |
120 | ||
122 | # If we've got a rate limit configured lets makes sure we enforce it | |
123 | if UPDATE_BUCKET: | |
124 | UPDATE_BUCKET.drain(1, blocking=True) | |
121 | 125 | try: |
122 | 126 | t1 = time.time() |
123 | 127 | whisper.update_many(dbFilePath, datapoints) |
124 | t2 = time.time() | |
125 | updateTime = t2 - t1 | |
128 | updateTime = time.time() - t1 | |
126 | 129 | except: |
127 | 130 | log.msg("Error writing to %s" % (dbFilePath)) |
128 | 131 | log.err() |
131 | 134 | pointCount = len(datapoints) |
132 | 135 | instrumentation.increment('committedPoints', pointCount) |
133 | 136 | instrumentation.append('updateTimes', updateTime) |
134 | ||
135 | 137 | if settings.LOG_UPDATES: |
136 | 138 | log.updates("wrote %d datapoints for %s in %.5f seconds" % (pointCount, metric, updateTime)) |
137 | ||
138 | # Rate limit update operations | |
139 | thisSecond = int(t2) | |
140 | ||
141 | if thisSecond != lastSecond: | |
142 | lastSecond = thisSecond | |
143 | updates = 0 | |
144 | else: | |
145 | updates += 1 | |
146 | if updates >= settings.MAX_UPDATES_PER_SECOND: | |
147 | time.sleep( int(t2 + 1) - t2 ) | |
148 | 139 | |
149 | 140 | # Avoid churning CPU when only new metrics are in the cache |
150 | 141 | if not dataWritten: |
157 | 148 | writeCachedDataPoints() |
158 | 149 | except: |
159 | 150 | log.err() |
160 | ||
161 | time.sleep(1) # The writer thread only sleeps when the cache is empty or an error occurs | |
151 | time.sleep(1) # The writer thread only sleeps when the cache is empty or an error occurs | |
162 | 152 | |
163 | 153 | |
164 | 154 | def reloadStorageSchemas(): |
165 | global schemas | |
155 | global SCHEMAS | |
166 | 156 | try: |
167 | schemas = loadStorageSchemas() | |
157 | SCHEMAS = loadStorageSchemas() | |
168 | 158 | except: |
169 | log.msg("Failed to reload storage schemas") | |
159 | log.msg("Failed to reload storage SCHEMAS") | |
170 | 160 | log.err() |
171 | 161 | |
162 | ||
172 | 163 | def reloadAggregationSchemas(): |
173 | global agg_schemas | |
164 | global AGGREGATION_SCHEMAS | |
174 | 165 | try: |
175 | schemas = loadAggregationSchemas() | |
166 | AGGREGATION_SCHEMAS = loadAggregationSchemas() | |
176 | 167 | except: |
177 | log.msg("Failed to reload aggregation schemas") | |
168 | log.msg("Failed to reload aggregation SCHEMAS") | |
178 | 169 | log.err() |
170 | ||
171 | ||
def shutdownModifyUpdateSpeed():
  '''On shutdown, switch the whisper update rate limit to the configured
  shutdown value so the cache can flush faster; leave it alone when that
  setting is absent.'''
  try:
    new_rate = settings.MAX_UPDATES_PER_SECOND_ON_SHUTDOWN
  except KeyError:
    # No shutdown-specific rate configured; keep the normal limit.
    log.msg("Carbon shutting down. Update rate not changed")
  else:
    settings.MAX_UPDATES_PER_SECOND = new_rate
    log.msg("Carbon shutting down. Changed the update rate to: " + str(new_rate))
179 | 178 | |
180 | 179 | |
181 | 180 | class WriterService(Service): |
187 | 186 | def startService(self): |
188 | 187 | self.storage_reload_task.start(60, False) |
189 | 188 | self.aggregation_reload_task.start(60, False) |
189 | reactor.addSystemEventTrigger('before', 'shutdown', shutdownModifyUpdateSpeed) | |
190 | 190 | reactor.callInThread(writeForever) |
191 | 191 | Service.startService(self) |
192 | 192 |
0 | 0 | #!/usr/bin/env python |
1 | 1 | |
2 | 2 | import os |
3 | import platform | |
3 | 4 | from glob import glob |
4 | 5 | |
5 | 6 | if os.environ.get('USE_SETUPTOOLS'): |
14 | 15 | storage_dirs = [ ('storage/whisper',[]), ('storage/lists',[]), |
15 | 16 | ('storage/log',[]), ('storage/rrd',[]) ] |
16 | 17 | conf_files = [ ('conf', glob('conf/*.example')) ] |
17 | #XXX Need a way to have these work for bdist_rpm but be left alone for everything else | |
18 | #init_scripts = [ ('/etc/init.d', ['distro/redhat/init.d/carbon-cache', | |
19 | # 'distro/redhat/init.d/carbon-relay', | |
20 | # 'distro/redhat/init.d/carbon-aggregator']) ] | |
18 | ||
19 | install_files = storage_dirs + conf_files | |
20 | ||
21 | # If we are building on RedHat, let's use the redhat init scripts. | |
22 | if platform.dist()[0] == 'redhat': | |
23 | init_scripts = [ ('/etc/init.d', ['distro/redhat/init.d/carbon-cache', | |
24 | 'distro/redhat/init.d/carbon-relay', | |
25 | 'distro/redhat/init.d/carbon-aggregator']) ] | |
26 | install_files += init_scripts | |
27 | ||
21 | 28 | |
22 | 29 | setup( |
23 | 30 | name='carbon', |
24 | version='0.9.10', | |
25 | url='https://launchpad.net/graphite', | |
31 | version='0.9.12', | |
32 | url='http://graphite-project.github.com', | |
26 | 33 | author='Chris Davis', |
27 | 34 | author_email='chrismd@gmail.com', |
28 | 35 | license='Apache Software License 2.0', |
31 | 38 | package_dir={'' : 'lib'}, |
32 | 39 | scripts=glob('bin/*'), |
33 | 40 | package_data={ 'carbon' : ['*.xml'] }, |
34 | data_files=storage_dirs + conf_files, # + init_scripts, | |
41 | data_files=install_files, | |
35 | 42 | install_requires=['twisted', 'txamqp'], |
36 | 43 | **setup_kwargs |
37 | 44 | ) |