Import upstream version 5.1.0
Debian Janitor
2 years ago
0 | # We use github actions to test the code on windows and linux amd64. Circleci is used for linux arm64. | |
1 | # | |
2 | version: 2.1 | |
3 | ||
4 | orbs: | |
5 | os-detect: circleci/os-detect@0.2 | |
6 | ||
7 | executors: | |
8 | linux_arm64: | |
9 | description: "arm64" | |
10 | machine: | |
11 | image: ubuntu-2004:202101-01 | |
12 | resource_class: arm.medium | |
13 | ||
14 | commands: | |
15 | install_go_linux: | |
16 | parameters: | |
17 | version: | |
18 | type: string | |
19 | steps: | |
20 | - os-detect/init # this setup the '$SUDO' variable | |
21 | - run: | |
22 | name: "install Golang linux" | |
23 | command: | | |
24 | if command -v go >/dev/null; then | |
25 | if go version | grep -q -F "go<< parameters.version >> "; then | |
26 | echo "Binary already exists, skipping download." | |
27 | exit 0 | |
28 | fi | |
29 | echo "Error different version of Go already installed: '`go version`' when requested was '<< parameters.version >>'" | |
30 | ||
31 | $SUDO rm -rf /usr/local/go | |
32 | $SUDO install "--owner=${USER}" -d /usr/local/go | |
33 | fi | |
34 | ||
35 | echo "Installing the requested version of Go." | |
36 | ||
37 | curl --fail --location -sS "https://dl.google.com/go/go<< parameters.version >>.linux-arm64.tar.gz" \ | |
38 | | $SUDO tar --no-same-owner --strip-components=1 --gunzip -x -C /usr/local/go/ | |
39 | ||
40 | echo "export PATH=$PATH:/usr/local/go/bin" >> $BASH_ENV | |
41 | $SUDO chown -R "$(whoami):" /usr/local/go | |
42 | ||
43 | go version | |
44 | ||
45 | run_tests: | |
46 | steps: | |
47 | - checkout | |
48 | - run: go vet ./statsd/... | |
49 | - run: go fmt ./statsd/... | |
50 | - run: go test -v ./statsd/... | |
51 | ||
52 | jobs: | |
53 | # Those allow us to have the os name in the job name. 'matrix' don't add static parameters to the name in the circleci | |
54 | # UI. | |
55 | tests_arm64: | |
56 | working_directory: /home/circleci/.go_workspace/src/github.com/DataDog/datadog-go | |
57 | environment: | |
58 | GO111MODULE: auto | |
59 | executor: linux_arm64 | |
60 | parameters: | |
61 | go-version: | |
62 | type: string | |
63 | steps: | |
64 | - install_go_linux: | |
65 | version: << parameters.go-version >> | |
66 | - run_tests | |
67 | ||
68 | workflows: | |
69 | all-tests: | |
70 | jobs: | |
71 | - tests_arm64: | |
72 | matrix: | |
73 | parameters: | |
74 | go-version: ["1.13", "1.14", "1.15", "1.16", "1.17"] |
0 | # See https://help.github.com/articles/about-codeowners/ for syntax | |
1 | # Rules are matched bottom-to-top, so one team can own subdirectories | |
2 | # and another team can own the rest of the directory. | |
3 | ||
4 | ||
5 | # Documentation | |
6 | *.md @DataDog/baklava |
0 | name: "CodeQL" | |
1 | ||
2 | on: | |
3 | push: | |
4 | branches: [ master ] | |
5 | pull_request: | |
6 | # The branches below must be a subset of the branches above | |
7 | branches: [ master ] | |
8 | ||
9 | jobs: | |
10 | analyze: | |
11 | name: Analyze | |
12 | runs-on: ubuntu-latest | |
13 | permissions: | |
14 | actions: read | |
15 | contents: read | |
16 | security-events: write | |
17 | ||
18 | strategy: | |
19 | fail-fast: false | |
20 | matrix: | |
21 | language: [ 'go' ] | |
22 | # CodeQL supports [ 'cpp', 'csharp', 'go', 'java', 'javascript', 'python', 'ruby' ] | |
23 | # Learn more about CodeQL language support at https://git.io/codeql-language-support | |
24 | ||
25 | steps: | |
26 | - name: Checkout repository | |
27 | uses: actions/checkout@v2 | |
28 | ||
29 | # Initializes the CodeQL tools for scanning. | |
30 | - name: Initialize CodeQL | |
31 | uses: github/codeql-action/init@v1 | |
32 | with: | |
33 | languages: ${{ matrix.language }} | |
34 | # If you wish to specify custom queries, you can do so here or in a config file. | |
35 | # By default, queries listed here will override any specified in a config file. | |
36 | # Prefix the list here with "+" to use these queries and those in the config file. | |
37 | # queries: ./path/to/local/query, your-org/your-repo/queries@main | |
38 | ||
39 | # Autobuild attempts to build any compiled languages (C/C++, C#, or Java). | |
40 | # If this step fails, then you should remove it and run the build manually | |
41 | - name: Autobuild | |
42 | uses: github/codeql-action/autobuild@v1 | |
43 | ||
44 | - name: Perform CodeQL Analysis | |
45 | uses: github/codeql-action/analyze@v1 |
0 | # We use github actions to test the code on windows and linux amd64. Circleci is used for linux arm64. | |
1 | ||
2 | name: datadog-go | |
3 | on: | |
4 | pull_request: | |
5 | ||
6 | jobs: | |
7 | native: | |
8 | strategy: | |
9 | matrix: | |
10 | go-version: [ 1.17, 1.16, 1.15, 1.14, 1.13] | |
11 | runs-on: [ ubuntu-latest, windows-latest, macos-latest ] | |
12 | fail-fast: false | |
13 | runs-on: ${{ matrix.runs-on }} | |
14 | steps: | |
15 | - name: Setup go | |
16 | uses: actions/setup-go@v2 | |
17 | with: | |
18 | go-version: ${{ matrix.go-version }} | |
19 | - name: Checkout code | |
20 | uses: actions/checkout@v2 | |
21 | - run: go vet ./statsd/... | |
22 | - run: go fmt ./statsd/... | |
23 | - run: go test -race -v ./statsd/... |
0 | name: generate-mock | |
1 | on: | |
2 | pull_request: | |
3 | ||
4 | jobs: | |
5 | native: | |
6 | strategy: | |
7 | matrix: | |
8 | runs-on: [ ubuntu-latest, windows-latest] | |
9 | fail-fast: false | |
10 | runs-on: ${{ matrix.runs-on }} | |
11 | steps: | |
12 | - name: Setup go | |
13 | uses: actions/setup-go@v2 | |
14 | with: | |
15 | go-version: 1.16 | |
16 | - name: Checkout code | |
17 | uses: actions/checkout@v2 | |
18 | - run: go install github.com/golang/mock/mockgen | |
19 | - run: go generate statsd/statsd.go | |
20 | - run: git diff --exit-code HEAD |
0 | language: go | |
1 | ||
2 | go: | |
3 | - 1.5 | |
4 | - 1.6 | |
5 | - 1.7 | |
6 | - 1.8 | |
7 | - 1.9 | |
8 | ||
9 | script: | |
10 | - go test -race -v ./... |
0 | 0 | ## Changes |
1 | ||
2 | [//]: # (comment: Don't forget to update statsd/telemetry.go:clientVersionTelemetryTag when releasing a new version) | |
3 | ||
4 | # 5.1.0 / 2022-03-02 | |
5 | ||
6 | * [FEATURE] Adding support for container origin detection. See [#250][]. | |
7 | * [FEATURE] Adding `IsClosed` method to the client. See [#254][], thanks [@lucassscaravelli][]. | |
8 | * [FEATURE] Adding a mock for the `Client` interface to ease testing from users. See [#255][]. | |
9 | * [IMPROVEMENT] Optimize `getContext` and `getContextAndTags` functions. See [#253][], thanks [@martin-sucha][]. | |
10 | * [IMPROVEMENT] Export error `MessageTooLongError` to catch error when sending message that can't fit in a buffer. See | |
11 | [#252][]. | |
12 | * [BUGFIX] Add missing `GetTelemetry` from the `Client` Interface. See [#255][]. | |
13 | ||
14 | # 5.0.2 / 2021-11-29 | |
15 | ||
16 | * [BUGFIX] Fix Windows erroneous import. See [#242][], thanks [@programmer04][]. | |
17 | ||
18 | # 5.0.1 / 2021-10-18 | |
19 | ||
20 | * [BUGFIX] Fix Event.Check method: text is no longer required. See [#235][]. | |
21 | ||
22 | # 5.0.0 / 2021-10-01 | |
23 | ||
24 | ## Breaking changes | |
25 | ||
26 | Many field/methods have been removed from the public API of the client to allow for the client internals to evolve | |
27 | more easily in the future without breaking the public API of the client. | |
28 | ||
29 | - New import path for the v5 is `github.com/DataDog/datadog-go/v5/statsd` | |
30 | - The project now uses a go.mod file for its dependencies. | |
31 | - `WithDevMode` option has been removed. The extended telemetry enabled by `WithDevMode` is now part of the default | |
32 | telemetry. | |
33 | - `WithWriteTimeoutUDS` option has been renamed `WithWriteTimeout` since it also impacts named pipe transport. | |
34 | - `SetWriteTimeout` method has been removed in favor of `WithWriteTimeout` option. | |
35 | - The following internal fields and methods have been removed from the public API: | |
36 | + `WriterNameUDP` | |
37 | + `WriterNameUDS` | |
38 | + `WriterWindowsPipe` | |
39 | + `TelemetryInterval` | |
40 | - Field `Client.Namespace` is now private, please use the `WithNamespace` option. | |
41 | - Field `Client.Tags` is now private, please use the `WithTags` option. | |
42 | - Method `NewBuffered` has been removed in favor of the `WithMaxMessagesPerPayload()` option. | |
43 | Instead of `statsd.NewBuffered(addr, bufferLength)` please use `statsd.New(addr, statsd.WithMaxMessagesPerPayload(bufferLength))` | |
44 | - `Encode` method for `Event` and `ServiceCheck` have been removed. | |
45 | - The `Check` method for `Event` and `ServiceCheck` now uses pointer receivers. | |
46 | - All `Options` internals are no longer part of the public API. Only the part needed by the client app is left in the | |
47 | public API. This also improves/clarifies the `Options` documentation and usage. | |
48 | - `statsdWriter` have been removed from the API, `io.WriteCloser` can now be used instead. | |
49 | - `SenderMetrics` and `ClientMetrics` structs as well as `FlushTelemetryMetrics` method have been removed from the | |
50 | public API in favor of the `Telemetry` struct and the `GetTelemetry` method. The client telemetry is now cumulative | |
51 | since the start of the client instead of being reset after being sent to the Agent. See `Telemetry` struct | |
52 | documentation for more information on what each field represents. This allows client apps to take action based on | |
53 | the telemetry (ex: adapting sampling rate based on the number of packets dropped). The telemetry sent to the agent | |
54 | hasn't changed so the same dashboard can be used for V4 and V5 apps. | |
55 | - Client side aggregation for Counts, Gauges and Sets is enabled by default. See `WithoutClientSideAggregation()` option | |
56 | to disable it. | |
57 | - `WithBufferShardCount` option has been renamed `WithWorkersCount`. | |
58 | ||
59 | ## Notes | |
60 | ||
61 | - [FEATURE] Adding public method `GetTelemetry` to retrieve the client internal telemetry since the start of the client. | |
62 | - [FEATURE] Client side aggregation for Counts, Gauges and Sets is enabled by default. | |
63 | `WithExtendedClientSideAggregation()` for Timings, Histograms and Distributions is still disabled by default. Both | |
64 | features are no longer considered BETA. | |
65 | ||
66 | # 4.8.3 / 2021-10-27 | |
67 | ||
68 | * [BUGFIX] Fix `Event.Check` method: text is no longer required. See [#237][]. | |
69 | ||
70 | # 4.8.2 / 2021-09-06 | |
71 | ||
72 | * [BETA][BUGFIX] Fix race condition in aggregation where two sample could overwrite each other when sampled for the first time. See [#225][] | |
73 | ||
74 | # 4.8.1 / 2021-07-09 | |
75 | ||
76 | * [BUGFIX] Prevent telemetry from using the client global namespace. See [#205][] | |
77 | * [BETA][BUGFIX] Fix timings having a different precision with and without extended aggregation. See [#204][] | |
78 | ||
79 | # 4.8.0 / 2021-06-14 | |
80 | ||
81 | * [BETA][IMPROVEMENT] Reduce aggregation default window to 2s to reduce sampling aliasing. See [#199][] | |
82 | * [IMPROVEMENT] Automatically add a "\n" after each metric so the agent can determine if a metric is truncated. Per source EOL detection was made available in agent 7.28 with the `dogstatsd_eol_required` setting. See [#198][] | |
83 | ||
84 | # 4.7.0 / 2021-05-05 | |
85 | ||
86 | * [BETA] Increase the number of workers in the aggregator when using channelMode with extended aggregation to have | |
87 | similar performance to channelMode without aggregation. See [#195][]. | |
88 | ||
89 | # 4.6.1 / 2021-04-30 | |
90 | ||
91 | * [BETA BUGFIX] Fix telemetry with extended aggregation and channelMode. See [#194][]. | |
92 | ||
93 | # 4.6.0 / 2021-04-16 | |
94 | ||
95 | * [BETA] Support sample rate and channel mode for extended aggregation (ie: histograms, distributions and timings). See [#187][]. | |
96 | ||
97 | # 4.5.1 / 2021-03-31 | |
98 | ||
99 | * [BUGFIX] Fix support of UDS and named pipe for DD_AGENT_HOST environment variable. See [#192][]. | |
100 | ||
101 | # 4.5.0 / 2021-03-15 | |
102 | ||
103 | * [IMPROVEMENT] Increase UDS default timeout from 1ms to 100ms. See [#186][]. | |
104 | * [IMPROVEMENT] Defer connection establishment to first write for Windows Named Pipe. See [#190][]. | |
105 | ||
106 | # 4.4.0 / 2021-02-10 | |
107 | ||
108 | * [BETA BUGFIX] Fix multi-metric aggregation when packing different metrics in the same packet. See [#181][]. | |
109 | * [FEATURE] Add support for Windows Named Pipes (Windows only). See [#182][] and [#185][]. | |
110 | ||
111 | # 4.3.1 / 2021-01-28 | |
112 | ||
113 | * [BUGFIX] Fix race condition when using sample rate (introduced in 4.3.0). See [#179][]. | |
114 | ||
115 | # 4.3.0 / 2021-01-20 | |
116 | ||
117 | * [BETA] Adding client side aggregation for distributions, histograms and timings. See [#176][]. | |
118 | * [IMPROVEMENT] Use a worker-specific random source to remove lock contention. See [#178][]. Thanks to [@matthewdale][]. | |
119 | * [IMPROVEMENT] Update devMode telemetry naming and tagging to ease graphing in Datadog. See [#175][]. | |
120 | ||
121 | # 4.2.0 / 2020-11-02 | |
122 | ||
123 | * [UDS] Use better payload size defaults for UDS connections. See [#171][]. | |
124 | ||
125 | # 4.1.0 / 2020-10-23 | |
126 | ||
127 | [BETA BUGFIX] Ignore sampling rate when client side aggregation is enabled (for Gauge, Count and Set). See [#170][]. | |
128 | [FEATURE] Adding a new option `WithDevMode()`, to send more telemetry metrics to ease troubleshooting issues. See [#169][]. | |
129 | ||
130 | ||
131 | # 4.0.1 / 2020-10-07 | |
132 | ||
133 | ### Notes | |
134 | ||
135 | * [BUGFIX] Fix incomplete manual flush of the sender when the client isn't stopped. See [#163][]. | |
136 | ||
137 | # 4.0.0 / 2020-08-21 | |
138 | ||
139 | ### Notes | |
140 | ||
141 | * [FEATURE] Add new option `WithTelemetryAddr`, to send the telemetry data to a different endpoint. See [#157][]. | |
142 | * [BUGFIX] Fix race condition in the flush mechanism of the aggregator. See [#166][]. Thanks to [@cyx][]. | |
143 | ||
144 | ### Breaking changes | |
145 | ||
146 | - Dropping support for EOL versions of Golang 1.11 and lower. | |
147 | ||
148 | # 3.7.2 / 2020-06-16 | |
149 | ||
150 | ### Notes | |
151 | ||
152 | * [BUGFIX] Fix panic on 32bits and ARM when using the telemetry. See [#156][]. | |
153 | * [BETA BUGFIX] Fix typo in method name to configure the aggregation window interval. See [#154][]. | |
154 | ||
155 | # 3.7.1 / 2020-05-01 | |
156 | ||
157 | ### Notes | |
158 | ||
159 | * [BUGFIX] Fix panic when calling CloneWithExtraOptions with a nil client. See [#148][]. | |
160 | ||
161 | # 3.7.0 / 2020-04-29 | |
162 | ||
163 | ### Notes | |
164 | ||
165 | * [FEATURE] Add new function to clone a Client, so library can inherit and extend options from the main application. See [#147][]. | |
166 | * [IMPROVEMENT] Auto append a '.' when needed to namespace. See [#145][]. Thanks to [@kamatama41][]. | |
167 | * [IMPROVEMENT] Add the client global tags to the telemetry tags. See [#143][]. Thanks to [@chrisleavoy][]. | |
168 | ||
169 | # 3.6.0 / 2020-04-21 | |
170 | ||
171 | ### Notes | |
172 | ||
173 | * [IMPROVEMENT] Reduce lock contention by sharding worker by metric name. See [#108][]. | |
174 | * [FEATURE] Adding a "channel mode" to send metrics to the client, disabled by default. See [#134][]. | |
175 | * [BUGFIX] Fix metrics not being flushed when the client is closed. See [#144][]. | |
176 | * [BETA] Adding client side aggregation for Gauge, Count and Set. See [#139][]. | |
177 | ||
178 | # 3.5.0 / 2020-03-17 | |
179 | ||
180 | ### Notes | |
181 | ||
182 | * [IMPROVEMENT] Add support for `DD_ENV`, `DD_SERVICE`, and `DD_VERSION` to set global tags for `env`, `service` and `version`. See [#137][] | |
183 | ||
184 | # 3.4.1 / 2020-03-10 | |
185 | ||
186 | ### Notes | |
187 | ||
188 | * [BUGFIX] Fix possible deadlock when closing the client. See [#135][]. Thanks to [@danp60][]. | |
189 | ||
190 | # 3.4.0 / 2020-01-15 | |
191 | ||
192 | ### Notes | |
193 | ||
194 | * [IMPROVEMENT] Improve tags for the telemetry. See [#118][]. | |
195 | * [IMPROVEMENT] Add option to disable the telemetry. See [#117][]. | |
196 | * [IMPROVEMENT] Add metrics, event and service check count to the telemetry. See [#118][]. | |
197 | ||
198 | # 3.3.1 / 2019-12-13 | |
199 | ||
200 | ### Notes | |
201 | ||
202 | * [BUGFIX] Fix Unix domain socket path extraction. See [#113][]. | |
203 | * [BUGFIX] Fix an issue with custom writers leading to metric drops. See [#106][]. | |
204 | * [BUGFIX] Fix an error check in uds.Write leading to unneeded re-connections. See [#115][]. | |
205 | ||
206 | # 3.3.0 / 2019-12-02 | |
207 | ||
208 | ### Notes | |
209 | ||
210 | * [BUGFIX] Close the stop channel when closing a statsd client to avoid leaking. See [#107][]. | |
211 | ||
212 | # 3.2.0 / 2019-10-28 | |
213 | ||
214 | ### Notes | |
215 | ||
216 | * [IMPROVEMENT] Add all `Client` public methods to the `ClientInterface` and `NoOpClient`. See [#100][]. Thanks [@skaji][]. | |
217 | ||
218 | # 3.1.0 / 2019-10-24 | |
219 | ||
220 | ### Notes | |
221 | ||
222 | * [FEATURE] Add a noop client. See [#92][]. Thanks [@goodspark][]. | |
223 | ||
224 | # 3.0.0 / 2019-10-18 | |
225 | ||
226 | ### Notes | |
227 | ||
228 | * [FEATURE] Add a way to configure the maximum size of a single payload (was always 1432, the optimal size for local UDP). See [#91][]. | |
229 | * [IMPROVEMENT] Various performance improvements. See [#91][]. | |
230 | * [OTHER] The client now pre-allocates 4MB of memory to queue up metrics. This can be controlled using the [WithBufferPoolSize](https://godoc.org/github.com/DataDog/datadog-go/statsd#WithBufferPoolSize) option. | |
231 | ||
232 | ### Breaking changes | |
233 | ||
234 | - Sending a metric over UDS won't return an error if we fail to forward the datagram to the agent. We took this decision for two main reasons: | |
235 | - This made the UDS client blocking by default which is not desirable | |
236 | - This design was flawed if you used a buffer as only the call that actually sent the buffer would return an error | |
237 | - The `Buffered` option has been removed as the client can only be buffered. If for some reason you need to have only one dogstatsd message per payload you can still use the `WithMaxMessagesPerPayload` option set to 1. | |
238 | - The `AsyncUDS` option has been removed as the networking layer is now running in a separate Goroutine. | |
239 | ||
240 | # 2.3.0 / 2019-10-15 | |
241 | ||
242 | ### Notes | |
243 | ||
244 | * [IMPROVEMENT] Use an error constant for "nil client" errors. See [#90][]. Thanks [@asf-stripe][]. | |
245 | ||
246 | # 2.2.0 / 2019-04-11 | |
247 | ||
248 | ### Notes | |
249 | ||
250 | * [FEATURE] UDS: non-blocking implementation. See [#81][]. | |
251 | * [FEATURE] Support configuration from standard environment variables. See [#78][]. | |
252 | * [FEATURE] Configuration at client creation. See [#82][]. | |
253 | * [IMPROVEMENT] UDS: change Mutex to RWMutex for fast already-connected path. See [#84][]. Thanks [@KJTsanaktsidis][]. | |
254 | * [IMPROVEMENT] Return error when using on nil client. See [#65][]. Thanks [@Aceeri][]. | |
255 | * [IMPROVEMENT] Reduce `Client.format` allocations. See [#53][]. Thanks [@vcabbage][]. | |
256 | * [BUGFIX] UDS: add lock to writer for concurrency safety. See [#62][]. | |
257 | * [DOCUMENTATION] Document new options, non-blocking client, etc. See [#85][]. | |
258 | * [TESTING] Adding go 1.10 and go 1.11 to CI. See [#75][]. Thanks [@thedevsaddam][]. | |
1 | 259 | |
2 | 260 | # 2.1.0 / 2018-03-30 |
3 | 261 | |
71 | 329 | [#46]: https://github.com/DataDog/datadog-go/issues/46 |
72 | 330 | [#47]: https://github.com/DataDog/datadog-go/issues/47 |
73 | 331 | [#52]: https://github.com/DataDog/datadog-go/issues/52 |
332 | [#53]: https://github.com/DataDog/datadog-go/issues/53 | |
333 | [#62]: https://github.com/DataDog/datadog-go/issues/62 | |
334 | [#65]: https://github.com/DataDog/datadog-go/issues/65 | |
335 | [#75]: https://github.com/DataDog/datadog-go/issues/75 | |
336 | [#78]: https://github.com/DataDog/datadog-go/issues/78 | |
337 | [#81]: https://github.com/DataDog/datadog-go/issues/81 | |
338 | [#82]: https://github.com/DataDog/datadog-go/issues/82 | |
339 | [#84]: https://github.com/DataDog/datadog-go/issues/84 | |
340 | [#85]: https://github.com/DataDog/datadog-go/issues/85 | |
341 | [#90]: https://github.com/DataDog/datadog-go/issues/90 | |
342 | [#91]: https://github.com/DataDog/datadog-go/issues/91 | |
343 | [#92]: https://github.com/DataDog/datadog-go/issues/92 | |
344 | [#100]: https://github.com/DataDog/datadog-go/issues/100 | |
345 | [#106]: https://github.com/DataDog/datadog-go/issues/106 | |
346 | [#107]: https://github.com/DataDog/datadog-go/issues/107 | |
347 | [#113]: https://github.com/DataDog/datadog-go/issues/113 | |
348 | [#117]: https://github.com/DataDog/datadog-go/issues/117 | |
349 | [#118]: https://github.com/DataDog/datadog-go/issues/118 | |
350 | [#115]: https://github.com/DataDog/datadog-go/issues/115 | |
351 | [#135]: https://github.com/DataDog/datadog-go/issues/135 | |
352 | [#137]: https://github.com/DataDog/datadog-go/issues/137 | |
353 | [#108]: https://github.com/DataDog/datadog-go/pull/108 | |
354 | [#134]: https://github.com/DataDog/datadog-go/pull/134 | |
355 | [#139]: https://github.com/DataDog/datadog-go/pull/139 | |
356 | [#143]: https://github.com/DataDog/datadog-go/pull/143 | |
357 | [#144]: https://github.com/DataDog/datadog-go/pull/144 | |
358 | [#145]: https://github.com/DataDog/datadog-go/pull/145 | |
359 | [#147]: https://github.com/DataDog/datadog-go/pull/147 | |
360 | [#148]: https://github.com/DataDog/datadog-go/pull/148 | |
361 | [#154]: https://github.com/DataDog/datadog-go/pull/154 | |
362 | [#156]: https://github.com/DataDog/datadog-go/pull/156 | |
363 | [#157]: https://github.com/DataDog/datadog-go/pull/157 | |
364 | [#163]: https://github.com/DataDog/datadog-go/pull/163 | |
365 | [#169]: https://github.com/DataDog/datadog-go/pull/169 | |
366 | [#170]: https://github.com/DataDog/datadog-go/pull/170 | |
367 | [#171]: https://github.com/DataDog/datadog-go/pull/171 | |
368 | [#175]: https://github.com/DataDog/datadog-go/pull/175 | |
369 | [#176]: https://github.com/DataDog/datadog-go/pull/176 | |
370 | [#178]: https://github.com/DataDog/datadog-go/pull/178 | |
371 | [#179]: https://github.com/DataDog/datadog-go/pull/179 | |
372 | [#181]: https://github.com/DataDog/datadog-go/pull/181 | |
373 | [#182]: https://github.com/DataDog/datadog-go/pull/182 | |
374 | [#185]: https://github.com/DataDog/datadog-go/pull/185 | |
375 | [#186]: https://github.com/DataDog/datadog-go/pull/186 | |
376 | [#187]: https://github.com/DataDog/datadog-go/pull/187 | |
377 | [#190]: https://github.com/DataDog/datadog-go/pull/190 | |
378 | [#192]: https://github.com/DataDog/datadog-go/pull/192 | |
379 | [#194]: https://github.com/DataDog/datadog-go/pull/194 | |
380 | [#195]: https://github.com/DataDog/datadog-go/pull/195 | |
381 | [#198]: https://github.com/DataDog/datadog-go/pull/198 | |
382 | [#199]: https://github.com/DataDog/datadog-go/pull/199 | |
383 | [#204]: https://github.com/DataDog/datadog-go/pull/204 | |
384 | [#205]: https://github.com/DataDog/datadog-go/pull/205 | |
385 | [#225]: https://github.com/DataDog/datadog-go/pull/225 | |
386 | [#235]: https://github.com/DataDog/datadog-go/pull/235 | |
387 | [#237]: https://github.com/DataDog/datadog-go/pull/237 | |
388 | [#242]: https://github.com/DataDog/datadog-go/pull/242 | |
389 | [#250]: https://github.com/DataDog/datadog-go/pull/250 | |
390 | [#252]: https://github.com/DataDog/datadog-go/pull/252 | |
391 | [#253]: https://github.com/DataDog/datadog-go/pull/253 | |
392 | [#254]: https://github.com/DataDog/datadog-go/pull/254 | |
393 | [#255]: https://github.com/DataDog/datadog-go/pull/255 | |
394 | [@Aceeri]: https://github.com/Aceeri | |
74 | 395 | [@Jasrags]: https://github.com/Jasrags |
396 | [@KJTsanaktsidis]: https://github.com/KJTsanaktsidis | |
75 | 397 | [@abtris]: https://github.com/abtris |
76 | 398 | [@aviau]: https://github.com/aviau |
77 | 399 | [@colega]: https://github.com/colega |
87 | 409 | [@sschepens]: https://github.com/sschepens |
88 | 410 | [@tariq1890]: https://github.com/tariq1890 |
89 | 411 | [@theckman]: https://github.com/theckman |
412 | [@thedevsaddam]: https://github.com/thedevsaddam | |
90 | 413 | [@thomas91310]: https://github.com/thomas91310 |
91 | 414 | [@tummychow]: https://github.com/tummychow |
415 | [@vcabbage]: https://github.com/vcabbage | |
92 | 416 | [@victortrac]: https://github.com/victortrac |
93 | [@w-vi]: https://github.com/w-vi | |
417 | [@w-vi]: https://github.com/w-vi | |
418 | [@asf-stripe]: https://github.com/asf-stripe | |
419 | [@goodspark]: https://github.com/goodspark | |
420 | [@skaji]: https://github.com/skaji | |
421 | [@danp60]: https://github.com/danp60 | |
422 | [@kamatama41]: https://github.com/kamatama41 | |
423 | [@chrisleavoy]: https://github.com/chrisleavoy | |
424 | [@cyx]: https://github.com/cyx | |
425 | [@matthewdale]: https://github.com/matthewdale | |
426 | [@programmer04]: https://github.com/programmer04 | |
427 | [@martin-sucha]: https://github.com/martin-sucha | |
428 | [@lucassscaravelli]: https://github.com/lucassscaravelli |
0 | [![Build Status](https://travis-ci.org/DataDog/datadog-go.svg?branch=master)](https://travis-ci.org/DataDog/datadog-go) | |
1 | # Overview | |
2 | ||
3 | Packages in `datadog-go` provide Go clients for various APIs at [DataDog](http://datadoghq.com). | |
4 | ||
5 | ## Statsd | |
6 | ||
7 | [![Godoc](http://img.shields.io/badge/godoc-reference-blue.svg?style=flat)](https://godoc.org/github.com/DataDog/datadog-go/statsd) | |
0 | [![Build Status](https://circleci.com/gh/DataDog/datadog-go.svg?style=svg)](https://app.circleci.com/pipelines/github/DataDog/datadog-go) | |
1 | ||
2 | # Datadog Go | |
3 | ||
4 | [![Godoc](http://img.shields.io/badge/godoc-reference-blue.svg?style=flat)](https://godoc.org/github.com/DataDog/datadog-go/v5/statsd) | |
8 | 5 | [![license](http://img.shields.io/badge/license-MIT-red.svg?style=flat)](http://opensource.org/licenses/MIT) |
9 | 6 | |
10 | The [statsd](https://github.com/DataDog/datadog-go/tree/master/statsd) package provides a client for | |
11 | [dogstatsd](http://docs.datadoghq.com/guides/dogstatsd/): | |
7 | `datadog-go` is a library that provides a [DogStatsD](https://docs.datadoghq.com/developers/dogstatsd/?code-lang=go) client in Golang. | |
8 | ||
9 | Go 1.12+ is officially supported. Older versions might work but are not tested. | |
10 | ||
11 | The following documentation is available: | |
12 | ||
13 | * [GoDoc documentation for Datadog Go](http://godoc.org/github.com/DataDog/datadog-go/v5/statsd) | |
14 | * [Official Datadog DogStatsD documentation](https://docs.datadoghq.com/developers/dogstatsd/?code-lang=go). | |
15 | ||
16 | ||
17 | <!-- vim-markdown-toc GFM --> | |
18 | ||
19 | * [New major version](#new-major-version) | |
20 | * [Installation](#installation) | |
21 | - [Supported environment variables](#supported-environment-variables) | |
22 | - [Unix Domain Sockets Client](#unix-domain-sockets-client) | |
23 | * [Usage](#usage) | |
24 | - [Metrics](#metrics) | |
25 | - [Events](#events) | |
26 | - [Service Checks](#service-checks) | |
27 | * [Client side aggregation](#client-side-aggregation) | |
28 | - ["Basic" aggregation](#basic-aggregation) | |
29 | - ["Extended" aggregation](#extended-aggregation) | |
30 | * [Performance / Metric drops](#performance--metric-drops) | |
31 | - [Monitoring this client](#monitoring-this-client) | |
32 | - [Tweaking kernel options](#tweaking-kernel-options) | |
33 | + [Unix Domain Sockets](#unix-domain-sockets) | |
34 | - [Maximum packets size in high-throughput scenarios](#maximum-packets-size-in-high-throughput-scenarios) | |
35 | * [Development](#development) | |
36 | * [License](#license) | |
37 | * [Credits](#credits) | |
38 | ||
39 | <!-- vim-markdown-toc --> | |
40 | ||
41 | ||
42 | ## New major version | |
43 | ||
44 | The new major version `v5` is now the default. All new features will be added to this version and only bugfixes will be | |
45 | backported to `v4` (see `v4` branch). | |
46 | ||
47 | `v5` introduces a number of breaking changes compared to `v4`, see the | |
48 | [CHANGELOG](https://github.com/DataDog/datadog-go/blob/master/CHANGELOG.md#500--2021-10-01) for more information. | |
49 | ||
50 | Note that the import paths for `v5` and `v4` are different: | |
51 | - `v5`: github.com/DataDog/datadog-go/v5/statsd | |
52 | - `v4`: github.com/DataDog/datadog-go/statsd | |
53 | ||
54 | When migrating to the `v5` you will need to upgrade your imports. | |
55 | ||
56 | ## Installation | |
57 | ||
58 | Get the code with: | |
59 | ||
60 | ```shell | |
61 | $ go get github.com/DataDog/datadog-go/v5/statsd | |
62 | ``` | |
63 | ||
64 | Then create a new DogStatsD client: | |
12 | 65 | |
13 | 66 | ```go |
14 | import "github.com/DataDog/datadog-go/statsd" | |
67 | package main | |
68 | ||
69 | import ( | |
70 | "log" | |
71 | "github.com/DataDog/datadog-go/v5/statsd" | |
72 | ) | |
15 | 73 | |
16 | 74 | func main() { |
17 | c, err := statsd.New("127.0.0.1:8125") | |
75 | statsd, err := statsd.New("127.0.0.1:8125") | |
18 | 76 | if err != nil { |
19 | 77 | log.Fatal(err) |
20 | 78 | } |
21 | // prefix every metric with the app name | |
22 | c.Namespace = "flubber." | |
23 | // send the EC2 availability zone as a tag with every metric | |
24 | c.Tags = append(c.Tags, "region:us-east-1a") | |
25 | err = c.Gauge("request.duration", 1.2, nil, 1) | |
26 | // ... | |
27 | 79 | } |
28 | 80 | ``` |
29 | 81 | |
82 | Find a list of all the available options for your DogStatsD Client in the [Datadog-go godoc documentation](https://godoc.org/github.com/DataDog/datadog-go/v5/statsd#Option) or in [Datadog public DogStatsD documentation](https://docs.datadoghq.com/developers/dogstatsd/?code-lang=go#client-instantiation-parameters). | |
83 | ||
84 | ### Supported environment variables | |
85 | ||
86 | * If the `addr` parameter is empty, the client uses the `DD_AGENT_HOST` environment variables to build a target address. | |
87 | Example: `DD_AGENT_HOST=127.0.0.1:8125` for UDP, `DD_AGENT_HOST=unix:///path/to/socket` for UDS and `DD_AGENT_HOST=\\.\pipe\my_windows_pipe` for Windows named pipe. | |
88 | * If the `DD_ENTITY_ID` environment variable is found, its value is injected as a global `dd.internal.entity_id` tag. The Datadog Agent uses this tag to insert container tags into the metrics. | |
89 | ||
90 | To enable origin detection and set the `DD_ENTITY_ID` environment variable, add the following lines to your application manifest: | |
91 | ||
92 | ```yaml | |
93 | env: | |
94 | - name: DD_ENTITY_ID | |
95 | valueFrom: | |
96 | fieldRef: | |
97 | fieldPath: metadata.uid | |
98 | ``` | |
99 | ||
100 | * `DD_ENV`, `DD_SERVICE`, and `DD_VERSION` can be used by the statsd client to set `{env, service, version}` as global tags for all data emitted. | |
101 | ||
102 | ### Unix Domain Sockets Client | |
103 | ||
104 | Agent v6+ accepts packets through a Unix Socket datagram connection. Details about the advantages of using UDS over UDP are available in the [DogStatsD Unix Socket documentation](https://docs.datadoghq.com/developers/dogstatsd/unix_socket/). You can use this protocol by giving a `unix:///path/to/dsd.socket` address argument to the `New` constructor. | |
105 | ||
106 | ## Usage | |
107 | ||
108 | In order to use DogStatsD metrics, events, and Service Checks, the Agent must be [running and available](https://docs.datadoghq.com/developers/dogstatsd/?code-lang=go). | |
109 | ||
110 | ### Metrics | |
111 | ||
112 | After the client is created, you can start sending custom metrics to Datadog. See the dedicated [Metric Submission: DogStatsD documentation](https://docs.datadoghq.com/metrics/dogstatsd_metrics_submission/?code-lang=go) to see how to submit all supported metric types to Datadog with working code examples: | |
113 | ||
114 | * [Submit a COUNT metric](https://docs.datadoghq.com/metrics/dogstatsd_metrics_submission/?code-lang=go#count). | |
115 | * [Submit a GAUGE metric](https://docs.datadoghq.com/metrics/dogstatsd_metrics_submission/?code-lang=go#gauge). | |
116 | * [Submit a SET metric](https://docs.datadoghq.com/metrics/dogstatsd_metrics_submission/?code-lang=go#set) | |
117 | * [Submit a HISTOGRAM metric](https://docs.datadoghq.com/metrics/dogstatsd_metrics_submission/?code-lang=go#histogram) | |
118 | * [Submit a DISTRIBUTION metric](https://docs.datadoghq.com/metrics/dogstatsd_metrics_submission/?code-lang=go#distribution) | |
119 | ||
120 | Metric names must only contain ASCII alphanumerics, underscores, and periods. The client will not replace nor check for invalid characters. | |
121 | ||
Some options are supported when submitting metrics, like [applying a sample rate to your metrics](https://docs.datadoghq.com/metrics/dogstatsd_metrics_submission/?code-lang=go#metric-submission-options) or [tagging your metrics with your custom tags](https://docs.datadoghq.com/metrics/dogstatsd_metrics_submission/?code-lang=go#metric-tagging). Find all the available functions to report metrics [in the Datadog Go client GoDoc documentation](https://godoc.org/github.com/DataDog/datadog-go/v5/statsd#Client).
123 | ||
124 | ### Events | |
125 | ||
126 | After the client is created, you can start sending events to your Datadog Event Stream. See the dedicated [Event Submission: DogStatsD documentation](https://docs.datadoghq.com/developers/events/dogstatsd/?code-lang=go) to see how to submit an event to your Datadog Event Stream. | |
127 | ||
128 | ### Service Checks | |
129 | ||
130 | After the client is created, you can start sending Service Checks to Datadog. See the dedicated [Service Check Submission: DogStatsD documentation](https://docs.datadoghq.com/developers/service_checks/dogstatsd_service_checks_submission/?code-lang=go) to see how to submit a Service Check to Datadog. | |
131 | ||
132 | ## Client side aggregation | |
133 | ||
134 | Starting with version `5.0.0` (and `3.6.0` in beta), the client offers aggregation or value packing on the client side. | |
135 | ||
136 | This feature aims at reducing both the number of packets sent to the Agent and the packet drops in very high throughput | |
137 | scenarios. | |
138 | ||
139 | The aggregation window is 2s by default and can be changed through `WithAggregationInterval()` option. Note that the | |
140 | aggregation window on the Agent side is 10s for DogStatsD metrics. So for example, setting an aggregation window of 3s in | |
the client will produce a spike in your dashboard every 30 seconds for count metrics (as the third 10s bucket on the
142 | Agent will receive 4 samples from the client). | |
143 | ||
144 | Aggregation can be disabled using the `WithoutClientSideAggregation()` option. | |
145 | ||
146 | The telemetry `datadog.dogstatsd.client.metrics` is unchanged and represents the number of metrics before aggregation. | |
147 | New metrics `datadog.dogstatsd.client.aggregated_context` and `datadog.dogstatsd.client.aggregated_context_by_type` have | |
148 | been introduced. See the [Monitoring this client](#monitoring-this-client) section. | |
149 | ||
150 | ### "Basic" aggregation | |
151 | ||
152 | Enabled by default, the client will aggregate `gauge`, `count` and `set`. | |
153 | ||
154 | This can be disabled with the `WithoutClientSideAggregation()` option. | |
155 | ||
156 | ### "Extended" aggregation | |
157 | ||
158 | This feature is only compatible with Agent's version >=6.25.0 && <7.0.0 or Agent's versions >=7.25.0. | |
159 | ||
160 | Disabled by default, the client can also pack multiple values for `histogram`, `distribution` and `timing` in one | |
161 | message. Real aggregation is not possible for those types since the Agent also aggregates and two aggregation levels | |
162 | would change the final value sent to Datadog. | |
163 | ||
When this option is enabled, the client will buffer the metrics by combination of metric name and tags, and send them in the fewest number of messages.
165 | ||
For example, if we sample the same metric 3 times, instead of sending on the network:
167 | ||
168 | ``` | |
169 | my_distribution_metric:21|d|#all,my,tags | |
170 | my_distribution_metric:43.2|d|#all,my,tags | |
171 | my_distribution_metric:1657|d|#all,my,tags | |
172 | ``` | |
173 | ||
174 | The client will send only one message: | |
175 | ||
176 | ``` | |
177 | my_distribution_metric:21:43.2:1657|d|#all,my,tags | |
178 | ``` | |
179 | ||
180 | This will greatly reduce network usage and packet drops but will slightly increase the memory and CPU usage of the | |
181 | client. Looking at the telemetry metrics `datadog.dogstatsd.client.metrics_by_type` / | |
182 | `datadog.dogstatsd.client.aggregated_context_by_type` will show the aggregation ratio for each type. This is an | |
183 | interesting data to know how useful extended aggregation is to your app. | |
184 | ||
185 | This can be enabled with the `WithExtendedClientSideAggregation()` option. | |
186 | ||
187 | ## Performance / Metric drops | |
188 | ||
189 | ### Monitoring this client | |
190 | ||
191 | This client automatically injects telemetry about itself in the DogStatsD stream. | |
192 | Those metrics will not be counted as custom and will not be billed. This feature can be disabled using the `WithoutTelemetry` option. | |
193 | ||
194 | See [Telemetry documentation](https://docs.datadoghq.com/developers/dogstatsd/high_throughput/?code-lang=go#client-side-telemetry) to learn more about it. | |
195 | ||
196 | ### Tweaking kernel options | |
197 | ||
198 | In very high throughput environments it is possible to improve performance by changing the values of some kernel options. | |
199 | ||
200 | #### Unix Domain Sockets | |
201 | ||
202 | - `sysctl -w net.unix.max_dgram_qlen=X` - Set datagram queue size to X (default value is usually 10). | |
203 | - `sysctl -w net.core.wmem_max=X` - Set the max size of the send buffer for all the host sockets. | |
204 | ||
205 | ### Maximum packets size in high-throughput scenarios | |
206 | ||
207 | In order to have the most efficient use of this library in high-throughput scenarios, | |
208 | default values for the maximum packets size have already been set to have the best | |
209 | usage of the underlying network. | |
210 | However, if you perfectly know your network and you know that a different value for the maximum packets | |
211 | size should be used, you can set it with the option `WithMaxBytesPerPayload`. Example: | |
212 | ||
213 | ```go | |
214 | package main | |
215 | ||
216 | import ( | |
217 | "log" | |
218 | "github.com/DataDog/datadog-go/v5/statsd" | |
219 | ) | |
220 | ||
221 | func main() { | |
	client, err := statsd.New("127.0.0.1:8125", statsd.WithMaxBytesPerPayload(4096))
	if err != nil {
		log.Fatal(err)
	}
	defer client.Close()
}
227 | ``` | |
228 | ||
229 | ## Development | |
230 | ||
231 | Run the tests with: | |
232 | ||
233 | $ go test | |
234 | ||
30 | 235 | ## License |
31 | 236 | |
32 | All code distributed under the [MIT License](http://opensource.org/licenses/MIT) unless otherwise specified. | |
237 | datadog-go is released under the [MIT license](http://www.opensource.org/licenses/mit-license.php). | |
238 | ||
239 | ## Credits | |
240 | ||
241 | Original code by [ooyala](https://github.com/ooyala/go-dogstatsd). |
0 | package main | |
1 | ||
2 | import ( | |
3 | "log" | |
4 | ||
5 | "github.com/DataDog/datadog-go/v5/statsd" | |
6 | ) | |
7 | ||
8 | func main() { | |
9 | client, err := statsd.New("127.0.0.1:8125", | |
10 | statsd.WithTags([]string{"env:prod", "service:myservice"}), | |
11 | ) | |
12 | if err != nil { | |
13 | log.Fatal(err) | |
14 | } | |
15 | ||
16 | client.Histogram("my.metrics", 21, []string{"tag1", "tag2:value"}, 1) | |
17 | client.Close() | |
18 | } |
0 | module github.com/DataDog/datadog-go/v5 | |
1 | ||
2 | go 1.13 | |
3 | ||
4 | require ( | |
5 | github.com/Microsoft/go-winio v0.5.0 | |
6 | github.com/golang/mock v1.6.0 | |
7 | github.com/stretchr/testify v1.7.0 | |
8 | ) |
0 | github.com/Microsoft/go-winio v0.5.0 h1:Elr9Wn+sGKPlkaBvwu4mTrxtmOp3F3yV9qhaHbXGjwU= | |
1 | github.com/Microsoft/go-winio v0.5.0/go.mod h1:JPGBdM1cNvN/6ISo+n8V5iA4v8pBzdOpzfwIujj1a84= | |
2 | github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= | |
3 | github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= | |
4 | github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= | |
5 | github.com/golang/mock v1.6.0 h1:ErTB+efbowRARo13NNdxyJji2egdxLGQhRaY+DUumQc= | |
6 | github.com/golang/mock v1.6.0/go.mod h1:p6yTPP+5HYm5mzsMV8JkE6ZKdX+/wYM6Hr+LicevLPs= | |
7 | github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= | |
8 | github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= | |
9 | github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= | |
10 | github.com/sirupsen/logrus v1.7.0/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0= | |
11 | github.com/stretchr/objx v0.1.0 h1:4G4v2dO3VZwixGIRoQ5Lfboy6nUhCyYzaqnIAPPhYs4= | |
12 | github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= | |
13 | github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= | |
14 | github.com/stretchr/testify v1.7.0 h1:nwc3DEeHmmLAfoZucVR881uASk0Mfjw8xYJ99tb5CcY= | |
15 | github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= | |
16 | github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= | |
17 | golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= | |
18 | golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= | |
19 | golang.org/x/mod v0.4.2 h1:Gz96sIWK3OalVv/I/qNygP42zyoKp3xptRVCWRFEBvo= | |
20 | golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= | |
21 | golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= | |
22 | golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= | |
23 | golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM= | |
24 | golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= | |
25 | golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= | |
26 | golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= | |
27 | golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= | |
28 | golang.org/x/sys v0.0.0-20191026070338-33540a1f6037/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= | |
29 | golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= | |
30 | golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= | |
31 | golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= | |
32 | golang.org/x/sys v0.0.0-20210510120138-977fb7262007 h1:gG67DSER+11cZvqIMb8S8bt0vZtiN6xWYARwirrOSfE= | |
33 | golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= | |
34 | golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= | |
35 | golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= | |
36 | golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= | |
37 | golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= | |
38 | golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= | |
39 | golang.org/x/tools v0.1.1 h1:wGiQel/hW0NnEkJUk8lbzkX2gFJU6PFxf1v5OlCfuOs= | |
40 | golang.org/x/tools v0.1.1/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= | |
41 | golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= | |
42 | golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= | |
43 | golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1 h1:go1bK/D/BFZV2I8cIQd1NKEZ+0owSTG1fDTci4IqFcE= | |
44 | golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= | |
45 | gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM= | |
46 | gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= | |
47 | gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c h1:dUUwHk2QECo/6vqA44rthZ8ie2QXMNeKRTHCNY2nXvo= | |
48 | gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= |
1 | 1 | |
2 | 2 | Package `statsd` provides a Go [dogstatsd](http://docs.datadoghq.com/guides/dogstatsd/) client. Dogstatsd extends Statsd, adding tags |
3 | 3 | and histograms. |
4 | ||
5 | ## Get the code | |
6 | ||
7 | $ go get github.com/DataDog/datadog-go/statsd | |
8 | ||
9 | ## Usage | |
10 | ||
11 | ```go | |
12 | // Create the client | |
13 | c, err := statsd.New("127.0.0.1:8125") | |
14 | if err != nil { | |
15 | log.Fatal(err) | |
16 | } | |
17 | // Prefix every metric with the app name | |
18 | c.Namespace = "flubber." | |
19 | // Send the EC2 availability zone as a tag with every metric | |
20 | c.Tags = append(c.Tags, "us-east-1a") | |
21 | ||
22 | // Do some metrics! | |
23 | err = c.Gauge("request.queue_depth", 12, nil, 1) | |
24 | err = c.Timing("request.duration", duration, nil, 1) // Uses a time.Duration! | |
25 | err = c.TimeInMilliseconds("request", 12, nil, 1) | |
26 | err = c.Incr("request.count_total", nil, 1) | |
27 | err = c.Decr("request.count_total", nil, 1) | |
28 | err = c.Count("request.count_total", 2, nil, 1) | |
29 | ``` | |
30 | ||
31 | ## Buffering Client | |
32 | ||
DogStatsD accepts packets with multiple statsd payloads in them. Using the BufferingClient via `NewBufferingClient` will buffer up commands and send them when the buffer is full or after 100 msec.
34 | ||
35 | ## Unix Domain Sockets Client | |
36 | ||
37 | DogStatsD version 6 accepts packets through a Unix Socket datagram connection. You can use this protocol by giving a | |
38 | `unix:///path/to/dsd.socket` addr argument to the `New` or `NewBufferingClient`. | |
39 | ||
40 | With this protocol, writes can become blocking if the server's receiving buffer is full. Our default behaviour is to | |
41 | timeout and drop the packet after 1 ms. You can set a custom timeout duration via the `SetWriteTimeout` method. | |
42 | ||
43 | The default mode is to pass write errors from the socket to the caller. This includes write errors the library will | |
44 | automatically recover from (DogStatsD server not ready yet or is restarting). You can drop these errors and emulate | |
45 | the UDP behaviour by setting the `SkipErrors` property to `true`. Please note that packets will be dropped in both modes. | |
46 | ||
47 | ## Development | |
48 | ||
49 | Run the tests with: | |
50 | ||
51 | $ go test | |
52 | ||
53 | ## Documentation | |
54 | ||
55 | Please see: http://godoc.org/github.com/DataDog/datadog-go/statsd | |
56 | ||
57 | ## License | |
58 | ||
59 | go-dogstatsd is released under the [MIT license](http://www.opensource.org/licenses/mit-license.php). | |
60 | ||
61 | ## Credits | |
62 | ||
63 | Original code by [ooyala](https://github.com/ooyala/go-dogstatsd). |
0 | package statsd | |
1 | ||
2 | import ( | |
3 | "strings" | |
4 | "sync" | |
5 | "sync/atomic" | |
6 | "time" | |
7 | ) | |
8 | ||
// Per-metric-type maps from an aggregation context (the "name:tag1,tag2"
// key produced by getContextAndTags) to the in-flight aggregated value.
type (
	countsMap         map[string]*countMetric
	gaugesMap         map[string]*gaugeMetric
	setsMap           map[string]*setMetric
	bufferedMetricMap map[string]*bufferedMetric
)
15 | ||
// aggregator buffers metrics on the client side between flushes in order
// to reduce the number of payloads sent to the Agent.
type aggregator struct {
	// Number of contexts flushed so far, incremented with sync/atomic and
	// reported through flushTelemetryMetrics. Placed first in the struct,
	// presumably to guarantee 64-bit alignment on 32-bit platforms —
	// confirm before reordering fields.
	nbContextGauge uint64
	nbContextCount uint64
	nbContextSet   uint64

	// One lock per metric family so samplers of different types do not
	// contend with each other.
	countsM sync.RWMutex
	gaugesM sync.RWMutex
	setsM   sync.RWMutex

	// Live aggregation state, keyed by context ("name:tag1,tag2").
	gauges        gaugesMap
	counts        countsMap
	sets          setsMap
	histograms    bufferedMetricContexts
	distributions bufferedMetricContexts
	timings       bufferedMetricContexts

	// closed stops the periodic flush goroutine started by start().
	closed chan struct{}

	client *Client

	// aggregator implements channelMode mechanism to receive histograms,
	// distributions and timings. Since they need sampling they need to
	// lock for random. When using both channelMode and ExtendedAggregation
	// we don't want goroutine to fight over the lock.
	inputMetrics    chan metric
	stopChannelMode chan struct{}
	wg              sync.WaitGroup
}
44 | ||
45 | func newAggregator(c *Client) *aggregator { | |
46 | return &aggregator{ | |
47 | client: c, | |
48 | counts: countsMap{}, | |
49 | gauges: gaugesMap{}, | |
50 | sets: setsMap{}, | |
51 | histograms: newBufferedContexts(newHistogramMetric), | |
52 | distributions: newBufferedContexts(newDistributionMetric), | |
53 | timings: newBufferedContexts(newTimingMetric), | |
54 | closed: make(chan struct{}), | |
55 | stopChannelMode: make(chan struct{}), | |
56 | } | |
57 | } | |
58 | ||
59 | func (a *aggregator) start(flushInterval time.Duration) { | |
60 | ticker := time.NewTicker(flushInterval) | |
61 | ||
62 | go func() { | |
63 | for { | |
64 | select { | |
65 | case <-ticker.C: | |
66 | a.flush() | |
67 | case <-a.closed: | |
68 | return | |
69 | } | |
70 | } | |
71 | }() | |
72 | } | |
73 | ||
74 | func (a *aggregator) startReceivingMetric(bufferSize int, nbWorkers int) { | |
75 | a.inputMetrics = make(chan metric, bufferSize) | |
76 | for i := 0; i < nbWorkers; i++ { | |
77 | a.wg.Add(1) | |
78 | go a.pullMetric() | |
79 | } | |
80 | } | |
81 | ||
// stopReceivingMetric signals every pullMetric worker to exit (by closing
// stopChannelMode) and blocks until they have all finished. Counterpart
// of startReceivingMetric; must not be called more than once.
func (a *aggregator) stopReceivingMetric() {
	close(a.stopChannelMode)
	a.wg.Wait()
}
86 | ||
// stop terminates the periodic flush goroutine started by start(). The
// channel is unbuffered, so this send blocks until that goroutine receives
// it; calling stop without a prior start would block forever.
func (a *aggregator) stop() {
	a.closed <- struct{}{}
}
90 | ||
// pullMetric is the worker loop used in channelMode: it consumes samples
// from inputMetrics and routes each one to the matching buffered-metric
// aggregation until stopChannelMode is closed.
func (a *aggregator) pullMetric() {
	for {
		select {
		case m := <-a.inputMetrics:
			switch m.metricType {
			case histogram:
				a.histogram(m.name, m.fvalue, m.tags, m.rate)
			case distribution:
				a.distribution(m.name, m.fvalue, m.tags, m.rate)
			case timing:
				a.timing(m.name, m.fvalue, m.tags, m.rate)
			}
		case <-a.stopChannelMode:
			// NOTE(review): samples still buffered in inputMetrics may be
			// dropped at shutdown, since select picks among ready cases at
			// random — confirm this is acceptable.
			a.wg.Done()
			return
		}
	}
}
109 | ||
110 | func (a *aggregator) flush() { | |
111 | for _, m := range a.flushMetrics() { | |
112 | a.client.sendBlocking(m) | |
113 | } | |
114 | } | |
115 | ||
116 | func (a *aggregator) flushTelemetryMetrics(t *Telemetry) { | |
117 | if a == nil { | |
118 | // aggregation is disabled | |
119 | return | |
120 | } | |
121 | ||
122 | t.AggregationNbContextGauge = atomic.LoadUint64(&a.nbContextGauge) | |
123 | t.AggregationNbContextCount = atomic.LoadUint64(&a.nbContextCount) | |
124 | t.AggregationNbContextSet = atomic.LoadUint64(&a.nbContextSet) | |
125 | t.AggregationNbContextHistogram = a.histograms.getNbContext() | |
126 | t.AggregationNbContextDistribution = a.distributions.getNbContext() | |
127 | t.AggregationNbContextTiming = a.timings.getNbContext() | |
128 | } | |
129 | ||
// flushMetrics swaps out every aggregated-metric container under its lock,
// serializes the detached values into a slice of metric ready to be sent,
// and updates the per-type context counters read by flushTelemetryMetrics.
func (a *aggregator) flushMetrics() []metric {
	metrics := []metric{}

	// We reset the values to avoid sending 'zero' values for metrics not
	// sampled during this flush interval

	// Each map is swapped for a fresh one while holding the write lock and
	// serialized afterwards, so samplers are only blocked for the swap.
	// flushUnsafe is safe here because the detached maps are no longer
	// reachable by other goroutines.
	a.setsM.Lock()
	sets := a.sets
	a.sets = setsMap{}
	a.setsM.Unlock()

	// A set may expand to several metrics, hence the variadic append.
	for _, s := range sets {
		metrics = append(metrics, s.flushUnsafe()...)
	}

	a.gaugesM.Lock()
	gauges := a.gauges
	a.gauges = gaugesMap{}
	a.gaugesM.Unlock()

	for _, g := range gauges {
		metrics = append(metrics, g.flushUnsafe())
	}

	a.countsM.Lock()
	counts := a.counts
	a.counts = countsMap{}
	a.countsM.Unlock()

	for _, c := range counts {
		metrics = append(metrics, c.flushUnsafe())
	}

	metrics = a.histograms.flush(metrics)
	metrics = a.distributions.flush(metrics)
	metrics = a.timings.flush(metrics)

	// Record how many contexts were flushed, for client telemetry.
	atomic.AddUint64(&a.nbContextCount, uint64(len(counts)))
	atomic.AddUint64(&a.nbContextGauge, uint64(len(gauges)))
	atomic.AddUint64(&a.nbContextSet, uint64(len(sets)))
	return metrics
}
172 | ||
173 | func getContext(name string, tags []string) string { | |
174 | c, _ := getContextAndTags(name, tags) | |
175 | return c | |
176 | } | |
177 | ||
178 | func getContextAndTags(name string, tags []string) (string, string) { | |
179 | if len(tags) == 0 { | |
180 | return name + nameSeparatorSymbol, "" | |
181 | } | |
182 | n := len(name) + len(nameSeparatorSymbol) + len(tagSeparatorSymbol)*(len(tags)-1) | |
183 | for _, s := range tags { | |
184 | n += len(s) | |
185 | } | |
186 | ||
187 | var sb strings.Builder | |
188 | sb.Grow(n) | |
189 | sb.WriteString(name) | |
190 | sb.WriteString(nameSeparatorSymbol) | |
191 | sb.WriteString(tags[0]) | |
192 | for _, s := range tags[1:] { | |
193 | sb.WriteString(tagSeparatorSymbol) | |
194 | sb.WriteString(s) | |
195 | } | |
196 | ||
197 | s := sb.String() | |
198 | ||
199 | return s, s[len(name)+len(nameSeparatorSymbol):] | |
200 | } | |
201 | ||
// count records a COUNT sample for the context derived from name+tags,
// creating the countMetric on first use. Fast path: a read lock plus
// sample() when the context already exists. Slow path: take the write
// lock and create the metric, re-checking first because another goroutine
// may have created it between the RUnlock and the Lock. Always returns nil.
func (a *aggregator) count(name string, value int64, tags []string) error {
	context := getContext(name, tags)
	a.countsM.RLock()
	if count, found := a.counts[context]; found {
		count.sample(value)
		a.countsM.RUnlock()
		return nil
	}
	a.countsM.RUnlock()

	a.countsM.Lock()
	// Check that another goroutine hasn't created the value between the RUnlock and the Lock
	if count, found := a.counts[context]; found {
		count.sample(value)
		a.countsM.Unlock()
		return nil
	}

	a.counts[context] = newCountMetric(name, value, tags)
	a.countsM.Unlock()
	return nil
}
224 | ||
// gauge records a GAUGE sample for the context derived from name+tags,
// creating the gaugeMetric on first use. Same double-checked locking
// scheme as count; unlike count, the candidate metric is built before
// taking the write lock to keep the critical section short. Always
// returns nil.
func (a *aggregator) gauge(name string, value float64, tags []string) error {
	context := getContext(name, tags)
	a.gaugesM.RLock()
	if gauge, found := a.gauges[context]; found {
		gauge.sample(value)
		a.gaugesM.RUnlock()
		return nil
	}
	a.gaugesM.RUnlock()

	gauge := newGaugeMetric(name, value, tags)

	a.gaugesM.Lock()
	// Check that another goroutine hasn't created the value between the RUnlock and the Lock
	// (the inner 'gauge' intentionally shadows the candidate built above).
	if gauge, found := a.gauges[context]; found {
		gauge.sample(value)
		a.gaugesM.Unlock()
		return nil
	}
	a.gauges[context] = gauge
	a.gaugesM.Unlock()
	return nil
}
248 | ||
// set records a SET sample for the context derived from name+tags,
// creating the setMetric on first use. Same double-checked locking scheme
// as count. Always returns nil.
func (a *aggregator) set(name string, value string, tags []string) error {
	context := getContext(name, tags)
	a.setsM.RLock()
	if set, found := a.sets[context]; found {
		set.sample(value)
		a.setsM.RUnlock()
		return nil
	}
	a.setsM.RUnlock()

	a.setsM.Lock()
	// Check that another goroutine hasn't created the value between the RUnlock and the Lock
	if set, found := a.sets[context]; found {
		set.sample(value)
		a.setsM.Unlock()
		return nil
	}
	a.sets[context] = newSetMetric(name, value, tags)
	a.setsM.Unlock()
	return nil
}
270 | ||
// Only histograms, distributions and timings are sampled with a rate since
// we only pack them in one message instead of aggregating them. Discarding
// the sample rate would have an impact on the CPU and memory usage of the
// Agent.

// bufferedMetricSampleFunc is a type alias for Client.sendToAggregator.
type bufferedMetricSampleFunc func(name string, value float64, tags []string, rate float64) error
277 | ||
// histogram forwards a HISTOGRAM sample (with its rate) to the buffered
// histogram contexts, where values are packed rather than aggregated.
func (a *aggregator) histogram(name string, value float64, tags []string, rate float64) error {
	return a.histograms.sample(name, value, tags, rate)
}
281 | ||
// distribution forwards a DISTRIBUTION sample (with its rate) to the
// buffered distribution contexts, where values are packed rather than
// aggregated.
func (a *aggregator) distribution(name string, value float64, tags []string, rate float64) error {
	return a.distributions.sample(name, value, tags, rate)
}
285 | ||
// timing forwards a TIMING sample (with its rate) to the buffered timing
// contexts, where values are packed rather than aggregated.
func (a *aggregator) timing(name string, value float64, tags []string, rate float64) error {
	return a.timings.sample(name, value, tags, rate)
}
0 | package statsd | |
1 | ||
2 | import ( | |
3 | "sort" | |
4 | "strings" | |
5 | "sync" | |
6 | "testing" | |
7 | ||
8 | "github.com/stretchr/testify/assert" | |
9 | ) | |
10 | ||
// TestAggregatorSample verifies that samples of every metric type are
// stored under a single context keyed as "name:tag1,tag2". The loop runs
// twice to check that re-sampling reuses the existing context instead of
// creating a new one.
func TestAggregatorSample(t *testing.T) {
	a := newAggregator(nil)

	tags := []string{"tag1", "tag2"}

	for i := 0; i < 2; i++ {
		a.gauge("gaugeTest", 21, tags)
		assert.Len(t, a.gauges, 1)
		assert.Contains(t, a.gauges, "gaugeTest:tag1,tag2")

		a.count("countTest", 21, tags)
		assert.Len(t, a.counts, 1)
		assert.Contains(t, a.counts, "countTest:tag1,tag2")

		a.set("setTest", "value1", tags)
		assert.Len(t, a.sets, 1)
		assert.Contains(t, a.sets, "setTest:tag1,tag2")

		a.set("setTest", "value1", tags)
		assert.Len(t, a.sets, 1)
		assert.Contains(t, a.sets, "setTest:tag1,tag2")

		a.histogram("histogramTest", 21, tags, 1)
		assert.Len(t, a.histograms.values, 1)
		assert.Contains(t, a.histograms.values, "histogramTest:tag1,tag2")

		a.distribution("distributionTest", 21, tags, 1)
		assert.Len(t, a.distributions.values, 1)
		assert.Contains(t, a.distributions.values, "distributionTest:tag1,tag2")

		a.timing("timingTest", 21, tags, 1)
		assert.Len(t, a.timings.values, 1)
		assert.Contains(t, a.timings.values, "timingTest:tag1,tag2")
	}
}
46 | ||
// TestAggregatorFlush samples several values per metric type, flushes, and
// checks that (a) every container is emptied by the flush and (b) the
// flushed metrics carry the expected aggregated values. The metrics slice
// is sorted into a deterministic order before comparison.
func TestAggregatorFlush(t *testing.T) {
	a := newAggregator(nil)

	tags := []string{"tag1", "tag2"}

	a.gauge("gaugeTest1", 21, tags)
	a.gauge("gaugeTest1", 10, tags)
	a.gauge("gaugeTest2", 15, tags)

	a.count("countTest1", 21, tags)
	a.count("countTest1", 10, tags)
	a.count("countTest2", 1, tags)

	a.set("setTest1", "value1", tags)
	a.set("setTest1", "value1", tags)
	a.set("setTest1", "value2", tags)
	a.set("setTest2", "value1", tags)

	a.histogram("histogramTest1", 21, tags, 1)
	a.histogram("histogramTest1", 22, tags, 1)
	a.histogram("histogramTest2", 23, tags, 1)

	a.distribution("distributionTest1", 21, tags, 1)
	a.distribution("distributionTest1", 22, tags, 1)
	a.distribution("distributionTest2", 23, tags, 1)

	a.timing("timingTest1", 21, tags, 1)
	a.timing("timingTest1", 22, tags, 1)
	a.timing("timingTest2", 23, tags, 1)

	metrics := a.flushMetrics()

	assert.Len(t, a.gauges, 0)
	assert.Len(t, a.counts, 0)
	assert.Len(t, a.sets, 0)
	assert.Len(t, a.histograms.values, 0)
	assert.Len(t, a.distributions.values, 0)
	assert.Len(t, a.timings.values, 0)

	assert.Len(t, metrics, 13)

	// Sort by type, then name, then value so the expected slice below can
	// be compared element by element.
	sort.Slice(metrics, func(i, j int) bool {
		if metrics[i].metricType == metrics[j].metricType {
			res := strings.Compare(metrics[i].name, metrics[j].name)
			// this happens for set
			if res == 0 {
				return strings.Compare(metrics[i].svalue, metrics[j].svalue) != 1
			}
			return res != 1
		}
		return metrics[i].metricType < metrics[j].metricType
	})

	assert.Equal(t, []metric{
		metric{
			metricType: gauge,
			name:       "gaugeTest1",
			tags:       tags,
			rate:       1,
			fvalue:     float64(10),
		},
		metric{
			metricType: gauge,
			name:       "gaugeTest2",
			tags:       tags,
			rate:       1,
			fvalue:     float64(15),
		},
		metric{
			metricType: count,
			name:       "countTest1",
			tags:       tags,
			rate:       1,
			ivalue:     int64(31),
		},
		metric{
			metricType: count,
			name:       "countTest2",
			tags:       tags,
			rate:       1,
			ivalue:     int64(1),
		},
		metric{
			metricType: histogramAggregated,
			name:       "histogramTest1",
			stags:      strings.Join(tags, tagSeparatorSymbol),
			rate:       1,
			fvalues:    []float64{21.0, 22.0},
		},
		metric{
			metricType: histogramAggregated,
			name:       "histogramTest2",
			stags:      strings.Join(tags, tagSeparatorSymbol),
			rate:       1,
			fvalues:    []float64{23.0},
		},
		metric{
			metricType: distributionAggregated,
			name:       "distributionTest1",
			stags:      strings.Join(tags, tagSeparatorSymbol),
			rate:       1,
			fvalues:    []float64{21.0, 22.0},
		},
		metric{
			metricType: distributionAggregated,
			name:       "distributionTest2",
			stags:      strings.Join(tags, tagSeparatorSymbol),
			rate:       1,
			fvalues:    []float64{23.0},
		},
		metric{
			metricType: set,
			name:       "setTest1",
			tags:       tags,
			rate:       1,
			svalue:     "value1",
		},
		metric{
			metricType: set,
			name:       "setTest1",
			tags:       tags,
			rate:       1,
			svalue:     "value2",
		},
		metric{
			metricType: set,
			name:       "setTest2",
			tags:       tags,
			rate:       1,
			svalue:     "value1",
		},
		metric{
			metricType: timingAggregated,
			name:       "timingTest1",
			stags:      strings.Join(tags, tagSeparatorSymbol),
			rate:       1,
			fvalues:    []float64{21.0, 22.0},
		},
		metric{
			metricType: timingAggregated,
			name:       "timingTest2",
			stags:      strings.Join(tags, tagSeparatorSymbol),
			rate:       1,
			fvalues:    []float64{23.0},
		},
	},
		metrics)

}
196 | ||
// TestAggregatorFlushConcurrency runs samplers and flushers concurrently.
// It makes no explicit assertions; its purpose is to let `go test -race`
// detect data races in the aggregator's locking.
func TestAggregatorFlushConcurrency(t *testing.T) {
	a := newAggregator(nil)

	var wg sync.WaitGroup
	wg.Add(10)

	tags := []string{"tag1", "tag2"}

	// 5 goroutines sampling every metric type...
	for i := 0; i < 5; i++ {
		go func() {
			defer wg.Done()

			a.gauge("gaugeTest1", 21, tags)
			a.count("countTest1", 21, tags)
			a.set("setTest1", "value1", tags)
			a.histogram("histogramTest1", 21, tags, 1)
			a.distribution("distributionTest1", 21, tags, 1)
			a.timing("timingTest1", 21, tags, 1)
		}()
	}

	// ...while 5 goroutines flush concurrently.
	for i := 0; i < 5; i++ {
		go func() {
			defer wg.Done()

			a.flushMetrics()
		}()
	}

	wg.Wait()
}
228 | ||
229 | func TestGetContextAndTags(t *testing.T) { | |
230 | tests := []struct { | |
231 | testName string | |
232 | name string | |
233 | tags []string | |
234 | wantContext string | |
235 | wantTags string | |
236 | }{ | |
237 | { | |
238 | testName: "no tags", | |
239 | name: "name", | |
240 | tags: nil, | |
241 | wantContext: "name:", | |
242 | wantTags: "", | |
243 | }, | |
244 | { | |
245 | testName: "one tag", | |
246 | name: "name", | |
247 | tags: []string{"tag1"}, | |
248 | wantContext: "name:tag1", | |
249 | wantTags: "tag1", | |
250 | }, | |
251 | { | |
252 | testName: "two tags", | |
253 | name: "name", | |
254 | tags: []string{"tag1", "tag2"}, | |
255 | wantContext: "name:tag1,tag2", | |
256 | wantTags: "tag1,tag2", | |
257 | }, | |
258 | } | |
259 | for _, test := range tests { | |
260 | t.Run(test.testName, func(t *testing.T) { | |
261 | gotContext, gotTags := getContextAndTags(test.name, test.tags) | |
262 | assert.Equal(t, test.wantContext, gotContext) | |
263 | assert.Equal(t, test.wantTags, gotTags) | |
264 | }) | |
265 | } | |
266 | } |
0 | // +build !go1.13 | |
1 | ||
2 | package statsd_test | |
3 | ||
4 | import "testing" | |
5 | ||
// reportMetric is a no-op stub used on Go versions before 1.13 (see the
// `!go1.13` build tag above), where (*testing.B).ReportMetric does not exist.
func reportMetric(*testing.B, float64, string) {}
0 | // +build go1.13 | |
1 | ||
2 | package statsd_test | |
3 | ||
4 | import "testing" | |
5 | ||
// reportMetric forwards a custom benchmark metric to the testing framework.
// Only compiled on Go 1.13+ (see the build tag above), where
// (*testing.B).ReportMetric is available.
func reportMetric(b *testing.B, value float64, unit string) {
	b.ReportMetric(value, unit)
}
0 | package statsd | |
1 | ||
2 | import ( | |
3 | "strconv" | |
4 | ) | |
5 | ||
// MessageTooLongError is returned when a sample, event or service check is too
// large once serialized. See the WithMaxBytesPerPayload option for more details.
type MessageTooLongError struct{}

// Error implements the error interface.
func (e MessageTooLongError) Error() string {
	return "message too long. See 'WithMaxBytesPerPayload' documentation."
}

// errBufferFull is the shared sentinel value returned by all buffer writers
// when a message cannot fit.
var errBufferFull = MessageTooLongError{}
15 | ||
// partialWriteError signals that only a prefix of the provided values could be
// serialized into the current buffer.
type partialWriteError string

// Error implements the error interface.
func (e partialWriteError) Error() string { return string(e) }

// errPartialWrite is returned by writeAggregated when it runs out of room
// before serializing every value.
const errPartialWrite = partialWriteError("value partially written")
21 | ||
// metricOverhead is extra capacity pre-allocated on top of maxSize so that an
// element which overflows the limit can still be appended before being rolled
// back (see validateNewElement) without forcing a slice re-allocation.
const metricOverhead = 512

// statsdBuffer is a buffer containing statsd messages
// this struct methods are NOT safe for concurrent use
type statsdBuffer struct {
	buffer       []byte // serialized messages, each terminated by '\n'
	maxSize      int    // soft cap on len(buffer); writes exceeding it are rolled back
	maxElements  int    // maximum number of messages the buffer may hold
	elementCount int    // number of messages currently serialized
}
32 | ||
33 | func newStatsdBuffer(maxSize, maxElements int) *statsdBuffer { | |
34 | return &statsdBuffer{ | |
35 | buffer: make([]byte, 0, maxSize+metricOverhead), // pre-allocate the needed size + metricOverhead to avoid having Go re-allocate on it's own if an element does not fit | |
36 | maxSize: maxSize, | |
37 | maxElements: maxElements, | |
38 | } | |
39 | } | |
40 | ||
41 | func (b *statsdBuffer) writeGauge(namespace string, globalTags []string, name string, value float64, tags []string, rate float64) error { | |
42 | if b.elementCount >= b.maxElements { | |
43 | return errBufferFull | |
44 | } | |
45 | originalBuffer := b.buffer | |
46 | b.buffer = appendGauge(b.buffer, namespace, globalTags, name, value, tags, rate) | |
47 | b.writeSeparator() | |
48 | return b.validateNewElement(originalBuffer) | |
49 | } | |
50 | ||
51 | func (b *statsdBuffer) writeCount(namespace string, globalTags []string, name string, value int64, tags []string, rate float64) error { | |
52 | if b.elementCount >= b.maxElements { | |
53 | return errBufferFull | |
54 | } | |
55 | originalBuffer := b.buffer | |
56 | b.buffer = appendCount(b.buffer, namespace, globalTags, name, value, tags, rate) | |
57 | b.writeSeparator() | |
58 | return b.validateNewElement(originalBuffer) | |
59 | } | |
60 | ||
61 | func (b *statsdBuffer) writeHistogram(namespace string, globalTags []string, name string, value float64, tags []string, rate float64) error { | |
62 | if b.elementCount >= b.maxElements { | |
63 | return errBufferFull | |
64 | } | |
65 | originalBuffer := b.buffer | |
66 | b.buffer = appendHistogram(b.buffer, namespace, globalTags, name, value, tags, rate) | |
67 | b.writeSeparator() | |
68 | return b.validateNewElement(originalBuffer) | |
69 | } | |
70 | ||
// writeAggregated serialized as many values as possible in the current buffer and return the position in values where it stopped.
// It returns (0, errBufferFull) when nothing could be written (the buffer is
// left untouched) and (position, errPartialWrite) when only values[:position]
// fit; the caller is then expected to flush and retry with the rest.
// tagSize is the serialized size of the tags, reserved up front so the
// trailing tag section is guaranteed to fit once values are appended.
func (b *statsdBuffer) writeAggregated(metricSymbol []byte, namespace string, globalTags []string, name string, values []float64, tags string, tagSize int, precision int) (int, error) {
	if b.elementCount >= b.maxElements {
		return 0, errBufferFull
	}

	originalBuffer := b.buffer
	b.buffer = appendHeader(b.buffer, namespace, name)

	// buffer already full: even the header plus the tags would not fit
	if len(b.buffer)+tagSize > b.maxSize {
		b.buffer = originalBuffer
		return 0, errBufferFull
	}

	// We add as many value as possible
	var position int
	for idx, v := range values {
		previousBuffer := b.buffer
		if idx != 0 {
			b.buffer = append(b.buffer, ':')
		}

		b.buffer = strconv.AppendFloat(b.buffer, v, 'f', precision, 64)

		// Should we stop serializing and switch to another buffer
		if len(b.buffer)+tagSize > b.maxSize {
			b.buffer = previousBuffer
			break
		}
		position = idx + 1
	}

	// we could not add a single value: roll the whole message back
	if position == 0 {
		b.buffer = originalBuffer
		return 0, errBufferFull
	}

	b.buffer = append(b.buffer, '|')
	b.buffer = append(b.buffer, metricSymbol...)
	b.buffer = appendTagsAggregated(b.buffer, globalTags, tags)
	b.buffer = appendContainerID(b.buffer)
	b.writeSeparator()
	b.elementCount++

	if position != len(values) {
		return position, errPartialWrite
	}
	return position, nil

}
123 | ||
124 | func (b *statsdBuffer) writeDistribution(namespace string, globalTags []string, name string, value float64, tags []string, rate float64) error { | |
125 | if b.elementCount >= b.maxElements { | |
126 | return errBufferFull | |
127 | } | |
128 | originalBuffer := b.buffer | |
129 | b.buffer = appendDistribution(b.buffer, namespace, globalTags, name, value, tags, rate) | |
130 | b.writeSeparator() | |
131 | return b.validateNewElement(originalBuffer) | |
132 | } | |
133 | ||
134 | func (b *statsdBuffer) writeSet(namespace string, globalTags []string, name string, value string, tags []string, rate float64) error { | |
135 | if b.elementCount >= b.maxElements { | |
136 | return errBufferFull | |
137 | } | |
138 | originalBuffer := b.buffer | |
139 | b.buffer = appendSet(b.buffer, namespace, globalTags, name, value, tags, rate) | |
140 | b.writeSeparator() | |
141 | return b.validateNewElement(originalBuffer) | |
142 | } | |
143 | ||
144 | func (b *statsdBuffer) writeTiming(namespace string, globalTags []string, name string, value float64, tags []string, rate float64) error { | |
145 | if b.elementCount >= b.maxElements { | |
146 | return errBufferFull | |
147 | } | |
148 | originalBuffer := b.buffer | |
149 | b.buffer = appendTiming(b.buffer, namespace, globalTags, name, value, tags, rate) | |
150 | b.writeSeparator() | |
151 | return b.validateNewElement(originalBuffer) | |
152 | } | |
153 | ||
154 | func (b *statsdBuffer) writeEvent(event *Event, globalTags []string) error { | |
155 | if b.elementCount >= b.maxElements { | |
156 | return errBufferFull | |
157 | } | |
158 | originalBuffer := b.buffer | |
159 | b.buffer = appendEvent(b.buffer, event, globalTags) | |
160 | b.writeSeparator() | |
161 | return b.validateNewElement(originalBuffer) | |
162 | } | |
163 | ||
164 | func (b *statsdBuffer) writeServiceCheck(serviceCheck *ServiceCheck, globalTags []string) error { | |
165 | if b.elementCount >= b.maxElements { | |
166 | return errBufferFull | |
167 | } | |
168 | originalBuffer := b.buffer | |
169 | b.buffer = appendServiceCheck(b.buffer, serviceCheck, globalTags) | |
170 | b.writeSeparator() | |
171 | return b.validateNewElement(originalBuffer) | |
172 | } | |
173 | ||
174 | func (b *statsdBuffer) validateNewElement(originalBuffer []byte) error { | |
175 | if len(b.buffer) > b.maxSize { | |
176 | b.buffer = originalBuffer | |
177 | return errBufferFull | |
178 | } | |
179 | b.elementCount++ | |
180 | return nil | |
181 | } | |
182 | ||
183 | func (b *statsdBuffer) writeSeparator() { | |
184 | b.buffer = append(b.buffer, '\n') | |
185 | } | |
186 | ||
187 | func (b *statsdBuffer) reset() { | |
188 | b.buffer = b.buffer[:0] | |
189 | b.elementCount = 0 | |
190 | } | |
191 | ||
192 | func (b *statsdBuffer) bytes() []byte { | |
193 | return b.buffer | |
194 | } |
0 | package statsd | |
1 | ||
// bufferPool is a fixed-capacity recycling pool of statsdBuffer, implemented
// with a buffered channel so borrowing and returning never block.
type bufferPool struct {
	pool              chan *statsdBuffer // available, already-reset buffers
	bufferMaxSize     int                // byte capacity for each pooled buffer
	bufferMaxElements int                // element capacity for each pooled buffer
}
7 | ||
8 | func newBufferPool(poolSize, bufferMaxSize, bufferMaxElements int) *bufferPool { | |
9 | p := &bufferPool{ | |
10 | pool: make(chan *statsdBuffer, poolSize), | |
11 | bufferMaxSize: bufferMaxSize, | |
12 | bufferMaxElements: bufferMaxElements, | |
13 | } | |
14 | for i := 0; i < poolSize; i++ { | |
15 | p.addNewBuffer() | |
16 | } | |
17 | return p | |
18 | } | |
19 | ||
20 | func (p *bufferPool) addNewBuffer() { | |
21 | p.pool <- newStatsdBuffer(p.bufferMaxSize, p.bufferMaxElements) | |
22 | } | |
23 | ||
24 | func (p *bufferPool) borrowBuffer() *statsdBuffer { | |
25 | select { | |
26 | case b := <-p.pool: | |
27 | return b | |
28 | default: | |
29 | return newStatsdBuffer(p.bufferMaxSize, p.bufferMaxElements) | |
30 | } | |
31 | } | |
32 | ||
33 | func (p *bufferPool) returnBuffer(buffer *statsdBuffer) { | |
34 | buffer.reset() | |
35 | select { | |
36 | case p.pool <- buffer: | |
37 | default: | |
38 | } | |
39 | } |
0 | package statsd | |
1 | ||
2 | import ( | |
3 | "testing" | |
4 | ||
5 | "github.com/stretchr/testify/assert" | |
6 | ) | |
7 | ||
// TestBufferPoolSize checks that the pool is created full, at the requested capacity.
func TestBufferPoolSize(t *testing.T) {
	bufferPool := newBufferPool(10, 1024, 20)

	assert.Equal(t, 10, cap(bufferPool.pool))
	assert.Equal(t, 10, len(bufferPool.pool))
}

// TestBufferPoolBufferCreation checks that pooled buffers inherit the pool's size limits.
func TestBufferPoolBufferCreation(t *testing.T) {
	bufferPool := newBufferPool(10, 1024, 20)
	buffer := bufferPool.borrowBuffer()

	assert.Equal(t, 1024, buffer.maxSize)
	assert.Equal(t, 20, buffer.maxElements)
}

// TestBufferPoolEmpty checks that borrowing from an exhausted pool still
// returns a usable (freshly allocated) buffer.
func TestBufferPoolEmpty(t *testing.T) {
	bufferPool := newBufferPool(1, 1024, 20)
	bufferPool.borrowBuffer()

	assert.Equal(t, 0, len(bufferPool.pool))
	buffer := bufferPool.borrowBuffer()
	assert.NotNil(t, buffer.bytes())
}

// TestBufferReturn checks that a returned buffer goes back into the pool and
// comes back out reset (empty content).
func TestBufferReturn(t *testing.T) {
	bufferPool := newBufferPool(1, 1024, 20)
	buffer := bufferPool.borrowBuffer()
	buffer.writeCount("", nil, "", 1, nil, 1)

	assert.Equal(t, 0, len(bufferPool.pool))
	bufferPool.returnBuffer(buffer)
	assert.Equal(t, 1, len(bufferPool.pool))
	buffer = bufferPool.borrowBuffer()
	assert.Equal(t, 0, len(buffer.bytes()))
}
0 | package statsd | |
1 | ||
2 | import ( | |
3 | "testing" | |
4 | ||
5 | "github.com/stretchr/testify/assert" | |
6 | ) | |
7 | ||
// TestBufferGauge checks the serialized wire format of a gauge, with and
// without a container ID field appended.
func TestBufferGauge(t *testing.T) {
	buffer := newStatsdBuffer(1024, 1)
	err := buffer.writeGauge("namespace.", []string{"tag:tag"}, "metric", 1, []string{}, 1)
	assert.Nil(t, err)
	assert.Equal(t, "namespace.metric:1|g|#tag:tag\n", string(buffer.bytes()))

	// with a container ID field
	patchContainerID("container-id")
	defer resetContainerID()

	buffer = newStatsdBuffer(1024, 1)
	err = buffer.writeGauge("namespace.", []string{"tag:tag"}, "metric", 1, []string{}, 1)
	assert.Nil(t, err)
	assert.Equal(t, "namespace.metric:1|g|#tag:tag|c:container-id\n", string(buffer.bytes()))
}

// TestBufferCount checks the serialized wire format of a count.
func TestBufferCount(t *testing.T) {
	buffer := newStatsdBuffer(1024, 1)
	err := buffer.writeCount("namespace.", []string{"tag:tag"}, "metric", 1, []string{}, 1)
	assert.Nil(t, err)
	assert.Equal(t, "namespace.metric:1|c|#tag:tag\n", string(buffer.bytes()))

	// with a container ID field
	patchContainerID("container-id")
	defer resetContainerID()

	buffer = newStatsdBuffer(1024, 1)
	err = buffer.writeCount("namespace.", []string{"tag:tag"}, "metric", 1, []string{}, 1)
	assert.Nil(t, err)
	assert.Equal(t, "namespace.metric:1|c|#tag:tag|c:container-id\n", string(buffer.bytes()))
}

// TestBufferHistogram checks the serialized wire format of a histogram.
func TestBufferHistogram(t *testing.T) {
	buffer := newStatsdBuffer(1024, 1)
	err := buffer.writeHistogram("namespace.", []string{"tag:tag"}, "metric", 1, []string{}, 1)
	assert.Nil(t, err)
	assert.Equal(t, "namespace.metric:1|h|#tag:tag\n", string(buffer.bytes()))

	// with a container ID field
	patchContainerID("container-id")
	defer resetContainerID()

	buffer = newStatsdBuffer(1024, 1)
	err = buffer.writeHistogram("namespace.", []string{"tag:tag"}, "metric", 1, []string{}, 1)
	assert.Nil(t, err)
	assert.Equal(t, "namespace.metric:1|h|#tag:tag|c:container-id\n", string(buffer.bytes()))
}

// TestBufferDistribution checks the serialized wire format of a distribution.
func TestBufferDistribution(t *testing.T) {
	buffer := newStatsdBuffer(1024, 1)
	err := buffer.writeDistribution("namespace.", []string{"tag:tag"}, "metric", 1, []string{}, 1)
	assert.Nil(t, err)
	assert.Equal(t, "namespace.metric:1|d|#tag:tag\n", string(buffer.bytes()))

	// with a container ID field
	patchContainerID("container-id")
	defer resetContainerID()

	buffer = newStatsdBuffer(1024, 1)
	err = buffer.writeDistribution("namespace.", []string{"tag:tag"}, "metric", 1, []string{}, 1)
	assert.Nil(t, err)
	assert.Equal(t, "namespace.metric:1|d|#tag:tag|c:container-id\n", string(buffer.bytes()))
}

// TestBufferSet checks the serialized wire format of a set.
func TestBufferSet(t *testing.T) {
	buffer := newStatsdBuffer(1024, 1)
	err := buffer.writeSet("namespace.", []string{"tag:tag"}, "metric", "value", []string{}, 1)
	assert.Nil(t, err)
	assert.Equal(t, "namespace.metric:value|s|#tag:tag\n", string(buffer.bytes()))

	// with a container ID field
	patchContainerID("container-id")
	defer resetContainerID()

	buffer = newStatsdBuffer(1024, 1)
	err = buffer.writeSet("namespace.", []string{"tag:tag"}, "metric", "value", []string{}, 1)
	assert.Nil(t, err)
	assert.Equal(t, "namespace.metric:value|s|#tag:tag|c:container-id\n", string(buffer.bytes()))
}

// TestBufferTiming checks the serialized wire format of a timing.
func TestBufferTiming(t *testing.T) {
	buffer := newStatsdBuffer(1024, 1)
	err := buffer.writeTiming("namespace.", []string{"tag:tag"}, "metric", 1, []string{}, 1)
	assert.Nil(t, err)
	assert.Equal(t, "namespace.metric:1.000000|ms|#tag:tag\n", string(buffer.bytes()))

	// with a container ID field
	patchContainerID("container-id")
	defer resetContainerID()

	buffer = newStatsdBuffer(1024, 1)
	err = buffer.writeTiming("namespace.", []string{"tag:tag"}, "metric", 1, []string{}, 1)
	assert.Nil(t, err)
	assert.Equal(t, "namespace.metric:1.000000|ms|#tag:tag|c:container-id\n", string(buffer.bytes()))
}

// TestBufferEvent checks the serialized wire format of an event.
func TestBufferEvent(t *testing.T) {
	buffer := newStatsdBuffer(1024, 1)
	err := buffer.writeEvent(&Event{Title: "title", Text: "text"}, []string{"tag:tag"})
	assert.Nil(t, err)
	assert.Equal(t, "_e{5,4}:title|text|#tag:tag\n", string(buffer.bytes()))

	// with a container ID field
	patchContainerID("container-id")
	defer resetContainerID()

	buffer = newStatsdBuffer(1024, 1)
	err = buffer.writeEvent(&Event{Title: "title", Text: "text"}, []string{"tag:tag"})
	assert.Nil(t, err)
	assert.Equal(t, "_e{5,4}:title|text|#tag:tag|c:container-id\n", string(buffer.bytes()))
}

// TestBufferServiceCheck checks the serialized wire format of a service check.
func TestBufferServiceCheck(t *testing.T) {
	buffer := newStatsdBuffer(1024, 1)
	err := buffer.writeServiceCheck(&ServiceCheck{Name: "name", Status: Ok}, []string{"tag:tag"})
	assert.Nil(t, err)
	assert.Equal(t, "_sc|name|0|#tag:tag\n", string(buffer.bytes()))

	// with a container ID field
	patchContainerID("container-id")
	defer resetContainerID()

	buffer = newStatsdBuffer(1024, 1)
	err = buffer.writeServiceCheck(&ServiceCheck{Name: "name", Status: Ok}, []string{"tag:tag"})
	assert.Nil(t, err)
	assert.Equal(t, "_sc|name|0|#tag:tag|c:container-id\n", string(buffer.bytes()))
}

// TestBufferFullSize checks that a buffer sized exactly for one message
// accepts it and rejects the next with errBufferFull.
func TestBufferFullSize(t *testing.T) {
	buffer := newStatsdBuffer(30, 10)
	err := buffer.writeGauge("namespace.", []string{"tag:tag"}, "metric", 1, []string{}, 1)
	assert.Nil(t, err)
	assert.Len(t, buffer.bytes(), 30)
	err = buffer.writeGauge("namespace.", []string{"tag:tag"}, "metric", 1, []string{}, 1)
	assert.Equal(t, errBufferFull, err)
}

// TestBufferSeparator checks that consecutive messages are newline-separated.
func TestBufferSeparator(t *testing.T) {
	buffer := newStatsdBuffer(1024, 10)
	err := buffer.writeGauge("namespace.", []string{"tag:tag"}, "metric", 1, []string{}, 1)
	assert.Nil(t, err)
	err = buffer.writeGauge("namespace.", []string{"tag:tag"}, "metric", 1, []string{}, 1)
	assert.Nil(t, err)
	assert.Equal(t, "namespace.metric:1|g|#tag:tag\nnamespace.metric:1|g|#tag:tag\n", string(buffer.bytes()))
}
152 | ||
// TestBufferAggregated exercises writeAggregated across the full range of
// outcomes: full success, max-element rejection, errBufferFull when nothing
// fits, errPartialWrite when only a prefix of the values fits, rollback of a
// partially-written header, and the container-ID suffix.
func TestBufferAggregated(t *testing.T) {
	buffer := newStatsdBuffer(1024, 1)
	pos, err := buffer.writeAggregated([]byte("h"), "namespace.", []string{"tag:tag"}, "metric", []float64{1}, "", 12, -1)
	assert.Nil(t, err)
	assert.Equal(t, 1, pos)
	assert.Equal(t, "namespace.metric:1|h|#tag:tag\n", string(buffer.bytes()))

	buffer = newStatsdBuffer(1024, 1)
	pos, err = buffer.writeAggregated([]byte("h"), "namespace.", []string{"tag:tag"}, "metric", []float64{1, 2, 3, 4}, "", 12, -1)
	assert.Nil(t, err)
	assert.Equal(t, 4, pos)
	assert.Equal(t, "namespace.metric:1:2:3:4|h|#tag:tag\n", string(buffer.bytes()))

	// max element already used
	buffer = newStatsdBuffer(1024, 1)
	buffer.elementCount = 1
	pos, err = buffer.writeAggregated([]byte("h"), "namespace.", []string{"tag:tag"}, "metric", []float64{1, 2, 3, 4}, "", 12, -1)
	assert.Equal(t, errBufferFull, err)

	// not enough size to start serializing (tags and header too big)
	buffer = newStatsdBuffer(4, 1)
	pos, err = buffer.writeAggregated([]byte("h"), "namespace.", []string{"tag:tag"}, "metric", []float64{1, 2, 3, 4}, "", 12, -1)
	assert.Equal(t, errBufferFull, err)

	// not enough size to serialize even one message
	buffer = newStatsdBuffer(29, 1)
	pos, err = buffer.writeAggregated([]byte("h"), "namespace.", []string{"tag:tag"}, "metric", []float64{1, 2, 3, 4}, "", 12, -1)
	assert.Equal(t, errBufferFull, err)

	// space for only 1 number
	buffer = newStatsdBuffer(30, 1)
	pos, err = buffer.writeAggregated([]byte("h"), "namespace.", []string{"tag:tag"}, "metric", []float64{1, 2, 3, 4}, "", 12, -1)
	assert.Equal(t, errPartialWrite, err)
	assert.Equal(t, 1, pos)
	assert.Equal(t, "namespace.metric:1|h|#tag:tag\n", string(buffer.bytes()))

	// first value too big
	buffer = newStatsdBuffer(30, 1)
	pos, err = buffer.writeAggregated([]byte("h"), "namespace.", []string{"tag:tag"}, "metric", []float64{12, 2, 3, 4}, "", 12, -1)
	assert.Equal(t, errBufferFull, err)
	assert.Equal(t, 0, pos)
	assert.Equal(t, "", string(buffer.bytes())) // checking that the buffer was reset

	// not enough space left
	buffer = newStatsdBuffer(40, 1)
	buffer.buffer = append(buffer.buffer, []byte("abcdefghij")...)
	pos, err = buffer.writeAggregated([]byte("h"), "namespace.", []string{"tag:tag"}, "metric", []float64{12, 2, 3, 4}, "", 12, -1)
	assert.Equal(t, errBufferFull, err)
	assert.Equal(t, 0, pos)
	assert.Equal(t, "abcdefghij", string(buffer.bytes())) // checking that the buffer was reset

	// space for only 2 numbers
	buffer = newStatsdBuffer(32, 1)
	pos, err = buffer.writeAggregated([]byte("h"), "namespace.", []string{"tag:tag"}, "metric", []float64{1, 2, 3, 4}, "", 12, -1)
	assert.Equal(t, errPartialWrite, err)
	assert.Equal(t, 2, pos)
	assert.Equal(t, "namespace.metric:1:2|h|#tag:tag\n", string(buffer.bytes()))

	// with a container ID field
	patchContainerID("container-id")
	defer resetContainerID()

	buffer = newStatsdBuffer(1024, 1)
	pos, err = buffer.writeAggregated([]byte("h"), "namespace.", []string{"tag:tag"}, "metric", []float64{1}, "", 12, -1)
	assert.Nil(t, err)
	assert.Equal(t, 1, pos)
	assert.Equal(t, "namespace.metric:1|h|#tag:tag|c:container-id\n", string(buffer.bytes()))
}
221 | ||
// TestBufferMaxElement checks that once maxElements messages are stored every
// writer method rejects further writes with errBufferFull.
func TestBufferMaxElement(t *testing.T) {
	buffer := newStatsdBuffer(1024, 1)

	err := buffer.writeGauge("namespace.", []string{"tag:tag"}, "metric", 1, []string{}, 1)
	assert.Nil(t, err)

	err = buffer.writeGauge("namespace.", []string{"tag:tag"}, "metric", 1, []string{}, 1)
	assert.Equal(t, errBufferFull, err)

	err = buffer.writeCount("namespace.", []string{"tag:tag"}, "metric", 1, []string{}, 1)
	assert.Equal(t, errBufferFull, err)

	err = buffer.writeHistogram("namespace.", []string{"tag:tag"}, "metric", 1, []string{}, 1)
	assert.Equal(t, errBufferFull, err)

	err = buffer.writeDistribution("namespace.", []string{"tag:tag"}, "metric", 1, []string{}, 1)
	assert.Equal(t, errBufferFull, err)

	err = buffer.writeSet("namespace.", []string{"tag:tag"}, "metric", "value", []string{}, 1)
	assert.Equal(t, errBufferFull, err)

	err = buffer.writeTiming("namespace.", []string{"tag:tag"}, "metric", 1, []string{}, 1)
	assert.Equal(t, errBufferFull, err)

	err = buffer.writeEvent(&Event{Title: "title", Text: "text"}, []string{"tag:tag"})
	assert.Equal(t, errBufferFull, err)

	err = buffer.writeServiceCheck(&ServiceCheck{Name: "name", Status: Ok}, []string{"tag:tag"})
	assert.Equal(t, errBufferFull, err)
}
0 | package statsd | |
1 | ||
2 | import ( | |
3 | "math/rand" | |
4 | "sync" | |
5 | "sync/atomic" | |
6 | "time" | |
7 | ) | |
8 | ||
// bufferedMetricContexts represent the contexts for Histograms, Distributions
// and Timing. Since those 3 metric types behave the same way and are sampled
// with the same type they're represented by the same type.
type bufferedMetricContexts struct {
	nbContext uint64       // total number of contexts flushed so far; read/written atomically
	mutex     sync.RWMutex // guards values
	values    bufferedMetricMap // per-context aggregated samples, keyed by the context string
	newMetric func(string, float64, string) *bufferedMetric // factory creating the per-context metric

	// Each bufferedMetricContexts uses its own random source and random
	// lock to prevent goroutines from contending for the lock on the
	// "math/rand" package-global random source (e.g. calls like
	// "rand.Float64()" must acquire a shared lock to get the next
	// pseudorandom number).
	random     *rand.Rand
	randomLock sync.Mutex
}
26 | ||
27 | func newBufferedContexts(newMetric func(string, float64, string) *bufferedMetric) bufferedMetricContexts { | |
28 | return bufferedMetricContexts{ | |
29 | values: bufferedMetricMap{}, | |
30 | newMetric: newMetric, | |
31 | // Note that calling "time.Now().UnixNano()" repeatedly quickly may return | |
32 | // very similar values. That's fine for seeding the worker-specific random | |
33 | // source because we just need an evenly distributed stream of float values. | |
34 | // Do not use this random source for cryptographic randomness. | |
35 | random: rand.New(rand.NewSource(time.Now().UnixNano())), | |
36 | } | |
37 | } | |
38 | ||
39 | func (bc *bufferedMetricContexts) flush(metrics []metric) []metric { | |
40 | bc.mutex.Lock() | |
41 | values := bc.values | |
42 | bc.values = bufferedMetricMap{} | |
43 | bc.mutex.Unlock() | |
44 | ||
45 | for _, d := range values { | |
46 | metrics = append(metrics, d.flushUnsafe()) | |
47 | } | |
48 | atomic.AddUint64(&bc.nbContext, uint64(len(values))) | |
49 | return metrics | |
50 | } | |
51 | ||
// sample records one value for the context derived from name and tags,
// honoring the sampling rate. It always returns nil; the error return keeps
// the signature in line with the other sampling paths.
func (bc *bufferedMetricContexts) sample(name string, value float64, tags []string, rate float64) error {
	if !shouldSample(rate, bc.random, &bc.randomLock) {
		return nil
	}

	context, stringTags := getContextAndTags(name, tags)

	// Fast path: the context already exists, so a read lock is enough.
	bc.mutex.RLock()
	if v, found := bc.values[context]; found {
		v.sample(value)
		bc.mutex.RUnlock()
		return nil
	}
	bc.mutex.RUnlock()

	bc.mutex.Lock()
	// Check that another goroutine hasn't created the value between the 'RUnlock' and 'Lock'
	if v, found := bc.values[context]; found {
		v.sample(value)
		bc.mutex.Unlock()
		return nil
	}
	bc.values[context] = bc.newMetric(name, value, stringTags)
	bc.mutex.Unlock()
	return nil
}
78 | ||
// getNbContext returns the number of contexts flushed so far. The atomic load
// makes it safe to call concurrently with flush.
func (bc *bufferedMetricContexts) getNbContext() uint64 {
	return atomic.LoadUint64(&bc.nbContext)
}
0 | package statsd | |
1 | ||
2 | import ( | |
3 | "bufio" | |
4 | "fmt" | |
5 | "io" | |
6 | "os" | |
7 | "regexp" | |
8 | "sync" | |
9 | ) | |
10 | ||
const (
	// cgroupPath is the path to the cgroup file where we can find the container id if one exists.
	// On systems without this file, readContainerID simply returns "" (open fails).
	cgroupPath = "/proc/self/cgroup"
)
15 | ||
const (
	// uuidSource matches canonical UUIDs using either '-' or '_' separators.
	uuidSource = "[0-9a-f]{8}[-_][0-9a-f]{4}[-_][0-9a-f]{4}[-_][0-9a-f]{4}[-_][0-9a-f]{12}"
	// containerSource matches 64-character hexadecimal container IDs.
	containerSource = "[0-9a-f]{64}"
	// taskSource matches task IDs of the form "<32 hex chars>-<digits>".
	taskSource = "[0-9a-f]{32}-\\d+"
)

var (
	// expLine matches a line in the /proc/self/cgroup file. It has a submatch for the last element (path), which contains the container ID.
	expLine = regexp.MustCompile(`^\d+:[^:]*:(.+)$`)

	// expContainerID matches contained IDs and sources. Source: https://github.com/Qard/container-info/blob/master/index.js
	// NOTE(review): the '.' in `(?:.scope)?` is unescaped, so it matches any
	// single character before "scope", not only a literal dot — confirm intended.
	expContainerID = regexp.MustCompile(fmt.Sprintf(`(%s|%s|%s)(?:.scope)?$`, uuidSource, containerSource, taskSource))

	// containerID holds the container ID.
	containerID = ""
)
32 | ||
33 | // parseContainerID finds the first container ID reading from r and returns it. | |
34 | func parseContainerID(r io.Reader) string { | |
35 | scn := bufio.NewScanner(r) | |
36 | for scn.Scan() { | |
37 | path := expLine.FindStringSubmatch(scn.Text()) | |
38 | if len(path) != 2 { | |
39 | // invalid entry, continue | |
40 | continue | |
41 | } | |
42 | if parts := expContainerID.FindStringSubmatch(path[1]); len(parts) == 2 { | |
43 | return parts[1] | |
44 | } | |
45 | } | |
46 | return "" | |
47 | } | |
48 | ||
49 | // readContainerID attempts to return the container ID from the provided file path or empty on failure. | |
50 | func readContainerID(fpath string) string { | |
51 | f, err := os.Open(fpath) | |
52 | if err != nil { | |
53 | return "" | |
54 | } | |
55 | defer f.Close() | |
56 | return parseContainerID(f) | |
57 | } | |
58 | ||
// getContainerID returns the container ID configured at the client creation
// It can either be auto-discovered with origin detection or provided by the user.
// User-defined container ID is prioritized.
// Returns "" when no container ID was resolved by initContainerID.
func getContainerID() string {
	return containerID
}
65 | ||
66 | var initOnce sync.Once | |
67 | ||
68 | // initContainerID initializes the container ID. | |
69 | // It can either be provided by the user or read from cgroups. | |
70 | func initContainerID(userProvidedID string, cgroupFallback bool) { | |
71 | initOnce.Do(func() { | |
72 | if userProvidedID != "" { | |
73 | containerID = userProvidedID | |
74 | return | |
75 | } | |
76 | ||
77 | if cgroupFallback { | |
78 | containerID = readContainerID(cgroupPath) | |
79 | } | |
80 | }) | |
81 | } |
0 | package statsd | |
1 | ||
2 | import ( | |
3 | "io" | |
4 | "io/ioutil" | |
5 | "os" | |
6 | "strings" | |
7 | "testing" | |
8 | ||
9 | "github.com/stretchr/testify/assert" | |
10 | ) | |
11 | ||
// TestParseContainerID table-tests container-ID extraction from a variety of
// /proc/self/cgroup contents: kubepods, ECS, docker, UUID and task formats,
// multi-line files, and entries that must be skipped.
func TestParseContainerID(t *testing.T) {
	for input, expectedResult := range map[string]string{
		`other_line
10:hugetlb:/kubepods/burstable/podfd52ef25-a87d-11e9-9423-0800271a638e/8c046cb0b72cd4c99f51b5591cd5b095967f58ee003710a45280c28ee1a9c7fa
9:cpuset:/kubepods/burstable/podfd52ef25-a87d-11e9-9423-0800271a638e/8c046cb0b72cd4c99f51b5591cd5b095967f58ee003710a45280c28ee1a9c7fa
8:pids:/kubepods/burstable/podfd52ef25-a87d-11e9-9423-0800271a638e/8c046cb0b72cd4c99f51b5591cd5b095967f58ee003710a45280c28ee1a9c7fa
7:freezer:/kubepods/burstable/podfd52ef25-a87d-11e9-9423-0800271a638e/8c046cb0b72cd4c99f51b5591cd5b095967f58ee003710a45280c28ee1a9c7fa
6:cpu,cpuacct:/kubepods/burstable/podfd52ef25-a87d-11e9-9423-0800271a638e/8c046cb0b72cd4c99f51b5591cd5b095967f58ee003710a45280c28ee1a9c7fa
5:perf_event:/kubepods/burstable/podfd52ef25-a87d-11e9-9423-0800271a638e/8c046cb0b72cd4c99f51b5591cd5b095967f58ee003710a45280c28ee1a9c7fa
4:blkio:/kubepods/burstable/podfd52ef25-a87d-11e9-9423-0800271a638e/8c046cb0b72cd4c99f51b5591cd5b095967f58ee003710a45280c28ee1a9c7fa
3:devices:/kubepods/burstable/podfd52ef25-a87d-11e9-9423-0800271a638e/8c046cb0b72cd4c99f51b5591cd5b095967f58ee003710a45280c28ee1a9c7fa
2:net_cls,net_prio:/kubepods/burstable/podfd52ef25-a87d-11e9-9423-0800271a638e/8c046cb0b72cd4c99f51b5591cd5b095967f58ee003710a45280c28ee1a9c7fa`: "8c046cb0b72cd4c99f51b5591cd5b095967f58ee003710a45280c28ee1a9c7fa",
		"10:hugetlb:/kubepods/burstable/podfd52ef25-a87d-11e9-9423-0800271a638e/8c046cb0b72cd4c99f51b5591cd5b095967f58ee003710a45280c28ee1a9c7fa": "8c046cb0b72cd4c99f51b5591cd5b095967f58ee003710a45280c28ee1a9c7fa",
		"10:hugetlb:/kubepods": "",
		"11:hugetlb:/ecs/55091c13-b8cf-4801-b527-f4601742204d/432624d2150b349fe35ba397284dea788c2bf66b885d14dfc1569b01890ca7da": "432624d2150b349fe35ba397284dea788c2bf66b885d14dfc1569b01890ca7da",
		"1:name=systemd:/docker/34dc0b5e626f2c5c4c5170e34b10e7654ce36f0fcd532739f4445baabea03376": "34dc0b5e626f2c5c4c5170e34b10e7654ce36f0fcd532739f4445baabea03376",
		"1:name=systemd:/uuid/34dc0b5e-626f-2c5c-4c51-70e34b10e765": "34dc0b5e-626f-2c5c-4c51-70e34b10e765",
		"1:name=systemd:/ecs/34dc0b5e626f2c5c4c5170e34b10e765-1234567890": "34dc0b5e626f2c5c4c5170e34b10e765-1234567890",
		"1:name=systemd:/docker/34dc0b5e626f2c5c4c5170e34b10e7654ce36f0fcd532739f4445baabea03376.scope": "34dc0b5e626f2c5c4c5170e34b10e7654ce36f0fcd532739f4445baabea03376",
		`1:name=systemd:/nope
2:pids:/docker/34dc0b5e626f2c5c4c5170e34b10e7654ce36f0fcd532739f4445baabea03376
3:cpu:/invalid`: "34dc0b5e626f2c5c4c5170e34b10e7654ce36f0fcd532739f4445baabea03376",
	} {
		id := parseContainerID(strings.NewReader(input))
		assert.Equal(t, expectedResult, id)
	}
}
39 | ||
40 | func TestReadContainerID(t *testing.T) { | |
41 | cid := "8c046cb0b72cd4c99f51b5591cd5b095967f58ee003710a45280c28ee1a9c7fa" | |
42 | cgroupContents := "10:hugetlb:/kubepods/burstable/podfd52ef25-a87d-11e9-9423-0800271a638e/" + cid | |
43 | ||
44 | tmpFile, err := ioutil.TempFile(os.TempDir(), "fake-cgroup-") | |
45 | assert.NoError(t, err) | |
46 | ||
47 | defer os.Remove(tmpFile.Name()) | |
48 | ||
49 | _, err = io.WriteString(tmpFile, cgroupContents) | |
50 | assert.NoError(t, err) | |
51 | ||
52 | err = tmpFile.Close() | |
53 | assert.NoError(t, err) | |
54 | ||
55 | actualCID := readContainerID(tmpFile.Name()) | |
56 | assert.Equal(t, cid, actualCID) | |
57 | } |
0 | package statsd | |
1 | ||
2 | import ( | |
3 | "os" | |
4 | "sort" | |
5 | "testing" | |
6 | "time" | |
7 | ||
8 | "github.com/stretchr/testify/assert" | |
9 | ) | |
10 | ||
11 | func TestPipelineWithGlobalTags(t *testing.T) { | |
12 | ts, client := newClientAndTestServer(t, | |
13 | "udp", | |
14 | "localhost:8765", | |
15 | []string{"tag1", "tag2"}, | |
16 | WithTags([]string{"tag1", "tag2"}), | |
17 | ) | |
18 | ||
19 | ts.sendAllAndAssert(t, client) | |
20 | } | |
21 | ||
22 | func TestKownEnvTags(t *testing.T) { | |
23 | entityIDEnvName := "DD_ENTITY_ID" | |
24 | ddEnvName := "DD_ENV" | |
25 | ddServiceName := "DD_SERVICE" | |
26 | ddVersionName := "DD_VERSION" | |
27 | ||
28 | defer func() { os.Unsetenv(entityIDEnvName) }() | |
29 | defer func() { os.Unsetenv(ddEnvName) }() | |
30 | defer func() { os.Unsetenv(ddServiceName) }() | |
31 | defer func() { os.Unsetenv(ddVersionName) }() | |
32 | ||
33 | os.Setenv(entityIDEnvName, "test_id") | |
34 | os.Setenv(ddEnvName, "test_env") | |
35 | os.Setenv(ddServiceName, "test_service") | |
36 | os.Setenv(ddVersionName, "test_version") | |
37 | ||
38 | expectedTags := []string{"dd.internal.entity_id:test_id", "env:test_env", "service:test_service", "version:test_version"} | |
39 | ts, client := newClientAndTestServer(t, | |
40 | "udp", | |
41 | "localhost:8765", | |
42 | expectedTags, | |
43 | ) | |
44 | ||
45 | sort.Strings(client.tags) | |
46 | assert.Equal(t, expectedTags, client.tags) | |
47 | ts.sendAllAndAssert(t, client) | |
48 | } | |
49 | ||
50 | func TestKnownEnvTagsWithCustomTags(t *testing.T) { | |
51 | entityIDEnvName := "DD_ENTITY_ID" | |
52 | ddEnvName := "DD_ENV" | |
53 | ddServiceName := "DD_SERVICE" | |
54 | ddVersionName := "DD_VERSION" | |
55 | ||
56 | defer func() { os.Unsetenv(entityIDEnvName) }() | |
57 | defer func() { os.Unsetenv(ddEnvName) }() | |
58 | defer func() { os.Unsetenv(ddServiceName) }() | |
59 | defer func() { os.Unsetenv(ddVersionName) }() | |
60 | ||
61 | os.Setenv(entityIDEnvName, "test_id") | |
62 | os.Setenv(ddEnvName, "test_env") | |
63 | os.Setenv(ddServiceName, "test_service") | |
64 | os.Setenv(ddVersionName, "test_version") | |
65 | ||
66 | expectedTags := []string{"tag1", "tag2", "dd.internal.entity_id:test_id", "env:test_env", | |
67 | "service:test_service", "version:test_version"} | |
68 | ts, client := newClientAndTestServer(t, | |
69 | "udp", | |
70 | "localhost:8765", | |
71 | expectedTags, | |
72 | WithTags([]string{"tag1", "tag2"}), | |
73 | ) | |
74 | ||
75 | ts.sendAllAndAssert(t, client) | |
76 | ||
77 | sort.Strings(expectedTags) | |
78 | sort.Strings(client.tags) | |
79 | assert.Equal(t, expectedTags, client.tags) | |
80 | } | |
81 | ||
82 | func TestKnownEnvTagsEmptyString(t *testing.T) { | |
83 | entityIDEnvName := "DD_ENTITY_ID" | |
84 | ddEnvName := "DD_ENV" | |
85 | ddServiceName := "DD_SERVICE" | |
86 | ddVersionName := "DD_VERSION" | |
87 | ||
88 | defer func() { os.Unsetenv(entityIDEnvName) }() | |
89 | defer func() { os.Unsetenv(ddEnvName) }() | |
90 | defer func() { os.Unsetenv(ddServiceName) }() | |
91 | defer func() { os.Unsetenv(ddVersionName) }() | |
92 | ||
93 | os.Setenv(entityIDEnvName, "") | |
94 | os.Setenv(ddEnvName, "") | |
95 | os.Setenv(ddServiceName, "") | |
96 | os.Setenv(ddVersionName, "") | |
97 | ||
98 | ts, client := newClientAndTestServer(t, | |
99 | "udp", | |
100 | "localhost:8765", | |
101 | nil, | |
102 | ) | |
103 | ||
104 | assert.Len(t, client.tags, 0) | |
105 | ts.sendAllAndAssert(t, client) | |
106 | } | |
107 | ||
108 | func TestContainerIDWithEntityID(t *testing.T) { | |
109 | resetContainerID() | |
110 | ||
111 | entityIDEnvName := "DD_ENTITY_ID" | |
112 | defer func() { os.Unsetenv(entityIDEnvName) }() | |
113 | os.Setenv(entityIDEnvName, "pod-uid") | |
114 | ||
115 | expectedTags := []string{"dd.internal.entity_id:pod-uid"} | |
116 | ts, client := newClientAndTestServer(t, | |
117 | "udp", | |
118 | "localhost:8765", | |
119 | expectedTags, | |
120 | WithContainerID("fake-container-id"), | |
121 | ) | |
122 | ||
123 | sort.Strings(client.tags) | |
124 | assert.Equal(t, expectedTags, client.tags) | |
125 | ts.assertContainerID(t, "") | |
126 | ts.sendAllAndAssert(t, client) | |
127 | } | |
128 | ||
129 | func TestContainerIDWithoutEntityID(t *testing.T) { | |
130 | resetContainerID() | |
131 | os.Unsetenv("DD_ENTITY_ID") | |
132 | ||
133 | ts, client := newClientAndTestServer(t, | |
134 | "udp", | |
135 | "localhost:8765", | |
136 | []string{}, | |
137 | WithContainerID("fake-container-id"), | |
138 | ) | |
139 | ||
140 | ts.assertContainerID(t, "fake-container-id") | |
141 | ts.sendAllAndAssert(t, client) | |
142 | } | |
143 | ||
144 | func TestOriginDetectionDisabled(t *testing.T) { | |
145 | resetContainerID() | |
146 | os.Unsetenv("DD_ENTITY_ID") | |
147 | ||
148 | originDetectionEnvName := "DD_ORIGIN_DETECTION_ENABLED" | |
149 | defer func() { os.Unsetenv(originDetectionEnvName) }() | |
150 | os.Setenv(originDetectionEnvName, "false") | |
151 | ||
152 | ts, client := newClientAndTestServer(t, | |
153 | "udp", | |
154 | "localhost:8765", | |
155 | []string{}, | |
156 | ) | |
157 | ||
158 | ts.assertContainerID(t, "") | |
159 | ts.sendAllAndAssert(t, client) | |
160 | } | |
161 | ||
162 | func TestOriginDetectionEnabledWithEntityID(t *testing.T) { | |
163 | resetContainerID() | |
164 | ||
165 | entityIDEnvName := "DD_ENTITY_ID" | |
166 | defer func() { os.Unsetenv(entityIDEnvName) }() | |
167 | os.Setenv(entityIDEnvName, "pod-uid") | |
168 | ||
169 | originDetectionEnvName := "DD_ORIGIN_DETECTION_ENABLED" | |
170 | defer func() { os.Unsetenv(originDetectionEnvName) }() | |
171 | os.Setenv(originDetectionEnvName, "true") | |
172 | ||
173 | expectedTags := []string{"dd.internal.entity_id:pod-uid"} | |
174 | ts, client := newClientAndTestServer(t, | |
175 | "udp", | |
176 | "localhost:8765", | |
177 | expectedTags, | |
178 | WithContainerID("fake-container-id"), | |
179 | ) | |
180 | ||
181 | sort.Strings(client.tags) | |
182 | assert.Equal(t, expectedTags, client.tags) | |
183 | ts.assertContainerID(t, "") | |
184 | ts.sendAllAndAssert(t, client) | |
185 | } | |
186 | ||
187 | func TestPipelineWithGlobalTagsAndEnv(t *testing.T) { | |
188 | orig := os.Getenv("DD_ENV") | |
189 | os.Setenv("DD_ENV", "test") | |
190 | defer os.Setenv("DD_ENV", orig) | |
191 | ||
192 | ts, client := newClientAndTestServer(t, | |
193 | "udp", | |
194 | "localhost:8765", | |
195 | []string{"tag1", "tag2", "env:test"}, | |
196 | WithTags([]string{"tag1", "tag2"}), | |
197 | ) | |
198 | ||
199 | ts.sendAllAndAssert(t, client) | |
200 | } | |
201 | ||
202 | func TestTelemetryAllOptions(t *testing.T) { | |
203 | orig := os.Getenv("DD_ENV") | |
204 | os.Setenv("DD_ENV", "test") | |
205 | defer os.Setenv("DD_ENV", orig) | |
206 | ||
207 | ts, client := newClientAndTestServer(t, | |
208 | "udp", | |
209 | "localhost:8765", | |
210 | []string{"tag1", "tag2", "env:test"}, | |
211 | WithExtendedClientSideAggregation(), | |
212 | WithTags([]string{"tag1", "tag2"}), | |
213 | WithNamespace("test_namespace"), | |
214 | ) | |
215 | ||
216 | ts.sendAllAndAssert(t, client) | |
217 | } | |
218 | ||
// testCase couples a set of client options with the assertion function to
// run against a client configured with those options.
type testCase struct {
	// opt is the option list passed to newClientAndTestServer.
	opt []Option
	// testFunc runs the scenario against the server/client pair.
	testFunc func(*testing.T, *testServer, *Client)
}
223 | ||
224 | func getTestMap() map[string]testCase { | |
225 | return map[string]testCase{ | |
226 | "Default": testCase{ | |
227 | []Option{}, | |
228 | func(t *testing.T, ts *testServer, client *Client) { | |
229 | ts.sendAllAndAssert(t, client) | |
230 | }, | |
231 | }, | |
232 | "Default without aggregation": testCase{ | |
233 | []Option{ | |
234 | WithoutClientSideAggregation(), | |
235 | }, | |
236 | func(t *testing.T, ts *testServer, client *Client) { | |
237 | ts.sendAllAndAssert(t, client) | |
238 | }, | |
239 | }, | |
240 | "With namespace": testCase{ | |
241 | []Option{ | |
242 | WithNamespace("test_namespace"), | |
243 | }, | |
244 | func(t *testing.T, ts *testServer, client *Client) { | |
245 | ts.sendAllAndAssert(t, client) | |
246 | }, | |
247 | }, | |
248 | "With namespace dot": testCase{ | |
249 | []Option{ | |
250 | WithNamespace("test_namespace."), | |
251 | }, | |
252 | func(t *testing.T, ts *testServer, client *Client) { | |
253 | ts.sendAllAndAssert(t, client) | |
254 | }, | |
255 | }, | |
256 | "With max messages per payload": testCase{ | |
257 | []Option{ | |
258 | WithMaxMessagesPerPayload(5), | |
259 | // Make sure we hit the maxMessagesPerPayload before hitting the flush timeout | |
260 | WithBufferFlushInterval(3 * time.Second), | |
261 | WithWorkersCount(1), | |
262 | }, | |
263 | func(t *testing.T, ts *testServer, client *Client) { | |
264 | ts.sendAllAndAssert(t, client) | |
265 | // We send 4 non aggregated metrics, 1 service_check and 1 event. So 2 reads (5 items per | |
266 | // payload). Then we flush the aggregator that will send 5 metrics, so 1 read. Finally, | |
267 | // the telemetry is 18 metrics flushed at a different time so 4 more payload for a | |
268 | // total of 8 reads on the network | |
269 | ts.assertNbRead(t, 8) | |
270 | }, | |
271 | }, | |
272 | "With max messages per payload + WithoutClientSideAggregation": testCase{ | |
273 | []Option{ | |
274 | WithMaxMessagesPerPayload(5), | |
275 | // Make sure we hit the maxMessagesPerPayload before hitting the flush timeout | |
276 | WithBufferFlushInterval(3 * time.Second), | |
277 | WithoutClientSideAggregation(), | |
278 | WithWorkersCount(1), | |
279 | }, | |
280 | func(t *testing.T, ts *testServer, client *Client) { | |
281 | ts.sendAllAndAssert(t, client) | |
282 | // We send 9 non aggregated metrics, 1 service_check and 1 event. So 3 reads (5 items | |
283 | // per payload). Then the telemetry is 18 metrics flushed at a different time so 4 more | |
284 | // payload for a total of 8 reads on the network | |
285 | ts.assertNbRead(t, 7) | |
286 | }, | |
287 | }, | |
288 | "ChannelMode without client side aggregation": testCase{ | |
289 | []Option{ | |
290 | WithoutClientSideAggregation(), | |
291 | WithChannelMode(), | |
292 | }, | |
293 | func(t *testing.T, ts *testServer, client *Client) { | |
294 | ts.sendAllAndAssert(t, client) | |
295 | }, | |
296 | }, | |
297 | "Basic client side aggregation": testCase{ | |
298 | []Option{}, | |
299 | func(t *testing.T, ts *testServer, client *Client) { | |
300 | expectedMetrics := ts.sendAllMetricsForBasicAggregation(client) | |
301 | ts.assert(t, client, expectedMetrics) | |
302 | }, | |
303 | }, | |
304 | "Extended client side aggregation": testCase{ | |
305 | []Option{ | |
306 | WithExtendedClientSideAggregation(), | |
307 | }, | |
308 | func(t *testing.T, ts *testServer, client *Client) { | |
309 | expectedMetrics := ts.sendAllMetricsForExtendedAggregation(client) | |
310 | ts.assert(t, client, expectedMetrics) | |
311 | }, | |
312 | }, | |
313 | "Basic client side aggregation + ChannelMode": testCase{ | |
314 | []Option{ | |
315 | WithChannelMode(), | |
316 | }, | |
317 | func(t *testing.T, ts *testServer, client *Client) { | |
318 | expectedMetrics := ts.sendAllMetricsForBasicAggregation(client) | |
319 | ts.assert(t, client, expectedMetrics) | |
320 | }, | |
321 | }, | |
322 | "Extended client side aggregation + ChannelMode": testCase{ | |
323 | []Option{ | |
324 | WithExtendedClientSideAggregation(), | |
325 | WithChannelMode(), | |
326 | }, | |
327 | func(t *testing.T, ts *testServer, client *Client) { | |
328 | expectedMetrics := ts.sendAllMetricsForExtendedAggregation(client) | |
329 | ts.assert(t, client, expectedMetrics) | |
330 | }, | |
331 | }, | |
332 | } | |
333 | } | |
334 | ||
335 | func TestFullPipelineUDP(t *testing.T) { | |
336 | for testName, c := range getTestMap() { | |
337 | t.Run(testName, func(t *testing.T) { | |
338 | ts, client := newClientAndTestServer(t, | |
339 | "udp", | |
340 | "localhost:8765", | |
341 | nil, | |
342 | c.opt..., | |
343 | ) | |
344 | c.testFunc(t, ts, client) | |
345 | }) | |
346 | } | |
347 | } |
0 | // +build !windows | |
1 | ||
2 | package statsd | |
3 | ||
4 | import ( | |
5 | "fmt" | |
6 | "math/rand" | |
7 | "os" | |
8 | "testing" | |
9 | ) | |
10 | ||
11 | func TestFullPipelineUDS(t *testing.T) { | |
12 | for testName, c := range getTestMap() { | |
13 | socketPath := fmt.Sprintf("/tmp/dsd_%d.socket", rand.Int()) | |
14 | t.Run(testName, func(t *testing.T) { | |
15 | ts, client := newClientAndTestServer(t, | |
16 | "uds", | |
17 | "unix://"+socketPath, | |
18 | nil, | |
19 | c.opt..., | |
20 | ) | |
21 | c.testFunc(t, ts, client) | |
22 | }) | |
23 | os.Remove(socketPath) | |
24 | } | |
25 | } |
0 | package statsd | |
1 | ||
2 | import ( | |
3 | "fmt" | |
4 | "time" | |
5 | ) | |
6 | ||
7 | // Events support | |
8 | // EventAlertType and EventAlertPriority became exported types after this issue was submitted: https://github.com/DataDog/datadog-go/issues/41 | |
9 | // The reason why they got exported is so that client code can directly use the types. | |
10 | ||
// EventAlertType is the alert type for events. The string value is written
// verbatim into the "|t:" section of the event datagram (see appendEvent).
type EventAlertType string

const (
	// Info is the "info" AlertType for events
	Info EventAlertType = "info"
	// Error is the "error" AlertType for events
	Error EventAlertType = "error"
	// Warning is the "warning" AlertType for events
	Warning EventAlertType = "warning"
	// Success is the "success" AlertType for events
	Success EventAlertType = "success"
)

// EventPriority is the event priority for events. The string value is
// written verbatim into the "|p:" section of the event datagram.
type EventPriority string

const (
	// Normal is the "normal" Priority for events
	Normal EventPriority = "normal"
	// Low is the "low" Priority for events
	Low EventPriority = "low"
)
34 | ||
// An Event is an object that can be posted to your DataDog event stream.
type Event struct {
	// Title of the event. Required; Check returns an error when empty.
	Title string
	// Text is the description of the event. Newlines are escaped to `\n`
	// when the event is serialized.
	Text string
	// Timestamp is a timestamp for the event. If not provided, the dogstatsd
	// server will set this to the current time.
	Timestamp time.Time
	// Hostname for the event.
	Hostname string
	// AggregationKey groups this event with others of the same key.
	AggregationKey string
	// Priority of the event. Can be statsd.Low or statsd.Normal.
	Priority EventPriority
	// SourceTypeName is a source type for the event.
	SourceTypeName string
	// AlertType can be statsd.Info, statsd.Error, statsd.Warning, or statsd.Success.
	// If absent, the default value applied by the dogstatsd server is Info.
	AlertType EventAlertType
	// Tags for the event. Sent in addition to the client's global tags.
	Tags []string
}
58 | ||
59 | // NewEvent creates a new event with the given title and text. Error checking | |
60 | // against these values is done at send-time, or upon running e.Check. | |
61 | func NewEvent(title, text string) *Event { | |
62 | return &Event{ | |
63 | Title: title, | |
64 | Text: text, | |
65 | } | |
66 | } | |
67 | ||
68 | // Check verifies that an event is valid. | |
69 | func (e *Event) Check() error { | |
70 | if len(e.Title) == 0 { | |
71 | return fmt.Errorf("statsd.Event title is required") | |
72 | } | |
73 | return nil | |
74 | } |
0 | package statsd | |
1 | ||
2 | import ( | |
3 | "testing" | |
4 | ||
5 | "github.com/stretchr/testify/assert" | |
6 | "github.com/stretchr/testify/require" | |
7 | ) | |
8 | ||
9 | func encodeEvent(e *Event) (string, error) { | |
10 | err := e.Check() | |
11 | if err != nil { | |
12 | return "", err | |
13 | } | |
14 | var buffer []byte | |
15 | buffer = appendEvent(buffer, e, nil) | |
16 | return string(buffer), nil | |
17 | } | |
18 | ||
19 | func TestEventEncode(t *testing.T) { | |
20 | matrix := []struct { | |
21 | event *Event | |
22 | encoded string | |
23 | }{ | |
24 | { | |
25 | NewEvent("Hello", "Something happened to my event"), | |
26 | `_e{5,30}:Hello|Something happened to my event`, | |
27 | }, { | |
28 | &Event{Title: "hi", Text: "okay", AggregationKey: "foo"}, | |
29 | `_e{2,4}:hi|okay|k:foo`, | |
30 | }, { | |
31 | &Event{Title: "hi", Text: "okay", AggregationKey: "foo", AlertType: Info}, | |
32 | `_e{2,4}:hi|okay|k:foo|t:info`, | |
33 | }, { | |
34 | &Event{Title: "hi", Text: "w/e", AlertType: Error, Priority: Normal}, | |
35 | `_e{2,3}:hi|w/e|p:normal|t:error`, | |
36 | }, { | |
37 | &Event{Title: "hi", Text: "uh", Tags: []string{"host:foo", "app:bar"}}, | |
38 | `_e{2,2}:hi|uh|#host:foo,app:bar`, | |
39 | }, { | |
40 | &Event{Title: "hi", Text: "line1\nline2", Tags: []string{"hello\nworld"}}, | |
41 | `_e{2,12}:hi|line1\nline2|#helloworld`, | |
42 | }, | |
43 | } | |
44 | ||
45 | for _, m := range matrix { | |
46 | r, err := encodeEvent(m.event) | |
47 | require.NoError(t, err) | |
48 | assert.Equal(t, r, m.encoded) | |
49 | } | |
50 | } | |
51 | ||
52 | func TestNewEventTitleMissing(t *testing.T) { | |
53 | e := NewEvent("", "hi") | |
54 | _, err := encodeEvent(e) | |
55 | require.Error(t, err) | |
56 | assert.Equal(t, "statsd.Event title is required", err.Error()) | |
57 | } | |
58 | ||
59 | func TestNewEvent(t *testing.T) { | |
60 | e := NewEvent("hello", "world") | |
61 | e.Tags = []string{"tag1", "tag2"} | |
62 | eventEncoded, err := encodeEvent(e) | |
63 | require.NoError(t, err) | |
64 | assert.Equal(t, "_e{5,5}:hello|world|#tag1,tag2", eventEncoded) | |
65 | assert.Len(t, e.Tags, 2) | |
66 | } | |
67 | ||
68 | func TestNewEventTagsAppend(t *testing.T) { | |
69 | e := NewEvent("hello", "world") | |
70 | e.Tags = append(e.Tags, "tag1", "tag2") | |
71 | eventEncoded, err := encodeEvent(e) | |
72 | require.NoError(t, err) | |
73 | assert.Equal(t, "_e{5,5}:hello|world|#tag1,tag2", eventEncoded) | |
74 | assert.Len(t, e.Tags, 2) | |
75 | } | |
76 | ||
77 | func TestNewEventEmptyText(t *testing.T) { | |
78 | e := NewEvent("hello", "") | |
79 | e.Tags = append(e.Tags, "tag1", "tag2") | |
80 | eventEncoded, err := encodeEvent(e) | |
81 | require.NoError(t, err) | |
82 | assert.Equal(t, "_e{5,0}:hello||#tag1,tag2", eventEncoded) | |
83 | assert.Len(t, e.Tags, 2) | |
84 | } |
0 | package statsd | |
1 | ||
const (
	// 32-bit FNV-1a parameters.
	offset32 = uint32(2166136261)
	prime32  = uint32(16777619)

	// init32 is what 32 bits hash values should be initialized with.
	init32 = offset32
)

// hashString32 returns the 32-bit FNV-1a hash of s.
// (The doc comments previously referred to exported names HashString32 /
// AddString32 that do not exist in this package.)
func hashString32(s string) uint32 {
	return addString32(init32, s)
}

// addString32 adds the hash of s to the precomputed hash value h.
// The main loop is manually unrolled to fold 8 bytes per iteration; the
// remaining tail is folded in via a range loop. Note the tail is iterated
// rune-by-rune, which only differs from byte iteration for non-ASCII
// input — the result is still deterministic for any given string.
func addString32(h uint32, s string) uint32 {
	i := 0
	n := (len(s) / 8) * 8

	for i != n {
		h = (h ^ uint32(s[i])) * prime32
		h = (h ^ uint32(s[i+1])) * prime32
		h = (h ^ uint32(s[i+2])) * prime32
		h = (h ^ uint32(s[i+3])) * prime32
		h = (h ^ uint32(s[i+4])) * prime32
		h = (h ^ uint32(s[i+5])) * prime32
		h = (h ^ uint32(s[i+6])) * prime32
		h = (h ^ uint32(s[i+7])) * prime32
		i += 8
	}

	for _, c := range s[i:] {
		h = (h ^ uint32(c)) * prime32
	}

	return h
}
38 | } |
0 | package statsd | |
1 | ||
2 | import ( | |
3 | "strconv" | |
4 | "strings" | |
5 | ) | |
6 | ||
var (
	// Wire-format type symbols appended after the value in each datagram
	// ("<name>:<value>|<symbol>...").
	gaugeSymbol = []byte("g")
	countSymbol = []byte("c")
	histogramSymbol = []byte("h")
	distributionSymbol = []byte("d")
	setSymbol = []byte("s")
	timingSymbol = []byte("ms")
	// Separators used between tags and between a metric name and value.
	tagSeparatorSymbol = ","
	nameSeparatorSymbol = ":"
)
17 | ||
// appendHeader writes the metric header "<namespace><name>:" into buffer
// and returns the extended buffer. The namespace is expected to already
// carry its trailing dot when one is wanted.
func appendHeader(buffer []byte, namespace string, name string) []byte {
	if len(namespace) > 0 {
		buffer = append(buffer, namespace...)
	}
	return append(append(buffer, name...), ':')
}
26 | ||
// appendRate appends the sample-rate section "|@<rate>" when rate is below
// 1; a rate of 1 is the default and is omitted from the wire format.
// (The comparison is kept as `rate < 1` so that NaN also results in no
// rate section, matching the original behavior.)
func appendRate(buffer []byte, rate float64) []byte {
	if rate < 1 {
		buffer = strconv.AppendFloat(append(buffer, "|@"...), rate, 'f', -1, 64)
	}
	return buffer
}
34 | ||
// appendWithoutNewlines appends s to buffer with every '\n' byte removed.
func appendWithoutNewlines(buffer []byte, s string) []byte {
	// Fast path: no newline present, append the whole string in one go.
	if strings.IndexByte(s, '\n') < 0 {
		return append(buffer, s...)
	}

	// Slow path: copy byte by byte, dropping newlines.
	for i := 0; i < len(s); i++ {
		if c := s[i]; c != '\n' {
			buffer = append(buffer, c)
		}
	}
	return buffer
}
48 | ||
49 | func appendTags(buffer []byte, globalTags []string, tags []string) []byte { | |
50 | if len(globalTags) == 0 && len(tags) == 0 { | |
51 | return buffer | |
52 | } | |
53 | buffer = append(buffer, "|#"...) | |
54 | firstTag := true | |
55 | ||
56 | for _, tag := range globalTags { | |
57 | if !firstTag { | |
58 | buffer = append(buffer, tagSeparatorSymbol...) | |
59 | } | |
60 | buffer = appendWithoutNewlines(buffer, tag) | |
61 | firstTag = false | |
62 | } | |
63 | for _, tag := range tags { | |
64 | if !firstTag { | |
65 | buffer = append(buffer, tagSeparatorSymbol...) | |
66 | } | |
67 | buffer = appendWithoutNewlines(buffer, tag) | |
68 | firstTag = false | |
69 | } | |
70 | return buffer | |
71 | } | |
72 | ||
73 | func appendTagsAggregated(buffer []byte, globalTags []string, tags string) []byte { | |
74 | if len(globalTags) == 0 && tags == "" { | |
75 | return buffer | |
76 | } | |
77 | ||
78 | buffer = append(buffer, "|#"...) | |
79 | firstTag := true | |
80 | ||
81 | for _, tag := range globalTags { | |
82 | if !firstTag { | |
83 | buffer = append(buffer, tagSeparatorSymbol...) | |
84 | } | |
85 | buffer = appendWithoutNewlines(buffer, tag) | |
86 | firstTag = false | |
87 | } | |
88 | if tags != "" { | |
89 | if !firstTag { | |
90 | buffer = append(buffer, tagSeparatorSymbol...) | |
91 | } | |
92 | buffer = appendWithoutNewlines(buffer, tags) | |
93 | } | |
94 | return buffer | |
95 | } | |
96 | ||
97 | func appendFloatMetric(buffer []byte, typeSymbol []byte, namespace string, globalTags []string, name string, value float64, tags []string, rate float64, precision int) []byte { | |
98 | buffer = appendHeader(buffer, namespace, name) | |
99 | buffer = strconv.AppendFloat(buffer, value, 'f', precision, 64) | |
100 | buffer = append(buffer, '|') | |
101 | buffer = append(buffer, typeSymbol...) | |
102 | buffer = appendRate(buffer, rate) | |
103 | buffer = appendTags(buffer, globalTags, tags) | |
104 | buffer = appendContainerID(buffer) | |
105 | return buffer | |
106 | } | |
107 | ||
108 | func appendIntegerMetric(buffer []byte, typeSymbol []byte, namespace string, globalTags []string, name string, value int64, tags []string, rate float64) []byte { | |
109 | buffer = appendHeader(buffer, namespace, name) | |
110 | buffer = strconv.AppendInt(buffer, value, 10) | |
111 | buffer = append(buffer, '|') | |
112 | buffer = append(buffer, typeSymbol...) | |
113 | buffer = appendRate(buffer, rate) | |
114 | buffer = appendTags(buffer, globalTags, tags) | |
115 | buffer = appendContainerID(buffer) | |
116 | return buffer | |
117 | } | |
118 | ||
119 | func appendStringMetric(buffer []byte, typeSymbol []byte, namespace string, globalTags []string, name string, value string, tags []string, rate float64) []byte { | |
120 | buffer = appendHeader(buffer, namespace, name) | |
121 | buffer = append(buffer, value...) | |
122 | buffer = append(buffer, '|') | |
123 | buffer = append(buffer, typeSymbol...) | |
124 | buffer = appendRate(buffer, rate) | |
125 | buffer = appendTags(buffer, globalTags, tags) | |
126 | buffer = appendContainerID(buffer) | |
127 | return buffer | |
128 | } | |
129 | ||
// appendGauge serializes a gauge ("g") metric.
func appendGauge(buffer []byte, namespace string, globalTags []string, name string, value float64, tags []string, rate float64) []byte {
	return appendFloatMetric(buffer, gaugeSymbol, namespace, globalTags, name, value, tags, rate, -1)
}

// appendCount serializes a count ("c") metric.
func appendCount(buffer []byte, namespace string, globalTags []string, name string, value int64, tags []string, rate float64) []byte {
	return appendIntegerMetric(buffer, countSymbol, namespace, globalTags, name, value, tags, rate)
}

// appendHistogram serializes a histogram ("h") metric.
func appendHistogram(buffer []byte, namespace string, globalTags []string, name string, value float64, tags []string, rate float64) []byte {
	return appendFloatMetric(buffer, histogramSymbol, namespace, globalTags, name, value, tags, rate, -1)
}

// appendDistribution serializes a distribution ("d") metric.
func appendDistribution(buffer []byte, namespace string, globalTags []string, name string, value float64, tags []string, rate float64) []byte {
	return appendFloatMetric(buffer, distributionSymbol, namespace, globalTags, name, value, tags, rate, -1)
}

// appendSet serializes a set ("s") metric.
func appendSet(buffer []byte, namespace string, globalTags []string, name string, value string, tags []string, rate float64) []byte {
	return appendStringMetric(buffer, setSymbol, namespace, globalTags, name, value, tags, rate)
}

// appendTiming serializes a timing ("ms") metric; timings are formatted
// with 6 decimal places rather than the shortest float form.
func appendTiming(buffer []byte, namespace string, globalTags []string, name string, value float64, tags []string, rate float64) []byte {
	return appendFloatMetric(buffer, timingSymbol, namespace, globalTags, name, value, tags, rate, 6)
}
153 | ||
// escapedEventTextLen returns the length text will have once every '\n' is
// escaped to the two-byte sequence `\n` (one extra byte per newline).
func escapedEventTextLen(text string) int {
	return strings.Count(text, "\n") + len(text)
}
157 | ||
// appendEscapedEventText appends text to buffer, replacing every '\n' byte
// with the literal two-byte sequence `\n`.
func appendEscapedEventText(buffer []byte, text string) []byte {
	for i := 0; i < len(text); i++ {
		switch c := text[i]; c {
		case '\n':
			buffer = append(buffer, '\\', 'n')
		default:
			buffer = append(buffer, c)
		}
	}
	return buffer
}
168 | ||
// appendEvent serializes event into the dogstatsd event wire format:
//
//	_e{<title len>,<escaped text len>}:<title>|<text>[|d:..][|h:..][|k:..][|p:..][|s:..][|t:..][|#tags][|c:<id>]
//
// globalTags are written ahead of the event's own tags; optional sections
// are omitted when the corresponding field is unset.
func appendEvent(buffer []byte, event *Event, globalTags []string) []byte {
	// The advertised text length must account for '\n' -> `\n` escaping.
	escapedTextLen := escapedEventTextLen(event.Text)

	buffer = append(buffer, "_e{"...)
	buffer = strconv.AppendInt(buffer, int64(len(event.Title)), 10)
	buffer = append(buffer, tagSeparatorSymbol...)
	buffer = strconv.AppendInt(buffer, int64(escapedTextLen), 10)
	buffer = append(buffer, "}:"...)
	buffer = append(buffer, event.Title...)
	buffer = append(buffer, '|')
	// Only run the escaping pass when the text actually contains newlines.
	if escapedTextLen != len(event.Text) {
		buffer = appendEscapedEventText(buffer, event.Text)
	} else {
		buffer = append(buffer, event.Text...)
	}

	if !event.Timestamp.IsZero() {
		buffer = append(buffer, "|d:"...)
		buffer = strconv.AppendInt(buffer, int64(event.Timestamp.Unix()), 10)
	}

	if len(event.Hostname) != 0 {
		buffer = append(buffer, "|h:"...)
		buffer = append(buffer, event.Hostname...)
	}

	if len(event.AggregationKey) != 0 {
		buffer = append(buffer, "|k:"...)
		buffer = append(buffer, event.AggregationKey...)
	}

	if len(event.Priority) != 0 {
		buffer = append(buffer, "|p:"...)
		buffer = append(buffer, event.Priority...)
	}

	if len(event.SourceTypeName) != 0 {
		buffer = append(buffer, "|s:"...)
		buffer = append(buffer, event.SourceTypeName...)
	}

	if len(event.AlertType) != 0 {
		buffer = append(buffer, "|t:"...)
		buffer = append(buffer, string(event.AlertType)...)
	}

	buffer = appendTags(buffer, globalTags, event.Tags)
	buffer = appendContainerID(buffer)
	return buffer
}
219 | ||
// appendEscapedServiceCheckText appends text to buffer escaping the two
// sequences that would break the service-check wire format: '\n' becomes
// `\n` and the message-delimiter lookalike "m:" becomes `m\:`.
func appendEscapedServiceCheckText(buffer []byte, text string) []byte {
	for i := 0; i < len(text); i++ {
		c := text[i]
		switch {
		case c == '\n':
			buffer = append(buffer, "\\n"...)
		case c == 'm' && i+1 < len(text) && text[i+1] == ':':
			buffer = append(buffer, "m\\:"...)
			i++ // the ':' was consumed as part of the escape
		default:
			buffer = append(buffer, c)
		}
	}
	return buffer
}
233 | ||
// appendServiceCheck serializes serviceCheck into the dogstatsd wire
// format:
//
//	_sc|<name>|<status>[|d:<ts>][|h:<host>][|#tags][|m:<message>][|c:<id>]
//
// globalTags are written ahead of the check's own tags. Note the message
// section comes after the tags, unlike events.
func appendServiceCheck(buffer []byte, serviceCheck *ServiceCheck, globalTags []string) []byte {
	buffer = append(buffer, "_sc|"...)
	buffer = append(buffer, serviceCheck.Name...)
	buffer = append(buffer, '|')
	buffer = strconv.AppendInt(buffer, int64(serviceCheck.Status), 10)

	if !serviceCheck.Timestamp.IsZero() {
		buffer = append(buffer, "|d:"...)
		buffer = strconv.AppendInt(buffer, int64(serviceCheck.Timestamp.Unix()), 10)
	}

	if len(serviceCheck.Hostname) != 0 {
		buffer = append(buffer, "|h:"...)
		buffer = append(buffer, serviceCheck.Hostname...)
	}

	buffer = appendTags(buffer, globalTags, serviceCheck.Tags)

	if len(serviceCheck.Message) != 0 {
		buffer = append(buffer, "|m:"...)
		// The message must not contain a raw newline or "m:" sequence.
		buffer = appendEscapedServiceCheckText(buffer, serviceCheck.Message)
	}

	buffer = appendContainerID(buffer)
	return buffer
}
260 | ||
// appendSeparator terminates a datagram with the '\n' separator used
// between messages in a payload.
func appendSeparator(buffer []byte) []byte {
	return append(buffer, "\n"...)
}
264 | ||
265 | func appendContainerID(buffer []byte) []byte { | |
266 | if containerID := getContainerID(); len(containerID) > 0 { | |
267 | buffer = append(buffer, "|c:"...) | |
268 | buffer = append(buffer, containerID...) | |
269 | } | |
270 | return buffer | |
271 | } |
0 | package statsd | |
1 | ||
2 | import ( | |
3 | "fmt" | |
4 | "testing" | |
5 | "time" | |
6 | ) | |
7 | ||
// payloadSink receives the benchmark output so the compiler cannot
// dead-code-eliminate the formatting calls.
var payloadSink []byte
9 | ||
// benchmarkFormat measures the serialization helpers with tagsNumber tags
// attached to each metric, plus one event and one service check per
// iteration.
func benchmarkFormat(b *testing.B, tagsNumber int) {
	payloadSink = make([]byte, 0, 1024*8)
	var tags []string
	for i := 0; i < tagsNumber; i++ {
		// NOTE(review): tags carry a trailing "\n" — presumably to
		// exercise the newline-stripping slow path in appendTags; confirm.
		tags = append(tags, fmt.Sprintf("tag%d:tag%d\n", i, i))
	}
	event := &Event{
		Title:          "EvenTitle",
		Text:           "EventText",
		Timestamp:      time.Date(2016, time.August, 15, 0, 0, 0, 0, time.UTC),
		Hostname:       "hostname",
		AggregationKey: "aggregationKey",
		Priority:       "priority",
		SourceTypeName: "SourceTypeName",
		AlertType:      "alertType",
		Tags:           tags,
	}
	serviceCheck := &ServiceCheck{
		Name:      "service.check",
		Status:    Ok,
		Timestamp: time.Date(2016, time.August, 15, 0, 0, 0, 0, time.UTC),
		Hostname:  "hostname",
		Message:   "message",
		Tags:      []string{"tag1:tag1"},
	}
	b.ResetTimer()
	for n := 0; n < b.N; n++ {
		// Each call resets the sink to length zero so the pre-allocated
		// capacity is reused and steady-state iterations do not allocate.
		payloadSink = appendGauge(payloadSink[:0], "namespace", []string{}, "metric", 1, tags, 0.1)
		payloadSink = appendCount(payloadSink[:0], "namespace", []string{}, "metric", 1, tags, 0.1)
		payloadSink = appendHistogram(payloadSink[:0], "namespace", []string{}, "metric", 1, tags, 0.1)
		payloadSink = appendDistribution(payloadSink[:0], "namespace", []string{}, "metric", 1, tags, 0.1)
		payloadSink = appendSet(payloadSink[:0], "namespace", []string{}, "metric", "setelement", tags, 0.1)
		payloadSink = appendTiming(payloadSink[:0], "namespace", []string{}, "metric", 1, tags, 0.1)
		payloadSink = appendEvent(payloadSink[:0], event, []string{})
		payloadSink = appendServiceCheck(payloadSink[:0], serviceCheck, []string{})
	}
}
47 | ||
// BenchmarkFormatN runs the serialization benchmark with N tags per metric,
// so allocation/throughput scaling with tag count is visible in the output.
func BenchmarkFormat0(b *testing.B) { benchmarkFormat(b, 0) }
func BenchmarkFormat1(b *testing.B) { benchmarkFormat(b, 1) }
func BenchmarkFormat5(b *testing.B) { benchmarkFormat(b, 5) }
func BenchmarkFormat10(b *testing.B) { benchmarkFormat(b, 10) }
func BenchmarkFormat50(b *testing.B) { benchmarkFormat(b, 50) }
func BenchmarkFormat100(b *testing.B) { benchmarkFormat(b, 100) }
0 | package statsd | |
1 | ||
2 | import ( | |
3 | "testing" | |
4 | "time" | |
5 | ||
6 | "github.com/stretchr/testify/assert" | |
7 | ) | |
8 | ||
9 | func TestFormatAppendTags(t *testing.T) { | |
10 | var buffer []byte | |
11 | buffer = appendTags(buffer, []string{"global:tag"}, []string{"tag:tag", "tag2:tag2"}) | |
12 | assert.Equal(t, `|#global:tag,tag:tag,tag2:tag2`, string(buffer)) | |
13 | ||
14 | var buffer2 []byte | |
15 | buffer2 = appendTags(buffer2, []string{"global:tag"}, nil) | |
16 | assert.Equal(t, `|#global:tag`, string(buffer2)) | |
17 | ||
18 | var buffer3 []byte | |
19 | buffer3 = appendTags(buffer3, nil, []string{"tag:tag", "tag2:tag2"}) | |
20 | assert.Equal(t, `|#tag:tag,tag2:tag2`, string(buffer3)) | |
21 | ||
22 | var buffer4 []byte | |
23 | buffer4 = appendTags(buffer4, nil, nil) | |
24 | assert.Equal(t, "", string(buffer4)) | |
25 | } | |
26 | ||
27 | func TestFormatAppendTagsAggregated(t *testing.T) { | |
28 | var buffer []byte | |
29 | buffer = appendTagsAggregated(buffer, []string{"global:tag"}, "tag:tag,tag2:tag2") | |
30 | assert.Equal(t, `|#global:tag,tag:tag,tag2:tag2`, string(buffer)) | |
31 | ||
32 | var buffer2 []byte | |
33 | buffer2 = appendTagsAggregated(buffer2, []string{"global:tag"}, "") | |
34 | assert.Equal(t, `|#global:tag`, string(buffer2)) | |
35 | ||
36 | var buffer3 []byte | |
37 | buffer3 = appendTagsAggregated(buffer3, nil, "tag:tag,tag2:tag2") | |
38 | assert.Equal(t, `|#tag:tag,tag2:tag2`, string(buffer3)) | |
39 | ||
40 | var buffer4 []byte | |
41 | buffer4 = appendTagsAggregated(buffer4, nil, "") | |
42 | assert.Equal(t, "", string(buffer4)) | |
43 | } | |
44 | ||
45 | func TestFormatAppendGauge(t *testing.T) { | |
46 | var buffer []byte | |
47 | buffer = appendGauge(buffer, "namespace.", []string{"global:tag"}, "gauge", 1., []string{"tag:tag"}, 1) | |
48 | assert.Equal(t, `namespace.gauge:1|g|#global:tag,tag:tag`, string(buffer)) | |
49 | } | |
50 | ||
51 | func TestFormatAppendCount(t *testing.T) { | |
52 | var buffer []byte | |
53 | buffer = appendCount(buffer, "namespace.", []string{"global:tag"}, "count", 2, []string{"tag:tag"}, 1) | |
54 | assert.Equal(t, `namespace.count:2|c|#global:tag,tag:tag`, string(buffer)) | |
55 | } | |
56 | ||
57 | func TestFormatAppendHistogram(t *testing.T) { | |
58 | var buffer []byte | |
59 | buffer = appendHistogram(buffer, "namespace.", []string{"global:tag"}, "histogram", 3., []string{"tag:tag"}, 1) | |
60 | assert.Equal(t, `namespace.histogram:3|h|#global:tag,tag:tag`, string(buffer)) | |
61 | } | |
62 | ||
63 | func TestFormatAppendDistribution(t *testing.T) { | |
64 | var buffer []byte | |
65 | buffer = appendDistribution(buffer, "namespace.", []string{"global:tag"}, "distribution", 4., []string{"tag:tag"}, 1) | |
66 | assert.Equal(t, `namespace.distribution:4|d|#global:tag,tag:tag`, string(buffer)) | |
67 | } | |
68 | ||
69 | func TestFormatAppendSet(t *testing.T) { | |
70 | var buffer []byte | |
71 | buffer = appendSet(buffer, "namespace.", []string{"global:tag"}, "set", "five", []string{"tag:tag"}, 1) | |
72 | assert.Equal(t, `namespace.set:five|s|#global:tag,tag:tag`, string(buffer)) | |
73 | } | |
74 | ||
75 | func TestFormatAppendTiming(t *testing.T) { | |
76 | var buffer []byte | |
77 | buffer = appendTiming(buffer, "namespace.", []string{"global:tag"}, "timing", 6., []string{"tag:tag"}, 1) | |
78 | assert.Equal(t, `namespace.timing:6.000000|ms|#global:tag,tag:tag`, string(buffer)) | |
79 | } | |
80 | ||
81 | func TestFormatNoTag(t *testing.T) { | |
82 | var buffer []byte | |
83 | buffer = appendGauge(buffer, "", []string{}, "gauge", 1., []string{}, 1) | |
84 | assert.Equal(t, `gauge:1|g`, string(buffer)) | |
85 | } | |
86 | ||
87 | func TestFormatOneTag(t *testing.T) { | |
88 | var buffer []byte | |
89 | buffer = appendGauge(buffer, "", []string{}, "gauge", 1., []string{"tag1:tag1"}, 1) | |
90 | assert.Equal(t, `gauge:1|g|#tag1:tag1`, string(buffer)) | |
91 | } | |
92 | ||
93 | func TestFormatTwoTag(t *testing.T) { | |
94 | var buffer []byte | |
95 | buffer = appendGauge(buffer, "", []string{}, "metric", 1., []string{"tag1:tag1", "tag2:tag2"}, 1) | |
96 | assert.Equal(t, `metric:1|g|#tag1:tag1,tag2:tag2`, string(buffer)) | |
97 | } | |
98 | ||
99 | func TestFormatRate(t *testing.T) { | |
100 | var buffer []byte | |
101 | buffer = appendGauge(buffer, "", []string{}, "metric", 1., []string{}, 0.1) | |
102 | assert.Equal(t, `metric:1|g|@0.1`, string(buffer)) | |
103 | } | |
104 | ||
105 | func TestFormatRateAndTag(t *testing.T) { | |
106 | var buffer []byte | |
107 | buffer = appendGauge(buffer, "", []string{}, "metric", 1., []string{"tag1:tag1"}, 0.1) | |
108 | assert.Equal(t, `metric:1|g|@0.1|#tag1:tag1`, string(buffer)) | |
109 | } | |
110 | ||
111 | func TestFormatNil(t *testing.T) { | |
112 | var buffer []byte | |
113 | buffer = appendGauge(buffer, "", nil, "metric", 1., nil, 1) | |
114 | assert.Equal(t, `metric:1|g`, string(buffer)) | |
115 | } | |
116 | ||
117 | func TestFormatTagRemoveNewLines(t *testing.T) { | |
118 | var buffer []byte | |
119 | buffer = appendGauge(buffer, "", []string{"tag\n:d\nog\n"}, "metric", 1., []string{"\ntag\n:d\nog2\n"}, 0.1) | |
120 | assert.Equal(t, `metric:1|g|@0.1|#tag:dog,tag:dog2`, string(buffer)) | |
121 | } | |
122 | ||
123 | func TestFormatEvent(t *testing.T) { | |
124 | var buffer []byte | |
125 | buffer = appendEvent(buffer, &Event{ | |
126 | Title: "EvenTitle", | |
127 | Text: "EventText", | |
128 | }, []string{}) | |
129 | assert.Equal(t, `_e{9,9}:EvenTitle|EventText`, string(buffer)) | |
130 | } | |
131 | ||
132 | func TestFormatEventEscapeText(t *testing.T) { | |
133 | var buffer []byte | |
134 | buffer = appendEvent(buffer, &Event{ | |
135 | Title: "EvenTitle", | |
136 | Text: "\nEventText\nLine2\n\nLine4\n", | |
137 | }, []string{}) | |
138 | assert.Equal(t, `_e{9,29}:EvenTitle|\nEventText\nLine2\n\nLine4\n`, string(buffer)) | |
139 | } | |
140 | ||
141 | func TestFormatEventTimeStamp(t *testing.T) { | |
142 | var buffer []byte | |
143 | buffer = appendEvent(buffer, &Event{ | |
144 | Title: "EvenTitle", | |
145 | Text: "EventText", | |
146 | Timestamp: time.Date(2016, time.August, 15, 0, 0, 0, 0, time.UTC), | |
147 | }, []string{}) | |
148 | assert.Equal(t, `_e{9,9}:EvenTitle|EventText|d:1471219200`, string(buffer)) | |
149 | } | |
150 | ||
151 | func TestFormatEventHostname(t *testing.T) { | |
152 | var buffer []byte | |
153 | buffer = appendEvent(buffer, &Event{ | |
154 | Title: "EvenTitle", | |
155 | Text: "EventText", | |
156 | Hostname: "hostname", | |
157 | }, []string{}) | |
158 | assert.Equal(t, `_e{9,9}:EvenTitle|EventText|h:hostname`, string(buffer)) | |
159 | } | |
160 | ||
161 | func TestFormatEventAggregationKey(t *testing.T) { | |
162 | var buffer []byte | |
163 | buffer = appendEvent(buffer, &Event{ | |
164 | Title: "EvenTitle", | |
165 | Text: "EventText", | |
166 | AggregationKey: "aggregationKey", | |
167 | }, []string{}) | |
168 | assert.Equal(t, `_e{9,9}:EvenTitle|EventText|k:aggregationKey`, string(buffer)) | |
169 | } | |
170 | ||
171 | func TestFormatEventPriority(t *testing.T) { | |
172 | var buffer []byte | |
173 | buffer = appendEvent(buffer, &Event{ | |
174 | Title: "EvenTitle", | |
175 | Text: "EventText", | |
176 | Priority: "priority", | |
177 | }, []string{}) | |
178 | assert.Equal(t, `_e{9,9}:EvenTitle|EventText|p:priority`, string(buffer)) | |
179 | } | |
180 | ||
181 | func TestFormatEventSourceTypeName(t *testing.T) { | |
182 | var buffer []byte | |
183 | buffer = appendEvent(buffer, &Event{ | |
184 | Title: "EvenTitle", | |
185 | Text: "EventText", | |
186 | SourceTypeName: "sourceTypeName", | |
187 | }, []string{}) | |
188 | assert.Equal(t, `_e{9,9}:EvenTitle|EventText|s:sourceTypeName`, string(buffer)) | |
189 | } | |
190 | ||
191 | func TestFormatEventAlertType(t *testing.T) { | |
192 | var buffer []byte | |
193 | buffer = appendEvent(buffer, &Event{ | |
194 | Title: "EvenTitle", | |
195 | Text: "EventText", | |
196 | AlertType: "alertType", | |
197 | }, []string{}) | |
198 | assert.Equal(t, `_e{9,9}:EvenTitle|EventText|t:alertType`, string(buffer)) | |
199 | } | |
200 | ||
201 | func TestFormatEventOneTag(t *testing.T) { | |
202 | var buffer []byte | |
203 | buffer = appendEvent(buffer, &Event{ | |
204 | Title: "EvenTitle", | |
205 | Text: "EventText", | |
206 | }, []string{"tag:test"}) | |
207 | assert.Equal(t, `_e{9,9}:EvenTitle|EventText|#tag:test`, string(buffer)) | |
208 | } | |
209 | ||
210 | func TestFormatEventTwoTag(t *testing.T) { | |
211 | var buffer []byte | |
212 | buffer = appendEvent(buffer, &Event{ | |
213 | Title: "EvenTitle", | |
214 | Text: "EventText", | |
215 | Tags: []string{"tag1:test"}, | |
216 | }, []string{"tag2:test"}) | |
217 | assert.Equal(t, `_e{9,9}:EvenTitle|EventText|#tag2:test,tag1:test`, string(buffer)) | |
218 | } | |
219 | ||
220 | func TestFormatEventAllOptions(t *testing.T) { | |
221 | var buffer []byte | |
222 | buffer = appendEvent(buffer, &Event{ | |
223 | Title: "EvenTitle", | |
224 | Text: "EventText", | |
225 | Timestamp: time.Date(2016, time.August, 15, 0, 0, 0, 0, time.UTC), | |
226 | Hostname: "hostname", | |
227 | AggregationKey: "aggregationKey", | |
228 | Priority: "priority", | |
229 | SourceTypeName: "SourceTypeName", | |
230 | AlertType: "alertType", | |
231 | Tags: []string{"tag:normal"}, | |
232 | }, []string{"tag:global"}) | |
233 | assert.Equal(t, `_e{9,9}:EvenTitle|EventText|d:1471219200|h:hostname|k:aggregationKey|p:priority|s:SourceTypeName|t:alertType|#tag:global,tag:normal`, string(buffer)) | |
234 | } | |
235 | ||
236 | func TestFormatEventNil(t *testing.T) { | |
237 | var buffer []byte | |
238 | buffer = appendEvent(buffer, &Event{}, []string{}) | |
239 | assert.Equal(t, `_e{0,0}:|`, string(buffer)) | |
240 | } | |
241 | ||
242 | func TestFormatServiceCheck(t *testing.T) { | |
243 | var buffer []byte | |
244 | buffer = appendServiceCheck(buffer, &ServiceCheck{ | |
245 | Name: "service.check", | |
246 | Status: Ok, | |
247 | }, []string{}) | |
248 | assert.Equal(t, `_sc|service.check|0`, string(buffer)) | |
249 | } | |
250 | ||
251 | func TestFormatServiceCheckEscape(t *testing.T) { | |
252 | var buffer []byte | |
253 | buffer = appendServiceCheck(buffer, &ServiceCheck{ | |
254 | Name: "service.check", | |
255 | Status: Ok, | |
256 | Message: "\n\nmessagem:hello...\n\nm:aa\nm:m", | |
257 | }, []string{}) | |
258 | assert.Equal(t, `_sc|service.check|0|m:\n\nmessagem\:hello...\n\nm\:aa\nm\:m`, string(buffer)) | |
259 | } | |
260 | ||
261 | func TestFormatServiceCheckTimestamp(t *testing.T) { | |
262 | var buffer []byte | |
263 | buffer = appendServiceCheck(buffer, &ServiceCheck{ | |
264 | Name: "service.check", | |
265 | Status: Ok, | |
266 | Timestamp: time.Date(2016, time.August, 15, 0, 0, 0, 0, time.UTC), | |
267 | }, []string{}) | |
268 | assert.Equal(t, `_sc|service.check|0|d:1471219200`, string(buffer)) | |
269 | } | |
270 | ||
271 | func TestFormatServiceCheckHostname(t *testing.T) { | |
272 | var buffer []byte | |
273 | buffer = appendServiceCheck(buffer, &ServiceCheck{ | |
274 | Name: "service.check", | |
275 | Status: Ok, | |
276 | Hostname: "hostname", | |
277 | }, []string{}) | |
278 | assert.Equal(t, `_sc|service.check|0|h:hostname`, string(buffer)) | |
279 | } | |
280 | ||
281 | func TestFormatServiceCheckMessage(t *testing.T) { | |
282 | var buffer []byte | |
283 | buffer = appendServiceCheck(buffer, &ServiceCheck{ | |
284 | Name: "service.check", | |
285 | Status: Ok, | |
286 | Message: "message", | |
287 | }, []string{}) | |
288 | assert.Equal(t, `_sc|service.check|0|m:message`, string(buffer)) | |
289 | } | |
290 | ||
291 | func TestFormatServiceCheckOneTag(t *testing.T) { | |
292 | var buffer []byte | |
293 | buffer = appendServiceCheck(buffer, &ServiceCheck{ | |
294 | Name: "service.check", | |
295 | Status: Ok, | |
296 | Tags: []string{"tag:tag"}, | |
297 | }, []string{}) | |
298 | assert.Equal(t, `_sc|service.check|0|#tag:tag`, string(buffer)) | |
299 | } | |
300 | ||
301 | func TestFormatServiceCheckTwoTag(t *testing.T) { | |
302 | var buffer []byte | |
303 | buffer = appendServiceCheck(buffer, &ServiceCheck{ | |
304 | Name: "service.check", | |
305 | Status: Ok, | |
306 | Tags: []string{"tag1:tag1"}, | |
307 | }, []string{"tag2:tag2"}) | |
308 | assert.Equal(t, `_sc|service.check|0|#tag2:tag2,tag1:tag1`, string(buffer)) | |
309 | } | |
310 | ||
311 | func TestFormatServiceCheckAllOptions(t *testing.T) { | |
312 | var buffer []byte | |
313 | buffer = appendServiceCheck(buffer, &ServiceCheck{ | |
314 | Name: "service.check", | |
315 | Status: Ok, | |
316 | Timestamp: time.Date(2016, time.August, 15, 0, 0, 0, 0, time.UTC), | |
317 | Hostname: "hostname", | |
318 | Message: "message", | |
319 | Tags: []string{"tag1:tag1"}, | |
320 | }, []string{"tag2:tag2"}) | |
321 | assert.Equal(t, `_sc|service.check|0|d:1471219200|h:hostname|#tag2:tag2,tag1:tag1|m:message`, string(buffer)) | |
322 | } | |
323 | ||
324 | func TestFormatServiceCheckNil(t *testing.T) { | |
325 | var buffer []byte | |
326 | buffer = appendServiceCheck(buffer, &ServiceCheck{}, nil) | |
327 | assert.Equal(t, `_sc||0`, string(buffer)) | |
328 | } | |
329 | ||
330 | func TestFormatSeparator(t *testing.T) { | |
331 | var buffer []byte | |
332 | buffer = appendSeparator(buffer) | |
333 | assert.Equal(t, "\n", string(buffer)) | |
334 | } |
0 | package statsd | |
1 | ||
2 | import ( | |
3 | "math" | |
4 | "sync" | |
5 | "sync/atomic" | |
6 | ) | |
7 | ||
8 | /* | |
9 | Those are metrics type that can be aggregated on the client side: | |
10 | - Gauge | |
11 | - Count | |
12 | - Set | |
13 | */ | |
14 | ||
// countMetric accumulates a count metric client-side between two flushes.
// value is the first field so it is 64-bit aligned, which sync/atomic
// requires for 64-bit operations on 32-bit platforms.
type countMetric struct {
	value int64    // running sum, mutated atomically by sample
	name  string   // metric name
	tags  []string // tags attached to the flushed metric
}
20 | ||
21 | func newCountMetric(name string, value int64, tags []string) *countMetric { | |
22 | return &countMetric{ | |
23 | value: value, | |
24 | name: name, | |
25 | tags: tags, | |
26 | } | |
27 | } | |
28 | ||
// sample atomically adds v to the running count; safe for concurrent use.
func (c *countMetric) sample(v int64) {
	atomic.AddInt64(&c.value, v)
}
32 | ||
33 | func (c *countMetric) flushUnsafe() metric { | |
34 | return metric{ | |
35 | metricType: count, | |
36 | name: c.name, | |
37 | tags: c.tags, | |
38 | rate: 1, | |
39 | ivalue: c.value, | |
40 | } | |
41 | } | |
42 | ||
43 | // Gauge | |
44 | ||
// gaugeMetric keeps only the most recently sampled gauge value. The float64
// is stored as its IEEE-754 bit pattern (math.Float64bits) in a uint64 so
// it can be updated with sync/atomic, which has no float64 variants.
type gaugeMetric struct {
	value uint64   // math.Float64bits of the latest sample
	name  string   // metric name
	tags  []string // tags attached to the flushed metric
}
50 | ||
51 | func newGaugeMetric(name string, value float64, tags []string) *gaugeMetric { | |
52 | return &gaugeMetric{ | |
53 | value: math.Float64bits(value), | |
54 | name: name, | |
55 | tags: tags, | |
56 | } | |
57 | } | |
58 | ||
// sample atomically replaces the stored value with v (last write wins);
// safe for concurrent use.
func (g *gaugeMetric) sample(v float64) {
	atomic.StoreUint64(&g.value, math.Float64bits(v))
}
62 | ||
63 | func (g *gaugeMetric) flushUnsafe() metric { | |
64 | return metric{ | |
65 | metricType: gauge, | |
66 | name: g.name, | |
67 | tags: g.tags, | |
68 | rate: 1, | |
69 | fvalue: math.Float64frombits(g.value), | |
70 | } | |
71 | } | |
72 | ||
73 | // Set | |
74 | ||
// setMetric tracks the distinct string values sampled for a set metric.
// Map mutation cannot be done atomically, so unlike count/gauge this type
// relies on the embedded mutex.
type setMetric struct {
	data map[string]struct{} // distinct values observed so far
	name string
	tags []string
	sync.Mutex // guards data
}
81 | ||
82 | func newSetMetric(name string, value string, tags []string) *setMetric { | |
83 | set := &setMetric{ | |
84 | data: map[string]struct{}{}, | |
85 | name: name, | |
86 | tags: tags, | |
87 | } | |
88 | set.data[value] = struct{}{} | |
89 | return set | |
90 | } | |
91 | ||
92 | func (s *setMetric) sample(v string) { | |
93 | s.Lock() | |
94 | defer s.Unlock() | |
95 | s.data[v] = struct{}{} | |
96 | } | |
97 | ||
98 | // Sets are aggregated on the agent side too. We flush the keys so a set from | |
99 | // multiple application can be correctly aggregated on the agent side. | |
100 | func (s *setMetric) flushUnsafe() []metric { | |
101 | if len(s.data) == 0 { | |
102 | return nil | |
103 | } | |
104 | ||
105 | metrics := make([]metric, len(s.data)) | |
106 | i := 0 | |
107 | for value := range s.data { | |
108 | metrics[i] = metric{ | |
109 | metricType: set, | |
110 | name: s.name, | |
111 | tags: s.tags, | |
112 | rate: 1, | |
113 | svalue: value, | |
114 | } | |
115 | i++ | |
116 | } | |
117 | return metrics | |
118 | } | |
119 | ||
120 | // Histograms, Distributions and Timings | |
121 | ||
// bufferedMetric buffers every sampled value for the metric types that
// cannot be reduced to a single number client-side (histograms,
// distributions and timings); all buffered values are handed over to the
// serializer at flush time.
type bufferedMetric struct {
	sync.Mutex // guards data

	data []float64 // buffered sampled values, in arrival order
	name string
	// Histograms and Distributions store tags as one string since we need
	// to compute its size multiple time when serializing.
	tags string
	mtype metricType // histogramAggregated, distributionAggregated or timingAggregated
}
132 | ||
133 | func (s *bufferedMetric) sample(v float64) { | |
134 | s.Lock() | |
135 | defer s.Unlock() | |
136 | s.data = append(s.data, v) | |
137 | } | |
138 | ||
// flushUnsafe snapshots the buffered values for serialization. The "unsafe"
// suffix means the caller must guarantee no concurrent sample calls (e.g.
// by holding the lock or having exclusive ownership of the metric): data is
// read here without taking the mutex, and the returned metric aliases the
// same backing slice.
func (s *bufferedMetric) flushUnsafe() metric {
	return metric{
		metricType: s.mtype,
		name:       s.name,
		stags:      s.tags,
		rate:       1,
		fvalues:    s.data,
	}
}
148 | ||
149 | type histogramMetric = bufferedMetric | |
150 | ||
151 | func newHistogramMetric(name string, value float64, stringTags string) *histogramMetric { | |
152 | return &histogramMetric{ | |
153 | data: []float64{value}, | |
154 | name: name, | |
155 | tags: stringTags, | |
156 | mtype: histogramAggregated, | |
157 | } | |
158 | } | |
159 | ||
160 | type distributionMetric = bufferedMetric | |
161 | ||
162 | func newDistributionMetric(name string, value float64, stringTags string) *distributionMetric { | |
163 | return &distributionMetric{ | |
164 | data: []float64{value}, | |
165 | name: name, | |
166 | tags: stringTags, | |
167 | mtype: distributionAggregated, | |
168 | } | |
169 | } | |
170 | ||
171 | type timingMetric = bufferedMetric | |
172 | ||
173 | func newTimingMetric(name string, value float64, stringTags string) *timingMetric { | |
174 | return &timingMetric{ | |
175 | data: []float64{value}, | |
176 | name: name, | |
177 | tags: stringTags, | |
178 | mtype: timingAggregated, | |
179 | } | |
180 | } |
0 | package statsd | |
1 | ||
2 | import ( | |
3 | "math" | |
4 | "sort" | |
5 | "strings" | |
6 | "testing" | |
7 | ||
8 | "github.com/stretchr/testify/assert" | |
9 | "github.com/stretchr/testify/require" | |
10 | ) | |
11 | ||
12 | func TestNewCountMetric(t *testing.T) { | |
13 | c := newCountMetric("test", 21, []string{"tag1", "tag2"}) | |
14 | assert.Equal(t, c.value, int64(21)) | |
15 | assert.Equal(t, c.name, "test") | |
16 | assert.Equal(t, c.tags, []string{"tag1", "tag2"}) | |
17 | } | |
18 | ||
19 | func TestCountMetricSample(t *testing.T) { | |
20 | c := newCountMetric("test", 21, []string{"tag1", "tag2"}) | |
21 | c.sample(12) | |
22 | assert.Equal(t, c.value, int64(33)) | |
23 | assert.Equal(t, c.name, "test") | |
24 | assert.Equal(t, c.tags, []string{"tag1", "tag2"}) | |
25 | } | |
26 | ||
27 | func TestFlushUnsafeCountMetricSample(t *testing.T) { | |
28 | c := newCountMetric("test", 21, []string{"tag1", "tag2"}) | |
29 | m := c.flushUnsafe() | |
30 | assert.Equal(t, m.metricType, count) | |
31 | assert.Equal(t, m.ivalue, int64(21)) | |
32 | assert.Equal(t, m.name, "test") | |
33 | assert.Equal(t, m.tags, []string{"tag1", "tag2"}) | |
34 | ||
35 | c.sample(12) | |
36 | m = c.flushUnsafe() | |
37 | assert.Equal(t, m.metricType, count) | |
38 | assert.Equal(t, m.ivalue, int64(33)) | |
39 | assert.Equal(t, m.name, "test") | |
40 | assert.Equal(t, m.tags, []string{"tag1", "tag2"}) | |
41 | } | |
42 | ||
43 | func TestNewGaugeMetric(t *testing.T) { | |
44 | g := newGaugeMetric("test", 21, []string{"tag1", "tag2"}) | |
45 | assert.Equal(t, math.Float64frombits(g.value), float64(21)) | |
46 | assert.Equal(t, g.name, "test") | |
47 | assert.Equal(t, g.tags, []string{"tag1", "tag2"}) | |
48 | } | |
49 | ||
50 | func TestGaugeMetricSample(t *testing.T) { | |
51 | g := newGaugeMetric("test", 21, []string{"tag1", "tag2"}) | |
52 | g.sample(12) | |
53 | assert.Equal(t, math.Float64frombits(g.value), float64(12)) | |
54 | assert.Equal(t, g.name, "test") | |
55 | assert.Equal(t, g.tags, []string{"tag1", "tag2"}) | |
56 | } | |
57 | ||
58 | func TestFlushUnsafeGaugeMetricSample(t *testing.T) { | |
59 | g := newGaugeMetric("test", 21, []string{"tag1", "tag2"}) | |
60 | m := g.flushUnsafe() | |
61 | assert.Equal(t, m.metricType, gauge) | |
62 | assert.Equal(t, m.fvalue, float64(21)) | |
63 | assert.Equal(t, m.name, "test") | |
64 | assert.Equal(t, m.tags, []string{"tag1", "tag2"}) | |
65 | ||
66 | g.sample(12) | |
67 | m = g.flushUnsafe() | |
68 | assert.Equal(t, m.metricType, gauge) | |
69 | assert.Equal(t, m.fvalue, float64(12)) | |
70 | assert.Equal(t, m.name, "test") | |
71 | assert.Equal(t, m.tags, []string{"tag1", "tag2"}) | |
72 | } | |
73 | ||
74 | func TestNewSetMetric(t *testing.T) { | |
75 | s := newSetMetric("test", "value1", []string{"tag1", "tag2"}) | |
76 | assert.Equal(t, s.data, map[string]struct{}{"value1": struct{}{}}) | |
77 | assert.Equal(t, s.name, "test") | |
78 | assert.Equal(t, s.tags, []string{"tag1", "tag2"}) | |
79 | } | |
80 | ||
81 | func TestSetMetricSample(t *testing.T) { | |
82 | s := newSetMetric("test", "value1", []string{"tag1", "tag2"}) | |
83 | s.sample("value2") | |
84 | assert.Equal(t, s.data, map[string]struct{}{"value1": struct{}{}, "value2": struct{}{}}) | |
85 | assert.Equal(t, s.name, "test") | |
86 | assert.Equal(t, s.tags, []string{"tag1", "tag2"}) | |
87 | } | |
88 | ||
89 | func TestFlushUnsafeSetMetricSample(t *testing.T) { | |
90 | s := newSetMetric("test", "value1", []string{"tag1", "tag2"}) | |
91 | m := s.flushUnsafe() | |
92 | ||
93 | require.Len(t, m, 1) | |
94 | ||
95 | assert.Equal(t, m[0].metricType, set) | |
96 | assert.Equal(t, m[0].svalue, "value1") | |
97 | assert.Equal(t, m[0].name, "test") | |
98 | assert.Equal(t, m[0].tags, []string{"tag1", "tag2"}) | |
99 | ||
100 | s.sample("value1") | |
101 | s.sample("value2") | |
102 | m = s.flushUnsafe() | |
103 | ||
104 | sort.Slice(m, func(i, j int) bool { | |
105 | return strings.Compare(m[i].svalue, m[j].svalue) != 1 | |
106 | }) | |
107 | ||
108 | require.Len(t, m, 2) | |
109 | assert.Equal(t, m[0].metricType, set) | |
110 | assert.Equal(t, m[0].svalue, "value1") | |
111 | assert.Equal(t, m[0].name, "test") | |
112 | assert.Equal(t, m[0].tags, []string{"tag1", "tag2"}) | |
113 | assert.Equal(t, m[1].metricType, set) | |
114 | assert.Equal(t, m[1].svalue, "value2") | |
115 | assert.Equal(t, m[1].name, "test") | |
116 | assert.Equal(t, m[1].tags, []string{"tag1", "tag2"}) | |
117 | } | |
118 | ||
119 | func TestNewHistogramMetric(t *testing.T) { | |
120 | s := newHistogramMetric("test", 1.0, "tag1,tag2") | |
121 | assert.Equal(t, s.data, []float64{1.0}) | |
122 | assert.Equal(t, s.name, "test") | |
123 | assert.Equal(t, s.tags, "tag1,tag2") | |
124 | assert.Equal(t, s.mtype, histogramAggregated) | |
125 | } | |
126 | ||
127 | func TestHistogramMetricSample(t *testing.T) { | |
128 | s := newHistogramMetric("test", 1.0, "tag1,tag2") | |
129 | s.sample(123.45) | |
130 | assert.Equal(t, s.data, []float64{1.0, 123.45}) | |
131 | assert.Equal(t, s.name, "test") | |
132 | assert.Equal(t, s.tags, "tag1,tag2") | |
133 | assert.Equal(t, s.mtype, histogramAggregated) | |
134 | } | |
135 | ||
136 | func TestFlushUnsafeHistogramMetricSample(t *testing.T) { | |
137 | s := newHistogramMetric("test", 1.0, "tag1,tag2") | |
138 | m := s.flushUnsafe() | |
139 | ||
140 | assert.Equal(t, m.metricType, histogramAggregated) | |
141 | assert.Equal(t, m.fvalues, []float64{1.0}) | |
142 | assert.Equal(t, m.name, "test") | |
143 | assert.Equal(t, m.stags, "tag1,tag2") | |
144 | assert.Nil(t, m.tags) | |
145 | ||
146 | s.sample(21) | |
147 | s.sample(123.45) | |
148 | m = s.flushUnsafe() | |
149 | ||
150 | assert.Equal(t, m.metricType, histogramAggregated) | |
151 | assert.Equal(t, m.fvalues, []float64{1.0, 21.0, 123.45}) | |
152 | assert.Equal(t, m.name, "test") | |
153 | assert.Equal(t, m.stags, "tag1,tag2") | |
154 | assert.Nil(t, m.tags) | |
155 | } | |
156 | ||
157 | func TestNewDistributionMetric(t *testing.T) { | |
158 | s := newDistributionMetric("test", 1.0, "tag1,tag2") | |
159 | assert.Equal(t, s.data, []float64{1.0}) | |
160 | assert.Equal(t, s.name, "test") | |
161 | assert.Equal(t, s.tags, "tag1,tag2") | |
162 | assert.Equal(t, s.mtype, distributionAggregated) | |
163 | } | |
164 | ||
165 | func TestDistributionMetricSample(t *testing.T) { | |
166 | s := newDistributionMetric("test", 1.0, "tag1,tag2") | |
167 | s.sample(123.45) | |
168 | assert.Equal(t, s.data, []float64{1.0, 123.45}) | |
169 | assert.Equal(t, s.name, "test") | |
170 | assert.Equal(t, s.tags, "tag1,tag2") | |
171 | assert.Equal(t, s.mtype, distributionAggregated) | |
172 | } | |
173 | ||
174 | func TestFlushUnsafeDistributionMetricSample(t *testing.T) { | |
175 | s := newDistributionMetric("test", 1.0, "tag1,tag2") | |
176 | m := s.flushUnsafe() | |
177 | ||
178 | assert.Equal(t, m.metricType, distributionAggregated) | |
179 | assert.Equal(t, m.fvalues, []float64{1.0}) | |
180 | assert.Equal(t, m.name, "test") | |
181 | assert.Equal(t, m.stags, "tag1,tag2") | |
182 | assert.Nil(t, m.tags) | |
183 | ||
184 | s.sample(21) | |
185 | s.sample(123.45) | |
186 | m = s.flushUnsafe() | |
187 | ||
188 | assert.Equal(t, m.metricType, distributionAggregated) | |
189 | assert.Equal(t, m.fvalues, []float64{1.0, 21.0, 123.45}) | |
190 | assert.Equal(t, m.name, "test") | |
191 | assert.Equal(t, m.stags, "tag1,tag2") | |
192 | assert.Nil(t, m.tags) | |
193 | } | |
194 | ||
195 | func TestNewTimingMetric(t *testing.T) { | |
196 | s := newTimingMetric("test", 1.0, "tag1,tag2") | |
197 | assert.Equal(t, s.data, []float64{1.0}) | |
198 | assert.Equal(t, s.name, "test") | |
199 | assert.Equal(t, s.tags, "tag1,tag2") | |
200 | assert.Equal(t, s.mtype, timingAggregated) | |
201 | } | |
202 | ||
203 | func TestTimingMetricSample(t *testing.T) { | |
204 | s := newTimingMetric("test", 1.0, "tag1,tag2") | |
205 | s.sample(123.45) | |
206 | assert.Equal(t, s.data, []float64{1.0, 123.45}) | |
207 | assert.Equal(t, s.name, "test") | |
208 | assert.Equal(t, s.tags, "tag1,tag2") | |
209 | assert.Equal(t, s.mtype, timingAggregated) | |
210 | } | |
211 | ||
212 | func TestFlushUnsafeTimingMetricSample(t *testing.T) { | |
213 | s := newTimingMetric("test", 1.0, "tag1,tag2") | |
214 | m := s.flushUnsafe() | |
215 | ||
216 | assert.Equal(t, m.metricType, timingAggregated) | |
217 | assert.Equal(t, m.fvalues, []float64{1.0}) | |
218 | assert.Equal(t, m.name, "test") | |
219 | assert.Equal(t, m.stags, "tag1,tag2") | |
220 | assert.Nil(t, m.tags) | |
221 | ||
222 | s.sample(21) | |
223 | s.sample(123.45) | |
224 | m = s.flushUnsafe() | |
225 | ||
226 | assert.Equal(t, m.metricType, timingAggregated) | |
227 | assert.Equal(t, m.fvalues, []float64{1.0, 21.0, 123.45}) | |
228 | assert.Equal(t, m.name, "test") | |
229 | assert.Equal(t, m.stags, "tag1,tag2") | |
230 | assert.Nil(t, m.tags) | |
231 | } |
0 | // Code generated by MockGen. DO NOT EDIT. | |
1 | // Source: statsd.go | |
2 | ||
3 | // Package mock_statsd is a generated GoMock package. | |
4 | package mock_statsd | |
5 | ||
6 | import ( | |
7 | reflect "reflect" | |
8 | time "time" | |
9 | ||
10 | statsd "github.com/DataDog/datadog-go/v5/statsd" | |
11 | gomock "github.com/golang/mock/gomock" | |
12 | ) | |
13 | ||
14 | // MockClientInterface is a mock of ClientInterface interface. | |
15 | type MockClientInterface struct { | |
16 | ctrl *gomock.Controller | |
17 | recorder *MockClientInterfaceMockRecorder | |
18 | } | |
19 | ||
20 | // MockClientInterfaceMockRecorder is the mock recorder for MockClientInterface. | |
21 | type MockClientInterfaceMockRecorder struct { | |
22 | mock *MockClientInterface | |
23 | } | |
24 | ||
25 | // NewMockClientInterface creates a new mock instance. | |
26 | func NewMockClientInterface(ctrl *gomock.Controller) *MockClientInterface { | |
27 | mock := &MockClientInterface{ctrl: ctrl} | |
28 | mock.recorder = &MockClientInterfaceMockRecorder{mock} | |
29 | return mock | |
30 | } | |
31 | ||
32 | // EXPECT returns an object that allows the caller to indicate expected use. | |
33 | func (m *MockClientInterface) EXPECT() *MockClientInterfaceMockRecorder { | |
34 | return m.recorder | |
35 | } | |
36 | ||
37 | // Close mocks base method. | |
38 | func (m *MockClientInterface) Close() error { | |
39 | m.ctrl.T.Helper() | |
40 | ret := m.ctrl.Call(m, "Close") | |
41 | ret0, _ := ret[0].(error) | |
42 | return ret0 | |
43 | } | |
44 | ||
45 | // Close indicates an expected call of Close. | |
46 | func (mr *MockClientInterfaceMockRecorder) Close() *gomock.Call { | |
47 | mr.mock.ctrl.T.Helper() | |
48 | return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Close", reflect.TypeOf((*MockClientInterface)(nil).Close)) | |
49 | } | |
50 | ||
51 | // Count mocks base method. | |
func (m *MockClientInterface) Count(name string, value int64, tags []string, rate float64) error {
	m.ctrl.T.Helper()
	// Dispatch through the gomock controller, which matches the call
	// against recorded expectations and yields the configured returns.
	ret := m.ctrl.Call(m, "Count", name, value, tags, rate)
	// Call returns []interface{}; recover the typed error (nil when unset).
	ret0, _ := ret[0].(error)
	return ret0
}

// Count indicates an expected call of Count.
func (mr *MockClientInterfaceMockRecorder) Count(name, value, tags, rate interface{}) *gomock.Call {
	mr.mock.ctrl.T.Helper()
	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Count", reflect.TypeOf((*MockClientInterface)(nil).Count), name, value, tags, rate)
}

// NOTE(review): the methods in this file follow the GoMock (mockgen)
// generated pattern — each ClientInterface method has a mock implementation
// plus a matching *MockRecorder method used to declare expectations.
// If this file is generated, regenerate it with mockgen instead of
// hand-editing.

// Decr mocks base method.
func (m *MockClientInterface) Decr(name string, tags []string, rate float64) error {
	m.ctrl.T.Helper()
	ret := m.ctrl.Call(m, "Decr", name, tags, rate)
	ret0, _ := ret[0].(error)
	return ret0
}

// Decr indicates an expected call of Decr.
func (mr *MockClientInterfaceMockRecorder) Decr(name, tags, rate interface{}) *gomock.Call {
	mr.mock.ctrl.T.Helper()
	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Decr", reflect.TypeOf((*MockClientInterface)(nil).Decr), name, tags, rate)
}

// Distribution mocks base method.
func (m *MockClientInterface) Distribution(name string, value float64, tags []string, rate float64) error {
	m.ctrl.T.Helper()
	ret := m.ctrl.Call(m, "Distribution", name, value, tags, rate)
	ret0, _ := ret[0].(error)
	return ret0
}

// Distribution indicates an expected call of Distribution.
func (mr *MockClientInterfaceMockRecorder) Distribution(name, value, tags, rate interface{}) *gomock.Call {
	mr.mock.ctrl.T.Helper()
	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Distribution", reflect.TypeOf((*MockClientInterface)(nil).Distribution), name, value, tags, rate)
}

// Event mocks base method.
func (m *MockClientInterface) Event(e *statsd.Event) error {
	m.ctrl.T.Helper()
	ret := m.ctrl.Call(m, "Event", e)
	ret0, _ := ret[0].(error)
	return ret0
}

// Event indicates an expected call of Event.
func (mr *MockClientInterfaceMockRecorder) Event(e interface{}) *gomock.Call {
	mr.mock.ctrl.T.Helper()
	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Event", reflect.TypeOf((*MockClientInterface)(nil).Event), e)
}

// Flush mocks base method.
func (m *MockClientInterface) Flush() error {
	m.ctrl.T.Helper()
	ret := m.ctrl.Call(m, "Flush")
	ret0, _ := ret[0].(error)
	return ret0
}

// Flush indicates an expected call of Flush.
func (mr *MockClientInterfaceMockRecorder) Flush() *gomock.Call {
	mr.mock.ctrl.T.Helper()
	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Flush", reflect.TypeOf((*MockClientInterface)(nil).Flush))
}

// Gauge mocks base method.
func (m *MockClientInterface) Gauge(name string, value float64, tags []string, rate float64) error {
	m.ctrl.T.Helper()
	ret := m.ctrl.Call(m, "Gauge", name, value, tags, rate)
	ret0, _ := ret[0].(error)
	return ret0
}

// Gauge indicates an expected call of Gauge.
func (mr *MockClientInterfaceMockRecorder) Gauge(name, value, tags, rate interface{}) *gomock.Call {
	mr.mock.ctrl.T.Helper()
	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Gauge", reflect.TypeOf((*MockClientInterface)(nil).Gauge), name, value, tags, rate)
}

// GetTelemetry mocks base method.
func (m *MockClientInterface) GetTelemetry() statsd.Telemetry {
	m.ctrl.T.Helper()
	ret := m.ctrl.Call(m, "GetTelemetry")
	ret0, _ := ret[0].(statsd.Telemetry)
	return ret0
}

// GetTelemetry indicates an expected call of GetTelemetry.
func (mr *MockClientInterfaceMockRecorder) GetTelemetry() *gomock.Call {
	mr.mock.ctrl.T.Helper()
	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetTelemetry", reflect.TypeOf((*MockClientInterface)(nil).GetTelemetry))
}

// Histogram mocks base method.
func (m *MockClientInterface) Histogram(name string, value float64, tags []string, rate float64) error {
	m.ctrl.T.Helper()
	ret := m.ctrl.Call(m, "Histogram", name, value, tags, rate)
	ret0, _ := ret[0].(error)
	return ret0
}

// Histogram indicates an expected call of Histogram.
func (mr *MockClientInterfaceMockRecorder) Histogram(name, value, tags, rate interface{}) *gomock.Call {
	mr.mock.ctrl.T.Helper()
	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Histogram", reflect.TypeOf((*MockClientInterface)(nil).Histogram), name, value, tags, rate)
}

// Incr mocks base method.
func (m *MockClientInterface) Incr(name string, tags []string, rate float64) error {
	m.ctrl.T.Helper()
	ret := m.ctrl.Call(m, "Incr", name, tags, rate)
	ret0, _ := ret[0].(error)
	return ret0
}

// Incr indicates an expected call of Incr.
func (mr *MockClientInterfaceMockRecorder) Incr(name, tags, rate interface{}) *gomock.Call {
	mr.mock.ctrl.T.Helper()
	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Incr", reflect.TypeOf((*MockClientInterface)(nil).Incr), name, tags, rate)
}

// IsClosed mocks base method.
func (m *MockClientInterface) IsClosed() bool {
	m.ctrl.T.Helper()
	ret := m.ctrl.Call(m, "IsClosed")
	ret0, _ := ret[0].(bool)
	return ret0
}

// IsClosed indicates an expected call of IsClosed.
func (mr *MockClientInterfaceMockRecorder) IsClosed() *gomock.Call {
	mr.mock.ctrl.T.Helper()
	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "IsClosed", reflect.TypeOf((*MockClientInterface)(nil).IsClosed))
}

// ServiceCheck mocks base method.
func (m *MockClientInterface) ServiceCheck(sc *statsd.ServiceCheck) error {
	m.ctrl.T.Helper()
	ret := m.ctrl.Call(m, "ServiceCheck", sc)
	ret0, _ := ret[0].(error)
	return ret0
}

// ServiceCheck indicates an expected call of ServiceCheck.
func (mr *MockClientInterfaceMockRecorder) ServiceCheck(sc interface{}) *gomock.Call {
	mr.mock.ctrl.T.Helper()
	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ServiceCheck", reflect.TypeOf((*MockClientInterface)(nil).ServiceCheck), sc)
}

// Set mocks base method.
func (m *MockClientInterface) Set(name, value string, tags []string, rate float64) error {
	m.ctrl.T.Helper()
	ret := m.ctrl.Call(m, "Set", name, value, tags, rate)
	ret0, _ := ret[0].(error)
	return ret0
}

// Set indicates an expected call of Set.
func (mr *MockClientInterfaceMockRecorder) Set(name, value, tags, rate interface{}) *gomock.Call {
	mr.mock.ctrl.T.Helper()
	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Set", reflect.TypeOf((*MockClientInterface)(nil).Set), name, value, tags, rate)
}

// SimpleEvent mocks base method.
func (m *MockClientInterface) SimpleEvent(title, text string) error {
	m.ctrl.T.Helper()
	ret := m.ctrl.Call(m, "SimpleEvent", title, text)
	ret0, _ := ret[0].(error)
	return ret0
}

// SimpleEvent indicates an expected call of SimpleEvent.
func (mr *MockClientInterfaceMockRecorder) SimpleEvent(title, text interface{}) *gomock.Call {
	mr.mock.ctrl.T.Helper()
	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SimpleEvent", reflect.TypeOf((*MockClientInterface)(nil).SimpleEvent), title, text)
}

// SimpleServiceCheck mocks base method.
func (m *MockClientInterface) SimpleServiceCheck(name string, status statsd.ServiceCheckStatus) error {
	m.ctrl.T.Helper()
	ret := m.ctrl.Call(m, "SimpleServiceCheck", name, status)
	ret0, _ := ret[0].(error)
	return ret0
}

// SimpleServiceCheck indicates an expected call of SimpleServiceCheck.
func (mr *MockClientInterfaceMockRecorder) SimpleServiceCheck(name, status interface{}) *gomock.Call {
	mr.mock.ctrl.T.Helper()
	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SimpleServiceCheck", reflect.TypeOf((*MockClientInterface)(nil).SimpleServiceCheck), name, status)
}

// TimeInMilliseconds mocks base method.
func (m *MockClientInterface) TimeInMilliseconds(name string, value float64, tags []string, rate float64) error {
	m.ctrl.T.Helper()
	ret := m.ctrl.Call(m, "TimeInMilliseconds", name, value, tags, rate)
	ret0, _ := ret[0].(error)
	return ret0
}

// TimeInMilliseconds indicates an expected call of TimeInMilliseconds.
func (mr *MockClientInterfaceMockRecorder) TimeInMilliseconds(name, value, tags, rate interface{}) *gomock.Call {
	mr.mock.ctrl.T.Helper()
	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "TimeInMilliseconds", reflect.TypeOf((*MockClientInterface)(nil).TimeInMilliseconds), name, value, tags, rate)
}

// Timing mocks base method.
func (m *MockClientInterface) Timing(name string, value time.Duration, tags []string, rate float64) error {
	m.ctrl.T.Helper()
	ret := m.ctrl.Call(m, "Timing", name, value, tags, rate)
	ret0, _ := ret[0].(error)
	return ret0
}

// Timing indicates an expected call of Timing.
func (mr *MockClientInterfaceMockRecorder) Timing(name, value, tags, rate interface{}) *gomock.Call {
	mr.mock.ctrl.T.Helper()
	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Timing", reflect.TypeOf((*MockClientInterface)(nil).Timing), name, value, tags, rate)
}
0 | package statsd | |
1 | ||
2 | import "time" | |
3 | ||
// NoOpClient is a statsd client that does nothing. Can be useful in testing
// situations for library users. Every method succeeds without side effects:
// metric, event and service-check methods return a nil error, IsClosed
// returns false, and GetTelemetry returns a zero-value Telemetry.
type NoOpClient struct{}

// Gauge does nothing and returns nil
func (n *NoOpClient) Gauge(name string, value float64, tags []string, rate float64) error {
	return nil
}

// Count does nothing and returns nil
func (n *NoOpClient) Count(name string, value int64, tags []string, rate float64) error {
	return nil
}

// Histogram does nothing and returns nil
func (n *NoOpClient) Histogram(name string, value float64, tags []string, rate float64) error {
	return nil
}

// Distribution does nothing and returns nil
func (n *NoOpClient) Distribution(name string, value float64, tags []string, rate float64) error {
	return nil
}

// Decr does nothing and returns nil
func (n *NoOpClient) Decr(name string, tags []string, rate float64) error {
	return nil
}

// Incr does nothing and returns nil
func (n *NoOpClient) Incr(name string, tags []string, rate float64) error {
	return nil
}

// Set does nothing and returns nil
func (n *NoOpClient) Set(name string, value string, tags []string, rate float64) error {
	return nil
}

// Timing does nothing and returns nil
func (n *NoOpClient) Timing(name string, value time.Duration, tags []string, rate float64) error {
	return nil
}

// TimeInMilliseconds does nothing and returns nil
func (n *NoOpClient) TimeInMilliseconds(name string, value float64, tags []string, rate float64) error {
	return nil
}

// Event does nothing and returns nil
func (n *NoOpClient) Event(e *Event) error {
	return nil
}

// SimpleEvent does nothing and returns nil
func (n *NoOpClient) SimpleEvent(title, text string) error {
	return nil
}

// ServiceCheck does nothing and returns nil
func (n *NoOpClient) ServiceCheck(sc *ServiceCheck) error {
	return nil
}

// SimpleServiceCheck does nothing and returns nil
func (n *NoOpClient) SimpleServiceCheck(name string, status ServiceCheckStatus) error {
	return nil
}

// Close does nothing and returns nil
func (n *NoOpClient) Close() error {
	return nil
}

// Flush does nothing and returns nil
func (n *NoOpClient) Flush() error {
	return nil
}

// IsClosed does nothing and returns false
func (n *NoOpClient) IsClosed() bool {
	return false
}

// GetTelemetry does nothing and returns an empty Telemetry
func (n *NoOpClient) GetTelemetry() Telemetry {
	return Telemetry{}
}

// Verify that NoOpClient implements the ClientInterface.
// https://golang.org/doc/faq#guarantee_satisfies_interface
var _ ClientInterface = &NoOpClient{}
0 | package statsd | |
1 | ||
2 | import ( | |
3 | "testing" | |
4 | "time" | |
5 | ||
6 | "github.com/stretchr/testify/assert" | |
7 | ) | |
8 | ||
9 | func TestNoOpClient(t *testing.T) { | |
10 | a := assert.New(t) | |
11 | c := NoOpClient{} | |
12 | tags := []string{"a:b"} | |
13 | ||
14 | a.Nil(c.Gauge("asd", 123.4, tags, 56.0)) | |
15 | a.Nil(c.Count("asd", 1234, tags, 56.0)) | |
16 | a.Nil(c.Histogram("asd", 12.34, tags, 56.0)) | |
17 | a.Nil(c.Distribution("asd", 1.234, tags, 56.0)) | |
18 | a.Nil(c.Decr("asd", tags, 56.0)) | |
19 | a.Nil(c.Incr("asd", tags, 56.0)) | |
20 | a.Nil(c.Set("asd", "asd", tags, 56.0)) | |
21 | a.Nil(c.Timing("asd", time.Second, tags, 56.0)) | |
22 | a.Nil(c.TimeInMilliseconds("asd", 1234.5, tags, 56.0)) | |
23 | a.Nil(c.Event(nil)) | |
24 | a.Nil(c.SimpleEvent("asd", "zxc")) | |
25 | a.Nil(c.ServiceCheck(nil)) | |
26 | a.Nil(c.SimpleServiceCheck("asd", Ok)) | |
27 | a.Nil(c.Close()) | |
28 | a.Nil(c.Flush()) | |
29 | } |
0 | package statsd | |
1 | ||
2 | import ( | |
3 | "fmt" | |
4 | "math" | |
5 | "strings" | |
6 | "time" | |
7 | ) | |
8 | ||
// Default values used by resolveOptions when the corresponding With* option
// is not supplied. Sizes left at 0 are later resolved to the optimal value
// for the transport in use (see WithMaxBytesPerPayload, WithBufferPoolSize
// and WithSenderQueueSize for the per-transport values).
var (
	defaultNamespace                = ""
	defaultTags                     = []string{}
	defaultMaxBytesPerPayload       = 0
	defaultMaxMessagesPerPayload    = math.MaxInt32
	defaultBufferPoolSize           = 0
	defaultBufferFlushInterval      = 100 * time.Millisecond
	defaultWorkerCount              = 32
	defaultSenderQueueSize          = 0
	defaultWriteTimeout             = 100 * time.Millisecond
	defaultTelemetry                = true
	defaultReceivingMode            = mutexMode
	defaultChannelModeBufferSize    = 4096
	defaultAggregationFlushInterval = 2 * time.Second
	defaultAggregation              = true
	defaultExtendedAggregation      = false
	defaultOriginDetection          = true
)
27 | ||
// Options contains the configuration options for a client.
//
// Instances are built by resolveOptions, which seeds every field from the
// default* package variables before applying the user-supplied Option
// functions; each field corresponds to a With* option of the same name.
type Options struct {
	namespace                string
	tags                     []string
	maxBytesPerPayload       int
	maxMessagesPerPayload    int
	bufferPoolSize           int
	bufferFlushInterval      time.Duration
	workersCount             int
	senderQueueSize          int
	writeTimeout             time.Duration
	telemetry                bool
	receiveMode              receivingMode
	channelModeBufferSize    int
	aggregationFlushInterval time.Duration
	aggregation              bool
	extendedAggregation      bool
	telemetryAddr            string
	originDetection          bool
	containerID              string
}
49 | ||
50 | func resolveOptions(options []Option) (*Options, error) { | |
51 | o := &Options{ | |
52 | namespace: defaultNamespace, | |
53 | tags: defaultTags, | |
54 | maxBytesPerPayload: defaultMaxBytesPerPayload, | |
55 | maxMessagesPerPayload: defaultMaxMessagesPerPayload, | |
56 | bufferPoolSize: defaultBufferPoolSize, | |
57 | bufferFlushInterval: defaultBufferFlushInterval, | |
58 | workersCount: defaultWorkerCount, | |
59 | senderQueueSize: defaultSenderQueueSize, | |
60 | writeTimeout: defaultWriteTimeout, | |
61 | telemetry: defaultTelemetry, | |
62 | receiveMode: defaultReceivingMode, | |
63 | channelModeBufferSize: defaultChannelModeBufferSize, | |
64 | aggregationFlushInterval: defaultAggregationFlushInterval, | |
65 | aggregation: defaultAggregation, | |
66 | extendedAggregation: defaultExtendedAggregation, | |
67 | originDetection: defaultOriginDetection, | |
68 | } | |
69 | ||
70 | for _, option := range options { | |
71 | err := option(o) | |
72 | if err != nil { | |
73 | return nil, err | |
74 | } | |
75 | } | |
76 | ||
77 | return o, nil | |
78 | } | |
79 | ||
// Option is a client option. Can return an error if validation fails.
// Options are applied in order by resolveOptions; the first failing option
// aborts resolution.
type Option func(*Options) error
82 | ||
83 | // WithNamespace sets a string to be prepend to all metrics, events and service checks name. | |
84 | // | |
85 | // A '.' will automatically be added after the namespace if needed. For example a metrics 'test' with a namespace 'prod' | |
86 | // will produce a final metric named 'prod.test'. | |
87 | func WithNamespace(namespace string) Option { | |
88 | return func(o *Options) error { | |
89 | if strings.HasSuffix(namespace, ".") { | |
90 | o.namespace = namespace | |
91 | } else { | |
92 | o.namespace = namespace + "." | |
93 | } | |
94 | return nil | |
95 | } | |
96 | } | |
97 | ||
// WithTags sets global tags to be applied to every metric, event and service check.
func WithTags(tags []string) Option {
	return func(o *Options) error {
		o.tags = tags
		return nil
	}
}

// WithMaxMessagesPerPayload sets the maximum number of metrics, events and/or service checks that a single payload can
// contain.
//
// The default is 'math.MaxInt32' which will most likely let the WithMaxBytesPerPayload option take precedence. This
// option can be set to `1` to create an unbuffered client (each metric/event/service check will be sent in its own
// payload to the agent).
func WithMaxMessagesPerPayload(maxMessagesPerPayload int) Option {
	return func(o *Options) error {
		o.maxMessagesPerPayload = maxMessagesPerPayload
		return nil
	}
}

// WithMaxBytesPerPayload sets the maximum number of bytes a single payload can contain. Each sample, event and service
// check must be lower than this value once serialized or a `MessageTooLongError` is returned.
//
// The default value is 0, which will set the option to the optimal size for the transport protocol used: 1432 for UDP
// and named pipe and 8192 for UDS. Those values offer the best performances.
// Be careful when changing this option, see
// https://docs.datadoghq.com/developers/dogstatsd/high_throughput/#ensure-proper-packet-sizes.
func WithMaxBytesPerPayload(MaxBytesPerPayload int) Option {
	return func(o *Options) error {
		o.maxBytesPerPayload = MaxBytesPerPayload
		return nil
	}
}
132 | ||
// WithBufferPoolSize sets the size of the pool of buffers used to serialize metrics, events and service_checks.
//
// The default, 0, will set the option to the optimal size for the transport protocol used: 2048 for UDP and named pipe
// and 512 for UDS.
func WithBufferPoolSize(bufferPoolSize int) Option {
	return func(o *Options) error {
		o.bufferPoolSize = bufferPoolSize
		return nil
	}
}

// WithBufferFlushInterval sets the interval after which the current buffer is flushed.
//
// Buffers are used to serialize data; they're flushed either when full (see WithMaxBytesPerPayload) or when they've
// been open for longer than this interval.
//
// With apps sending a high number of metrics/events/service_checks the interval rarely times out. But with slow
// sending apps, increasing this value will reduce the number of payloads sent on the wire as more data is serialized
// in the same payload.
//
// Default is 100ms.
func WithBufferFlushInterval(bufferFlushInterval time.Duration) Option {
	return func(o *Options) error {
		o.bufferFlushInterval = bufferFlushInterval
		return nil
	}
}

// WithWorkersCount sets the number of workers that will be used to serialize data.
//
// Those workers allow the use of multiple buffers at the same time (see WithBufferPoolSize) to reduce lock contention.
//
// Default is 32. Returns an error for values below 1.
func WithWorkersCount(workersCount int) Option {
	return func(o *Options) error {
		if workersCount < 1 {
			return fmt.Errorf("workersCount must be a positive integer")
		}
		o.workersCount = workersCount
		return nil
	}
}

// WithSenderQueueSize sets the size of the sender queue in number of buffers.
//
// After data has been serialized in a buffer, the buffer is pushed to a queue that the sender will consume and then
// send, one at a time, to the agent.
//
// The default value 0 will set the option to the optimal size for the transport protocol used: 2048 for UDP and named
// pipe and 512 for UDS.
func WithSenderQueueSize(senderQueueSize int) Option {
	return func(o *Options) error {
		o.senderQueueSize = senderQueueSize
		return nil
	}
}

// WithWriteTimeout sets the timeout for network communication with the Agent, after this interval a payload is
// dropped. This is only used for UDS and named pipe connections.
func WithWriteTimeout(writeTimeout time.Duration) Option {
	return func(o *Options) error {
		o.writeTimeout = writeTimeout
		return nil
	}
}
198 | ||
// WithChannelMode makes the client use channels to receive metrics.
//
// This determines how the client receives metrics from the app (for example when calling the `Gauge()` method).
// The client will either drop the metrics if its buffers are full (WithChannelMode option) or block the caller until
// the metric can be handled (WithMutexMode option). By default the client uses mutexes.
//
// WithChannelMode uses a channel (see WithChannelModeBufferSize to configure its size) to receive metrics and drops
// metrics if the channel is full. Sending metrics in this mode is much slower than WithMutexMode (because of the
// channel), but will not block the application. This mode is made for applications using many goroutines, sending the
// same metrics, at a very high volume. The goal is to not slow down the application at the cost of dropping metrics
// and having a lower max throughput.
func WithChannelMode() Option {
	return func(o *Options) error {
		o.receiveMode = channelMode
		return nil
	}
}

// WithMutexMode will use mutexes to receive metrics from the app through the API.
//
// This determines how the client receives metrics from the app (for example when calling the `Gauge()` method).
// The client will either drop the metrics if its buffers are full (WithChannelMode option) or block the caller until
// the metric can be handled (WithMutexMode option). By default the client uses mutexes.
//
// WithMutexMode uses mutexes to receive metrics which is much faster than channels but can cause some lock contention
// when used with a high number of goroutines sending the same metrics. Mutexes are sharded based on the metric name,
// which limits mutex contention when multiple goroutines send different metrics (see WithWorkersCount). This is the
// default behavior which will produce the best throughput.
func WithMutexMode() Option {
	return func(o *Options) error {
		o.receiveMode = mutexMode
		return nil
	}
}

// WithChannelModeBufferSize sets the size of the channel holding incoming metrics when WithChannelMode is used.
func WithChannelModeBufferSize(bufferSize int) Option {
	return func(o *Options) error {
		o.channelModeBufferSize = bufferSize
		return nil
	}
}
241 | ||
// WithAggregationInterval sets the interval at which aggregated metrics are flushed. See WithClientSideAggregation and
// WithExtendedClientSideAggregation for more.
//
// The default interval is 2s. The interval must divide the Agent reporting period (default=10s) evenly to reduce
// "aliasing" that can cause values to appear irregular/spiky.
//
// For example a 3s aggregation interval will create spikes in the final graph: an application sending a count metric
// that increments at a constant 1000 times per second will appear noisy with an interval of 3s. This is because
// client-side aggregation would report every 3 seconds, while the agent is reporting every 10 seconds. This means in
// each agent bucket, the values are: 9000, 9000, 12000.
func WithAggregationInterval(interval time.Duration) Option {
	return func(o *Options) error {
		o.aggregationFlushInterval = interval
		return nil
	}
}

// WithClientSideAggregation enables client side aggregation for Gauges, Counts and Sets.
func WithClientSideAggregation() Option {
	return func(o *Options) error {
		o.aggregation = true
		return nil
	}
}

// WithoutClientSideAggregation disables client side aggregation (both basic and extended).
func WithoutClientSideAggregation() Option {
	return func(o *Options) error {
		o.aggregation = false
		o.extendedAggregation = false
		return nil
	}
}

// WithExtendedClientSideAggregation enables client side aggregation for all types. This feature is only compatible with
// Agent's version >=6.25.0 && <7.0.0 or Agent's versions >=7.25.0.
func WithExtendedClientSideAggregation() Option {
	return func(o *Options) error {
		o.aggregation = true
		o.extendedAggregation = true
		return nil
	}
}
285 | ||
// WithoutTelemetry disables the client telemetry.
//
// More on this here: https://docs.datadoghq.com/developers/dogstatsd/high_throughput/#client-side-telemetry
func WithoutTelemetry() Option {
	return func(o *Options) error {
		o.telemetry = false
		return nil
	}
}

// WithTelemetryAddr sets a different address for telemetry metrics. By default the same address as the client is used
// for telemetry.
//
// More on this here: https://docs.datadoghq.com/developers/dogstatsd/high_throughput/#client-side-telemetry
func WithTelemetryAddr(addr string) Option {
	return func(o *Options) error {
		o.telemetryAddr = addr
		return nil
	}
}

// WithoutOriginDetection disables the client origin detection.
// When enabled, the client tries to discover its container ID and sends it to the Agent
// to enrich the metrics with container tags.
// Origin detection can also be disabled by configuring the environment variable DD_ORIGIN_DETECTION_ENABLED=false
// The client tries to read the container ID by parsing the file /proc/self/cgroup, this is not supported on Windows.
// The client prioritizes the value passed via DD_ENTITY_ID (if set) over the container ID.
//
// More on this here: https://docs.datadoghq.com/developers/dogstatsd/?tab=kubernetes#origin-detection-over-udp
func WithoutOriginDetection() Option {
	return func(o *Options) error {
		o.originDetection = false
		return nil
	}
}

// WithOriginDetection enables the client origin detection.
// This feature requires Datadog Agent version >=6.35.0 && <7.0.0 or Agent versions >=7.35.0.
// When enabled, the client tries to discover its container ID and sends it to the Agent
// to enrich the metrics with container tags.
// Origin detection can be disabled by configuring the environment variable DD_ORIGIN_DETECTION_ENABLED=false
// The client tries to read the container ID by parsing the file /proc/self/cgroup, this is not supported on Windows.
// The client prioritizes the value passed via DD_ENTITY_ID (if set) over the container ID.
//
// More on this here: https://docs.datadoghq.com/developers/dogstatsd/?tab=kubernetes#origin-detection-over-udp
func WithOriginDetection() Option {
	return func(o *Options) error {
		o.originDetection = true
		return nil
	}
}

// WithContainerID allows passing the container ID, this will be used by the Agent to enrich metrics with container tags.
// This feature requires Datadog Agent version >=6.35.0 && <7.0.0 or Agent versions >=7.35.0.
// When configured, the provided container ID is prioritized over the container ID discovered via Origin Detection.
// The client prioritizes the value passed via DD_ENTITY_ID (if set) over the container ID.
func WithContainerID(id string) Option {
	return func(o *Options) error {
		o.containerID = id
		return nil
	}
}
0 | package statsd | |
1 | ||
2 | import ( | |
3 | "testing" | |
4 | "time" | |
5 | ||
6 | "github.com/stretchr/testify/assert" | |
7 | ) | |
8 | ||
9 | func TestDefaultOptions(t *testing.T) { | |
10 | options, err := resolveOptions([]Option{}) | |
11 | ||
12 | assert.NoError(t, err) | |
13 | assert.Equal(t, options.namespace, defaultNamespace) | |
14 | assert.Equal(t, options.tags, defaultTags) | |
15 | assert.Equal(t, options.maxBytesPerPayload, defaultMaxBytesPerPayload) | |
16 | assert.Equal(t, options.maxMessagesPerPayload, defaultMaxMessagesPerPayload) | |
17 | assert.Equal(t, options.bufferPoolSize, defaultBufferPoolSize) | |
18 | assert.Equal(t, options.bufferFlushInterval, defaultBufferFlushInterval) | |
19 | assert.Equal(t, options.workersCount, defaultWorkerCount) | |
20 | assert.Equal(t, options.senderQueueSize, defaultSenderQueueSize) | |
21 | assert.Equal(t, options.writeTimeout, defaultWriteTimeout) | |
22 | assert.Equal(t, options.telemetry, defaultTelemetry) | |
23 | assert.Equal(t, options.receiveMode, defaultReceivingMode) | |
24 | assert.Equal(t, options.channelModeBufferSize, defaultChannelModeBufferSize) | |
25 | assert.Equal(t, options.aggregationFlushInterval, defaultAggregationFlushInterval) | |
26 | assert.Equal(t, options.aggregation, defaultAggregation) | |
27 | assert.Equal(t, options.extendedAggregation, defaultExtendedAggregation) | |
28 | assert.Zero(t, options.telemetryAddr) | |
29 | } | |
30 | ||
31 | func TestOptions(t *testing.T) { | |
32 | testNamespace := "datadog." | |
33 | testTags := []string{"rocks"} | |
34 | testMaxBytesPerPayload := 2048 | |
35 | testMaxMessagePerPayload := 1024 | |
36 | testBufferPoolSize := 32 | |
37 | testBufferFlushInterval := 48 * time.Second | |
38 | testBufferShardCount := 28 | |
39 | testSenderQueueSize := 64 | |
40 | testWriteTimeout := 1 * time.Minute | |
41 | testChannelBufferSize := 500 | |
42 | testAggregationWindow := 10 * time.Second | |
43 | testTelemetryAddr := "localhost:1234" | |
44 | ||
45 | options, err := resolveOptions([]Option{ | |
46 | WithNamespace(testNamespace), | |
47 | WithTags(testTags), | |
48 | WithMaxBytesPerPayload(testMaxBytesPerPayload), | |
49 | WithMaxMessagesPerPayload(testMaxMessagePerPayload), | |
50 | WithBufferPoolSize(testBufferPoolSize), | |
51 | WithBufferFlushInterval(testBufferFlushInterval), | |
52 | WithWorkersCount(testBufferShardCount), | |
53 | WithSenderQueueSize(testSenderQueueSize), | |
54 | WithWriteTimeout(testWriteTimeout), | |
55 | WithoutTelemetry(), | |
56 | WithChannelMode(), | |
57 | WithChannelModeBufferSize(testChannelBufferSize), | |
58 | WithAggregationInterval(testAggregationWindow), | |
59 | WithClientSideAggregation(), | |
60 | WithTelemetryAddr(testTelemetryAddr), | |
61 | }) | |
62 | ||
63 | assert.NoError(t, err) | |
64 | assert.Equal(t, options.namespace, testNamespace) | |
65 | assert.Equal(t, options.tags, testTags) | |
66 | assert.Equal(t, options.maxBytesPerPayload, testMaxBytesPerPayload) | |
67 | assert.Equal(t, options.maxMessagesPerPayload, testMaxMessagePerPayload) | |
68 | assert.Equal(t, options.bufferPoolSize, testBufferPoolSize) | |
69 | assert.Equal(t, options.bufferFlushInterval, testBufferFlushInterval) | |
70 | assert.Equal(t, options.workersCount, testBufferShardCount) | |
71 | assert.Equal(t, options.senderQueueSize, testSenderQueueSize) | |
72 | assert.Equal(t, options.writeTimeout, testWriteTimeout) | |
73 | assert.Equal(t, options.telemetry, false) | |
74 | assert.Equal(t, options.receiveMode, channelMode) | |
75 | assert.Equal(t, options.channelModeBufferSize, testChannelBufferSize) | |
76 | assert.Equal(t, options.aggregationFlushInterval, testAggregationWindow) | |
77 | assert.Equal(t, options.aggregation, true) | |
78 | assert.Equal(t, options.extendedAggregation, false) | |
79 | assert.Equal(t, options.telemetryAddr, testTelemetryAddr) | |
80 | } | |
81 | ||
82 | func TestExtendedAggregation(t *testing.T) { | |
83 | options, err := resolveOptions([]Option{ | |
84 | WithoutClientSideAggregation(), | |
85 | WithExtendedClientSideAggregation(), | |
86 | }) | |
87 | ||
88 | assert.NoError(t, err) | |
89 | assert.Equal(t, options.aggregation, true) | |
90 | assert.Equal(t, options.extendedAggregation, true) | |
91 | } | |
92 | ||
93 | func TestResetOptions(t *testing.T) { | |
94 | options, err := resolveOptions([]Option{ | |
95 | WithChannelMode(), | |
96 | WithMutexMode(), | |
97 | WithoutClientSideAggregation(), | |
98 | }) | |
99 | ||
100 | assert.NoError(t, err) | |
101 | assert.Equal(t, options.receiveMode, mutexMode) | |
102 | assert.Equal(t, options.aggregation, false) | |
103 | assert.Equal(t, options.extendedAggregation, false) | |
104 | } | |
105 | func TestOptionsNamespaceWithoutDot(t *testing.T) { | |
106 | testNamespace := "datadog" | |
107 | ||
108 | options, err := resolveOptions([]Option{ | |
109 | WithNamespace(testNamespace), | |
110 | }) | |
111 | ||
112 | assert.NoError(t, err) | |
113 | assert.Equal(t, options.namespace, testNamespace+".") | |
114 | } |
0 | // +build !windows | |
1 | ||
2 | package statsd | |
3 | ||
4 | import ( | |
5 | "errors" | |
6 | "io" | |
7 | "time" | |
8 | ) | |
9 | ||
10 | func newWindowsPipeWriter(pipepath string, writeTimeout time.Duration) (io.WriteCloser, error) { | |
11 | return nil, errors.New("Windows Named Pipes are only supported on Windows") | |
12 | } |
0 | // +build windows | |
1 | ||
2 | package statsd | |
3 | ||
4 | import ( | |
5 | "net" | |
6 | "sync" | |
7 | "time" | |
8 | ||
9 | "github.com/Microsoft/go-winio" | |
10 | ) | |
11 | ||
// pipeWriter is an io.WriteCloser that sends payloads over a Windows Named
// Pipe, dialing lazily and re-dialing after a permanent write error.
type pipeWriter struct {
	mu       sync.RWMutex  // guards conn; timeout/pipepath are set once at construction
	conn     net.Conn      // current pipe connection; nil when disconnected
	timeout  time.Duration // write deadline applied before every Write
	pipepath string        // full pipe path, e.g. `\\.\pipe\name`
}
18 | ||
// Write implements io.Writer. It establishes the pipe connection on first use,
// applies the configured write deadline, and forgets the cached connection on
// a non-temporary error so the next Write re-dials.
func (p *pipeWriter) Write(data []byte) (n int, err error) {
	conn, err := p.ensureConnection()
	if err != nil {
		return 0, err
	}

	p.mu.RLock()
	// SetWriteDeadline's error is deliberately ignored: the write itself is
	// best-effort and any real failure surfaces from conn.Write below.
	conn.SetWriteDeadline(time.Now().Add(p.timeout))
	p.mu.RUnlock()

	n, err = conn.Write(data)
	if err != nil {
		if e, ok := err.(net.Error); !ok || !e.Temporary() {
			// disconnected; drop the cached connection so the next attempt
			// reconnects.
			// NOTE(review): if another goroutine re-dialed in the meantime,
			// this also discards the fresh connection — confirm acceptable.
			p.mu.Lock()
			p.conn = nil
			p.mu.Unlock()
		}
	}
	return n, err
}
40 | ||
// ensureConnection returns the cached pipe connection, dialing the named pipe
// if none exists. It uses double-checked locking: a cheap read-locked fast
// path, then a write-locked re-check before dialing.
func (p *pipeWriter) ensureConnection() (net.Conn, error) {
	// Fast path: reuse the existing connection under the read lock.
	p.mu.RLock()
	conn := p.conn
	p.mu.RUnlock()
	if conn != nil {
		return conn, nil
	}

	// Slow path: take the write lock and re-check, since another goroutine
	// may have connected while we were waiting for the lock.
	p.mu.Lock()
	defer p.mu.Unlock()
	if p.conn != nil {
		return p.conn, nil
	}
	newconn, err := winio.DialPipe(p.pipepath, nil)
	if err != nil {
		return nil, err
	}
	p.conn = newconn
	return newconn, nil
}
62 | ||
63 | func (p *pipeWriter) Close() error { | |
64 | return p.conn.Close() | |
65 | } | |
66 | ||
67 | func newWindowsPipeWriter(pipepath string, writeTimeout time.Duration) (*pipeWriter, error) { | |
68 | // Defer connection establishment to first write | |
69 | return &pipeWriter{ | |
70 | conn: nil, | |
71 | timeout: writeTimeout, | |
72 | pipepath: pipepath, | |
73 | }, nil | |
74 | } |
0 | // +build windows | |
1 | ||
2 | package statsd | |
3 | ||
4 | import ( | |
5 | "io/ioutil" | |
6 | "net" | |
7 | "os" | |
8 | "testing" | |
9 | "time" | |
10 | ||
11 | "github.com/Microsoft/go-winio" | |
12 | "github.com/stretchr/testify/assert" | |
13 | "github.com/stretchr/testify/require" | |
14 | ) | |
15 | ||
16 | func createNamedPipe(t *testing.T) (string, *os.File, net.Listener) { | |
17 | f, err := ioutil.TempFile("", "test-pipe-") | |
18 | require.Nil(t, err) | |
19 | ||
20 | pipepath := WindowsPipeAddressPrefix + f.Name() | |
21 | ln, err := winio.ListenPipe(pipepath, &winio.PipeConfig{ | |
22 | SecurityDescriptor: "D:AI(A;;GA;;;WD)", | |
23 | InputBufferSize: 1_000_000, | |
24 | }) | |
25 | if err != nil { | |
26 | os.Remove(f.Name()) | |
27 | t.Fatal(err) | |
28 | } | |
29 | return pipepath, f, ln | |
30 | } | |
31 | ||
// acceptOne accepts one single connection from ln, reads up to 512 bytes from
// it and sends the received data to the out channel, afterwards closing the
// connection.
//
// NOTE(review): this helper runs in its own goroutine and uses require, whose
// FailNow is documented to be called from the test goroutine only — confirm
// this is acceptable for these tests.
func acceptOne(t *testing.T, ln net.Listener, out chan string) {
	conn, err := ln.Accept()
	require.Nil(t, err)

	buf := make([]byte, 512)
	n, err := conn.Read(buf)
	require.Nil(t, err)

	conn.Close()
	out <- string(buf[:n])
}
45 | ||
// TestPipeWriter checks end-to-end that a client created with a named-pipe
// address delivers a gauge metric through the pipe.
func TestPipeWriter(t *testing.T) {
	pipepath, f, ln := createNamedPipe(t)
	defer os.Remove(f.Name())
	// NOTE(review): ln is never closed; the listener leaks until the test
	// process exits — consider `defer ln.Close()`.

	out := make(chan string)
	go acceptOne(t, ln, out)

	client, err := New(pipepath)
	require.Nil(t, err)

	err = client.Gauge("metric", 1, []string{"key:val"}, 1)
	require.Nil(t, err)

	// acceptOne forwards whatever arrived on the pipe.
	got := <-out
	assert.Equal(t, got, "metric:1|g|#key:val\n")
}
62 | ||
// TestPipeWriterEnv checks that the named-pipe address is picked up from the
// DD_AGENT_HOST environment variable when New is called with an empty addr.
func TestPipeWriterEnv(t *testing.T) {
	pipepath, f, ln := createNamedPipe(t)
	defer os.Remove(f.Name())
	// NOTE(review): ln is never closed — consider `defer ln.Close()`.

	out := make(chan string)
	go acceptOne(t, ln, out)

	// Point the client at the pipe via the env var instead of an explicit addr.
	os.Setenv(agentHostEnvVarName, pipepath)
	defer os.Unsetenv(agentHostEnvVarName)

	client, err := New("")
	require.Nil(t, err)

	err = client.Gauge("metric", 1, []string{"key:val"}, 1)
	require.Nil(t, err)

	got := <-out
	assert.Equal(t, got, "metric:1|g|#key:val\n")
}
82 | ||
// TestPipeWriterReconnect checks the pipeWriter's reconnection behavior:
// after the server closes the connection, one send is expected to be lost
// while the writer notices the disconnect, and subsequent sends succeed over
// a freshly dialed connection.
func TestPipeWriterReconnect(t *testing.T) {
	pipepath, f, ln := createNamedPipe(t)
	defer os.Remove(f.Name())

	out := make(chan string)
	go acceptOne(t, ln, out)

	client, err := New(pipepath)
	require.Nil(t, err)

	// first attempt works, then connection closes (acceptOne closes it after
	// one read)
	err = client.Gauge("metric", 1, []string{"key:val"}, 1)
	require.Nil(t, err, "Failed to send gauge: %s", err)

	timeout := time.After(5 * time.Second)
	select {
	case got := <-out:
		assert.Equal(t, got, "metric:1|g|#key:val\n")
	case <-timeout:
		t.Fatal("timeout receiving the first metric")
	}

	// second attempt fails by attempting the same (now dead) connection; the
	// payload is lost but the writer drops its cached connection.
	go acceptOne(t, ln, out)
	err = client.Gauge("metric", 2, []string{"key:val"}, 1)
	require.Nil(t, err, "Failed to send second gauge: %s", err)

	timeout = time.After(100 * time.Millisecond)
	select {
	case <-out:
		t.Fatal("Second attempt should have timed out")
	case <-timeout:
		// ok: nothing was delivered, as expected
	}

	// subsequent attempts succeed with new connection; retry a few times since
	// the exact attempt on which the reconnect lands is timing-dependent.
	for n := 0; n < 3; n++ {
		err = client.Gauge("metric", 3, []string{"key:val"}, 1)
		require.Nil(t, err, "Failed to send second gauge: %s", err)

		timeout = time.After(3 * time.Second)
		select {
		case got := <-out:
			assert.Equal(t, got, "metric:3|g|#key:val\n")
			return
		case <-timeout:
			continue
		}
	}
	t.Fatal("failed to reconnect")
}
0 | package statsd | |
1 | ||
2 | import ( | |
3 | "io" | |
4 | "sync/atomic" | |
5 | ) | |
6 | ||
// senderTelemetry contains telemetry about the health of the sender.
// All counters are updated via sync/atomic; keep fields uint64 so they stay
// 64-bit aligned on 32-bit platforms.
type senderTelemetry struct {
	totalPayloadsSent             uint64 // payloads successfully written to the transport
	totalPayloadsDroppedQueueFull uint64 // payloads dropped because the send queue was full
	totalPayloadsDroppedWriter    uint64 // payloads dropped because the transport write failed
	totalBytesSent                uint64 // bytes successfully written to the transport
	totalBytesDroppedQueueFull    uint64 // bytes dropped because the send queue was full
	totalBytesDroppedWriter       uint64 // bytes dropped because the transport write failed
}
16 | ||
// sender drains a queue of serialized payloads into the underlying transport
// from a dedicated goroutine (see sendLoop).
type sender struct {
	transport   io.WriteCloser     // destination for payloads (UDP, UDS, pipe, ...)
	pool        *bufferPool        // buffers are returned here after being written or dropped
	queue       chan *statsdBuffer // pending payloads; bounded, sends drop when full
	telemetry   *senderTelemetry   // atomic counters describing sender health
	stop        chan struct{}      // close()/sendLoop shutdown handshake
	flushSignal chan struct{}      // flush()/sendLoop drain handshake
}
25 | ||
26 | func newSender(transport io.WriteCloser, queueSize int, pool *bufferPool) *sender { | |
27 | sender := &sender{ | |
28 | transport: transport, | |
29 | pool: pool, | |
30 | queue: make(chan *statsdBuffer, queueSize), | |
31 | telemetry: &senderTelemetry{}, | |
32 | stop: make(chan struct{}), | |
33 | flushSignal: make(chan struct{}), | |
34 | } | |
35 | ||
36 | go sender.sendLoop() | |
37 | return sender | |
38 | } | |
39 | ||
40 | func (s *sender) send(buffer *statsdBuffer) { | |
41 | select { | |
42 | case s.queue <- buffer: | |
43 | default: | |
44 | atomic.AddUint64(&s.telemetry.totalPayloadsDroppedQueueFull, 1) | |
45 | atomic.AddUint64(&s.telemetry.totalBytesDroppedQueueFull, uint64(len(buffer.bytes()))) | |
46 | s.pool.returnBuffer(buffer) | |
47 | } | |
48 | } | |
49 | ||
50 | func (s *sender) write(buffer *statsdBuffer) { | |
51 | _, err := s.transport.Write(buffer.bytes()) | |
52 | if err != nil { | |
53 | atomic.AddUint64(&s.telemetry.totalPayloadsDroppedWriter, 1) | |
54 | atomic.AddUint64(&s.telemetry.totalBytesDroppedWriter, uint64(len(buffer.bytes()))) | |
55 | } else { | |
56 | atomic.AddUint64(&s.telemetry.totalPayloadsSent, 1) | |
57 | atomic.AddUint64(&s.telemetry.totalBytesSent, uint64(len(buffer.bytes()))) | |
58 | } | |
59 | s.pool.returnBuffer(buffer) | |
60 | } | |
61 | ||
// flushTelemetryMetrics copies a snapshot of the sender's atomic counters
// into t. Each field is loaded independently, so the snapshot is not a single
// consistent point in time — acceptable for telemetry reporting.
func (s *sender) flushTelemetryMetrics(t *Telemetry) {
	t.TotalPayloadsSent = atomic.LoadUint64(&s.telemetry.totalPayloadsSent)
	t.TotalPayloadsDroppedQueueFull = atomic.LoadUint64(&s.telemetry.totalPayloadsDroppedQueueFull)
	t.TotalPayloadsDroppedWriter = atomic.LoadUint64(&s.telemetry.totalPayloadsDroppedWriter)

	t.TotalBytesSent = atomic.LoadUint64(&s.telemetry.totalBytesSent)
	t.TotalBytesDroppedQueueFull = atomic.LoadUint64(&s.telemetry.totalBytesDroppedQueueFull)
	t.TotalBytesDroppedWriter = atomic.LoadUint64(&s.telemetry.totalBytesDroppedWriter)
}
71 | ||
// sendLoop is the sender's background goroutine: it writes queued payloads to
// the transport until told to stop. The deferred close(s.stop) is the
// acknowledgment that close() waits on before draining the remaining queue.
func (s *sender) sendLoop() {
	defer close(s.stop)
	for {
		select {
		case buffer := <-s.queue:
			s.write(buffer)
		case <-s.stop:
			// close() sent on s.stop; exit and let the deferred close signal it.
			return
		case <-s.flushSignal:
			// At that point we know that the workers are paused (the statsd client
			// will pause them before calling sender.flush()).
			// So we can fully flush the input queue
			s.flushInputQueue()
			// Echo the signal back so flush() can return.
			s.flushSignal <- struct{}{}
		}
	}
}
89 | ||
// flushInputQueue synchronously writes every payload currently sitting in the
// queue, returning as soon as the queue is empty. Callers must guarantee no
// concurrent producers (workers paused, or sendLoop already stopped).
func (s *sender) flushInputQueue() {
	for {
		select {
		case buffer := <-s.queue:
			s.write(buffer)
		default:
			// Queue drained.
			return
		}
	}
}
// flush asks sendLoop to drain the queue and blocks until it has done so.
// The statsd client pauses the workers before calling this, so no new
// payloads are enqueued during the drain.
func (s *sender) flush() {
	s.flushSignal <- struct{}{} // request the drain
	<-s.flushSignal             // wait for sendLoop's confirmation
}
104 | ||
// close stops the send loop, drains any payloads still queued, and closes the
// underlying transport. It must be called at most once: a second call would
// send on the now-closed stop channel and panic.
func (s *sender) close() error {
	s.stop <- struct{}{} // ask sendLoop to exit...
	<-s.stop             // ...and wait for its deferred close(s.stop)
	s.flushInputQueue()  // sendLoop is gone, so draining here cannot race
	return s.transport.Close()
}
0 | package statsd | |
1 | ||
2 | import ( | |
3 | "fmt" | |
4 | "testing" | |
5 | ||
6 | "github.com/stretchr/testify/assert" | |
7 | "github.com/stretchr/testify/mock" | |
8 | ) | |
9 | ||
// mockedWriter is a testify-based mock io.WriteCloser used to observe what
// the sender writes and to inject write/close results.
type mockedWriter struct {
	mock.Mock
}

// Write records the call and returns the configured (n, err) pair.
func (w *mockedWriter) Write(data []byte) (n int, err error) {
	args := w.Called(data)
	return args.Int(0), args.Error(1)
}

// Close records the call and returns the configured error.
func (w *mockedWriter) Close() error {
	args := w.Called()
	return args.Error(0)
}
23 | ||
// TestSender checks the happy path: a queued buffer is written to the
// transport on close, the buffer returns to the pool, and the "sent"
// telemetry counters are incremented.
func TestSender(t *testing.T) {
	writer := new(mockedWriter)
	writer.On("Write", mock.Anything).Return(1, nil)
	writer.On("Close").Return(nil)
	pool := newBufferPool(10, 1024, 1)
	sender := newSender(writer, 10, pool)
	buffer := pool.borrowBuffer()
	buffer.writeSeparator() // add some dummy data

	sender.send(buffer)

	// close drains the queue, so the buffer is guaranteed written by now.
	err := sender.close()
	assert.Nil(t, err)
	writer.AssertCalled(t, "Write", []byte("\n"))
	assert.Equal(t, 10, len(pool.pool)) // buffer was returned to the pool

	assert.Equal(t, uint64(1), sender.telemetry.totalPayloadsSent)
	assert.Equal(t, uint64(0), sender.telemetry.totalPayloadsDroppedQueueFull)
	assert.Equal(t, uint64(0), sender.telemetry.totalPayloadsDroppedWriter)
	assert.Equal(t, uint64(1), sender.telemetry.totalBytesSent)
	assert.Equal(t, uint64(0), sender.telemetry.totalBytesDroppedQueueFull)
	assert.Equal(t, uint64(0), sender.telemetry.totalBytesDroppedWriter)

}
48 | ||
// TestSenderBufferFullTelemetry checks that a send into a full queue drops
// the payload and increments the queue-full telemetry counters.
func TestSenderBufferFullTelemetry(t *testing.T) {
	writer := new(mockedWriter)
	writer.On("Write", mock.Anything).Return(0, nil)
	writer.On("Close").Return(nil)

	// a sender with an unbuffered queue (size 0): with the loop stopped,
	// any send immediately hits the "queue full" path
	pool := newBufferPool(10, 1024, 1)
	sender := newSender(writer, 0, pool)

	// close the sender to prevent it from consuming the queue
	// (close error deliberately ignored; the mock's Close returns nil)
	sender.close()

	// this send cannot be enqueued and must be dropped
	buffer := pool.borrowBuffer()
	buffer.writeSeparator() // add some dummy data
	sender.send(buffer)

	assert.Equal(t, uint64(0), sender.telemetry.totalPayloadsSent)
	assert.Equal(t, uint64(1), sender.telemetry.totalPayloadsDroppedQueueFull)
	assert.Equal(t, uint64(0), sender.telemetry.totalPayloadsDroppedWriter)

	assert.Equal(t, uint64(0), sender.telemetry.totalBytesSent)
	assert.Equal(t, uint64(1), sender.telemetry.totalBytesDroppedQueueFull)
	assert.Equal(t, uint64(0), sender.telemetry.totalBytesDroppedWriter)
}
74 | ||
// TestSenderWriteError checks that a failing transport write increments the
// writer-dropped telemetry counters and still returns the buffer to the pool.
func TestSenderWriteError(t *testing.T) {
	writer := new(mockedWriter)
	writer.On("Write", mock.Anything).Return(1, fmt.Errorf("some write error"))
	writer.On("Close").Return(nil)
	pool := newBufferPool(10, 1024, 1)
	sender := newSender(writer, 10, pool)
	buffer := pool.borrowBuffer()
	buffer.writeSeparator() // add some dummy data

	sender.send(buffer)

	// close drains the queue, forcing the (failing) write to happen.
	err := sender.close()
	assert.Nil(t, err)
	writer.AssertCalled(t, "Write", []byte("\n"))
	assert.Equal(t, 10, len(pool.pool)) // buffer recycled even on write failure

	assert.Equal(t, uint64(0), sender.telemetry.totalPayloadsSent)
	assert.Equal(t, uint64(0), sender.telemetry.totalPayloadsDroppedQueueFull)
	assert.Equal(t, uint64(1), sender.telemetry.totalPayloadsDroppedWriter)

	assert.Equal(t, uint64(0), sender.telemetry.totalBytesSent)
	assert.Equal(t, uint64(0), sender.telemetry.totalBytesDroppedQueueFull)
	assert.Equal(t, uint64(1), sender.telemetry.totalBytesDroppedWriter)
}
0 | package statsd | |
1 | ||
2 | import ( | |
3 | "fmt" | |
4 | "time" | |
5 | ) | |
6 | ||
// ServiceCheckStatus support
type ServiceCheckStatus byte

const (
	// Ok is the "ok" ServiceCheck status
	Ok ServiceCheckStatus = 0
	// Warn is the "warning" ServiceCheck status
	Warn ServiceCheckStatus = 1
	// Critical is the "critical" ServiceCheck status
	Critical ServiceCheckStatus = 2
	// Unknown is the "unknown" ServiceCheck status
	Unknown ServiceCheckStatus = 3
)

// A ServiceCheck is an object that contains status of DataDog service check.
type ServiceCheck struct {
	// Name of the service check. Required.
	Name string
	// Status of service check. Required.
	Status ServiceCheckStatus
	// Timestamp is a timestamp for the serviceCheck. If not provided, the dogstatsd
	// server will set this to the current time.
	Timestamp time.Time
	// Hostname for the serviceCheck.
	Hostname string
	// A message describing the current state of the serviceCheck.
	Message string
	// Tags for the serviceCheck.
	Tags []string
}

// NewServiceCheck creates a new serviceCheck with the given name and status. Error checking
// against these values is done at send-time, or upon running sc.Check.
func NewServiceCheck(name string, status ServiceCheckStatus) *ServiceCheck {
	return &ServiceCheck{
		Name:   name,
		Status: status,
	}
}

// Check verifies that a service check is valid: it must have a non-empty name
// and a status in the known range (Ok..Unknown).
func (sc *ServiceCheck) Check() error {
	if len(sc.Name) == 0 {
		return fmt.Errorf("statsd.ServiceCheck name is required")
	}
	// Status is backed by an unsigned byte, so it can never be negative: the
	// previous `byte(sc.Status) < 0` half of this condition was always false
	// (staticcheck SA4003) and has been removed.
	if byte(sc.Status) > 3 {
		return fmt.Errorf("statsd.ServiceCheck status has invalid value")
	}
	return nil
}
0 | package statsd | |
1 | ||
2 | import ( | |
3 | "testing" | |
4 | ||
5 | "github.com/stretchr/testify/assert" | |
6 | "github.com/stretchr/testify/require" | |
7 | ) | |
8 | ||
9 | func encodeSC(sc *ServiceCheck) (string, error) { | |
10 | err := sc.Check() | |
11 | if err != nil { | |
12 | return "", err | |
13 | } | |
14 | var buffer []byte | |
15 | buffer = appendServiceCheck(buffer, sc, nil) | |
16 | return string(buffer), nil | |
17 | } | |
18 | ||
// TestServiceChecks is a table-driven test of the service-check wire
// encoding: statuses, hostname, message, tags, non-ASCII text and the
// escaping of newlines and "m:" inside messages.
func TestServiceChecks(t *testing.T) {
	matrix := []struct {
		serviceCheck   *ServiceCheck
		expectedEncode string
	}{
		{
			NewServiceCheck("DataCatService", Ok),
			`_sc|DataCatService|0`,
		}, {
			NewServiceCheck("DataCatService", Warn),
			`_sc|DataCatService|1`,
		}, {
			NewServiceCheck("DataCatService", Critical),
			`_sc|DataCatService|2`,
		}, {
			NewServiceCheck("DataCatService", Unknown),
			`_sc|DataCatService|3`,
		}, {
			&ServiceCheck{Name: "DataCatService", Status: Ok, Hostname: "DataStation.Cat"},
			`_sc|DataCatService|0|h:DataStation.Cat`,
		}, {
			&ServiceCheck{Name: "DataCatService", Status: Ok, Hostname: "DataStation.Cat", Message: "Here goes valuable message"},
			`_sc|DataCatService|0|h:DataStation.Cat|m:Here goes valuable message`,
		}, {
			&ServiceCheck{Name: "DataCatService", Status: Ok, Hostname: "DataStation.Cat", Message: "Here are some cyrillic chars: к л м н о п р с т у ф х ц ч ш"},
			`_sc|DataCatService|0|h:DataStation.Cat|m:Here are some cyrillic chars: к л м н о п р с т у ф х ц ч ш`,
		}, {
			&ServiceCheck{Name: "DataCatService", Status: Ok, Hostname: "DataStation.Cat", Message: "Here goes valuable message", Tags: []string{"host:foo", "app:bar"}},
			`_sc|DataCatService|0|h:DataStation.Cat|#host:foo,app:bar|m:Here goes valuable message`,
		}, {
			// Newlines are escaped in messages and stripped from tags.
			&ServiceCheck{Name: "DataCatService", Status: Ok, Hostname: "DataStation.Cat", Message: "Here goes \n that should be escaped", Tags: []string{"host:foo", "app:b\nar"}},
			`_sc|DataCatService|0|h:DataStation.Cat|#host:foo,app:bar|m:Here goes \n that should be escaped`,
		}, {
			// "m:" inside a message must be escaped to avoid field confusion.
			&ServiceCheck{Name: "DataCatService", Status: Ok, Hostname: "DataStation.Cat", Message: "Here goes m: that should be escaped", Tags: []string{"host:foo", "app:bar"}},
			`_sc|DataCatService|0|h:DataStation.Cat|#host:foo,app:bar|m:Here goes m\: that should be escaped`,
		},
	}

	for _, m := range matrix {
		scEncoded, err := encodeSC(m.serviceCheck)
		require.NoError(t, err)
		assert.Equal(t, m.expectedEncode, scEncoded)
	}

}
64 | ||
65 | func TestNameMissing(t *testing.T) { | |
66 | sc := NewServiceCheck("", Ok) | |
67 | _, err := encodeSC(sc) | |
68 | require.Error(t, err) | |
69 | assert.Equal(t, "statsd.ServiceCheck name is required", err.Error()) | |
70 | } | |
71 | ||
72 | func TestUnknownStatus(t *testing.T) { | |
73 | sc := NewServiceCheck("sc", ServiceCheckStatus(5)) | |
74 | _, err := encodeSC(sc) | |
75 | require.Error(t, err) | |
76 | assert.Equal(t, "statsd.ServiceCheck status has invalid value", err.Error()) | |
77 | } | |
78 | ||
79 | func TestNewServiceCheckWithTags(t *testing.T) { | |
80 | sc := NewServiceCheck("hello", Warn) | |
81 | sc.Tags = []string{"tag1", "tag2"} | |
82 | s, err := encodeSC(sc) | |
83 | require.NoError(t, err) | |
84 | assert.Equal(t, "_sc|hello|1|#tag1,tag2", s) | |
85 | assert.Len(t, sc.Tags, 2) | |
86 | } | |
87 | ||
88 | func TestNewServiceCheckWithTagsAppend(t *testing.T) { | |
89 | sc := NewServiceCheck("hello", Warn) | |
90 | sc.Tags = append(sc.Tags, "tag1", "tag2") | |
91 | s, err := encodeSC(sc) | |
92 | require.NoError(t, err) | |
93 | assert.Equal(t, "_sc|hello|1|#tag1,tag2", s) | |
94 | assert.Len(t, sc.Tags, 2) | |
95 | } |
5 | 5 | |
6 | 6 | Refer to http://docs.datadoghq.com/guides/dogstatsd/ for information about DogStatsD. |
7 | 7 | |
8 | Example Usage: | |
9 | ||
10 | // Create the client | |
11 | c, err := statsd.New("127.0.0.1:8125") | |
12 | if err != nil { | |
13 | log.Fatal(err) | |
14 | } | |
15 | // Prefix every metric with the app name | |
16 | c.Namespace = "flubber." | |
17 | // Send the EC2 availability zone as a tag with every metric | |
18 | c.Tags = append(c.Tags, "us-east-1a") | |
19 | err = c.Gauge("request.duration", 1.2, nil, 1) | |
20 | ||
21 | 8 | statsd is based on go-statsd-client. |
22 | 9 | */ |
23 | 10 | package statsd |
24 | 11 | |
12 | //go:generate mockgen -source=statsd.go -destination=mocks/statsd.go | |
13 | ||
25 | 14 | import ( |
26 | "bytes" | |
27 | 15 | "errors" |
28 | 16 | "fmt" |
29 | 17 | "io" |
30 | "math/rand" | |
18 | "os" | |
31 | 19 | "strconv" |
32 | 20 | "strings" |
33 | 21 | "sync" |
22 | "sync/atomic" | |
34 | 23 | "time" |
35 | 24 | ) |
36 | 25 | |
37 | 26 | /* |
38 | OptimalPayloadSize defines the optimal payload size for a UDP datagram, 1432 bytes | |
27 | OptimalUDPPayloadSize defines the optimal payload size for a UDP datagram, 1432 bytes | |
39 | 28 | is optimal for regular networks with an MTU of 1500 so datagrams don't get |
40 | 29 | fragmented. It's generally recommended not to fragment UDP datagrams as losing |
41 | 30 | a single fragment will cause the entire datagram to be lost. |
42 | ||
43 | This can be increased if your network has a greater MTU or you don't mind UDP | |
44 | datagrams getting fragmented. The practical limit is MaxUDPPayloadSize | |
45 | 31 | */ |
46 | const OptimalPayloadSize = 1432 | |
32 | const OptimalUDPPayloadSize = 1432 | |
47 | 33 | |
48 | 34 | /* |
49 | 35 | MaxUDPPayloadSize defines the maximum payload size for a UDP datagram. |
53 | 39 | */ |
54 | 40 | const MaxUDPPayloadSize = 65467 |
55 | 41 | |
42 | // DefaultUDPBufferPoolSize is the default size of the buffer pool for UDP clients. | |
43 | const DefaultUDPBufferPoolSize = 2048 | |
44 | ||
45 | // DefaultUDSBufferPoolSize is the default size of the buffer pool for UDS clients. | |
46 | const DefaultUDSBufferPoolSize = 512 | |
47 | ||
48 | /* | |
49 | DefaultMaxAgentPayloadSize is the default maximum payload size the agent | |
50 | can receive. This can be adjusted by changing dogstatsd_buffer_size in the | |
51 | agent configuration file datadog.yaml. This is also used as the optimal payload size | |
52 | for UDS datagrams. | |
53 | */ | |
54 | const DefaultMaxAgentPayloadSize = 8192 | |
55 | ||
56 | 56 | /* |
57 | 57 | UnixAddressPrefix holds the prefix to use to enable Unix Domain Socket |
58 | 58 | traffic instead of UDP. |
60 | 60 | const UnixAddressPrefix = "unix://" |
61 | 61 | |
62 | 62 | /* |
63 | Stat suffixes | |
63 | WindowsPipeAddressPrefix holds the prefix to use to enable Windows Named Pipes | |
64 | traffic instead of UDP. | |
64 | 65 | */ |
65 | var ( | |
66 | gaugeSuffix = []byte("|g") | |
67 | countSuffix = []byte("|c") | |
68 | histogramSuffix = []byte("|h") | |
69 | distributionSuffix = []byte("|d") | |
70 | decrSuffix = []byte("-1|c") | |
71 | incrSuffix = []byte("1|c") | |
72 | setSuffix = []byte("|s") | |
73 | timingSuffix = []byte("|ms") | |
66 | const WindowsPipeAddressPrefix = `\\.\pipe\` | |
67 | ||
68 | const ( | |
69 | agentHostEnvVarName = "DD_AGENT_HOST" | |
70 | agentPortEnvVarName = "DD_DOGSTATSD_PORT" | |
71 | defaultUDPPort = "8125" | |
74 | 72 | ) |
75 | 73 | |
76 | // A statsdWriter offers a standard interface regardless of the underlying | |
77 | // protocol. For now UDS and UPD writers are available. | |
78 | type statsdWriter interface { | |
79 | Write(data []byte) (n int, err error) | |
80 | SetWriteTimeout(time.Duration) error | |
74 | const ( | |
75 | // ddEntityID specifies client-side user-specified entity ID injection. | |
76 | // This env var can be set to the Pod UID on Kubernetes via the downward API. | |
77 | // Docs: https://docs.datadoghq.com/developers/dogstatsd/?tab=kubernetes#origin-detection-over-udp | |
78 | ddEntityID = "DD_ENTITY_ID" | |
79 | ||
80 | // ddEntityIDTag specifies the tag name for the client-side entity ID injection | |
81 | // The Agent expects this tag to contain a non-prefixed Kubernetes Pod UID. | |
82 | ddEntityIDTag = "dd.internal.entity_id" | |
83 | ||
84 | // originDetectionEnabled specifies the env var to enable/disable sending the container ID field. | |
85 | originDetectionEnabled = "DD_ORIGIN_DETECTION_ENABLED" | |
86 | ) | |
87 | ||
88 | /* | |
89 | ddEnvTagsMapping is a mapping of each "DD_" prefixed environment variable | |
90 | to a specific tag name. We use a slice to keep the order and simplify tests. | |
91 | */ | |
92 | var ddEnvTagsMapping = []struct{ envName, tagName string }{ | |
93 | {ddEntityID, ddEntityIDTag}, // Client-side entity ID injection for container tagging. | |
94 | {"DD_ENV", "env"}, // The name of the env in which the service runs. | |
95 | {"DD_SERVICE", "service"}, // The name of the running service. | |
96 | {"DD_VERSION", "version"}, // The current version of the running service. | |
97 | } | |
98 | ||
99 | type metricType int | |
100 | ||
101 | const ( | |
102 | gauge metricType = iota | |
103 | count | |
104 | histogram | |
105 | histogramAggregated | |
106 | distribution | |
107 | distributionAggregated | |
108 | set | |
109 | timing | |
110 | timingAggregated | |
111 | event | |
112 | serviceCheck | |
113 | ) | |
114 | ||
115 | type receivingMode int | |
116 | ||
117 | const ( | |
118 | mutexMode receivingMode = iota | |
119 | channelMode | |
120 | ) | |
121 | ||
122 | const ( | |
123 | writerNameUDP string = "udp" | |
124 | writerNameUDS string = "uds" | |
125 | writerWindowsPipe string = "pipe" | |
126 | ) | |
127 | ||
128 | type metric struct { | |
129 | metricType metricType | |
130 | namespace string | |
131 | globalTags []string | |
132 | name string | |
133 | fvalue float64 | |
134 | fvalues []float64 | |
135 | ivalue int64 | |
136 | svalue string | |
137 | evalue *Event | |
138 | scvalue *ServiceCheck | |
139 | tags []string | |
140 | stags string | |
141 | rate float64 | |
142 | } | |
143 | ||
144 | type noClientErr string | |
145 | ||
146 | // ErrNoClient is returned if statsd reporting methods are invoked on | |
147 | // a nil client. | |
148 | const ErrNoClient = noClientErr("statsd client is nil") | |
149 | ||
150 | func (e noClientErr) Error() string { | |
151 | return string(e) | |
152 | } | |
153 | ||
154 | // ClientInterface is an interface that exposes the common client functions for the | |
155 | // purpose of being able to provide a no-op client or even mocking. This can aid | |
156 | // downstream users' with their testing. | |
157 | type ClientInterface interface { | |
158 | // Gauge measures the value of a metric at a particular time. | |
159 | Gauge(name string, value float64, tags []string, rate float64) error | |
160 | ||
161 | // Count tracks how many times something happened per second. | |
162 | Count(name string, value int64, tags []string, rate float64) error | |
163 | ||
164 | // Histogram tracks the statistical distribution of a set of values on each host. | |
165 | Histogram(name string, value float64, tags []string, rate float64) error | |
166 | ||
167 | // Distribution tracks the statistical distribution of a set of values across your infrastructure. | |
168 | Distribution(name string, value float64, tags []string, rate float64) error | |
169 | ||
170 | // Decr is just Count of -1 | |
171 | Decr(name string, tags []string, rate float64) error | |
172 | ||
173 | // Incr is just Count of 1 | |
174 | Incr(name string, tags []string, rate float64) error | |
175 | ||
176 | // Set counts the number of unique elements in a group. | |
177 | Set(name string, value string, tags []string, rate float64) error | |
178 | ||
179 | // Timing sends timing information, it is an alias for TimeInMilliseconds | |
180 | Timing(name string, value time.Duration, tags []string, rate float64) error | |
181 | ||
182 | // TimeInMilliseconds sends timing information in milliseconds. | |
183 | // It is flushed by statsd with percentiles, mean and other info (https://github.com/etsy/statsd/blob/master/docs/metric_types.md#timing) | |
184 | TimeInMilliseconds(name string, value float64, tags []string, rate float64) error | |
185 | ||
186 | // Event sends the provided Event. | |
187 | Event(e *Event) error | |
188 | ||
189 | // SimpleEvent sends an event with the provided title and text. | |
190 | SimpleEvent(title, text string) error | |
191 | ||
192 | // ServiceCheck sends the provided ServiceCheck. | |
193 | ServiceCheck(sc *ServiceCheck) error | |
194 | ||
195 | // SimpleServiceCheck sends an serviceCheck with the provided name and status. | |
196 | SimpleServiceCheck(name string, status ServiceCheckStatus) error | |
197 | ||
198 | // Close the client connection. | |
81 | 199 | Close() error |
200 | ||
201 | // Flush forces a flush of all the queued dogstatsd payloads. | |
202 | Flush() error | |
203 | ||
204 | // IsClosed returns if the client has been closed. | |
205 | IsClosed() bool | |
206 | ||
207 | // GetTelemetry return the telemetry metrics for the client since it started. | |
208 | GetTelemetry() Telemetry | |
82 | 209 | } |
83 | 210 | |
84 | 211 | // A Client is a handle for sending messages to dogstatsd. It is safe to |
85 | 212 | // use one Client from multiple goroutines simultaneously. |
86 | 213 | type Client struct { |
87 | // Writer handles the underlying networking protocol | |
88 | writer statsdWriter | |
89 | // Namespace to prepend to all statsd calls | |
90 | Namespace string | |
91 | // Tags are global tags to be added to every statsd call | |
92 | Tags []string | |
93 | // skipErrors turns off error passing and allows UDS to emulate UDP behaviour | |
94 | SkipErrors bool | |
95 | // BufferLength is the length of the buffer in commands. | |
96 | bufferLength int | |
97 | flushTime time.Duration | |
98 | commands []string | |
99 | buffer bytes.Buffer | |
100 | stop chan struct{} | |
101 | sync.Mutex | |
102 | } | |
103 | ||
104 | // New returns a pointer to a new Client given an addr in the format "hostname:port" or | |
105 | // "unix:///path/to/socket". | |
106 | func New(addr string) (*Client, error) { | |
107 | if strings.HasPrefix(addr, UnixAddressPrefix) { | |
108 | w, err := newUdsWriter(addr[len(UnixAddressPrefix)-1:]) | |
109 | if err != nil { | |
110 | return nil, err | |
111 | } | |
112 | return NewWithWriter(w) | |
113 | } | |
114 | w, err := newUDPWriter(addr) | |
214 | // Sender handles the underlying networking protocol | |
215 | sender *sender | |
216 | // namespace to prepend to all statsd calls | |
217 | namespace string | |
218 | // tags are global tags to be added to every statsd call | |
219 | tags []string | |
220 | flushTime time.Duration | |
221 | telemetry *statsdTelemetry | |
222 | telemetryClient *telemetryClient | |
223 | stop chan struct{} | |
224 | wg sync.WaitGroup | |
225 | workers []*worker | |
226 | closerLock sync.Mutex | |
227 | workersMode receivingMode | |
228 | aggregatorMode receivingMode | |
229 | agg *aggregator | |
230 | aggExtended *aggregator | |
231 | options []Option | |
232 | addrOption string | |
233 | isClosed bool | |
234 | } | |
235 | ||
// statsdTelemetry contains telemetry counters about the client itself.
// Counters are incremented and read via sync/atomic (see the metric
// methods and flushTelemetryMetrics), so they are kept as flat uint64
// fields — NOTE(review): keep them 64-bit aligned for 32-bit platforms.
type statsdTelemetry struct {
	totalMetricsGauge        uint64
	totalMetricsCount        uint64
	totalMetricsHistogram    uint64
	totalMetricsDistribution uint64
	totalMetricsSet          uint64
	totalMetricsTiming       uint64
	totalEvents              uint64
	totalServiceChecks       uint64
	totalDroppedOnReceive    uint64
}
248 | ||
249 | // Verify that Client implements the ClientInterface. | |
250 | // https://golang.org/doc/faq#guarantee_satisfies_interface | |
251 | var _ ClientInterface = &Client{} | |
252 | ||
253 | func resolveAddr(addr string) string { | |
254 | envPort := "" | |
255 | if addr == "" { | |
256 | addr = os.Getenv(agentHostEnvVarName) | |
257 | envPort = os.Getenv(agentPortEnvVarName) | |
258 | } | |
259 | ||
260 | if addr == "" { | |
261 | return "" | |
262 | } | |
263 | ||
264 | if !strings.HasPrefix(addr, WindowsPipeAddressPrefix) && !strings.HasPrefix(addr, UnixAddressPrefix) { | |
265 | if !strings.Contains(addr, ":") { | |
266 | if envPort != "" { | |
267 | addr = fmt.Sprintf("%s:%s", addr, envPort) | |
268 | } else { | |
269 | addr = fmt.Sprintf("%s:%s", addr, defaultUDPPort) | |
270 | } | |
271 | } | |
272 | } | |
273 | return addr | |
274 | } | |
275 | ||
276 | func createWriter(addr string, writeTimeout time.Duration) (io.WriteCloser, string, error) { | |
277 | addr = resolveAddr(addr) | |
278 | if addr == "" { | |
279 | return nil, "", errors.New("No address passed and autodetection from environment failed") | |
280 | } | |
281 | ||
282 | switch { | |
283 | case strings.HasPrefix(addr, WindowsPipeAddressPrefix): | |
284 | w, err := newWindowsPipeWriter(addr, writeTimeout) | |
285 | return w, writerWindowsPipe, err | |
286 | case strings.HasPrefix(addr, UnixAddressPrefix): | |
287 | w, err := newUDSWriter(addr[len(UnixAddressPrefix):], writeTimeout) | |
288 | return w, writerNameUDS, err | |
289 | default: | |
290 | w, err := newUDPWriter(addr, writeTimeout) | |
291 | return w, writerNameUDP, err | |
292 | } | |
293 | } | |
294 | ||
295 | // New returns a pointer to a new Client given an addr in the format "hostname:port" for UDP, | |
296 | // "unix:///path/to/socket" for UDS or "\\.\pipe\path\to\pipe" for Windows Named Pipes. | |
297 | func New(addr string, options ...Option) (*Client, error) { | |
298 | o, err := resolveOptions(options) | |
115 | 299 | if err != nil { |
116 | 300 | return nil, err |
117 | 301 | } |
118 | return NewWithWriter(w) | |
119 | } | |
120 | ||
121 | // NewWithWriter creates a new Client with given writer. Writer is a | |
122 | // io.WriteCloser + SetWriteTimeout(time.Duration) error | |
123 | func NewWithWriter(w statsdWriter) (*Client, error) { | |
124 | client := &Client{writer: w, SkipErrors: false} | |
125 | return client, nil | |
126 | } | |
127 | ||
128 | // NewBuffered returns a Client that buffers its output and sends it in chunks. | |
129 | // Buflen is the length of the buffer in number of commands. | |
130 | func NewBuffered(addr string, buflen int) (*Client, error) { | |
131 | client, err := New(addr) | |
302 | ||
303 | w, writerType, err := createWriter(addr, o.writeTimeout) | |
132 | 304 | if err != nil { |
133 | 305 | return nil, err |
134 | 306 | } |
135 | client.bufferLength = buflen | |
136 | client.commands = make([]string, 0, buflen) | |
137 | client.flushTime = time.Millisecond * 100 | |
138 | client.stop = make(chan struct{}, 1) | |
139 | go client.watch() | |
140 | return client, nil | |
141 | } | |
142 | ||
143 | // format a message from its name, value, tags and rate. Also adds global | |
144 | // namespace and tags. | |
145 | func (c *Client) format(name string, value interface{}, suffix []byte, tags []string, rate float64) string { | |
146 | var buf bytes.Buffer | |
147 | if c.Namespace != "" { | |
148 | buf.WriteString(c.Namespace) | |
149 | } | |
150 | buf.WriteString(name) | |
151 | buf.WriteString(":") | |
152 | ||
153 | switch val := value.(type) { | |
154 | case float64: | |
155 | buf.Write(strconv.AppendFloat([]byte{}, val, 'f', 6, 64)) | |
156 | ||
157 | case int64: | |
158 | buf.Write(strconv.AppendInt([]byte{}, val, 10)) | |
159 | ||
160 | case string: | |
161 | buf.WriteString(val) | |
162 | ||
163 | default: | |
164 | // do nothing | |
165 | } | |
166 | buf.Write(suffix) | |
167 | ||
168 | if rate < 1 { | |
169 | buf.WriteString(`|@`) | |
170 | buf.WriteString(strconv.FormatFloat(rate, 'f', -1, 64)) | |
171 | } | |
172 | ||
173 | writeTagString(&buf, c.Tags, tags) | |
174 | ||
175 | return buf.String() | |
176 | } | |
177 | ||
178 | // SetWriteTimeout allows the user to set a custom UDS write timeout. Not supported for UDP. | |
179 | func (c *Client) SetWriteTimeout(d time.Duration) error { | |
180 | if c == nil { | |
181 | return nil | |
182 | } | |
183 | return c.writer.SetWriteTimeout(d) | |
307 | ||
308 | client, err := newWithWriter(w, o, writerType) | |
309 | if err == nil { | |
310 | client.options = append(client.options, options...) | |
311 | client.addrOption = addr | |
312 | } | |
313 | return client, err | |
314 | } | |
315 | ||
316 | // NewWithWriter creates a new Client with given writer. Writer is a | |
317 | // io.WriteCloser | |
318 | func NewWithWriter(w io.WriteCloser, options ...Option) (*Client, error) { | |
319 | o, err := resolveOptions(options) | |
320 | if err != nil { | |
321 | return nil, err | |
322 | } | |
323 | return newWithWriter(w, o, "custom") | |
324 | } | |
325 | ||
326 | // CloneWithExtraOptions create a new Client with extra options | |
327 | func CloneWithExtraOptions(c *Client, options ...Option) (*Client, error) { | |
328 | if c == nil { | |
329 | return nil, ErrNoClient | |
330 | } | |
331 | ||
332 | if c.addrOption == "" { | |
333 | return nil, fmt.Errorf("can't clone client with no addrOption") | |
334 | } | |
335 | opt := append(c.options, options...) | |
336 | return New(c.addrOption, opt...) | |
337 | } | |
338 | ||
339 | func newWithWriter(w io.WriteCloser, o *Options, writerName string) (*Client, error) { | |
340 | c := Client{ | |
341 | namespace: o.namespace, | |
342 | tags: o.tags, | |
343 | telemetry: &statsdTelemetry{}, | |
344 | } | |
345 | ||
346 | hasEntityID := false | |
347 | // Inject values of DD_* environment variables as global tags. | |
348 | for _, mapping := range ddEnvTagsMapping { | |
349 | if value := os.Getenv(mapping.envName); value != "" { | |
350 | if mapping.envName == ddEntityID { | |
351 | hasEntityID = true | |
352 | } | |
353 | c.tags = append(c.tags, fmt.Sprintf("%s:%s", mapping.tagName, value)) | |
354 | } | |
355 | } | |
356 | ||
357 | if !hasEntityID { | |
358 | initContainerID(o.containerID, isOriginDetectionEnabled(o, hasEntityID)) | |
359 | } | |
360 | ||
361 | if o.maxBytesPerPayload == 0 { | |
362 | if writerName == writerNameUDS { | |
363 | o.maxBytesPerPayload = DefaultMaxAgentPayloadSize | |
364 | } else { | |
365 | o.maxBytesPerPayload = OptimalUDPPayloadSize | |
366 | } | |
367 | } | |
368 | if o.bufferPoolSize == 0 { | |
369 | if writerName == writerNameUDS { | |
370 | o.bufferPoolSize = DefaultUDSBufferPoolSize | |
371 | } else { | |
372 | o.bufferPoolSize = DefaultUDPBufferPoolSize | |
373 | } | |
374 | } | |
375 | if o.senderQueueSize == 0 { | |
376 | if writerName == writerNameUDS { | |
377 | o.senderQueueSize = DefaultUDSBufferPoolSize | |
378 | } else { | |
379 | o.senderQueueSize = DefaultUDPBufferPoolSize | |
380 | } | |
381 | } | |
382 | ||
383 | bufferPool := newBufferPool(o.bufferPoolSize, o.maxBytesPerPayload, o.maxMessagesPerPayload) | |
384 | c.sender = newSender(w, o.senderQueueSize, bufferPool) | |
385 | c.aggregatorMode = o.receiveMode | |
386 | ||
387 | c.workersMode = o.receiveMode | |
388 | // channelMode mode at the worker level is not enabled when | |
389 | // ExtendedAggregation is since the user app will not directly | |
390 | // use the worker (the aggregator sit between the app and the | |
391 | // workers). | |
392 | if o.extendedAggregation { | |
393 | c.workersMode = mutexMode | |
394 | } | |
395 | ||
396 | if o.aggregation || o.extendedAggregation { | |
397 | c.agg = newAggregator(&c) | |
398 | c.agg.start(o.aggregationFlushInterval) | |
399 | ||
400 | if o.extendedAggregation { | |
401 | c.aggExtended = c.agg | |
402 | ||
403 | if c.aggregatorMode == channelMode { | |
404 | c.agg.startReceivingMetric(o.channelModeBufferSize, o.workersCount) | |
405 | } | |
406 | } | |
407 | } | |
408 | ||
409 | for i := 0; i < o.workersCount; i++ { | |
410 | w := newWorker(bufferPool, c.sender) | |
411 | c.workers = append(c.workers, w) | |
412 | ||
413 | if c.workersMode == channelMode { | |
414 | w.startReceivingMetric(o.channelModeBufferSize) | |
415 | } | |
416 | } | |
417 | ||
418 | c.flushTime = o.bufferFlushInterval | |
419 | c.stop = make(chan struct{}, 1) | |
420 | ||
421 | c.wg.Add(1) | |
422 | go func() { | |
423 | defer c.wg.Done() | |
424 | c.watch() | |
425 | }() | |
426 | ||
427 | if o.telemetry { | |
428 | if o.telemetryAddr == "" { | |
429 | c.telemetryClient = newTelemetryClient(&c, writerName, c.agg != nil) | |
430 | } else { | |
431 | var err error | |
432 | c.telemetryClient, err = newTelemetryClientWithCustomAddr(&c, writerName, o.telemetryAddr, c.agg != nil, bufferPool, o.writeTimeout) | |
433 | if err != nil { | |
434 | return nil, err | |
435 | } | |
436 | } | |
437 | c.telemetryClient.run(&c.wg, c.stop) | |
438 | } | |
439 | ||
440 | return &c, nil | |
184 | 441 | } |
185 | 442 | |
186 | 443 | func (c *Client) watch() { |
189 | 446 | for { |
190 | 447 | select { |
191 | 448 | case <-ticker.C: |
192 | c.Lock() | |
193 | if len(c.commands) > 0 { | |
194 | // FIXME: eating error here | |
195 | c.flushLocked() | |
449 | for _, w := range c.workers { | |
450 | w.flush() | |
196 | 451 | } |
197 | c.Unlock() | |
198 | 452 | case <-c.stop: |
199 | 453 | ticker.Stop() |
200 | 454 | return |
202 | 456 | } |
203 | 457 | } |
204 | 458 | |
205 | func (c *Client) append(cmd string) error { | |
206 | c.Lock() | |
207 | defer c.Unlock() | |
208 | c.commands = append(c.commands, cmd) | |
209 | // if we should flush, lets do it | |
210 | if len(c.commands) == c.bufferLength { | |
211 | if err := c.flushLocked(); err != nil { | |
212 | return err | |
213 | } | |
214 | } | |
459 | // Flush forces a flush of all the queued dogstatsd payloads This method is | |
460 | // blocking and will not return until everything is sent through the network. | |
461 | // In mutexMode, this will also block sampling new data to the client while the | |
462 | // workers and sender are flushed. | |
463 | func (c *Client) Flush() error { | |
464 | if c == nil { | |
465 | return ErrNoClient | |
466 | } | |
467 | if c.agg != nil { | |
468 | c.agg.flush() | |
469 | } | |
470 | for _, w := range c.workers { | |
471 | w.pause() | |
472 | defer w.unpause() | |
473 | w.flushUnsafe() | |
474 | } | |
475 | // Now that the worker are pause the sender can flush the queue between | |
476 | // worker and senders | |
477 | c.sender.flush() | |
215 | 478 | return nil |
216 | 479 | } |
217 | 480 | |
218 | func (c *Client) joinMaxSize(cmds []string, sep string, maxSize int) ([][]byte, []int) { | |
219 | c.buffer.Reset() //clear buffer | |
220 | ||
221 | var frames [][]byte | |
222 | var ncmds []int | |
223 | sepBytes := []byte(sep) | |
224 | sepLen := len(sep) | |
225 | ||
226 | elem := 0 | |
227 | for _, cmd := range cmds { | |
228 | needed := len(cmd) | |
229 | ||
230 | if elem != 0 { | |
231 | needed = needed + sepLen | |
232 | } | |
233 | ||
234 | if c.buffer.Len()+needed <= maxSize { | |
235 | if elem != 0 { | |
236 | c.buffer.Write(sepBytes) | |
237 | } | |
238 | c.buffer.WriteString(cmd) | |
239 | elem++ | |
240 | } else { | |
241 | frames = append(frames, copyAndResetBuffer(&c.buffer)) | |
242 | ncmds = append(ncmds, elem) | |
243 | // if cmd is bigger than maxSize it will get flushed on next loop | |
244 | c.buffer.WriteString(cmd) | |
245 | elem = 1 | |
246 | } | |
247 | } | |
248 | ||
249 | //add whatever is left! if there's actually something | |
250 | if c.buffer.Len() > 0 { | |
251 | frames = append(frames, copyAndResetBuffer(&c.buffer)) | |
252 | ncmds = append(ncmds, elem) | |
253 | } | |
254 | ||
255 | return frames, ncmds | |
256 | } | |
257 | ||
258 | func copyAndResetBuffer(buf *bytes.Buffer) []byte { | |
259 | tmpBuf := make([]byte, buf.Len()) | |
260 | copy(tmpBuf, buf.Bytes()) | |
261 | buf.Reset() | |
262 | return tmpBuf | |
263 | } | |
264 | ||
265 | // Flush forces a flush of the pending commands in the buffer | |
266 | func (c *Client) Flush() error { | |
267 | if c == nil { | |
481 | // IsClosed returns if the client has been closed. | |
482 | func (c *Client) IsClosed() bool { | |
483 | c.closerLock.Lock() | |
484 | defer c.closerLock.Unlock() | |
485 | return c.isClosed | |
486 | } | |
487 | ||
488 | func (c *Client) flushTelemetryMetrics(t *Telemetry) { | |
489 | t.TotalMetricsGauge = atomic.LoadUint64(&c.telemetry.totalMetricsGauge) | |
490 | t.TotalMetricsCount = atomic.LoadUint64(&c.telemetry.totalMetricsCount) | |
491 | t.TotalMetricsSet = atomic.LoadUint64(&c.telemetry.totalMetricsSet) | |
492 | t.TotalMetricsHistogram = atomic.LoadUint64(&c.telemetry.totalMetricsHistogram) | |
493 | t.TotalMetricsDistribution = atomic.LoadUint64(&c.telemetry.totalMetricsDistribution) | |
494 | t.TotalMetricsTiming = atomic.LoadUint64(&c.telemetry.totalMetricsTiming) | |
495 | t.TotalEvents = atomic.LoadUint64(&c.telemetry.totalEvents) | |
496 | t.TotalServiceChecks = atomic.LoadUint64(&c.telemetry.totalServiceChecks) | |
497 | t.TotalDroppedOnReceive = atomic.LoadUint64(&c.telemetry.totalDroppedOnReceive) | |
498 | } | |
499 | ||
500 | // GetTelemetry return the telemetry metrics for the client since it started. | |
501 | func (c *Client) GetTelemetry() Telemetry { | |
502 | return c.telemetryClient.getTelemetry() | |
503 | } | |
504 | ||
505 | func (c *Client) send(m metric) error { | |
506 | h := hashString32(m.name) | |
507 | worker := c.workers[h%uint32(len(c.workers))] | |
508 | ||
509 | if c.workersMode == channelMode { | |
510 | select { | |
511 | case worker.inputMetrics <- m: | |
512 | default: | |
513 | atomic.AddUint64(&c.telemetry.totalDroppedOnReceive, 1) | |
514 | } | |
268 | 515 | return nil |
269 | 516 | } |
270 | c.Lock() | |
271 | defer c.Unlock() | |
272 | return c.flushLocked() | |
273 | } | |
274 | ||
275 | // flush the commands in the buffer. Lock must be held by caller. | |
276 | func (c *Client) flushLocked() error { | |
277 | frames, flushable := c.joinMaxSize(c.commands, "\n", OptimalPayloadSize) | |
278 | var err error | |
279 | cmdsFlushed := 0 | |
280 | for i, data := range frames { | |
281 | _, e := c.writer.Write(data) | |
282 | if e != nil { | |
283 | err = e | |
284 | break | |
285 | } | |
286 | cmdsFlushed += flushable[i] | |
287 | } | |
288 | ||
289 | // clear the slice with a slice op, doesn't realloc | |
290 | if cmdsFlushed == len(c.commands) { | |
291 | c.commands = c.commands[:0] | |
292 | } else { | |
293 | //this case will cause a future realloc... | |
294 | // drop problematic command though (sorry). | |
295 | c.commands = c.commands[cmdsFlushed+1:] | |
296 | } | |
297 | return err | |
298 | } | |
299 | ||
300 | func (c *Client) sendMsg(msg string) error { | |
301 | // return an error if message is bigger than MaxUDPPayloadSize | |
302 | if len(msg) > MaxUDPPayloadSize { | |
303 | return errors.New("message size exceeds MaxUDPPayloadSize") | |
304 | } | |
305 | ||
306 | // if this client is buffered, then we'll just append this | |
307 | if c.bufferLength > 0 { | |
308 | return c.append(msg) | |
309 | } | |
310 | ||
311 | _, err := c.writer.Write([]byte(msg)) | |
312 | ||
313 | if c.SkipErrors { | |
517 | return worker.processMetric(m) | |
518 | } | |
519 | ||
520 | // sendBlocking is used by the aggregator to inject aggregated metrics. | |
521 | func (c *Client) sendBlocking(m metric) error { | |
522 | m.globalTags = c.tags | |
523 | m.namespace = c.namespace | |
524 | ||
525 | h := hashString32(m.name) | |
526 | worker := c.workers[h%uint32(len(c.workers))] | |
527 | return worker.processMetric(m) | |
528 | } | |
529 | ||
530 | func (c *Client) sendToAggregator(mType metricType, name string, value float64, tags []string, rate float64, f bufferedMetricSampleFunc) error { | |
531 | if c.aggregatorMode == channelMode { | |
532 | select { | |
533 | case c.aggExtended.inputMetrics <- metric{metricType: mType, name: name, fvalue: value, tags: tags, rate: rate}: | |
534 | default: | |
535 | atomic.AddUint64(&c.telemetry.totalDroppedOnReceive, 1) | |
536 | } | |
314 | 537 | return nil |
315 | 538 | } |
316 | return err | |
317 | } | |
318 | ||
319 | // send handles sampling and sends the message over UDP. It also adds global namespace prefixes and tags. | |
320 | func (c *Client) send(name string, value interface{}, suffix []byte, tags []string, rate float64) error { | |
321 | if c == nil { | |
322 | return nil | |
323 | } | |
324 | if rate < 1 && rand.Float64() > rate { | |
325 | return nil | |
326 | } | |
327 | data := c.format(name, value, suffix, tags, rate) | |
328 | return c.sendMsg(data) | |
539 | return f(name, value, tags, rate) | |
329 | 540 | } |
330 | 541 | |
331 | 542 | // Gauge measures the value of a metric at a particular time. |
332 | 543 | func (c *Client) Gauge(name string, value float64, tags []string, rate float64) error { |
333 | return c.send(name, value, gaugeSuffix, tags, rate) | |
544 | if c == nil { | |
545 | return ErrNoClient | |
546 | } | |
547 | atomic.AddUint64(&c.telemetry.totalMetricsGauge, 1) | |
548 | if c.agg != nil { | |
549 | return c.agg.gauge(name, value, tags) | |
550 | } | |
551 | return c.send(metric{metricType: gauge, name: name, fvalue: value, tags: tags, rate: rate, globalTags: c.tags, namespace: c.namespace}) | |
334 | 552 | } |
335 | 553 | |
336 | 554 | // Count tracks how many times something happened per second. |
337 | 555 | func (c *Client) Count(name string, value int64, tags []string, rate float64) error { |
338 | return c.send(name, value, countSuffix, tags, rate) | |
556 | if c == nil { | |
557 | return ErrNoClient | |
558 | } | |
559 | atomic.AddUint64(&c.telemetry.totalMetricsCount, 1) | |
560 | if c.agg != nil { | |
561 | return c.agg.count(name, value, tags) | |
562 | } | |
563 | return c.send(metric{metricType: count, name: name, ivalue: value, tags: tags, rate: rate, globalTags: c.tags, namespace: c.namespace}) | |
339 | 564 | } |
340 | 565 | |
341 | 566 | // Histogram tracks the statistical distribution of a set of values on each host. |
342 | 567 | func (c *Client) Histogram(name string, value float64, tags []string, rate float64) error { |
343 | return c.send(name, value, histogramSuffix, tags, rate) | |
568 | if c == nil { | |
569 | return ErrNoClient | |
570 | } | |
571 | atomic.AddUint64(&c.telemetry.totalMetricsHistogram, 1) | |
572 | if c.aggExtended != nil { | |
573 | return c.sendToAggregator(histogram, name, value, tags, rate, c.aggExtended.histogram) | |
574 | } | |
575 | return c.send(metric{metricType: histogram, name: name, fvalue: value, tags: tags, rate: rate, globalTags: c.tags, namespace: c.namespace}) | |
344 | 576 | } |
345 | 577 | |
346 | 578 | // Distribution tracks the statistical distribution of a set of values across your infrastructure. |
347 | 579 | func (c *Client) Distribution(name string, value float64, tags []string, rate float64) error { |
348 | return c.send(name, value, distributionSuffix, tags, rate) | |
580 | if c == nil { | |
581 | return ErrNoClient | |
582 | } | |
583 | atomic.AddUint64(&c.telemetry.totalMetricsDistribution, 1) | |
584 | if c.aggExtended != nil { | |
585 | return c.sendToAggregator(distribution, name, value, tags, rate, c.aggExtended.distribution) | |
586 | } | |
587 | return c.send(metric{metricType: distribution, name: name, fvalue: value, tags: tags, rate: rate, globalTags: c.tags, namespace: c.namespace}) | |
349 | 588 | } |
350 | 589 | |
351 | 590 | // Decr is just Count of -1 |
352 | 591 | func (c *Client) Decr(name string, tags []string, rate float64) error { |
353 | return c.send(name, nil, decrSuffix, tags, rate) | |
592 | return c.Count(name, -1, tags, rate) | |
354 | 593 | } |
355 | 594 | |
356 | 595 | // Incr is just Count of 1 |
357 | 596 | func (c *Client) Incr(name string, tags []string, rate float64) error { |
358 | return c.send(name, nil, incrSuffix, tags, rate) | |
597 | return c.Count(name, 1, tags, rate) | |
359 | 598 | } |
360 | 599 | |
361 | 600 | // Set counts the number of unique elements in a group. |
362 | 601 | func (c *Client) Set(name string, value string, tags []string, rate float64) error { |
363 | return c.send(name, value, setSuffix, tags, rate) | |
602 | if c == nil { | |
603 | return ErrNoClient | |
604 | } | |
605 | atomic.AddUint64(&c.telemetry.totalMetricsSet, 1) | |
606 | if c.agg != nil { | |
607 | return c.agg.set(name, value, tags) | |
608 | } | |
609 | return c.send(metric{metricType: set, name: name, svalue: value, tags: tags, rate: rate, globalTags: c.tags, namespace: c.namespace}) | |
364 | 610 | } |
365 | 611 | |
366 | 612 | // Timing sends timing information, it is an alias for TimeInMilliseconds |
371 | 617 | // TimeInMilliseconds sends timing information in milliseconds. |
372 | 618 | // It is flushed by statsd with percentiles, mean and other info (https://github.com/etsy/statsd/blob/master/docs/metric_types.md#timing) |
373 | 619 | func (c *Client) TimeInMilliseconds(name string, value float64, tags []string, rate float64) error { |
374 | return c.send(name, value, timingSuffix, tags, rate) | |
620 | if c == nil { | |
621 | return ErrNoClient | |
622 | } | |
623 | atomic.AddUint64(&c.telemetry.totalMetricsTiming, 1) | |
624 | if c.aggExtended != nil { | |
625 | return c.sendToAggregator(timing, name, value, tags, rate, c.aggExtended.timing) | |
626 | } | |
627 | return c.send(metric{metricType: timing, name: name, fvalue: value, tags: tags, rate: rate, globalTags: c.tags, namespace: c.namespace}) | |
375 | 628 | } |
376 | 629 | |
377 | 630 | // Event sends the provided Event. |
378 | 631 | func (c *Client) Event(e *Event) error { |
379 | 632 | if c == nil { |
380 | return nil | |
381 | } | |
382 | stat, err := e.Encode(c.Tags...) | |
383 | if err != nil { | |
384 | return err | |
385 | } | |
386 | return c.sendMsg(stat) | |
633 | return ErrNoClient | |
634 | } | |
635 | atomic.AddUint64(&c.telemetry.totalEvents, 1) | |
636 | return c.send(metric{metricType: event, evalue: e, rate: 1, globalTags: c.tags, namespace: c.namespace}) | |
387 | 637 | } |
388 | 638 | |
389 | 639 | // SimpleEvent sends an event with the provided title and text. |
395 | 645 | // ServiceCheck sends the provided ServiceCheck. |
396 | 646 | func (c *Client) ServiceCheck(sc *ServiceCheck) error { |
397 | 647 | if c == nil { |
398 | return nil | |
399 | } | |
400 | stat, err := sc.Encode(c.Tags...) | |
401 | if err != nil { | |
402 | return err | |
403 | } | |
404 | return c.sendMsg(stat) | |
648 | return ErrNoClient | |
649 | } | |
650 | atomic.AddUint64(&c.telemetry.totalServiceChecks, 1) | |
651 | return c.send(metric{metricType: serviceCheck, scvalue: sc, rate: 1, globalTags: c.tags, namespace: c.namespace}) | |
405 | 652 | } |
406 | 653 | |
407 | 654 | // SimpleServiceCheck sends an serviceCheck with the provided name and status. |
413 | 660 | // Close the client connection. |
414 | 661 | func (c *Client) Close() error { |
415 | 662 | if c == nil { |
663 | return ErrNoClient | |
664 | } | |
665 | ||
666 | // Acquire closer lock to ensure only one thread can close the stop channel | |
667 | c.closerLock.Lock() | |
668 | defer c.closerLock.Unlock() | |
669 | ||
670 | if c.isClosed { | |
416 | 671 | return nil |
417 | 672 | } |
673 | ||
674 | // Notify all other threads that they should stop | |
418 | 675 | select { |
419 | case c.stop <- struct{}{}: | |
676 | case <-c.stop: | |
677 | return nil | |
420 | 678 | default: |
421 | 679 | } |
422 | ||
423 | // if this client is buffered, flush before closing the writer | |
424 | if c.bufferLength > 0 { | |
425 | if err := c.Flush(); err != nil { | |
426 | return err | |
427 | } | |
428 | } | |
429 | ||
430 | return c.writer.Close() | |
431 | } | |
432 | ||
433 | // Events support | |
434 | // EventAlertType and EventAlertPriority became exported types after this issue was submitted: https://github.com/DataDog/datadog-go/issues/41 | |
435 | // The reason why they got exported is so that client code can directly use the types. | |
436 | ||
437 | // EventAlertType is the alert type for events | |
438 | type EventAlertType string | |
439 | ||
440 | const ( | |
441 | // Info is the "info" AlertType for events | |
442 | Info EventAlertType = "info" | |
443 | // Error is the "error" AlertType for events | |
444 | Error EventAlertType = "error" | |
445 | // Warning is the "warning" AlertType for events | |
446 | Warning EventAlertType = "warning" | |
447 | // Success is the "success" AlertType for events | |
448 | Success EventAlertType = "success" | |
449 | ) | |
450 | ||
451 | // EventPriority is the event priority for events | |
452 | type EventPriority string | |
453 | ||
454 | const ( | |
455 | // Normal is the "normal" Priority for events | |
456 | Normal EventPriority = "normal" | |
457 | // Low is the "low" Priority for events | |
458 | Low EventPriority = "low" | |
459 | ) | |
460 | ||
461 | // An Event is an object that can be posted to your DataDog event stream. | |
462 | type Event struct { | |
463 | // Title of the event. Required. | |
464 | Title string | |
465 | // Text is the description of the event. Required. | |
466 | Text string | |
467 | // Timestamp is a timestamp for the event. If not provided, the dogstatsd | |
468 | // server will set this to the current time. | |
469 | Timestamp time.Time | |
470 | // Hostname for the event. | |
471 | Hostname string | |
472 | // AggregationKey groups this event with others of the same key. | |
473 | AggregationKey string | |
474 | // Priority of the event. Can be statsd.Low or statsd.Normal. | |
475 | Priority EventPriority | |
476 | // SourceTypeName is a source type for the event. | |
477 | SourceTypeName string | |
478 | // AlertType can be statsd.Info, statsd.Error, statsd.Warning, or statsd.Success. | |
479 | // If absent, the default value applied by the dogstatsd server is Info. | |
480 | AlertType EventAlertType | |
481 | // Tags for the event. | |
482 | Tags []string | |
483 | } | |
484 | ||
485 | // NewEvent creates a new event with the given title and text. Error checking | |
486 | // against these values is done at send-time, or upon running e.Check. | |
487 | func NewEvent(title, text string) *Event { | |
488 | return &Event{ | |
489 | Title: title, | |
490 | Text: text, | |
491 | } | |
492 | } | |
493 | ||
494 | // Check verifies that an event is valid. | |
495 | func (e Event) Check() error { | |
496 | if len(e.Title) == 0 { | |
497 | return fmt.Errorf("statsd.Event title is required") | |
498 | } | |
499 | if len(e.Text) == 0 { | |
500 | return fmt.Errorf("statsd.Event text is required") | |
501 | } | |
502 | return nil | |
503 | } | |
504 | ||
505 | // Encode returns the dogstatsd wire protocol representation for an event. | |
506 | // Tags may be passed which will be added to the encoded output but not to | |
507 | // the Event's list of tags, eg. for default tags. | |
508 | func (e Event) Encode(tags ...string) (string, error) { | |
509 | err := e.Check() | |
680 | close(c.stop) | |
681 | ||
682 | if c.workersMode == channelMode { | |
683 | for _, w := range c.workers { | |
684 | w.stopReceivingMetric() | |
685 | } | |
686 | } | |
687 | ||
688 | // flush the aggregator first | |
689 | if c.agg != nil { | |
690 | if c.aggExtended != nil && c.aggregatorMode == channelMode { | |
691 | c.agg.stopReceivingMetric() | |
692 | } | |
693 | c.agg.stop() | |
694 | } | |
695 | ||
696 | // Wait for the threads to stop | |
697 | c.wg.Wait() | |
698 | ||
699 | c.Flush() | |
700 | ||
701 | c.isClosed = true | |
702 | return c.sender.close() | |
703 | } | |
704 | ||
705 | // isOriginDetectionEnabled returns whether the clients should fill the container field. | |
706 | // | |
707 | // If DD_ENTITY_ID is set, we don't send the container ID | |
708 | // If a user-defined container ID is provided, we don't ignore origin detection | |
709 | // as dd.internal.entity_id is prioritized over the container field for backward compatibility. | |
710 | // If DD_ENTITY_ID is not set, we try to fill the container field automatically unless | |
711 | // DD_ORIGIN_DETECTION_ENABLED is explicitly set to false. | |
712 | func isOriginDetectionEnabled(o *Options, hasEntityID bool) bool { | |
713 | if !o.originDetection || hasEntityID || o.containerID != "" { | |
714 | // originDetection is explicitly disabled | |
715 | // or DD_ENTITY_ID was found | |
716 | // or a user-defined container ID was provided | |
717 | return false | |
718 | } | |
719 | ||
720 | envVarValue := os.Getenv(originDetectionEnabled) | |
721 | if envVarValue == "" { | |
722 | // DD_ORIGIN_DETECTION_ENABLED is not set | |
723 | // default to true | |
724 | return true | |
725 | } | |
726 | ||
727 | enabled, err := strconv.ParseBool(envVarValue) | |
510 | 728 | if err != nil { |
511 | return "", err | |
512 | } | |
513 | text := e.escapedText() | |
514 | ||
515 | var buffer bytes.Buffer | |
516 | buffer.WriteString("_e{") | |
517 | buffer.WriteString(strconv.FormatInt(int64(len(e.Title)), 10)) | |
518 | buffer.WriteRune(',') | |
519 | buffer.WriteString(strconv.FormatInt(int64(len(text)), 10)) | |
520 | buffer.WriteString("}:") | |
521 | buffer.WriteString(e.Title) | |
522 | buffer.WriteRune('|') | |
523 | buffer.WriteString(text) | |
524 | ||
525 | if !e.Timestamp.IsZero() { | |
526 | buffer.WriteString("|d:") | |
527 | buffer.WriteString(strconv.FormatInt(int64(e.Timestamp.Unix()), 10)) | |
528 | } | |
529 | ||
530 | if len(e.Hostname) != 0 { | |
531 | buffer.WriteString("|h:") | |
532 | buffer.WriteString(e.Hostname) | |
533 | } | |
534 | ||
535 | if len(e.AggregationKey) != 0 { | |
536 | buffer.WriteString("|k:") | |
537 | buffer.WriteString(e.AggregationKey) | |
538 | ||
539 | } | |
540 | ||
541 | if len(e.Priority) != 0 { | |
542 | buffer.WriteString("|p:") | |
543 | buffer.WriteString(string(e.Priority)) | |
544 | } | |
545 | ||
546 | if len(e.SourceTypeName) != 0 { | |
547 | buffer.WriteString("|s:") | |
548 | buffer.WriteString(e.SourceTypeName) | |
549 | } | |
550 | ||
551 | if len(e.AlertType) != 0 { | |
552 | buffer.WriteString("|t:") | |
553 | buffer.WriteString(string(e.AlertType)) | |
554 | } | |
555 | ||
556 | writeTagString(&buffer, tags, e.Tags) | |
557 | ||
558 | return buffer.String(), nil | |
559 | } | |
560 | ||
// ServiceCheckStatus is the health state reported by a ServiceCheck.
// It is encoded on the dogstatsd wire as a single digit (0-3).
type ServiceCheckStatus byte

const (
	// Ok is the "ok" ServiceCheck status
	Ok ServiceCheckStatus = 0
	// Warn is the "warning" ServiceCheck status
	Warn ServiceCheckStatus = 1
	// Critical is the "critical" ServiceCheck status
	Critical ServiceCheckStatus = 2
	// Unknown is the "unknown" ServiceCheck status
	Unknown ServiceCheckStatus = 3
)
574 | ||
// A ServiceCheck is an object that contains status of a DataDog service check.
// Name and Status are required; all other fields are optional and are omitted
// from the wire format when left at their zero value (see Encode).
type ServiceCheck struct {
	// Name of the service check. Required.
	Name string
	// Status of service check. Required.
	Status ServiceCheckStatus
	// Timestamp is a timestamp for the serviceCheck. If not provided, the dogstatsd
	// server will set this to the current time.
	Timestamp time.Time
	// Hostname for the serviceCheck.
	Hostname string
	// A message describing the current state of the serviceCheck.
	Message string
	// Tags for the serviceCheck.
	Tags []string
}
591 | ||
592 | // NewServiceCheck creates a new serviceCheck with the given name and status. Error checking | |
593 | // against these values is done at send-time, or upon running sc.Check. | |
594 | func NewServiceCheck(name string, status ServiceCheckStatus) *ServiceCheck { | |
595 | return &ServiceCheck{ | |
596 | Name: name, | |
597 | Status: status, | |
598 | } | |
599 | } | |
600 | ||
601 | // Check verifies that an event is valid. | |
602 | func (sc ServiceCheck) Check() error { | |
603 | if len(sc.Name) == 0 { | |
604 | return fmt.Errorf("statsd.ServiceCheck name is required") | |
605 | } | |
606 | if byte(sc.Status) < 0 || byte(sc.Status) > 3 { | |
607 | return fmt.Errorf("statsd.ServiceCheck status has invalid value") | |
608 | } | |
609 | return nil | |
610 | } | |
611 | ||
612 | // Encode returns the dogstatsd wire protocol representation for an serviceCheck. | |
613 | // Tags may be passed which will be added to the encoded output but not to | |
614 | // the Event's list of tags, eg. for default tags. | |
615 | func (sc ServiceCheck) Encode(tags ...string) (string, error) { | |
616 | err := sc.Check() | |
617 | if err != nil { | |
618 | return "", err | |
619 | } | |
620 | message := sc.escapedMessage() | |
621 | ||
622 | var buffer bytes.Buffer | |
623 | buffer.WriteString("_sc|") | |
624 | buffer.WriteString(sc.Name) | |
625 | buffer.WriteRune('|') | |
626 | buffer.WriteString(strconv.FormatInt(int64(sc.Status), 10)) | |
627 | ||
628 | if !sc.Timestamp.IsZero() { | |
629 | buffer.WriteString("|d:") | |
630 | buffer.WriteString(strconv.FormatInt(int64(sc.Timestamp.Unix()), 10)) | |
631 | } | |
632 | ||
633 | if len(sc.Hostname) != 0 { | |
634 | buffer.WriteString("|h:") | |
635 | buffer.WriteString(sc.Hostname) | |
636 | } | |
637 | ||
638 | writeTagString(&buffer, tags, sc.Tags) | |
639 | ||
640 | if len(message) != 0 { | |
641 | buffer.WriteString("|m:") | |
642 | buffer.WriteString(message) | |
643 | } | |
644 | ||
645 | return buffer.String(), nil | |
646 | } | |
647 | ||
648 | func (e Event) escapedText() string { | |
649 | return strings.Replace(e.Text, "\n", "\\n", -1) | |
650 | } | |
651 | ||
652 | func (sc ServiceCheck) escapedMessage() string { | |
653 | msg := strings.Replace(sc.Message, "\n", "\\n", -1) | |
654 | return strings.Replace(msg, "m:", `m\:`, -1) | |
655 | } | |
656 | ||
// removeNewlines strips every newline character from str, so that a tag can
// never break the line-oriented dogstatsd wire format.
func removeNewlines(str string) string {
	return strings.ReplaceAll(str, "\n", "")
}
660 | ||
661 | func writeTagString(w io.Writer, tagList1, tagList2 []string) { | |
662 | // the tag lists may be shared with other callers, so we cannot modify | |
663 | // them in any way (which means we cannot append to them either) | |
664 | // therefore we must make an entirely separate copy just for this call | |
665 | totalLen := len(tagList1) + len(tagList2) | |
666 | if totalLen == 0 { | |
667 | return | |
668 | } | |
669 | tags := make([]string, 0, totalLen) | |
670 | tags = append(tags, tagList1...) | |
671 | tags = append(tags, tagList2...) | |
672 | ||
673 | io.WriteString(w, "|#") | |
674 | io.WriteString(w, removeNewlines(tags[0])) | |
675 | for _, tag := range tags[1:] { | |
676 | io.WriteString(w, ",") | |
677 | io.WriteString(w, removeNewlines(tag)) | |
678 | } | |
679 | } | |
729 | // Error due to an unsupported DD_ORIGIN_DETECTION_ENABLED value | |
730 | // default to true | |
731 | return true | |
732 | } | |
733 | ||
734 | return enabled | |
735 | } |
0 | package statsd | |
0 | package statsd_test | |
1 | 1 | |
2 | 2 | import ( |
3 | 3 | "fmt" |
4 | "strconv" | |
4 | "io" | |
5 | "log" | |
6 | "net" | |
7 | "os" | |
8 | "sync/atomic" | |
5 | 9 | "testing" |
10 | ||
11 | "github.com/DataDog/datadog-go/v5/statsd" | |
6 | 12 | ) |
7 | 13 | |
// Package-level sinks for benchmark results: assigning to these prevents the
// compiler from optimizing away the work being measured.
var statBytes []byte
var stat string
10 | ||
11 | // Results: | |
12 | // BenchmarkStatBuildGauge_Sprintf-8 500 45699958 ns/op | |
13 | // BenchmarkStatBuildGauge_Concat-8 1000 23452863 ns/op | |
14 | // BenchmarkStatBuildGauge_BytesAppend-8 1000 21705121 ns/op | |
15 | func BenchmarkStatBuildGauge_Sprintf(b *testing.B) { | |
16 | for n := 0; n < b.N; n++ { | |
17 | for x := 0; x < 100000; x++ { | |
18 | stat = fmt.Sprintf("%f|g", 3.14159) | |
// Transport selectors used by setupClient to pick the benchmark server type.
const writerNameUDP = "udp"
const writerNameUDS = "uds"
16 | ||
17 | func setupUDSClientServer(b *testing.B, options []statsd.Option) (*statsd.Client, net.Listener) { | |
18 | sockAddr := "/tmp/test.sock" | |
19 | if err := os.RemoveAll(sockAddr); err != nil { | |
20 | log.Fatal(err) | |
21 | } | |
22 | conn, err := net.Listen("unix", sockAddr) | |
23 | if err != nil { | |
24 | log.Fatal("listen error:", err) | |
25 | } | |
26 | go func() { | |
27 | for { | |
28 | _, err := conn.Accept() | |
29 | if err != nil { | |
30 | return | |
31 | } | |
19 | 32 | } |
20 | } | |
21 | } | |
22 | ||
23 | func BenchmarkStatBuildGauge_Concat(b *testing.B) { | |
24 | for n := 0; n < b.N; n++ { | |
25 | for x := 0; x < 100000; x++ { | |
26 | stat = strconv.FormatFloat(3.14159, 'f', -1, 64) + "|g" | |
33 | }() | |
34 | client, err := statsd.New("unix://"+sockAddr, options...) | |
35 | if err != nil { | |
36 | b.Error(err) | |
37 | } | |
38 | return client, conn | |
39 | } | |
40 | ||
41 | func setupUDPClientServer(b *testing.B, options []statsd.Option) (*statsd.Client, *net.UDPConn) { | |
42 | addr, err := net.ResolveUDPAddr("udp", ":0") | |
43 | if err != nil { | |
44 | b.Error(err) | |
45 | } | |
46 | conn, err := net.ListenUDP("udp", addr) | |
47 | if err != nil { | |
48 | b.Error(err) | |
49 | } | |
50 | ||
51 | client, err := statsd.New(conn.LocalAddr().String(), options...) | |
52 | if err != nil { | |
53 | b.Error(err) | |
54 | } | |
55 | return client, conn | |
56 | } | |
57 | ||
58 | func setupClient(b *testing.B, transport string, extraOptions []statsd.Option) (*statsd.Client, io.Closer) { | |
59 | options := []statsd.Option{statsd.WithMaxMessagesPerPayload(1024)} | |
60 | options = append(options, extraOptions...) | |
61 | ||
62 | if transport == writerNameUDP { | |
63 | return setupUDPClientServer(b, options) | |
64 | } | |
65 | return setupUDSClientServer(b, options) | |
66 | } | |
67 | ||
68 | func benchmarkStatsdDifferentMetrics(b *testing.B, transport string, extraOptions ...statsd.Option) { | |
69 | client, conn := setupClient(b, transport, extraOptions) | |
70 | defer conn.Close() | |
71 | ||
72 | n := int32(0) | |
73 | b.ResetTimer() | |
74 | ||
75 | b.RunParallel(func(pb *testing.PB) { | |
76 | testNumber := atomic.AddInt32(&n, 1) | |
77 | name := fmt.Sprintf("test.metric%d", testNumber) | |
78 | for pb.Next() { | |
79 | client.Gauge(name, 1, []string{"tag:tag"}, 1) | |
27 | 80 | } |
28 | } | |
29 | } | |
30 | ||
31 | func BenchmarkStatBuildGauge_BytesAppend(b *testing.B) { | |
32 | suffix := []byte("|g") | |
33 | ||
34 | for n := 0; n < b.N; n++ { | |
35 | for x := 0; x < 100000; x++ { | |
36 | statBytes = []byte{} | |
37 | statBytes = append(strconv.AppendFloat(statBytes, 3.14159, 'f', -1, 64), suffix...) | |
81 | }) | |
82 | client.Flush() | |
83 | t := client.GetTelemetry() | |
84 | reportMetric(b, float64(t.TotalDroppedOnReceive)/float64(t.TotalMetrics)*100, "%_dropRate") | |
85 | ||
86 | b.StopTimer() | |
87 | client.Close() | |
88 | } | |
89 | ||
90 | func benchmarkStatsdSameMetrics(b *testing.B, transport string, extraOptions ...statsd.Option) { | |
91 | client, conn := setupClient(b, transport, extraOptions) | |
92 | defer conn.Close() | |
93 | ||
94 | b.ResetTimer() | |
95 | ||
96 | b.RunParallel(func(pb *testing.PB) { | |
97 | for pb.Next() { | |
98 | client.Gauge("test.metric", 1, []string{"tag:tag"}, 1) | |
38 | 99 | } |
39 | } | |
40 | } | |
41 | ||
// BenchmarkStatBuildCount_Sprintf measures building a count payload ("314|c")
// with fmt.Sprintf, 100k formats per iteration.
func BenchmarkStatBuildCount_Sprintf(b *testing.B) {
	for n := 0; n < b.N; n++ {
		for x := 0; x < 100000; x++ {
			// Assigned to the package sink so the call is not optimized away.
			stat = fmt.Sprintf("%d|c", 314)
		}
	}
}
49 | ||
// BenchmarkStatBuildCount_Concat measures building the same count payload
// with strconv.FormatInt plus string concatenation (avoids fmt's reflection).
func BenchmarkStatBuildCount_Concat(b *testing.B) {
	for n := 0; n < b.N; n++ {
		for x := 0; x < 100000; x++ {
			stat = strconv.FormatInt(314, 10) + "|c"
		}
	}
}
57 | ||
// BenchmarkStatBuildCount_BytesAppend measures building the count payload
// into a byte slice with strconv.AppendInt + append (no string allocation
// until the result is used).
func BenchmarkStatBuildCount_BytesAppend(b *testing.B) {
	suffix := []byte("|c")

	for n := 0; n < b.N; n++ {
		for x := 0; x < 100000; x++ {
			statBytes = []byte{}
			statBytes = append(strconv.AppendInt(statBytes, 314, 10), suffix...)
		}
	}
}
100 | }) | |
101 | client.Flush() | |
102 | t := client.GetTelemetry() | |
103 | reportMetric(b, float64(t.TotalDroppedOnReceive)/float64(t.TotalMetrics)*100, "%_dropRate") | |
104 | ||
105 | b.StopTimer() | |
106 | client.Close() | |
107 | } | |
108 | ||
109 | /* | |
110 | UDP with the same metric | |
111 | */ | |
112 | ||
// blocking (MutexMode) + no client-side aggregation
func BenchmarkStatsdUDPSameMetricMutex(b *testing.B) {
	benchmarkStatsdSameMetrics(b, writerNameUDP, statsd.WithMutexMode(), statsd.WithoutClientSideAggregation())
}
117 | ||
// dropping (ChannelMode) + no client-side aggregation
func BenchmarkStatsdUDPSameMetricChannel(b *testing.B) {
	benchmarkStatsdSameMetrics(b, writerNameUDP, statsd.WithChannelMode(), statsd.WithoutClientSideAggregation())
}
122 | ||
// blocking (MutexMode) + client-side aggregation
func BenchmarkStatsdUDPSameMetricMutexAggregation(b *testing.B) {
	benchmarkStatsdSameMetrics(b, writerNameUDP, statsd.WithMutexMode(), statsd.WithClientSideAggregation())
}
127 | ||
// dropping (ChannelMode) + client-side aggregation
func BenchmarkStatsdUDPSameMetricChannelAggregation(b *testing.B) {
	benchmarkStatsdSameMetrics(b, writerNameUDP, statsd.WithChannelMode(), statsd.WithClientSideAggregation())
}
132 | ||
133 | /* | |
134 | UDP with the different metrics | |
135 | */ | |
136 | ||
// blocking (MutexMode) + no client-side aggregation
func BenchmarkStatsdUDPDifferentMetricMutex(b *testing.B) {
	benchmarkStatsdDifferentMetrics(b, writerNameUDP, statsd.WithMutexMode(), statsd.WithoutClientSideAggregation())
}
141 | ||
// dropping (ChannelMode) + no client-side aggregation
func BenchmarkStatsdUDPDifferentMetricChannel(b *testing.B) {
	benchmarkStatsdDifferentMetrics(b, writerNameUDP, statsd.WithChannelMode(), statsd.WithoutClientSideAggregation())
}
146 | ||
// blocking (MutexMode) + client-side aggregation
func BenchmarkStatsdUDPDifferentMetricMutexAggregation(b *testing.B) {
	benchmarkStatsdDifferentMetrics(b, writerNameUDP, statsd.WithMutexMode(), statsd.WithClientSideAggregation())
}
151 | ||
// dropping (ChannelMode) + client-side aggregation
func BenchmarkStatsdUDPDifferentMetricChannelAggregation(b *testing.B) {
	benchmarkStatsdDifferentMetrics(b, writerNameUDP, statsd.WithChannelMode(), statsd.WithClientSideAggregation())
}
156 | ||
157 | /* | |
158 | UDS with the same metric | |
159 | */ | |
// blocking (MutexMode) + no client-side aggregation
func BenchmarkStatsdUDSSameMetricMutex(b *testing.B) {
	benchmarkStatsdSameMetrics(b, writerNameUDS, statsd.WithMutexMode(), statsd.WithoutClientSideAggregation())
}
164 | ||
// dropping (ChannelMode) + no client-side aggregation
func BenchmarkStatsdUDSSameMetricChannel(b *testing.B) {
	benchmarkStatsdSameMetrics(b, writerNameUDS, statsd.WithChannelMode(), statsd.WithoutClientSideAggregation())
}
169 | ||
// blocking (MutexMode) + client-side aggregation
func BenchmarkStatsdUDSSameMetricMutexAggregation(b *testing.B) {
	benchmarkStatsdSameMetrics(b, writerNameUDS, statsd.WithMutexMode(), statsd.WithClientSideAggregation())
}
174 | ||
// dropping (ChannelMode) + client-side aggregation
func BenchmarkStatsdUDSSameMetricChannelAggregation(b *testing.B) {
	benchmarkStatsdSameMetrics(b, writerNameUDS, statsd.WithChannelMode(), statsd.WithClientSideAggregation())
}
179 | ||
180 | /* | |
181 | UDS with different metrics | |
182 | */ | |
183 | // blocking + no aggregation | |
184 | func BenchmarkStatsdUDPSifferentMetricMutex(b *testing.B) { | |
185 | benchmarkStatsdDifferentMetrics(b, writerNameUDS, statsd.WithMutexMode(), statsd.WithoutClientSideAggregation()) | |
186 | } | |
187 | ||
// dropping (ChannelMode) + no client-side aggregation
func BenchmarkStatsdUDSDifferentMetricChannel(b *testing.B) {
	benchmarkStatsdDifferentMetrics(b, writerNameUDS, statsd.WithChannelMode(), statsd.WithoutClientSideAggregation())
}
192 | ||
193 | // blocking + aggregation | |
194 | func BenchmarkStatsdUDPSifferentMetricMutexAggregation(b *testing.B) { | |
195 | benchmarkStatsdDifferentMetrics(b, writerNameUDS, statsd.WithMutexMode(), statsd.WithClientSideAggregation()) | |
196 | } | |
197 | ||
// dropping (ChannelMode) + client-side aggregation
func BenchmarkStatsdUDSDifferentMetricChannelAggregation(b *testing.B) {
	benchmarkStatsdDifferentMetrics(b, writerNameUDS, statsd.WithChannelMode(), statsd.WithClientSideAggregation())
}
0 | // Copyright 2013 Ooyala, Inc. | |
1 | ||
2 | 0 | package statsd |
3 | 1 | |
4 | 2 | import ( |
5 | "bytes" | |
6 | 3 | "fmt" |
7 | "io" | |
8 | "io/ioutil" | |
9 | "net" | |
10 | 4 | "os" |
11 | "path/filepath" | |
12 | "reflect" | |
13 | "strconv" | |
14 | 5 | "strings" |
6 | "sync" | |
15 | 7 | "testing" |
16 | 8 | "time" |
9 | ||
10 | "github.com/stretchr/testify/assert" | |
11 | "github.com/stretchr/testify/require" | |
17 | 12 | ) |
18 | ||
// dogstatsdTests is a table of metric-emission cases. Each entry names a
// client method (invoked via reflection by the test loops), the arguments to
// pass, and the exact dogstatsd wire payload the server must receive.
var dogstatsdTests = []struct {
	GlobalNamespace string // client.Namespace set before the call
	GlobalTags      []string // client.Tags set before the call
	Method          string // client method name, resolved via reflection
	Metric          string
	Value           interface{}
	Tags            []string
	Rate            float64
	Expected        string // exact expected wire payload
}{
	{"", nil, "Gauge", "test.gauge", 1.0, nil, 1.0, "test.gauge:1.000000|g"},
	{"", nil, "Gauge", "test.gauge", 1.0, nil, 0.999999, "test.gauge:1.000000|g|@0.999999"},
	{"", nil, "Gauge", "test.gauge", 1.0, []string{"tagA"}, 1.0, "test.gauge:1.000000|g|#tagA"},
	{"", nil, "Gauge", "test.gauge", 1.0, []string{"tagA", "tagB"}, 1.0, "test.gauge:1.000000|g|#tagA,tagB"},
	{"", nil, "Gauge", "test.gauge", 1.0, []string{"tagA"}, 0.999999, "test.gauge:1.000000|g|@0.999999|#tagA"},
	{"", nil, "Count", "test.count", int64(1), []string{"tagA"}, 1.0, "test.count:1|c|#tagA"},
	{"", nil, "Count", "test.count", int64(-1), []string{"tagA"}, 1.0, "test.count:-1|c|#tagA"},
	{"", nil, "Histogram", "test.histogram", 2.3, []string{"tagA"}, 1.0, "test.histogram:2.300000|h|#tagA"},
	{"", nil, "Distribution", "test.distribution", 2.3, []string{"tagA"}, 1.0, "test.distribution:2.300000|d|#tagA"},
	{"", nil, "Set", "test.set", "uuid", []string{"tagA"}, 1.0, "test.set:uuid|s|#tagA"},
	{"flubber.", nil, "Set", "test.set", "uuid", []string{"tagA"}, 1.0, "flubber.test.set:uuid|s|#tagA"},
	{"", []string{"tagC"}, "Set", "test.set", "uuid", []string{"tagA"}, 1.0, "test.set:uuid|s|#tagC,tagA"},
	// Newlines in tags must be stripped from the payload.
	{"", nil, "Count", "test.count", int64(1), []string{"hello\nworld"}, 1.0, "test.count:1|c|#helloworld"},
}
43 | 13 | |
44 | 14 | func assertNotPanics(t *testing.T, f func()) { |
45 | 15 | defer func() { |
50 | 20 | f() |
51 | 21 | } |
52 | 22 | |
53 | func TestClientUDP(t *testing.T) { | |
54 | addr := "localhost:1201" | |
55 | udpAddr, err := net.ResolveUDPAddr("udp", addr) | |
56 | if err != nil { | |
57 | t.Fatal(err) | |
58 | } | |
59 | ||
60 | server, err := net.ListenUDP("udp", udpAddr) | |
61 | if err != nil { | |
62 | t.Fatal(err) | |
63 | } | |
64 | defer server.Close() | |
65 | ||
66 | client, err := New(addr) | |
67 | if err != nil { | |
68 | t.Fatal(err) | |
69 | } | |
70 | ||
71 | clientTest(t, server, client) | |
// TestNilError verifies that every public method of a nil *Client returns
// ErrNoClient instead of panicking, so callers may hold an optional client
// without nil-checking at every call site.
func TestNilError(t *testing.T) {
	var c *Client
	tests := []func() error{
		func() error { return c.Flush() },
		func() error { return c.Close() },
		func() error { return c.Count("", 0, nil, 1) },
		func() error { return c.Incr("", nil, 1) },
		func() error { return c.Decr("", nil, 1) },
		func() error { return c.Histogram("", 0, nil, 1) },
		func() error { return c.Distribution("", 0, nil, 1) },
		func() error { return c.Gauge("", 0, nil, 1) },
		func() error { return c.Set("", "", nil, 1) },
		func() error { return c.Timing("", time.Second, nil, 1) },
		func() error { return c.TimeInMilliseconds("", 1, nil, 1) },
		func() error { return c.Event(NewEvent("", "")) },
		func() error { return c.SimpleEvent("", "") },
		func() error { return c.ServiceCheck(NewServiceCheck("", Ok)) },
		func() error { return c.SimpleServiceCheck("", Ok) },
		// Cloning a nil client must also fail cleanly.
		func() error {
			_, err := CloneWithExtraOptions(nil, WithChannelMode())
			return err
		},
	}
	for i, f := range tests {
		var err error
		assertNotPanics(t, func() { err = f() })
		if err != ErrNoClient {
			t.Errorf("Test case %d: expected ErrNoClient, got %#v", i, err)
		}
	}
}
54 | ||
// TestDoubleClosePanic ensures that calling Close twice on the same client
// is safe: the second Close must not panic.
func TestDoubleClosePanic(t *testing.T) {
	c, err := New("localhost:8125")
	assert.NoError(t, err)
	c.Close()
	c.Close()
}
73 | 61 | |
74 | 62 | type statsdWriterWrapper struct { |
75 | io.WriteCloser | |
76 | } | |
77 | ||
78 | func (statsdWriterWrapper) SetWriteTimeout(time.Duration) error { | |
63 | data []string | |
64 | } | |
65 | ||
66 | func (s *statsdWriterWrapper) Close() error { | |
79 | 67 | return nil |
80 | 68 | } |
81 | 69 | |
// TestClientWithConn runs the shared metric table against a client built from
// a caller-supplied writer (here one end of an OS pipe) via NewWithWriter,
// reading expected payloads back from the other end of the pipe.
func TestClientWithConn(t *testing.T) {
	server, conn, err := os.Pipe()
	if err != nil {
		t.Fatal(err)
	}

	client, err := NewWithWriter(statsdWriterWrapper{conn})
	if err != nil {
		t.Fatal(err)
	}

	clientTest(t, server, client)
}
95 | ||
96 | func clientTest(t *testing.T, server io.Reader, client *Client) { | |
97 | for _, tt := range dogstatsdTests { | |
98 | client.Namespace = tt.GlobalNamespace | |
99 | client.Tags = tt.GlobalTags | |
100 | method := reflect.ValueOf(client).MethodByName(tt.Method) | |
101 | e := method.Call([]reflect.Value{ | |
102 | reflect.ValueOf(tt.Metric), | |
103 | reflect.ValueOf(tt.Value), | |
104 | reflect.ValueOf(tt.Tags), | |
105 | reflect.ValueOf(tt.Rate)})[0] | |
106 | errInter := e.Interface() | |
107 | if errInter != nil { | |
108 | t.Fatal(errInter.(error)) | |
70 | func (s *statsdWriterWrapper) Write(p []byte) (n int, err error) { | |
71 | for _, m := range strings.Split(string(p), "\n") { | |
72 | if m != "" { | |
73 | s.data = append(s.data, m) | |
109 | 74 | } |
110 | ||
111 | bytes := make([]byte, 1024) | |
112 | n, err := server.Read(bytes) | |
113 | if err != nil { | |
114 | t.Fatal(err) | |
115 | } | |
116 | message := bytes[:n] | |
117 | if string(message) != tt.Expected { | |
118 | t.Errorf("Expected: %s. Actual: %s", tt.Expected, string(message)) | |
119 | } | |
120 | } | |
121 | } | |
122 | ||
// TestClientUDS runs the shared metric table over a unix datagram socket:
// it listens on a temp-dir socket path, points a client at it using the
// unix address prefix, and checks each method's exact wire payload.
func TestClientUDS(t *testing.T) {
	dir, err := ioutil.TempDir("", "socket")
	if err != nil {
		t.Fatal(err)
	}
	defer os.RemoveAll(dir) // clean up

	addr := filepath.Join(dir, "dsd.socket")

	udsAddr, err := net.ResolveUnixAddr("unixgram", addr)
	if err != nil {
		t.Fatal(err)
	}

	server, err := net.ListenUnixgram("unixgram", udsAddr)
	if err != nil {
		t.Fatal(err)
	}
	defer server.Close()

	addrParts := []string{UnixAddressPrefix, addr}
	client, err := New(strings.Join(addrParts, ""))
	if err != nil {
		t.Fatal(err)
	}

	for _, tt := range dogstatsdTests {
		client.Namespace = tt.GlobalNamespace
		client.Tags = tt.GlobalTags
		// Invoke the client method named by the table entry via reflection.
		method := reflect.ValueOf(client).MethodByName(tt.Method)
		e := method.Call([]reflect.Value{
			reflect.ValueOf(tt.Metric),
			reflect.ValueOf(tt.Value),
			reflect.ValueOf(tt.Tags),
			reflect.ValueOf(tt.Rate)})[0]
		errInter := e.Interface()
		if errInter != nil {
			t.Fatal(errInter.(error))
		}

		// Each datagram is expected to carry exactly one metric payload.
		bytes := make([]byte, 1024)
		n, err := server.Read(bytes)
		if err != nil {
			t.Fatal(err)
		}
		message := bytes[:n]
		if string(message) != tt.Expected {
			t.Errorf("Expected: %s. Actual: %s", tt.Expected, string(message))
		}
	}
}
174 | ||
// TestClientUDSClose verifies that Close does not panic for a UDS client
// whose socket path was never bound by any server.
func TestClientUDSClose(t *testing.T) {
	dir, err := ioutil.TempDir("", "socket")
	if err != nil {
		t.Fatal(err)
	}
	defer os.RemoveAll(dir) // clean up

	addr := filepath.Join(dir, "dsd.socket")

	// No listener is created on purpose; New is still expected to succeed.
	addrParts := []string{UnixAddressPrefix, addr}
	client, err := New(strings.Join(addrParts, ""))
	if err != nil {
		t.Fatal(err)
	}

	assertNotPanics(t, func() { client.Close() })
}
192 | ||
// TestBufferedClient exercises the buffered client end to end over UDP:
// it fills the buffer to one short of capacity, checks the pending command
// count, flushes manually, and compares the newline-joined payloads the
// server receives — first for metrics, then for events.
func TestBufferedClient(t *testing.T) {
	addr := "localhost:1201"
	udpAddr, err := net.ResolveUDPAddr("udp", addr)
	if err != nil {
		t.Fatal(err)
	}

	server, err := net.ListenUDP("udp", udpAddr)
	if err != nil {
		t.Fatal(err)
	}
	defer server.Close()

	bufferLength := 9
	client, err := NewBuffered(addr, bufferLength)
	if err != nil {
		t.Fatal(err)
	}

	client.Namespace = "foo."
	client.Tags = []string{"dd:2"}

	dur, _ := time.ParseDuration("123us")

	// Eight commands: exactly bufferLength-1, so nothing is flushed yet.
	client.Incr("ic", nil, 1)
	client.Decr("dc", nil, 1)
	client.Count("cc", 1, nil, 1)
	client.Gauge("gg", 10, nil, 1)
	client.Histogram("hh", 1, nil, 1)
	client.Distribution("dd", 1, nil, 1)
	client.Timing("tt", dur, nil, 1)
	client.Set("ss", "ss", nil, 1)

	if len(client.commands) != (bufferLength - 1) {
		t.Errorf("Expected client to have buffered %d commands, but found %d\n", (bufferLength - 1), len(client.commands))
	}

	// Ninth command fills the buffer; flush under the client's lock.
	client.Set("ss", "xx", nil, 1)
	client.Lock()
	err = client.flushLocked()
	client.Unlock()
	if err != nil {
		t.Errorf("Error sending: %s", err)
	}

	if len(client.commands) != 0 {
		t.Errorf("Expecting send to flush commands, but found %d\n", len(client.commands))
	}

	buffer := make([]byte, 4096)
	n, err := io.ReadAtLeast(server, buffer, 1)
	result := string(buffer[:n])

	if err != nil {
		t.Error(err)
	}

	// Payloads arrive newline-joined, in submission order, with the
	// namespace prefix and global tag applied to each.
	expected := []string{
		`foo.ic:1|c|#dd:2`,
		`foo.dc:-1|c|#dd:2`,
		`foo.cc:1|c|#dd:2`,
		`foo.gg:10.000000|g|#dd:2`,
		`foo.hh:1.000000|h|#dd:2`,
		`foo.dd:1.000000|d|#dd:2`,
		`foo.tt:0.123000|ms|#dd:2`,
		`foo.ss:ss|s|#dd:2`,
		`foo.ss:xx|s|#dd:2`,
	}

	for i, res := range strings.Split(result, "\n") {
		if res != expected[i] {
			t.Errorf("Got `%s`, expected `%s`", res, expected[i])
		}
	}

	// Events are buffered through the same command queue.
	client.Event(&Event{Title: "title1", Text: "text1", Priority: Normal, AlertType: Success, Tags: []string{"tagg"}})
	client.SimpleEvent("event1", "text1")

	if len(client.commands) != 2 {
		t.Errorf("Expected to find %d commands, but found %d\n", 2, len(client.commands))
	}

	client.Lock()
	err = client.flushLocked()
	client.Unlock()

	if err != nil {
		t.Errorf("Error sending: %s", err)
	}

	if len(client.commands) != 0 {
		t.Errorf("Expecting send to flush commands, but found %d\n", len(client.commands))
	}

	buffer = make([]byte, 1024)
	n, err = io.ReadAtLeast(server, buffer, 1)
	result = string(buffer[:n])

	if err != nil {
		t.Error(err)
	}

	if n == 0 {
		t.Errorf("Read 0 bytes but expected more.")
	}

	expected = []string{
		`_e{6,5}:title1|text1|p:normal|t:success|#dd:2,tagg`,
		`_e{6,5}:event1|text1|#dd:2`,
	}

	for i, res := range strings.Split(result, "\n") {
		if res != expected[i] {
			t.Errorf("Got `%s`, expected `%s`", res, expected[i])
		}
	}

}
311 | ||
// TestBufferedClientBackground checks that the client's background watch
// goroutine flushes buffered commands on its own after roughly flushTime,
// without an explicit Flush call.
func TestBufferedClientBackground(t *testing.T) {
	addr := "localhost:1201"
	udpAddr, err := net.ResolveUDPAddr("udp", addr)
	if err != nil {
		t.Fatal(err)
	}

	server, err := net.ListenUDP("udp", udpAddr)
	if err != nil {
		t.Fatal(err)
	}
	defer server.Close()

	bufferLength := 5
	client, err := NewBuffered(addr, bufferLength)
	if err != nil {
		t.Fatal(err)
	}
	defer client.Close()

	client.Namespace = "foo."
	client.Tags = []string{"dd:2"}

	client.Count("cc", 1, nil, 1)
	client.Gauge("gg", 10, nil, 1)
	client.Histogram("hh", 1, nil, 1)
	client.Distribution("dd", 1, nil, 1)
	client.Set("ss", "ss", nil, 1)
	client.Set("ss", "xx", nil, 1)

	// Give the watch goroutine two flush intervals to drain the buffer.
	time.Sleep(client.flushTime * 2)
	client.Lock()
	if len(client.commands) != 0 {
		t.Errorf("Watch goroutine should have flushed commands, but found %d\n", len(client.commands))
	}
	client.Unlock()
}
349 | ||
// TestBufferedClientFlush checks that an explicit Flush call empties the
// buffered client's pending command queue.
func TestBufferedClientFlush(t *testing.T) {
	addr := "localhost:1201"
	udpAddr, err := net.ResolveUDPAddr("udp", addr)
	if err != nil {
		t.Fatal(err)
	}

	server, err := net.ListenUDP("udp", udpAddr)
	if err != nil {
		t.Fatal(err)
	}
	defer server.Close()

	bufferLength := 5
	client, err := NewBuffered(addr, bufferLength)
	if err != nil {
		t.Fatal(err)
	}
	defer client.Close()

	client.Namespace = "foo."
	client.Tags = []string{"dd:2"}

	client.Count("cc", 1, nil, 1)
	client.Gauge("gg", 10, nil, 1)
	client.Histogram("hh", 1, nil, 1)
	client.Distribution("dd", 1, nil, 1)
	client.Set("ss", "ss", nil, 1)
	client.Set("ss", "xx", nil, 1)

	client.Flush()

	// Inspect the queue under the client's own lock.
	client.Lock()
	if len(client.commands) != 0 {
		t.Errorf("Flush should have flushed commands, but found %d\n", len(client.commands))
	}
	client.Unlock()
}
388 | ||
389 | func TestJoinMaxSize(t *testing.T) { | |
390 | c := Client{} | |
391 | elements := []string{"abc", "abcd", "ab", "xyz", "foobaz", "x", "wwxxyyzz"} | |
392 | res, n := c.joinMaxSize(elements, " ", 8) | |
393 | ||
394 | if len(res) != len(n) && len(res) != 4 { | |
395 | t.Errorf("Was expecting 4 frames to flush but got: %v - %v", n, res) | |
396 | } | |
397 | if n[0] != 2 { | |
398 | t.Errorf("Was expecting 2 elements in first frame but got: %v", n[0]) | |
399 | } | |
400 | if string(res[0]) != "abc abcd" { | |
401 | t.Errorf("Join should have returned \"abc abcd\" in frame, but found: %s", res[0]) | |
402 | } | |
403 | if n[1] != 2 { | |
404 | t.Errorf("Was expecting 2 elements in second frame but got: %v - %v", n[1], n) | |
405 | } | |
406 | if string(res[1]) != "ab xyz" { | |
407 | t.Errorf("Join should have returned \"ab xyz\" in frame, but found: %s", res[1]) | |
408 | } | |
409 | if n[2] != 2 { | |
410 | t.Errorf("Was expecting 2 elements in third frame but got: %v - %v", n[2], n) | |
411 | } | |
412 | if string(res[2]) != "foobaz x" { | |
413 | t.Errorf("Join should have returned \"foobaz x\" in frame, but found: %s", res[2]) | |
414 | } | |
415 | if n[3] != 1 { | |
416 | t.Errorf("Was expecting 1 element in fourth frame but got: %v - %v", n[3], n) | |
417 | } | |
418 | if string(res[3]) != "wwxxyyzz" { | |
419 | t.Errorf("Join should have returned \"wwxxyyzz\" in frame, but found: %s", res[3]) | |
420 | } | |
421 | ||
422 | res, n = c.joinMaxSize(elements, " ", 11) | |
423 | ||
424 | if len(res) != len(n) && len(res) != 3 { | |
425 | t.Errorf("Was expecting 3 frames to flush but got: %v - %v", n, res) | |
426 | } | |
427 | if n[0] != 3 { | |
428 | t.Errorf("Was expecting 3 elements in first frame but got: %v", n[0]) | |
429 | } | |
430 | if string(res[0]) != "abc abcd ab" { | |
431 | t.Errorf("Join should have returned \"abc abcd ab\" in frame, but got: %s", res[0]) | |
432 | } | |
433 | if n[1] != 2 { | |
434 | t.Errorf("Was expecting 2 elements in second frame but got: %v", n[1]) | |
435 | } | |
436 | if string(res[1]) != "xyz foobaz" { | |
437 | t.Errorf("Join should have returned \"xyz foobaz\" in frame, but got: %s", res[1]) | |
438 | } | |
439 | if n[2] != 2 { | |
440 | t.Errorf("Was expecting 2 elements in third frame but got: %v", n[2]) | |
441 | } | |
442 | if string(res[2]) != "x wwxxyyzz" { | |
443 | t.Errorf("Join should have returned \"x wwxxyyzz\" in frame, but got: %s", res[2]) | |
444 | } | |
445 | ||
446 | res, n = c.joinMaxSize(elements, " ", 8) | |
447 | ||
448 | if len(res) != len(n) && len(res) != 7 { | |
449 | t.Errorf("Was expecting 7 frames to flush but got: %v - %v", n, res) | |
450 | } | |
451 | if n[0] != 1 { | |
452 | t.Errorf("Separator is long, expected a single element in frame but got: %d - %v", n[0], res) | |
453 | } | |
454 | if string(res[0]) != "abc" { | |
455 | t.Errorf("Join should have returned \"abc\" in first frame, but got: %s", res) | |
456 | } | |
457 | if n[1] != 1 { | |
458 | t.Errorf("Separator is long, expected a single element in frame but got: %d - %v", n[1], res) | |
459 | } | |
460 | if string(res[1]) != "abcd" { | |
461 | t.Errorf("Join should have returned \"abcd\" in second frame, but got: %s", res[1]) | |
462 | } | |
463 | if n[2] != 1 { | |
464 | t.Errorf("Separator is long, expected a single element in third frame but got: %d - %v", n[2], res) | |
465 | } | |
466 | if string(res[2]) != "ab" { | |
467 | t.Errorf("Join should have returned \"ab\" in third frame, but got: %s", res[2]) | |
468 | } | |
469 | if n[3] != 1 { | |
470 | t.Errorf("Separator is long, expected a single element in fourth frame but got: %d - %v", n[3], res) | |
471 | } | |
472 | if string(res[3]) != "xyz" { | |
473 | t.Errorf("Join should have returned \"xyz\" in fourth frame, but got: %s", res[3]) | |
474 | } | |
475 | if n[4] != 1 { | |
476 | t.Errorf("Separator is long, expected a single element in fifth frame but got: %d - %v", n[4], res) | |
477 | } | |
478 | if string(res[4]) != "foobaz" { | |
479 | t.Errorf("Join should have returned \"foobaz\" in fifth frame, but got: %s", res[4]) | |
480 | } | |
481 | if n[5] != 1 { | |
482 | t.Errorf("Separator is long, expected a single element in sixth frame but got: %d - %v", n[5], res) | |
483 | } | |
484 | if string(res[5]) != "x" { | |
485 | t.Errorf("Join should have returned \"x\" in sixth frame, but got: %s", res[5]) | |
486 | } | |
487 | if n[6] != 1 { | |
488 | t.Errorf("Separator is long, expected a single element in seventh frame but got: %d - %v", n[6], res) | |
489 | } | |
490 | if string(res[6]) != "wwxxyyzz" { | |
491 | t.Errorf("Join should have returned \"wwxxyyzz\" in seventh frame, but got: %s", res[6]) | |
492 | } | |
493 | ||
494 | res, n = c.joinMaxSize(elements[4:], " ", 6) | |
495 | if len(res) != len(n) && len(res) != 3 { | |
496 | t.Errorf("Was expecting 3 frames to flush but got: %v - %v", n, res) | |
497 | ||
498 | } | |
499 | if n[0] != 1 { | |
500 | t.Errorf("Element should just fit in frame - expected single element in frame: %d - %v", n[0], res) | |
501 | } | |
502 | if string(res[0]) != "foobaz" { | |
503 | t.Errorf("Join should have returned \"foobaz\" in first frame, but got: %s", res[0]) | |
504 | } | |
505 | if n[1] != 1 { | |
506 | t.Errorf("Single element expected in frame, but got. %d - %v", n[1], res) | |
507 | } | |
508 | if string(res[1]) != "x" { | |
509 | t.Errorf("Join should' have returned \"x\" in second frame, but got: %s", res[1]) | |
510 | } | |
511 | if n[2] != 1 { | |
512 | t.Errorf("Even though element is greater then max size we still try to send it. %d - %v", n[2], res) | |
513 | } | |
514 | if string(res[2]) != "wwxxyyzz" { | |
515 | t.Errorf("Join should have returned \"wwxxyyzz\" in third frame, but got: %s", res[2]) | |
516 | } | |
517 | } | |
518 | ||
519 | func TestSendMsgUDP(t *testing.T) { | |
520 | addr := "localhost:1201" | |
521 | udpAddr, err := net.ResolveUDPAddr("udp", addr) | |
522 | if err != nil { | |
523 | t.Fatal(err) | |
524 | } | |
525 | ||
526 | server, err := net.ListenUDP("udp", udpAddr) | |
527 | if err != nil { | |
528 | t.Fatal(err) | |
529 | } | |
530 | defer server.Close() | |
531 | ||
532 | client, err := New(addr) | |
533 | if err != nil { | |
534 | t.Fatal(err) | |
535 | } | |
536 | ||
537 | err = client.sendMsg(strings.Repeat("x", MaxUDPPayloadSize+1)) | |
538 | if err == nil { | |
539 | t.Error("Expected error to be returned if message size is bigger than MaxUDPPayloadSize") | |
540 | } | |
541 | ||
542 | message := "test message" | |
543 | ||
544 | err = client.sendMsg(message) | |
545 | if err != nil { | |
546 | t.Errorf("Expected no error to be returned if message size is smaller or equal to MaxUDPPayloadSize, got: %s", err.Error()) | |
547 | } | |
548 | ||
549 | buffer := make([]byte, MaxUDPPayloadSize+1) | |
550 | n, err := io.ReadAtLeast(server, buffer, 1) | |
551 | ||
552 | if err != nil { | |
553 | t.Fatalf("Expected no error to be returned reading the buffer, got: %s", err.Error()) | |
554 | } | |
555 | ||
556 | if n != len(message) { | |
557 | t.Fatalf("Failed to read full message from buffer. Got size `%d` expected `%d`", n, MaxUDPPayloadSize) | |
558 | } | |
559 | ||
560 | if string(buffer[:n]) != message { | |
561 | t.Fatalf("The received message did not match what we expect.") | |
562 | } | |
563 | ||
564 | client, err = NewBuffered(addr, 1) | |
565 | if err != nil { | |
566 | t.Fatal(err) | |
567 | } | |
568 | ||
569 | err = client.sendMsg(strings.Repeat("x", MaxUDPPayloadSize+1)) | |
570 | if err == nil { | |
571 | t.Error("Expected error to be returned if message size is bigger than MaxUDPPayloadSize") | |
572 | } | |
573 | ||
574 | err = client.sendMsg(message) | |
575 | if err != nil { | |
576 | t.Errorf("Expected no error to be returned if message size is smaller or equal to MaxUDPPayloadSize, got: %s", err.Error()) | |
577 | } | |
578 | ||
579 | client.Lock() | |
580 | err = client.flushLocked() | |
581 | client.Unlock() | |
582 | ||
583 | if err != nil { | |
584 | t.Fatalf("Expected no error to be returned flushing the client, got: %s", err.Error()) | |
585 | } | |
586 | ||
587 | buffer = make([]byte, MaxUDPPayloadSize+1) | |
588 | n, err = io.ReadAtLeast(server, buffer, 1) | |
589 | ||
590 | if err != nil { | |
591 | t.Fatalf("Expected no error to be returned reading the buffer, got: %s", err.Error()) | |
592 | } | |
593 | ||
594 | if n != len(message) { | |
595 | t.Fatalf("Failed to read full message from buffer. Got size `%d` expected `%d`", n, MaxUDPPayloadSize) | |
596 | } | |
597 | ||
598 | if string(buffer[:n]) != message { | |
599 | t.Fatalf("The received message did not match what we expect.") | |
600 | } | |
601 | } | |
602 | ||
603 | func TestSendUDSErrors(t *testing.T) { | |
604 | dir, err := ioutil.TempDir("", "socket") | |
605 | if err != nil { | |
606 | t.Fatal(err) | |
607 | } | |
608 | defer os.RemoveAll(dir) // clean up | |
609 | ||
610 | message := "test message" | |
611 | ||
612 | addr := filepath.Join(dir, "dsd.socket") | |
613 | udsAddr, err := net.ResolveUnixAddr("unixgram", addr) | |
614 | if err != nil { | |
615 | t.Fatal(err) | |
616 | } | |
617 | ||
618 | addrParts := []string{UnixAddressPrefix, addr} | |
619 | client, err := New(strings.Join(addrParts, "")) | |
620 | if err != nil { | |
621 | t.Fatal(err) | |
622 | } | |
623 | ||
624 | // Server not listening yet | |
625 | err = client.sendMsg(message) | |
626 | if err == nil || !strings.HasSuffix(err.Error(), "no such file or directory") { | |
627 | t.Errorf("Expected error \"no such file or directory\", got: %s", err.Error()) | |
628 | } | |
629 | ||
630 | // Start server and send packet | |
631 | server, err := net.ListenUnixgram("unixgram", udsAddr) | |
632 | if err != nil { | |
633 | t.Fatal(err) | |
634 | } | |
635 | err = client.sendMsg(message) | |
636 | if err != nil { | |
637 | t.Errorf("Expected no error to be returned when server is listening, got: %s", err.Error()) | |
638 | } | |
639 | bytes := make([]byte, 1024) | |
640 | n, err := server.Read(bytes) | |
641 | if err != nil { | |
642 | t.Fatal(err) | |
643 | } | |
644 | if string(bytes[:n]) != message { | |
645 | t.Errorf("Expected: %s. Actual: %s", string(message), string(bytes)) | |
646 | } | |
647 | ||
648 | // close server and send packet | |
649 | server.Close() | |
650 | os.Remove(addr) | |
651 | err = client.sendMsg(message) | |
652 | if err == nil { | |
653 | t.Error("Expected an error, got nil") | |
654 | } | |
655 | ||
656 | // Restart server and send packet | |
657 | server, err = net.ListenUnixgram("unixgram", udsAddr) | |
658 | if err != nil { | |
659 | t.Fatal(err) | |
660 | } | |
661 | time.Sleep(100 * time.Millisecond) | |
662 | defer server.Close() | |
663 | err = client.sendMsg(message) | |
664 | if err != nil { | |
665 | t.Errorf("Expected no error to be returned when server is listening, got: %s", err.Error()) | |
666 | } | |
667 | ||
668 | bytes = make([]byte, 1024) | |
669 | n, err = server.Read(bytes) | |
670 | if err != nil { | |
671 | t.Fatal(err) | |
672 | } | |
673 | if string(bytes[:n]) != message { | |
674 | t.Errorf("Expected: %s. Actual: %s", string(message), string(bytes)) | |
675 | } | |
676 | } | |
677 | ||
678 | func TestSendUDSIgnoreErrors(t *testing.T) { | |
679 | client, err := New("unix:///invalid") | |
680 | if err != nil { | |
681 | t.Fatal(err) | |
682 | } | |
683 | ||
684 | // Default mode throws error | |
685 | err = client.sendMsg("message") | |
686 | if err == nil || !strings.HasSuffix(err.Error(), "no such file or directory") { | |
687 | t.Errorf("Expected error \"connect: no such file or directory\", got: %s", err.Error()) | |
688 | } | |
689 | ||
690 | // Skip errors | |
691 | client.SkipErrors = true | |
692 | err = client.sendMsg("message") | |
693 | if err != nil { | |
694 | t.Errorf("Expected no error to be returned when in skip errors mode, got: %s", err.Error()) | |
695 | } | |
696 | } | |
697 | ||
698 | func TestNilSafe(t *testing.T) { | |
699 | var c *Client | |
700 | assertNotPanics(t, func() { c.SetWriteTimeout(0) }) | |
701 | assertNotPanics(t, func() { c.Flush() }) | |
702 | assertNotPanics(t, func() { c.Close() }) | |
703 | assertNotPanics(t, func() { c.Count("", 0, nil, 1) }) | |
704 | assertNotPanics(t, func() { c.Histogram("", 0, nil, 1) }) | |
705 | assertNotPanics(t, func() { c.Distribution("", 0, nil, 1) }) | |
706 | assertNotPanics(t, func() { c.Gauge("", 0, nil, 1) }) | |
707 | assertNotPanics(t, func() { c.Set("", "", nil, 1) }) | |
708 | assertNotPanics(t, func() { | |
709 | c.send("", "", []byte(""), nil, 1) | |
710 | }) | |
711 | assertNotPanics(t, func() { c.Event(NewEvent("", "")) }) | |
712 | assertNotPanics(t, func() { c.SimpleEvent("", "") }) | |
713 | assertNotPanics(t, func() { c.ServiceCheck(NewServiceCheck("", Ok)) }) | |
714 | assertNotPanics(t, func() { c.SimpleServiceCheck("", Ok) }) | |
715 | } | |
716 | ||
717 | func TestEvents(t *testing.T) { | |
718 | matrix := []struct { | |
719 | event *Event | |
720 | encoded string | |
75 | } | |
76 | return len(p), nil | |
77 | } | |
78 | ||
79 | func TestNewWithWriter(t *testing.T) { | |
80 | w := statsdWriterWrapper{} | |
81 | client, err := NewWithWriter(&w, WithoutTelemetry()) | |
82 | require.Nil(t, err) | |
83 | ||
84 | ts := &testServer{} | |
85 | expected := ts.sendAllType(client) | |
86 | client.Close() | |
87 | ||
88 | ts.assertMetric(t, w.data, expected) | |
89 | } | |
90 | ||
91 | // TestConcurrentSend sends various metric types in separate goroutines to | |
92 | // trigger any possible data races. It is intended to be run with the data race | |
93 | // detector enabled. | |
94 | func TestConcurrentSend(t *testing.T) { | |
95 | tests := []struct { | |
96 | description string | |
97 | clientOptions []Option | |
721 | 98 | }{ |
722 | 99 | { |
723 | NewEvent("Hello", "Something happened to my event"), | |
724 | `_e{5,30}:Hello|Something happened to my event`, | |
725 | }, { | |
726 | &Event{Title: "hi", Text: "okay", AggregationKey: "foo"}, | |
727 | `_e{2,4}:hi|okay|k:foo`, | |
728 | }, { | |
729 | &Event{Title: "hi", Text: "okay", AggregationKey: "foo", AlertType: Info}, | |
730 | `_e{2,4}:hi|okay|k:foo|t:info`, | |
731 | }, { | |
732 | &Event{Title: "hi", Text: "w/e", AlertType: Error, Priority: Normal}, | |
733 | `_e{2,3}:hi|w/e|p:normal|t:error`, | |
734 | }, { | |
735 | &Event{Title: "hi", Text: "uh", Tags: []string{"host:foo", "app:bar"}}, | |
736 | `_e{2,2}:hi|uh|#host:foo,app:bar`, | |
737 | }, { | |
738 | &Event{Title: "hi", Text: "line1\nline2", Tags: []string{"hello\nworld"}}, | |
739 | `_e{2,12}:hi|line1\nline2|#helloworld`, | |
740 | }, | |
741 | } | |
742 | ||
743 | for _, m := range matrix { | |
744 | r, err := m.event.Encode() | |
745 | if err != nil { | |
746 | t.Errorf("Error encoding: %s\n", err) | |
747 | continue | |
748 | } | |
749 | if r != m.encoded { | |
750 | t.Errorf("Expected `%s`, got `%s`\n", m.encoded, r) | |
751 | } | |
752 | } | |
753 | ||
754 | e := NewEvent("", "hi") | |
755 | if _, err := e.Encode(); err == nil { | |
756 | t.Errorf("Expected error on empty Title.") | |
757 | } | |
758 | ||
759 | e = NewEvent("hi", "") | |
760 | if _, err := e.Encode(); err == nil { | |
761 | t.Errorf("Expected error on empty Text.") | |
762 | } | |
763 | ||
764 | e = NewEvent("hello", "world") | |
765 | s, err := e.Encode("tag1", "tag2") | |
766 | if err != nil { | |
767 | t.Error(err) | |
768 | } | |
769 | expected := "_e{5,5}:hello|world|#tag1,tag2" | |
770 | if s != expected { | |
771 | t.Errorf("Expected %s, got %s", expected, s) | |
772 | } | |
773 | if len(e.Tags) != 0 { | |
774 | t.Errorf("Modified event in place illegally.") | |
775 | } | |
776 | } | |
777 | ||
778 | func TestServiceChecks(t *testing.T) { | |
779 | matrix := []struct { | |
780 | serviceCheck *ServiceCheck | |
781 | encoded string | |
100 | description: "Client with default options", | |
101 | clientOptions: []Option{}, | |
102 | }, | |
103 | { | |
104 | description: "Client with mutex mode enabled", | |
105 | clientOptions: []Option{WithMutexMode()}, | |
106 | }, | |
107 | { | |
108 | description: "Client with channel mode enabled", | |
109 | clientOptions: []Option{WithChannelMode()}, | |
110 | }, | |
111 | } | |
112 | ||
113 | for _, test := range tests { | |
114 | test := test // Capture range variable. | |
115 | t.Run(test.description, func(t *testing.T) { | |
116 | t.Parallel() | |
117 | ||
118 | client, err := New("localhost:9876", test.clientOptions...) | |
119 | require.Nil(t, err, fmt.Sprintf("failed to create client: %s", err)) | |
120 | ||
121 | var wg sync.WaitGroup | |
122 | wg.Add(1) | |
123 | go func() { | |
124 | client.Gauge("name", 1, []string{"tag"}, 0.1) | |
125 | wg.Done() | |
126 | }() | |
127 | ||
128 | wg.Add(1) | |
129 | go func() { | |
130 | client.Count("name", 1, []string{"tag"}, 0.1) | |
131 | wg.Done() | |
132 | }() | |
133 | ||
134 | wg.Add(1) | |
135 | go func() { | |
136 | client.Timing("name", 1, []string{"tag"}, 0.1) | |
137 | wg.Done() | |
138 | }() | |
139 | ||
140 | wg.Wait() | |
141 | err = client.Close() | |
142 | require.Nil(t, err, fmt.Sprintf("failed to close client: %s", err)) | |
143 | }) | |
144 | } | |
145 | } | |
146 | ||
147 | // TestCloseRace close the client multiple times in separate goroutines to | |
148 | // trigger any possible data races. It is intended to be run with the data race | |
149 | // detector enabled. | |
150 | func TestCloseRace(t *testing.T) { | |
151 | c, err := New("localhost:8125") | |
152 | assert.NoError(t, err) | |
153 | start := make(chan struct{}) | |
154 | var wg sync.WaitGroup | |
155 | for j := 0; j < 100; j++ { | |
156 | wg.Add(1) | |
157 | go func() { | |
158 | defer wg.Done() | |
159 | <-start | |
160 | c.Close() | |
161 | }() | |
162 | } | |
163 | close(start) | |
164 | wg.Wait() | |
165 | } | |
166 | ||
167 | func TestCloseWithClientAlreadyClosed(t *testing.T) { | |
168 | c, err := New("localhost:8125") | |
169 | assert.NoError(t, err) | |
170 | assert.False(t, c.IsClosed()) | |
171 | ||
172 | assert.NoError(t, c.Close()) | |
173 | assert.True(t, c.IsClosed()) | |
174 | ||
175 | assert.NoError(t, c.Close()) | |
176 | assert.True(t, c.IsClosed()) | |
177 | } | |
178 | ||
179 | func TestIsClosed(t *testing.T) { | |
180 | c, err := New("localhost:8125") | |
181 | assert.NoError(t, err) | |
182 | assert.False(t, c.IsClosed()) | |
183 | ||
184 | assert.NoError(t, c.Close()) | |
185 | assert.True(t, c.IsClosed()) | |
186 | } | |
187 | ||
188 | func TestCloneWithExtraOptions(t *testing.T) { | |
189 | client, err := New("localhost:1201", WithTags([]string{"tag1", "tag2"})) | |
190 | require.Nil(t, err, fmt.Sprintf("failed to create client: %s", err)) | |
191 | ||
192 | assert.Equal(t, client.tags, []string{"tag1", "tag2"}) | |
193 | assert.Equal(t, client.namespace, "") | |
194 | assert.Equal(t, client.workersMode, mutexMode) | |
195 | assert.Equal(t, "localhost:1201", client.addrOption) | |
196 | assert.Len(t, client.options, 1) | |
197 | ||
198 | cloneClient, err := CloneWithExtraOptions(client, WithNamespace("test"), WithChannelMode()) | |
199 | require.Nil(t, err, fmt.Sprintf("failed to clone client: %s", err)) | |
200 | ||
201 | assert.Equal(t, cloneClient.tags, []string{"tag1", "tag2"}) | |
202 | assert.Equal(t, cloneClient.namespace, "test.") | |
203 | assert.Equal(t, cloneClient.workersMode, channelMode) | |
204 | assert.Equal(t, "localhost:1201", cloneClient.addrOption) | |
205 | assert.Len(t, cloneClient.options, 3) | |
206 | } | |
207 | ||
208 | func TestResolveAddressFromEnvironment(t *testing.T) { | |
209 | hostInitialValue, hostInitiallySet := os.LookupEnv(agentHostEnvVarName) | |
210 | if hostInitiallySet { | |
211 | defer os.Setenv(agentHostEnvVarName, hostInitialValue) | |
212 | } else { | |
213 | defer os.Unsetenv(agentHostEnvVarName) | |
214 | } | |
215 | portInitialValue, portInitiallySet := os.LookupEnv(agentPortEnvVarName) | |
216 | if portInitiallySet { | |
217 | defer os.Setenv(agentPortEnvVarName, portInitialValue) | |
218 | } else { | |
219 | defer os.Unsetenv(agentPortEnvVarName) | |
220 | } | |
221 | ||
222 | for _, tc := range []struct { | |
223 | name string | |
224 | addrParam string | |
225 | hostEnv string | |
226 | portEnv string | |
227 | expectedAddr string | |
782 | 228 | }{ |
783 | { | |
784 | NewServiceCheck("DataCatService", Ok), | |
785 | `_sc|DataCatService|0`, | |
786 | }, { | |
787 | NewServiceCheck("DataCatService", Warn), | |
788 | `_sc|DataCatService|1`, | |
789 | }, { | |
790 | NewServiceCheck("DataCatService", Critical), | |
791 | `_sc|DataCatService|2`, | |
792 | }, { | |
793 | NewServiceCheck("DataCatService", Unknown), | |
794 | `_sc|DataCatService|3`, | |
795 | }, { | |
796 | &ServiceCheck{Name: "DataCatService", Status: Ok, Hostname: "DataStation.Cat"}, | |
797 | `_sc|DataCatService|0|h:DataStation.Cat`, | |
798 | }, { | |
799 | &ServiceCheck{Name: "DataCatService", Status: Ok, Hostname: "DataStation.Cat", Message: "Here goes valuable message"}, | |
800 | `_sc|DataCatService|0|h:DataStation.Cat|m:Here goes valuable message`, | |
801 | }, { | |
802 | &ServiceCheck{Name: "DataCatService", Status: Ok, Hostname: "DataStation.Cat", Message: "Here are some cyrillic chars: к л м н о п р с т у ф х ц ч ш"}, | |
803 | `_sc|DataCatService|0|h:DataStation.Cat|m:Here are some cyrillic chars: к л м н о п р с т у ф х ц ч ш`, | |
804 | }, { | |
805 | &ServiceCheck{Name: "DataCatService", Status: Ok, Hostname: "DataStation.Cat", Message: "Here goes valuable message", Tags: []string{"host:foo", "app:bar"}}, | |
806 | `_sc|DataCatService|0|h:DataStation.Cat|#host:foo,app:bar|m:Here goes valuable message`, | |
807 | }, { | |
808 | &ServiceCheck{Name: "DataCatService", Status: Ok, Hostname: "DataStation.Cat", Message: "Here goes \n that should be escaped", Tags: []string{"host:foo", "app:b\nar"}}, | |
809 | `_sc|DataCatService|0|h:DataStation.Cat|#host:foo,app:bar|m:Here goes \n that should be escaped`, | |
810 | }, { | |
811 | &ServiceCheck{Name: "DataCatService", Status: Ok, Hostname: "DataStation.Cat", Message: "Here goes m: that should be escaped", Tags: []string{"host:foo", "app:bar"}}, | |
812 | `_sc|DataCatService|0|h:DataStation.Cat|#host:foo,app:bar|m:Here goes m\: that should be escaped`, | |
813 | }, | |
814 | } | |
815 | ||
816 | for _, m := range matrix { | |
817 | r, err := m.serviceCheck.Encode() | |
818 | if err != nil { | |
819 | t.Errorf("Error encoding: %s\n", err) | |
820 | continue | |
821 | } | |
822 | if r != m.encoded { | |
823 | t.Errorf("Expected `%s`, got `%s`\n", m.encoded, r) | |
824 | } | |
825 | } | |
826 | ||
827 | sc := NewServiceCheck("", Ok) | |
828 | if _, err := sc.Encode(); err == nil { | |
829 | t.Errorf("Expected error on empty Name.") | |
830 | } | |
831 | ||
832 | sc = NewServiceCheck("sc", ServiceCheckStatus(5)) | |
833 | if _, err := sc.Encode(); err == nil { | |
834 | t.Errorf("Expected error on invalid status value.") | |
835 | } | |
836 | ||
837 | sc = NewServiceCheck("hello", Warn) | |
838 | s, err := sc.Encode("tag1", "tag2") | |
839 | if err != nil { | |
840 | t.Error(err) | |
841 | } | |
842 | expected := "_sc|hello|1|#tag1,tag2" | |
843 | if s != expected { | |
844 | t.Errorf("Expected %s, got %s", expected, s) | |
845 | } | |
846 | if len(sc.Tags) != 0 { | |
847 | t.Errorf("Modified serviceCheck in place illegally.") | |
848 | } | |
849 | } | |
850 | ||
851 | func TestFlushOnClose(t *testing.T) { | |
852 | client, err := NewBuffered("localhost:1201", 64) | |
853 | if err != nil { | |
854 | t.Fatal(err) | |
855 | } | |
856 | // stop the flushing mechanism so we can test the buffer without interferences | |
857 | client.stop <- struct{}{} | |
858 | ||
859 | message := "test message" | |
860 | ||
861 | err = client.sendMsg(message) | |
862 | if err != nil { | |
863 | t.Fatal(err) | |
864 | } | |
865 | ||
866 | if len(client.commands) != 1 { | |
867 | t.Errorf("Commands buffer should contain 1 item, got %d", len(client.commands)) | |
868 | } | |
869 | ||
870 | err = client.Close() | |
871 | if err != nil { | |
872 | t.Fatal(err) | |
873 | } | |
874 | ||
875 | if len(client.commands) != 0 { | |
876 | t.Errorf("Commands buffer should be empty, got %d", len(client.commands)) | |
877 | } | |
878 | } | |
879 | ||
880 | // These benchmarks show that using different format options: | |
881 | // v1: sprintf-ing together a bunch of intermediate strings is 4-5x faster | |
882 | // v2: some use of buffer | |
883 | // v3: removing sprintf from stat generation and pushing stat building into format | |
884 | func BenchmarkFormatV3(b *testing.B) { | |
885 | b.StopTimer() | |
886 | c := &Client{} | |
887 | c.Namespace = "foo.bar." | |
888 | c.Tags = []string{"app:foo", "host:bar"} | |
889 | b.StartTimer() | |
890 | for i := 0; i < b.N; i++ { | |
891 | c.format("system.cpu.idle", 10, gaugeSuffix, []string{"foo"}, 1) | |
892 | c.format("system.cpu.load", 0.1, gaugeSuffix, nil, 0.9) | |
893 | } | |
894 | } | |
895 | ||
896 | func BenchmarkFormatV1(b *testing.B) { | |
897 | b.StopTimer() | |
898 | c := &Client{} | |
899 | c.Namespace = "foo.bar." | |
900 | c.Tags = []string{"app:foo", "host:bar"} | |
901 | b.StartTimer() | |
902 | for i := 0; i < b.N; i++ { | |
903 | c.formatV1("system.cpu.idle", 10, []string{"foo"}, 1) | |
904 | c.formatV1("system.cpu.load", 0.1, nil, 0.9) | |
905 | } | |
906 | } | |
907 | ||
908 | // V1 formatting function, added to client for tests | |
909 | func (c *Client) formatV1(name string, value float64, tags []string, rate float64) string { | |
910 | valueAsString := fmt.Sprintf("%f|g", value) | |
911 | if rate < 1 { | |
912 | valueAsString = fmt.Sprintf("%s|@%f", valueAsString, rate) | |
913 | } | |
914 | if c.Namespace != "" { | |
915 | name = fmt.Sprintf("%s%s", c.Namespace, name) | |
916 | } | |
917 | ||
918 | tags = append(c.Tags, tags...) | |
919 | if len(tags) > 0 { | |
920 | valueAsString = fmt.Sprintf("%s|#%s", valueAsString, strings.Join(tags, ",")) | |
921 | } | |
922 | ||
923 | return fmt.Sprintf("%s:%s", name, valueAsString) | |
924 | ||
925 | } | |
926 | ||
927 | func BenchmarkFormatV2(b *testing.B) { | |
928 | b.StopTimer() | |
929 | c := &Client{} | |
930 | c.Namespace = "foo.bar." | |
931 | c.Tags = []string{"app:foo", "host:bar"} | |
932 | b.StartTimer() | |
933 | for i := 0; i < b.N; i++ { | |
934 | c.formatV2("system.cpu.idle", 10, []string{"foo"}, 1) | |
935 | c.formatV2("system.cpu.load", 0.1, nil, 0.9) | |
936 | } | |
937 | } | |
938 | ||
939 | // V2 formatting function, added to client for tests | |
940 | func (c *Client) formatV2(name string, value float64, tags []string, rate float64) string { | |
941 | var buf bytes.Buffer | |
942 | if c.Namespace != "" { | |
943 | buf.WriteString(c.Namespace) | |
944 | } | |
945 | buf.WriteString(name) | |
946 | buf.WriteString(":") | |
947 | buf.WriteString(fmt.Sprintf("%f|g", value)) | |
948 | if rate < 1 { | |
949 | buf.WriteString(`|@`) | |
950 | buf.WriteString(strconv.FormatFloat(rate, 'f', -1, 64)) | |
951 | } | |
952 | ||
953 | writeTagString(&buf, c.Tags, tags) | |
954 | ||
955 | return buf.String() | |
956 | } | |
229 | {"UPD Nominal case", "127.0.0.1:1234", "", "", "127.0.0.1:1234"}, | |
230 | {"UPD Parameter overrides environment", "127.0.0.1:8125", "10.12.16.9", "1234", "127.0.0.1:8125"}, | |
231 | {"UPD Host and port passed as env", "", "10.12.16.9", "1234", "10.12.16.9:1234"}, | |
232 | {"UPD Host env, default port", "", "10.12.16.9", "", "10.12.16.9:8125"}, | |
233 | {"UPD Host passed, ignore env port", "10.12.16.9", "", "1234", "10.12.16.9:8125"}, | |
234 | ||
235 | {"UDS socket passed", "unix://test/path.socket", "", "", "unix://test/path.socket"}, | |
236 | {"UDS socket env", "", "unix://test/path.socket", "", "unix://test/path.socket"}, | |
237 | {"UDS socket env with port", "", "unix://test/path.socket", "8125", "unix://test/path.socket"}, | |
238 | ||
239 | {"Pipe passed", "\\\\.\\pipe\\my_pipe", "", "", "\\\\.\\pipe\\my_pipe"}, | |
240 | {"Pipe env", "", "\\\\.\\pipe\\my_pipe", "", "\\\\.\\pipe\\my_pipe"}, | |
241 | {"Pipe env with port", "", "\\\\.\\pipe\\my_pipe", "8125", "\\\\.\\pipe\\my_pipe"}, | |
242 | ||
243 | {"No autodetection failed", "", "", "", ""}, | |
244 | } { | |
245 | t.Run(tc.name, func(t *testing.T) { | |
246 | os.Setenv(agentHostEnvVarName, tc.hostEnv) | |
247 | os.Setenv(agentPortEnvVarName, tc.portEnv) | |
248 | ||
249 | addr := resolveAddr(tc.addrParam) | |
250 | assert.Equal(t, tc.expectedAddr, addr) | |
251 | }) | |
252 | } | |
253 | } | |
254 | ||
255 | func TestGetTelemetry(t *testing.T) { | |
256 | ts, client := newClientAndTestServer(t, | |
257 | "udp", | |
258 | "localhost:8765", | |
259 | nil, | |
260 | WithExtendedClientSideAggregation(), | |
261 | ) | |
262 | ||
263 | ts.sendAllAndAssert(t, client) | |
264 | tlm := client.GetTelemetry() | |
265 | ||
266 | assert.Equal(t, uint64(9), tlm.TotalMetrics, "telmetry TotalMetrics was wrong") | |
267 | assert.Equal(t, uint64(1), tlm.TotalMetricsGauge, "telmetry TotalMetricsGauge was wrong") | |
268 | assert.Equal(t, uint64(3), tlm.TotalMetricsCount, "telmetry TotalMetricsCount was wrong") | |
269 | assert.Equal(t, uint64(1), tlm.TotalMetricsHistogram, "telmetry TotalMetricsHistogram was wrong") | |
270 | assert.Equal(t, uint64(1), tlm.TotalMetricsDistribution, "telmetry TotalMetricsDistribution was wrong") | |
271 | assert.Equal(t, uint64(1), tlm.TotalMetricsSet, "telmetry TotalMetricsSet was wrong") | |
272 | assert.Equal(t, uint64(2), tlm.TotalMetricsTiming, "telmetry TotalMetricsTiming was wrong") | |
273 | assert.Equal(t, uint64(1), tlm.TotalEvents, "telmetry TotalEvents was wrong") | |
274 | assert.Equal(t, uint64(1), tlm.TotalServiceChecks, "telmetry TotalServiceChecks was wrong") | |
275 | assert.Equal(t, uint64(0), tlm.TotalDroppedOnReceive, "telmetry TotalDroppedOnReceive was wrong") | |
276 | assert.Equal(t, uint64(22), tlm.TotalPayloadsSent, "telmetry TotalPayloadsSent was wrong") | |
277 | assert.Equal(t, uint64(0), tlm.TotalPayloadsDropped, "telmetry TotalPayloadsDropped was wrong") | |
278 | assert.Equal(t, uint64(0), tlm.TotalPayloadsDroppedWriter, "telmetry TotalPayloadsDroppedWriter was wrong") | |
279 | assert.Equal(t, uint64(0), tlm.TotalPayloadsDroppedQueueFull, "telmetry TotalPayloadsDroppedQueueFull was wrong") | |
280 | assert.Equal(t, uint64(3112), tlm.TotalBytesSent, "telmetry TotalBytesSent was wrong") | |
281 | assert.Equal(t, uint64(0), tlm.TotalBytesDropped, "telmetry TotalBytesDropped was wrong") | |
282 | assert.Equal(t, uint64(0), tlm.TotalBytesDroppedWriter, "telmetry TotalBytesDroppedWriter was wrong") | |
283 | assert.Equal(t, uint64(0), tlm.TotalBytesDroppedQueueFull, "telmetry TotalBytesDroppedQueueFull was wrong") | |
284 | assert.Equal(t, uint64(9), tlm.AggregationNbContext, "telmetry AggregationNbContext was wrong") | |
285 | assert.Equal(t, uint64(1), tlm.AggregationNbContextGauge, "telmetry AggregationNbContextGauge was wrong") | |
286 | assert.Equal(t, uint64(3), tlm.AggregationNbContextCount, "telmetry AggregationNbContextCount was wrong") | |
287 | assert.Equal(t, uint64(1), tlm.AggregationNbContextSet, "telmetry AggregationNbContextSet was wrong") | |
288 | assert.Equal(t, uint64(1), tlm.AggregationNbContextHistogram, "telmetry AggregationNbContextHistogram was wrong") | |
289 | assert.Equal(t, uint64(1), tlm.AggregationNbContextDistribution, "telmetry AggregationNbContextDistribution was wrong") | |
290 | assert.Equal(t, uint64(2), tlm.AggregationNbContextTiming, "telmetry AggregationNbContextTiming was wrong") | |
291 | } | |
292 | ||
293 | func Test_isOriginDetectionEnabled(t *testing.T) { | |
294 | tests := []struct { | |
295 | name string | |
296 | o *Options | |
297 | hasEntityID bool | |
298 | configEnvVarValue string | |
299 | want bool | |
300 | }{ | |
301 | { | |
302 | name: "nominal case", | |
303 | o: &Options{originDetection: defaultOriginDetection}, | |
304 | hasEntityID: false, | |
305 | configEnvVarValue: "", | |
306 | want: true, | |
307 | }, | |
308 | { | |
309 | name: "has entity ID", | |
310 | o: &Options{originDetection: defaultOriginDetection}, | |
311 | hasEntityID: true, | |
312 | configEnvVarValue: "", | |
313 | want: false, | |
314 | }, | |
315 | { | |
316 | name: "has user-provided container ID", | |
317 | o: &Options{containerID: "user-provided"}, | |
318 | hasEntityID: true, | |
319 | configEnvVarValue: "", | |
320 | want: false, | |
321 | }, | |
322 | { | |
323 | name: "originDetection option disabled", | |
324 | o: &Options{originDetection: false}, | |
325 | hasEntityID: false, | |
326 | configEnvVarValue: "", | |
327 | want: false, | |
328 | }, | |
329 | { | |
330 | name: "DD_ORIGIN_DETECTION_ENABLED=false", | |
331 | o: &Options{originDetection: defaultOriginDetection}, | |
332 | hasEntityID: false, | |
333 | configEnvVarValue: "false", | |
334 | want: false, | |
335 | }, | |
336 | { | |
337 | name: "invalid DD_ORIGIN_DETECTION_ENABLED value", | |
338 | o: &Options{originDetection: defaultOriginDetection}, | |
339 | hasEntityID: false, | |
340 | configEnvVarValue: "invalid", | |
341 | want: true, | |
342 | }, | |
343 | } | |
344 | for _, tt := range tests { | |
345 | t.Run(tt.name, func(t *testing.T) { | |
346 | os.Setenv("DD_ORIGIN_DETECTION_ENABLED", tt.configEnvVarValue) | |
347 | defer os.Unsetenv("DD_ORIGIN_DETECTION_ENABLED") | |
348 | ||
349 | assert.Equal(t, tt.want, isOriginDetectionEnabled(tt.o, tt.hasEntityID)) | |
350 | }) | |
351 | } | |
352 | } | |
353 | ||
354 | func TestMessageTooLongError(t *testing.T) { | |
355 | client, err := New("localhost:8765", WithMaxBytesPerPayload(10), WithoutClientSideAggregation()) | |
356 | require.NoError(t, err) | |
357 | ||
358 | err = client.Gauge("fake_name_", 21, nil, 1) | |
359 | require.Error(t, err) | |
360 | assert.IsType(t, MessageTooLongError{}, err) | |
361 | } |
0 | package statsd | |
1 | ||
2 | import ( | |
3 | "fmt" | |
4 | "sync" | |
5 | "time" | |
6 | ) | |
7 | ||
// telemetryInterval is the interval at which telemetry will be sent by the client.
const telemetryInterval = 10 * time.Second

// clientTelemetryTag is a tag identifying this specific client.
var clientTelemetryTag = "client:go"

// clientVersionTelemetryTag is a tag identifying this specific client version.
var clientVersionTelemetryTag = "client_version:5.1.0"
22 | ||
// Telemetry represents internal metrics about the client behavior since it started.
type Telemetry struct {
	//
	// Those are produced by the 'Client'
	//

	// TotalMetrics is the total number of metrics sent by the client before aggregation and sampling.
	TotalMetrics uint64
	// TotalMetricsGauge is the total number of gauges sent by the client before aggregation and sampling.
	TotalMetricsGauge uint64
	// TotalMetricsCount is the total number of counts sent by the client before aggregation and sampling.
	TotalMetricsCount uint64
	// TotalMetricsHistogram is the total number of histograms sent by the client before aggregation and sampling.
	TotalMetricsHistogram uint64
	// TotalMetricsDistribution is the total number of distributions sent by the client before aggregation and
	// sampling.
	TotalMetricsDistribution uint64
	// TotalMetricsSet is the total number of sets sent by the client before aggregation and sampling.
	TotalMetricsSet uint64
	// TotalMetricsTiming is the total number of timings sent by the client before aggregation and sampling.
	TotalMetricsTiming uint64
	// TotalEvents is the total number of events sent by the client before aggregation and sampling.
	TotalEvents uint64
	// TotalServiceChecks is the total number of service_checks sent by the client before aggregation and sampling.
	TotalServiceChecks uint64

	// TotalDroppedOnReceive is the total number of metrics/events/service_checks dropped when using ChannelMode (see
	// WithChannelMode option).
	TotalDroppedOnReceive uint64

	//
	// Those are produced by the 'sender'
	//

	// TotalPayloadsSent is the total number of payloads (packets on the network) successfully sent by the client. When
	// using UDP we don't know if a packet was dropped or not, so all packets are considered successfully sent.
	TotalPayloadsSent uint64
	// TotalPayloadsDropped is the total number of payloads dropped by the client. This includes all causes of drops
	// (TotalPayloadsDroppedQueueFull and TotalPayloadsDroppedWriter). When using UDP this won't include
	// packets dropped by the network.
	TotalPayloadsDropped uint64
	// TotalPayloadsDroppedWriter is the total number of payloads dropped by the writer (when using UDS or named
	// pipe) due to network timeout or error.
	TotalPayloadsDroppedWriter uint64
	// TotalPayloadsDroppedQueueFull is the total number of payloads dropped internally because the queue of payloads
	// waiting to be sent on the wire is full. This means the client is generating more metrics than can be sent on
	// the wire. If your app sends metrics in batch look at WithSenderQueueSize option to increase the queue size.
	TotalPayloadsDroppedQueueFull uint64

	// TotalBytesSent is the total number of bytes successfully sent by the client. When using UDP we don't know if
	// a packet was dropped or not, so all packets are considered successfully sent.
	TotalBytesSent uint64
	// TotalBytesDropped is the total number of bytes dropped by the client. This includes all causes of drops
	// (TotalBytesDroppedQueueFull and TotalBytesDroppedWriter). When using UDP this
	// won't include bytes dropped by the network.
	TotalBytesDropped uint64
	// TotalBytesDroppedWriter is the total number of bytes dropped by the writer (when using UDS or named pipe) due
	// to network timeout or error.
	TotalBytesDroppedWriter uint64
	// TotalBytesDroppedQueueFull is the total number of bytes dropped internally because the queue of payloads
	// waiting to be sent on the wire is full. This means the client is generating more metrics than can be sent on
	// the wire. If your app sends metrics in batch look at WithSenderQueueSize option to increase the queue size.
	TotalBytesDroppedQueueFull uint64

	//
	// Those are produced by the 'aggregator'
	//

	// AggregationNbContext is the total number of contexts flushed by the aggregator when either
	// WithClientSideAggregation or WithExtendedClientSideAggregation options are enabled.
	AggregationNbContext uint64
	// AggregationNbContextGauge is the total number of contexts for gauges flushed by the aggregator when either
	// WithClientSideAggregation or WithExtendedClientSideAggregation options are enabled.
	AggregationNbContextGauge uint64
	// AggregationNbContextCount is the total number of contexts for counts flushed by the aggregator when either
	// WithClientSideAggregation or WithExtendedClientSideAggregation options are enabled.
	AggregationNbContextCount uint64
	// AggregationNbContextSet is the total number of contexts for sets flushed by the aggregator when either
	// WithClientSideAggregation or WithExtendedClientSideAggregation options are enabled.
	AggregationNbContextSet uint64
	// AggregationNbContextHistogram is the total number of contexts for histograms flushed by the aggregator when either
	// WithClientSideAggregation or WithExtendedClientSideAggregation options are enabled.
	AggregationNbContextHistogram uint64
	// AggregationNbContextDistribution is the total number of contexts for distributions flushed by the aggregator when either
	// WithClientSideAggregation or WithExtendedClientSideAggregation options are enabled.
	AggregationNbContextDistribution uint64
	// AggregationNbContextTiming is the total number of contexts for timings flushed by the aggregator when either
	// WithClientSideAggregation or WithExtendedClientSideAggregation options are enabled.
	AggregationNbContextTiming uint64
}
113 | ||
// telemetryClient periodically samples the client's internal counters and
// emits them as dogstatsd metrics, either through a dedicated sender/worker
// pair (custom telemetry address) or through the main client pipeline.
type telemetryClient struct {
	c          *Client
	tags       []string                // tags attached to every telemetry metric
	aggEnabled bool                    // is aggregation enabled and should we send aggregation telemetry.
	tagsByType map[metricType][]string // per metric type: 'tags' plus a "metrics_type:<type>" tag
	sender     *sender                 // only set when a custom telemetry address is used
	worker     *worker                 // only set when a custom telemetry address is used
	lastSample Telemetry               // The previous sample of telemetry sent
}
123 | ||
124 | func newTelemetryClient(c *Client, transport string, aggregationEnabled bool) *telemetryClient { | |
125 | t := &telemetryClient{ | |
126 | c: c, | |
127 | tags: append(c.tags, clientTelemetryTag, clientVersionTelemetryTag, "client_transport:"+transport), | |
128 | aggEnabled: aggregationEnabled, | |
129 | tagsByType: map[metricType][]string{}, | |
130 | } | |
131 | ||
132 | t.tagsByType[gauge] = append(append([]string{}, t.tags...), "metrics_type:gauge") | |
133 | t.tagsByType[count] = append(append([]string{}, t.tags...), "metrics_type:count") | |
134 | t.tagsByType[set] = append(append([]string{}, t.tags...), "metrics_type:set") | |
135 | t.tagsByType[timing] = append(append([]string{}, t.tags...), "metrics_type:timing") | |
136 | t.tagsByType[histogram] = append(append([]string{}, t.tags...), "metrics_type:histogram") | |
137 | t.tagsByType[distribution] = append(append([]string{}, t.tags...), "metrics_type:distribution") | |
138 | return t | |
139 | } | |
140 | ||
141 | func newTelemetryClientWithCustomAddr(c *Client, transport string, telemetryAddr string, aggregationEnabled bool, pool *bufferPool, writeTimeout time.Duration) (*telemetryClient, error) { | |
142 | telemetryWriter, _, err := createWriter(telemetryAddr, writeTimeout) | |
143 | if err != nil { | |
144 | return nil, fmt.Errorf("Could not resolve telemetry address: %v", err) | |
145 | } | |
146 | ||
147 | t := newTelemetryClient(c, transport, aggregationEnabled) | |
148 | ||
149 | // Creating a custom sender/worker with 1 worker in mutex mode for the | |
150 | // telemetry that share the same bufferPool. | |
151 | // FIXME due to performance pitfall, we're always using UDP defaults | |
152 | // even for UDS. | |
153 | t.sender = newSender(telemetryWriter, DefaultUDPBufferPoolSize, pool) | |
154 | t.worker = newWorker(pool, t.sender) | |
155 | return t, nil | |
156 | } | |
157 | ||
158 | func (t *telemetryClient) run(wg *sync.WaitGroup, stop chan struct{}) { | |
159 | wg.Add(1) | |
160 | go func() { | |
161 | defer wg.Done() | |
162 | ticker := time.NewTicker(telemetryInterval) | |
163 | for { | |
164 | select { | |
165 | case <-ticker.C: | |
166 | t.sendTelemetry() | |
167 | case <-stop: | |
168 | ticker.Stop() | |
169 | if t.sender != nil { | |
170 | t.sender.close() | |
171 | } | |
172 | return | |
173 | } | |
174 | } | |
175 | }() | |
176 | } | |
177 | ||
178 | func (t *telemetryClient) sendTelemetry() { | |
179 | for _, m := range t.flush() { | |
180 | if t.worker != nil { | |
181 | t.worker.processMetric(m) | |
182 | } else { | |
183 | t.c.send(m) | |
184 | } | |
185 | } | |
186 | ||
187 | if t.worker != nil { | |
188 | t.worker.flush() | |
189 | } | |
190 | } | |
191 | ||
192 | func (t *telemetryClient) getTelemetry() Telemetry { | |
193 | if t == nil { | |
194 | // telemetry was disabled through the WithoutTelemetry option | |
195 | return Telemetry{} | |
196 | } | |
197 | ||
198 | tlm := Telemetry{} | |
199 | t.c.flushTelemetryMetrics(&tlm) | |
200 | t.c.sender.flushTelemetryMetrics(&tlm) | |
201 | t.c.agg.flushTelemetryMetrics(&tlm) | |
202 | ||
203 | tlm.TotalMetrics = tlm.TotalMetricsGauge + | |
204 | tlm.TotalMetricsCount + | |
205 | tlm.TotalMetricsSet + | |
206 | tlm.TotalMetricsHistogram + | |
207 | tlm.TotalMetricsDistribution + | |
208 | tlm.TotalMetricsTiming | |
209 | ||
210 | tlm.TotalPayloadsDropped = tlm.TotalPayloadsDroppedQueueFull + tlm.TotalPayloadsDroppedWriter | |
211 | tlm.TotalBytesDropped = tlm.TotalBytesDroppedQueueFull + tlm.TotalBytesDroppedWriter | |
212 | ||
213 | if t.aggEnabled { | |
214 | tlm.AggregationNbContext = tlm.AggregationNbContextGauge + | |
215 | tlm.AggregationNbContextCount + | |
216 | tlm.AggregationNbContextSet + | |
217 | tlm.AggregationNbContextHistogram + | |
218 | tlm.AggregationNbContextDistribution + | |
219 | tlm.AggregationNbContextTiming | |
220 | } | |
221 | return tlm | |
222 | } | |
223 | ||
// flush returns the Telemetry metrics to be flushed. It's its own function to ease testing.
// Each metric value is the delta between the current counters and the counters
// at the previous flush (t.lastSample), which is updated at the end.
func (t *telemetryClient) flush() []metric {
	m := []metric{}

	// same as Count but without global namespace
	telemetryCount := func(name string, value int64, tags []string) {
		m = append(m, metric{metricType: count, name: name, ivalue: value, tags: tags, rate: 1})
	}

	tlm := t.getTelemetry()

	// We send the diff between now and the previous telemetry flush. This keeps the same telemetry behavior from V4
	// so users' dashboards aren't broken when upgrading to V5. It also allows graphing on the same dashboard a mix
	// of V4 and V5 apps.
	telemetryCount("datadog.dogstatsd.client.metrics", int64(tlm.TotalMetrics-t.lastSample.TotalMetrics), t.tags)
	telemetryCount("datadog.dogstatsd.client.metrics_by_type", int64(tlm.TotalMetricsGauge-t.lastSample.TotalMetricsGauge), t.tagsByType[gauge])
	telemetryCount("datadog.dogstatsd.client.metrics_by_type", int64(tlm.TotalMetricsCount-t.lastSample.TotalMetricsCount), t.tagsByType[count])
	telemetryCount("datadog.dogstatsd.client.metrics_by_type", int64(tlm.TotalMetricsHistogram-t.lastSample.TotalMetricsHistogram), t.tagsByType[histogram])
	telemetryCount("datadog.dogstatsd.client.metrics_by_type", int64(tlm.TotalMetricsDistribution-t.lastSample.TotalMetricsDistribution), t.tagsByType[distribution])
	telemetryCount("datadog.dogstatsd.client.metrics_by_type", int64(tlm.TotalMetricsSet-t.lastSample.TotalMetricsSet), t.tagsByType[set])
	telemetryCount("datadog.dogstatsd.client.metrics_by_type", int64(tlm.TotalMetricsTiming-t.lastSample.TotalMetricsTiming), t.tagsByType[timing])
	telemetryCount("datadog.dogstatsd.client.events", int64(tlm.TotalEvents-t.lastSample.TotalEvents), t.tags)
	telemetryCount("datadog.dogstatsd.client.service_checks", int64(tlm.TotalServiceChecks-t.lastSample.TotalServiceChecks), t.tags)

	telemetryCount("datadog.dogstatsd.client.metric_dropped_on_receive", int64(tlm.TotalDroppedOnReceive-t.lastSample.TotalDroppedOnReceive), t.tags)

	// Sender-side payload ("packet") counters.
	telemetryCount("datadog.dogstatsd.client.packets_sent", int64(tlm.TotalPayloadsSent-t.lastSample.TotalPayloadsSent), t.tags)
	telemetryCount("datadog.dogstatsd.client.packets_dropped", int64(tlm.TotalPayloadsDropped-t.lastSample.TotalPayloadsDropped), t.tags)
	telemetryCount("datadog.dogstatsd.client.packets_dropped_queue", int64(tlm.TotalPayloadsDroppedQueueFull-t.lastSample.TotalPayloadsDroppedQueueFull), t.tags)
	telemetryCount("datadog.dogstatsd.client.packets_dropped_writer", int64(tlm.TotalPayloadsDroppedWriter-t.lastSample.TotalPayloadsDroppedWriter), t.tags)

	// Sender-side byte counters.
	telemetryCount("datadog.dogstatsd.client.bytes_dropped", int64(tlm.TotalBytesDropped-t.lastSample.TotalBytesDropped), t.tags)
	telemetryCount("datadog.dogstatsd.client.bytes_sent", int64(tlm.TotalBytesSent-t.lastSample.TotalBytesSent), t.tags)
	telemetryCount("datadog.dogstatsd.client.bytes_dropped_queue", int64(tlm.TotalBytesDroppedQueueFull-t.lastSample.TotalBytesDroppedQueueFull), t.tags)
	telemetryCount("datadog.dogstatsd.client.bytes_dropped_writer", int64(tlm.TotalBytesDroppedWriter-t.lastSample.TotalBytesDroppedWriter), t.tags)

	// Aggregator context counters, only meaningful when aggregation is on.
	if t.aggEnabled {
		telemetryCount("datadog.dogstatsd.client.aggregated_context", int64(tlm.AggregationNbContext-t.lastSample.AggregationNbContext), t.tags)
		telemetryCount("datadog.dogstatsd.client.aggregated_context_by_type", int64(tlm.AggregationNbContextGauge-t.lastSample.AggregationNbContextGauge), t.tagsByType[gauge])
		telemetryCount("datadog.dogstatsd.client.aggregated_context_by_type", int64(tlm.AggregationNbContextSet-t.lastSample.AggregationNbContextSet), t.tagsByType[set])
		telemetryCount("datadog.dogstatsd.client.aggregated_context_by_type", int64(tlm.AggregationNbContextCount-t.lastSample.AggregationNbContextCount), t.tagsByType[count])
		telemetryCount("datadog.dogstatsd.client.aggregated_context_by_type", int64(tlm.AggregationNbContextHistogram-t.lastSample.AggregationNbContextHistogram), t.tagsByType[histogram])
		telemetryCount("datadog.dogstatsd.client.aggregated_context_by_type", int64(tlm.AggregationNbContextDistribution-t.lastSample.AggregationNbContextDistribution), t.tagsByType[distribution])
		telemetryCount("datadog.dogstatsd.client.aggregated_context_by_type", int64(tlm.AggregationNbContextTiming-t.lastSample.AggregationNbContextTiming), t.tagsByType[timing])
	}

	// Remember this sample so the next flush sends only the delta.
	t.lastSample = tlm

	return m
}
0 | package statsd | |
1 | ||
2 | import ( | |
3 | "fmt" | |
4 | "io" | |
5 | "net" | |
6 | "sort" | |
7 | "strings" | |
8 | "testing" | |
9 | "time" | |
10 | ||
11 | "github.com/stretchr/testify/assert" | |
12 | "github.com/stretchr/testify/require" | |
13 | ) | |
14 | ||
15 | // | |
16 | // Most of the behavior of the telemetry is tested in the end_to_end_test.go file | |
17 | // | |
18 | ||
19 | func TestTelemetryCustomAddr(t *testing.T) { | |
20 | telAddr := "localhost:8764" | |
21 | ts, client := newClientAndTestServer(t, | |
22 | "udp", | |
23 | "localhost:8765", | |
24 | nil, | |
25 | WithTelemetryAddr(telAddr), | |
26 | WithNamespace("test_namespace"), | |
27 | ) | |
28 | ||
29 | udpAddr, err := net.ResolveUDPAddr("udp", telAddr) | |
30 | require.Nil(t, err, fmt.Sprintf("could not resolve udp '%s': %s", telAddr, err)) | |
31 | server, err := net.ListenUDP("udp", udpAddr) | |
32 | require.Nil(t, err, fmt.Sprintf("could not listen to UDP addr: %s", err)) | |
33 | defer server.Close() | |
34 | ||
35 | expectedResult := []string{ | |
36 | "datadog.dogstatsd.client.metrics:9|c|#client:go," + clientVersionTelemetryTag + ",client_transport:udp", | |
37 | "datadog.dogstatsd.client.metrics_by_type:1|c|#client:go," + clientVersionTelemetryTag + ",client_transport:udp,metrics_type:gauge", | |
38 | "datadog.dogstatsd.client.metrics_by_type:3|c|#client:go," + clientVersionTelemetryTag + ",client_transport:udp,metrics_type:count", | |
39 | "datadog.dogstatsd.client.metrics_by_type:1|c|#client:go," + clientVersionTelemetryTag + ",client_transport:udp,metrics_type:histogram", | |
40 | "datadog.dogstatsd.client.metrics_by_type:1|c|#client:go," + clientVersionTelemetryTag + ",client_transport:udp,metrics_type:distribution", | |
41 | "datadog.dogstatsd.client.metrics_by_type:1|c|#client:go," + clientVersionTelemetryTag + ",client_transport:udp,metrics_type:set", | |
42 | "datadog.dogstatsd.client.metrics_by_type:2|c|#client:go," + clientVersionTelemetryTag + ",client_transport:udp,metrics_type:timing", | |
43 | "datadog.dogstatsd.client.events:1|c|#client:go," + clientVersionTelemetryTag + ",client_transport:udp", | |
44 | "datadog.dogstatsd.client.service_checks:1|c|#client:go," + clientVersionTelemetryTag + ",client_transport:udp", | |
45 | "datadog.dogstatsd.client.metric_dropped_on_receive:0|c|#client:go," + clientVersionTelemetryTag + ",client_transport:udp", | |
46 | "datadog.dogstatsd.client.packets_sent:10|c|#client:go," + clientVersionTelemetryTag + ",client_transport:udp", | |
47 | "datadog.dogstatsd.client.bytes_sent:473|c|#client:go," + clientVersionTelemetryTag + ",client_transport:udp", | |
48 | "datadog.dogstatsd.client.packets_dropped:0|c|#client:go," + clientVersionTelemetryTag + ",client_transport:udp", | |
49 | "datadog.dogstatsd.client.bytes_dropped:0|c|#client:go," + clientVersionTelemetryTag + ",client_transport:udp", | |
50 | "datadog.dogstatsd.client.packets_dropped_queue:0|c|#client:go," + clientVersionTelemetryTag + ",client_transport:udp", | |
51 | "datadog.dogstatsd.client.bytes_dropped_queue:0|c|#client:go," + clientVersionTelemetryTag + ",client_transport:udp", | |
52 | "datadog.dogstatsd.client.packets_dropped_writer:0|c|#client:go," + clientVersionTelemetryTag + ",client_transport:udp", | |
53 | "datadog.dogstatsd.client.bytes_dropped_writer:0|c|#client:go," + clientVersionTelemetryTag + ",client_transport:udp", | |
54 | "datadog.dogstatsd.client.aggregated_context:5|c|#client:go," + clientVersionTelemetryTag + ",client_transport:udp", | |
55 | "datadog.dogstatsd.client.aggregated_context_by_type:0|c|#client:go," + clientVersionTelemetryTag + ",client_transport:udp,metrics_type:distribution", | |
56 | "datadog.dogstatsd.client.aggregated_context_by_type:0|c|#client:go," + clientVersionTelemetryTag + ",client_transport:udp,metrics_type:histogram", | |
57 | "datadog.dogstatsd.client.aggregated_context_by_type:0|c|#client:go," + clientVersionTelemetryTag + ",client_transport:udp,metrics_type:timing", | |
58 | "datadog.dogstatsd.client.aggregated_context_by_type:1|c|#client:go," + clientVersionTelemetryTag + ",client_transport:udp,metrics_type:gauge", | |
59 | "datadog.dogstatsd.client.aggregated_context_by_type:1|c|#client:go," + clientVersionTelemetryTag + ",client_transport:udp,metrics_type:set", | |
60 | "datadog.dogstatsd.client.aggregated_context_by_type:3|c|#client:go," + clientVersionTelemetryTag + ",client_transport:udp,metrics_type:count", | |
61 | } | |
62 | expectedSize := 0 | |
63 | for _, s := range expectedResult { | |
64 | expectedSize += len(s) | |
65 | } | |
66 | sort.Strings(expectedResult) | |
67 | ||
68 | readDone := make(chan struct{}) | |
69 | buffer := make([]byte, 10000) | |
70 | n := 0 | |
71 | go func() { | |
72 | n, _ = io.ReadAtLeast(server, buffer, expectedSize) | |
73 | close(readDone) | |
74 | }() | |
75 | ||
76 | ts.sendAllType(client) | |
77 | client.Flush() | |
78 | client.telemetryClient.sendTelemetry() | |
79 | ||
80 | select { | |
81 | case <-readDone: | |
82 | case <-time.After(2 * time.Second): | |
83 | require.Fail(t, "No data was flush on Close") | |
84 | } | |
85 | ||
86 | result := []string{} | |
87 | for _, s := range strings.Split(string(buffer[:n]), "\n") { | |
88 | if s != "" { | |
89 | result = append(result, s) | |
90 | } | |
91 | } | |
92 | sort.Strings(result) | |
93 | ||
94 | assert.Equal(t, expectedResult, result) | |
95 | } |
0 | package statsd | |
1 | ||
2 | import ( | |
3 | "fmt" | |
4 | "io" | |
5 | "net" | |
6 | "os" | |
7 | "sort" | |
8 | "strings" | |
9 | "sync" | |
10 | "testing" | |
11 | "time" | |
12 | ||
13 | "github.com/stretchr/testify/assert" | |
14 | "github.com/stretchr/testify/require" | |
15 | ) | |
16 | ||
// testTelemetryData mirrors the telemetry counters the client is expected to
// report; tests increment these fields as they send metrics and then compare
// against the telemetry actually received.
type testTelemetryData struct {
	// Per-metric-type counts of what was sent through the client.
	gauge         int
	count         int
	histogram     int
	distribution  int
	set           int
	timing        int
	event         int
	service_check int

	// Expected aggregator context counts (client-side aggregation).
	aggregated_context      int
	aggregated_gauge        int
	aggregated_set          int
	aggregated_count        int
	aggregated_histogram    int
	aggregated_distribution int
	aggregated_timing       int

	// Expected sender-side packet/byte counters.
	metric_dropped_on_receive int
	packets_sent              int
	packets_dropped           int
	packets_dropped_queue     int
	packets_dropped_writer    int
	bytes_sent                int
	bytes_dropped             int
	bytes_dropped_queue       int
	bytes_dropped_writer      int
}
45 | ||
// testServer acts as a fake server and keeps track of what was sent to a client. This allows end-to-end testing of the
// dogstatsd client.
type testServer struct {
	sync.Mutex

	conn     io.ReadCloser
	data     []string // individual metric lines received; guarded by the mutex
	errors   []string // read errors seen by the reader goroutine
	readData []string // raw payloads, one entry per socket read
	// NOTE(review): errors and readData are not consistently accessed under
	// the mutex — verify with -race.
	proto       string
	addr        string
	stopped     chan struct{} // closed by stop()
	tags        string        // comma-joined global tags expected on metrics
	namespace   string
	containerID string

	aggregation         bool // client-side aggregation enabled
	extendedAggregation bool
	telemetry           testTelemetryData
	telemetryEnabled    bool
}
67 | ||
68 | func newClientAndTestServer(t *testing.T, proto string, addr string, tags []string, options ...Option) (*testServer, *Client) { | |
69 | ||
70 | opt, err := resolveOptions(options) | |
71 | require.NoError(t, err) | |
72 | ||
73 | ts := &testServer{ | |
74 | proto: proto, | |
75 | data: []string{}, | |
76 | addr: addr, | |
77 | stopped: make(chan struct{}), | |
78 | aggregation: opt.aggregation, | |
79 | extendedAggregation: opt.extendedAggregation, | |
80 | telemetryEnabled: opt.telemetry, | |
81 | telemetry: testTelemetryData{}, | |
82 | namespace: opt.namespace, | |
83 | } | |
84 | ||
85 | if tags != nil { | |
86 | ts.tags = strings.Join(tags, ",") | |
87 | } | |
88 | ||
89 | switch proto { | |
90 | case "udp": | |
91 | udpAddr, err := net.ResolveUDPAddr("udp", addr) | |
92 | require.NoError(t, err) | |
93 | ||
94 | conn, err := net.ListenUDP("udp", udpAddr) | |
95 | require.NoError(t, err) | |
96 | ts.conn = conn | |
97 | case "uds": | |
98 | socketPath := addr[7:] | |
99 | address, err := net.ResolveUnixAddr("unixgram", socketPath) | |
100 | require.NoError(t, err) | |
101 | conn, err := net.ListenUnixgram("unixgram", address) | |
102 | require.NoError(t, err) | |
103 | err = os.Chmod(socketPath, 0722) | |
104 | require.NoError(t, err) | |
105 | ts.conn = conn | |
106 | default: | |
107 | require.FailNow(t, "unknown proto '%s'", proto) | |
108 | } | |
109 | ||
110 | client, err := New(addr, options...) | |
111 | require.NoError(t, err) | |
112 | ||
113 | ts.containerID = getContainerID() | |
114 | ||
115 | go ts.start() | |
116 | return ts, client | |
117 | } | |
118 | ||
119 | func (ts *testServer) start() { | |
120 | buffer := make([]byte, 2048) | |
121 | for { | |
122 | n, err := ts.conn.Read(buffer) | |
123 | if err != nil { | |
124 | // connection has been closed | |
125 | if strings.HasSuffix(err.Error(), " use of closed network connection") { | |
126 | return | |
127 | } | |
128 | ts.errors = append(ts.errors, err.Error()) | |
129 | continue | |
130 | } | |
131 | readData := string(buffer[:n]) | |
132 | if n != 0 { | |
133 | ts.readData = append(ts.readData, readData) | |
134 | } | |
135 | ||
136 | payload := strings.Split(readData, "\n") | |
137 | ts.Lock() | |
138 | for _, s := range payload { | |
139 | if s != "" { | |
140 | ts.data = append(ts.data, s) | |
141 | } | |
142 | } | |
143 | ts.Unlock() | |
144 | } | |
145 | } | |
146 | ||
147 | func (ts *testServer) assertMetric(t *testing.T, received []string, expected []string) { | |
148 | sort.Strings(expected) | |
149 | sort.Strings(received) | |
150 | ||
151 | assert.Equal(t, len(expected), len(received), fmt.Sprintf("expected %d metrics but got actual %d", len(expected), len(received))) | |
152 | ||
153 | if os.Getenv("PRINT_METRICS") != "" && len(expected) != len(received) { | |
154 | fmt.Printf("received:\n") | |
155 | for _, m := range received { | |
156 | fmt.Printf(" %s\n", m) | |
157 | } | |
158 | ||
159 | fmt.Printf("\nexpected:\n") | |
160 | for _, m := range expected { | |
161 | fmt.Printf(" %s\n", m) | |
162 | } | |
163 | } | |
164 | ||
165 | min := len(received) | |
166 | if len(expected) < min { | |
167 | min = len(expected) | |
168 | } | |
169 | ||
170 | for idx := 0; idx < min; idx++ { | |
171 | if strings.HasPrefix(expected[idx], "datadog.dogstatsd.client.bytes_sent") { | |
172 | continue | |
173 | } | |
174 | if strings.HasPrefix(expected[idx], "datadog.dogstatsd.client.packets_sent") { | |
175 | continue | |
176 | } | |
177 | assert.Equal(t, expected[idx], received[idx]) | |
178 | } | |
179 | } | |
180 | ||
// stop closes the server connection (which terminates the reader loop in
// start()) and signals completion on the stopped channel.
func (ts *testServer) stop() {
	ts.conn.Close()
	close(ts.stopped)
}
185 | ||
186 | func (ts *testServer) wait(t *testing.T, nbExpectedMetric int, timeout int, waitForTelemetry bool) { | |
187 | start := time.Now() | |
188 | for { | |
189 | ts.Lock() | |
190 | if nbExpectedMetric <= len(ts.data) { | |
191 | ts.Unlock() | |
192 | return | |
193 | } else if time.Now().Sub(start) > time.Duration(timeout)*time.Second { | |
194 | ts.Unlock() | |
195 | require.FailNowf(t, "timeout while waiting for metrics", "%d metrics expected but only %d were received after %s\n", nbExpectedMetric, len(ts.data), time.Now().Sub(start)) | |
196 | return | |
197 | } | |
198 | ts.Unlock() | |
199 | time.Sleep(100 * time.Millisecond) | |
200 | } | |
201 | } | |
202 | ||
203 | func (ts *testServer) assertNbRead(t *testing.T, expectedNbRead int) { | |
204 | errorMsg := "" | |
205 | for idx, s := range ts.readData { | |
206 | errorMsg += fmt.Sprintf("read %d:\n%s\n\n", idx, s) | |
207 | } | |
208 | assert.Equal(t, expectedNbRead, len(ts.readData), "expected %d read but got %d:\n%s", expectedNbRead, len(ts.readData), errorMsg) | |
209 | } | |
210 | ||
// meta helper: take a list of expected metrics, wait for them to arrive,
// fold in the expected telemetry when enabled, then shut everything down and
// assert on what was received.
func (ts *testServer) assert(t *testing.T, client *Client, expectedMetrics []string) {
	// First wait for all the metrics to be sent. This is important when using channel mode + aggregation as we
	// don't know when all the metrics will be fully aggregated
	ts.wait(t, len(expectedMetrics), 5, false)

	if ts.telemetryEnabled {
		// Now that all the metrics have been handled we can flush the telemetry before the default interval of
		// 10s
		client.telemetryClient.sendTelemetry()
		expectedMetrics = append(expectedMetrics, ts.getTelemetry()...)
		// Wait for the telemetry to arrive
		ts.wait(t, len(expectedMetrics), 5, true)
	}

	client.Close()
	ts.stop()
	received := ts.getData()
	ts.assertMetric(t, received, expectedMetrics)
	assert.Empty(t, ts.errors)
}
232 | ||
// assertContainerID checks that the container ID detected when the server was
// created matches the expected value.
func (ts *testServer) assertContainerID(t *testing.T, expected string) {
	assert.Equal(t, expected, ts.containerID)
}
236 | ||
// meta helper: most tests send every metric type through the client and then
// assert on what the server received.
func (ts *testServer) sendAllAndAssert(t *testing.T, client *Client) {
	expectedMetrics := ts.sendAllType(client)
	ts.assert(t, client, expectedMetrics)
}
242 | ||
243 | func (ts *testServer) getData() []string { | |
244 | ts.Lock() | |
245 | defer ts.Unlock() | |
246 | ||
247 | data := make([]string, len(ts.data)) | |
248 | copy(data, ts.data) | |
249 | return data | |
250 | } | |
251 | ||
// getTelemetry renders the expected telemetry metrics (in dogstatsd wire
// format) from the counters accumulated in ts.telemetry. Aggregator context
// metrics are included only when aggregation is enabled.
func (ts *testServer) getTelemetry() []string {
	ts.Lock()
	defer ts.Unlock()

	tags := ts.getFinalTelemetryTags()

	// Derived total, mirroring telemetryClient.getTelemetry().
	totalMetrics := ts.telemetry.gauge +
		ts.telemetry.count +
		ts.telemetry.histogram +
		ts.telemetry.distribution +
		ts.telemetry.set +
		ts.telemetry.timing

	containerID := ts.getContainerID()

	metrics := []string{
		fmt.Sprintf("datadog.dogstatsd.client.metrics:%d|c%s", totalMetrics, tags) + containerID,
		fmt.Sprintf("datadog.dogstatsd.client.events:%d|c%s", ts.telemetry.event, tags) + containerID,
		fmt.Sprintf("datadog.dogstatsd.client.service_checks:%d|c%s", ts.telemetry.service_check, tags) + containerID,
		fmt.Sprintf("datadog.dogstatsd.client.metric_dropped_on_receive:%d|c%s", ts.telemetry.metric_dropped_on_receive, tags) + containerID,
		fmt.Sprintf("datadog.dogstatsd.client.packets_sent:%d|c%s", ts.telemetry.packets_sent, tags) + containerID,
		fmt.Sprintf("datadog.dogstatsd.client.packets_dropped:%d|c%s", ts.telemetry.packets_dropped, tags) + containerID,
		fmt.Sprintf("datadog.dogstatsd.client.packets_dropped_queue:%d|c%s", ts.telemetry.packets_dropped_queue, tags) + containerID,
		fmt.Sprintf("datadog.dogstatsd.client.packets_dropped_writer:%d|c%s", ts.telemetry.packets_dropped_writer, tags) + containerID,
		fmt.Sprintf("datadog.dogstatsd.client.bytes_sent:%d|c%s", ts.telemetry.bytes_sent, tags) + containerID,
		fmt.Sprintf("datadog.dogstatsd.client.bytes_dropped:%d|c%s", ts.telemetry.bytes_dropped, tags) + containerID,
		fmt.Sprintf("datadog.dogstatsd.client.bytes_dropped_queue:%d|c%s", ts.telemetry.bytes_dropped_queue, tags) + containerID,
		fmt.Sprintf("datadog.dogstatsd.client.bytes_dropped_writer:%d|c%s", ts.telemetry.bytes_dropped_writer, tags) + containerID,
		fmt.Sprintf("datadog.dogstatsd.client.metrics_by_type:%d|c%s,metrics_type:gauge", ts.telemetry.gauge, tags) + containerID,
		fmt.Sprintf("datadog.dogstatsd.client.metrics_by_type:%d|c%s,metrics_type:count", ts.telemetry.count, tags) + containerID,
		fmt.Sprintf("datadog.dogstatsd.client.metrics_by_type:%d|c%s,metrics_type:histogram", ts.telemetry.histogram, tags) + containerID,
		fmt.Sprintf("datadog.dogstatsd.client.metrics_by_type:%d|c%s,metrics_type:distribution", ts.telemetry.distribution, tags) + containerID,
		fmt.Sprintf("datadog.dogstatsd.client.metrics_by_type:%d|c%s,metrics_type:set", ts.telemetry.set, tags) + containerID,
		fmt.Sprintf("datadog.dogstatsd.client.metrics_by_type:%d|c%s,metrics_type:timing", ts.telemetry.timing, tags) + containerID,
	}

	if ts.aggregation {
		metrics = append(metrics, []string{
			fmt.Sprintf("datadog.dogstatsd.client.aggregated_context:%d|c%s", ts.telemetry.aggregated_context, tags) + containerID,
			fmt.Sprintf("datadog.dogstatsd.client.aggregated_context_by_type:%d|c%s,metrics_type:gauge", ts.telemetry.aggregated_gauge, tags) + containerID,
			fmt.Sprintf("datadog.dogstatsd.client.aggregated_context_by_type:%d|c%s,metrics_type:count", ts.telemetry.aggregated_count, tags) + containerID,
			fmt.Sprintf("datadog.dogstatsd.client.aggregated_context_by_type:%d|c%s,metrics_type:set", ts.telemetry.aggregated_set, tags) + containerID,
			fmt.Sprintf("datadog.dogstatsd.client.aggregated_context_by_type:%d|c%s,metrics_type:distribution", ts.telemetry.aggregated_distribution, tags) + containerID,
			fmt.Sprintf("datadog.dogstatsd.client.aggregated_context_by_type:%d|c%s,metrics_type:histogram", ts.telemetry.aggregated_histogram, tags) + containerID,
			fmt.Sprintf("datadog.dogstatsd.client.aggregated_context_by_type:%d|c%s,metrics_type:timing", ts.telemetry.aggregated_timing, tags) + containerID,
		}...)
	}
	return metrics
}
301 | ||
302 | // Default testing scenarios | |
303 | ||
304 | func (ts *testServer) getFinalTags(t ...string) string { | |
305 | if t == nil && ts.tags == "" { | |
306 | return "" | |
307 | } | |
308 | ||
309 | res := "|#" | |
310 | if ts.tags != "" { | |
311 | res += ts.tags | |
312 | } | |
313 | ||
314 | if t != nil { | |
315 | if ts.tags != "" { | |
316 | res += "," | |
317 | } | |
318 | res += strings.Join(t, ",") | |
319 | } | |
320 | return res | |
321 | } | |
322 | ||
// getContainerID returns the "|c:<id>" suffix expected on every metric, or ""
// when no container ID was detected at server creation.
func (ts *testServer) getContainerID() string {
	if ts.containerID == "" {
		return ""
	}
	return "|c:" + ts.containerID
}
329 | ||
330 | func (ts *testServer) getFinalTelemetryTags() string { | |
331 | base := "|#" | |
332 | if ts.tags != "" { | |
333 | base += ts.tags + "," | |
334 | } | |
335 | return base + strings.Join( | |
336 | []string{clientTelemetryTag, clientVersionTelemetryTag, "client_transport:" + ts.proto}, | |
337 | ",") | |
338 | } | |
339 | ||
// sendAllMetrics emits one metric of every type through the client,
// records the matching telemetry counter increments on the test server,
// and returns the exact datagrams the server is expected to receive.
func (ts *testServer) sendAllMetrics(c *Client) []string {
	tags := []string{"custom:1", "custom:2"}
	c.Gauge("Gauge", 1, tags, 1)
	c.Count("Count", 2, tags, 1)
	c.Histogram("Histogram", 3, tags, 1)
	c.Distribution("Distribution", 4, tags, 1)
	c.Decr("Decr", tags, 1)
	c.Incr("Incr", tags, 1)
	c.Set("Set", "value", tags, 1)
	c.Timing("Timing", 5*time.Second, tags, 1)
	c.TimeInMilliseconds("TimeInMilliseconds", 6, tags, 1)

	// Telemetry by metric type: Count/Decr/Incr are all counts (3), and
	// Timing/TimeInMilliseconds both count as timings (2).
	ts.telemetry.gauge += 1
	ts.telemetry.histogram += 1
	ts.telemetry.distribution += 1
	ts.telemetry.count += 3
	ts.telemetry.set += 1
	ts.telemetry.timing += 2

	if ts.aggregation {
		// Basic aggregation merges gauge/count/set contexts: 5 contexts
		// total (Gauge, Count, Decr, Incr, Set).
		ts.telemetry.aggregated_context += 5
		ts.telemetry.aggregated_gauge += 1
		ts.telemetry.aggregated_count += 3
		ts.telemetry.aggregated_set += 1
	}
	if ts.extendedAggregation {
		// Extended aggregation also buckets histogram/distribution/timing:
		// 4 additional contexts.
		ts.telemetry.aggregated_context += 4
		ts.telemetry.aggregated_histogram += 1
		ts.telemetry.aggregated_distribution += 1
		ts.telemetry.aggregated_timing += 2
	}

	finalTags := ts.getFinalTags(tags...)
	containerID := ts.getContainerID()

	return []string{
		ts.namespace + "Gauge:1|g" + finalTags + containerID,
		ts.namespace + "Count:2|c" + finalTags + containerID,
		ts.namespace + "Histogram:3|h" + finalTags + containerID,
		ts.namespace + "Distribution:4|d" + finalTags + containerID,
		ts.namespace + "Decr:-1|c" + finalTags + containerID,
		ts.namespace + "Incr:1|c" + finalTags + containerID,
		ts.namespace + "Set:value|s" + finalTags + containerID,
		ts.namespace + "Timing:5000.000000|ms" + finalTags + containerID,
		ts.namespace + "TimeInMilliseconds:6.000000|ms" + finalTags + containerID,
	}
}
387 | ||
// sendAllMetricsForBasicAggregation sends every metric type, duplicating
// the ones a basic aggregator merges (gauge, count, set), and returns the
// datagrams expected after aggregation: last gauge wins, counts sum, sets
// dedupe, while histogram/distribution/timing pass through untouched.
func (ts *testServer) sendAllMetricsForBasicAggregation(c *Client) []string {
	tags := []string{"custom:1", "custom:2"}
	c.Gauge("Gauge", 1, tags, 1)
	c.Gauge("Gauge", 2, tags, 1)
	c.Count("Count", 2, tags, 1)
	c.Count("Count", 2, tags, 1)
	c.Histogram("Histogram", 3, tags, 1)
	c.Distribution("Distribution", 4, tags, 1)
	c.Decr("Decr", tags, 1)
	c.Decr("Decr", tags, 1)
	c.Incr("Incr", tags, 1)
	c.Incr("Incr", tags, 1)
	c.Set("Set", "value", tags, 1)
	c.Set("Set", "value", tags, 1)
	c.Timing("Timing", 5*time.Second, tags, 1)
	c.TimeInMilliseconds("TimeInMilliseconds", 6, tags, 1)

	// Telemetry counts every client call, not the aggregated output:
	// Count/Decr/Incr are 6 counts, Timing+TimeInMilliseconds are 2 timings.
	ts.telemetry.gauge += 2
	ts.telemetry.histogram += 1
	ts.telemetry.distribution += 1
	ts.telemetry.count += 6
	ts.telemetry.set += 2
	ts.telemetry.timing += 2

	if ts.aggregation {
		// 5 distinct aggregated contexts: Gauge, Count, Decr, Incr, Set.
		ts.telemetry.aggregated_context += 5
		ts.telemetry.aggregated_gauge += 1
		ts.telemetry.aggregated_count += 3
		ts.telemetry.aggregated_set += 1
	}
	if ts.extendedAggregation {
		// 4 more contexts: Histogram, Distribution, Timing, TimeInMilliseconds.
		ts.telemetry.aggregated_context += 4
		ts.telemetry.aggregated_histogram += 1
		ts.telemetry.aggregated_distribution += 1
		ts.telemetry.aggregated_timing += 2
	}

	finalTags := ts.getFinalTags(tags...)
	containerID := ts.getContainerID()

	return []string{
		ts.namespace + "Gauge:2|g" + finalTags + containerID,
		ts.namespace + "Count:4|c" + finalTags + containerID,
		ts.namespace + "Histogram:3|h" + finalTags + containerID,
		ts.namespace + "Distribution:4|d" + finalTags + containerID,
		ts.namespace + "Decr:-2|c" + finalTags + containerID,
		ts.namespace + "Incr:2|c" + finalTags + containerID,
		ts.namespace + "Set:value|s" + finalTags + containerID,
		ts.namespace + "Timing:5000.000000|ms" + finalTags + containerID,
		ts.namespace + "TimeInMilliseconds:6.000000|ms" + finalTags + containerID,
	}
}
440 | ||
// sendAllMetricsForExtendedAggregation sends every metric type twice and
// returns the datagrams expected when extended aggregation is enabled:
// gauge/count/set merge to a single value, while histogram, distribution
// and timing values are packed into one multi-value datagram ("v1:v2").
func (ts *testServer) sendAllMetricsForExtendedAggregation(c *Client) []string {
	tags := []string{"custom:1", "custom:2"}
	c.Gauge("Gauge", 1, tags, 1)
	c.Gauge("Gauge", 2, tags, 1)
	c.Count("Count", 2, tags, 1)
	c.Count("Count", 2, tags, 1)
	c.Histogram("Histogram", 3, tags, 1)
	c.Histogram("Histogram", 3, tags, 1)
	c.Distribution("Distribution", 4, tags, 1)
	c.Distribution("Distribution", 4, tags, 1)
	c.Decr("Decr", tags, 1)
	c.Decr("Decr", tags, 1)
	c.Incr("Incr", tags, 1)
	c.Incr("Incr", tags, 1)
	c.Set("Set", "value", tags, 1)
	c.Set("Set", "value", tags, 1)
	c.Timing("Timing", 5*time.Second, tags, 1)
	c.Timing("Timing", 5*time.Second, tags, 1)
	c.TimeInMilliseconds("TimeInMilliseconds", 6, tags, 1)
	c.TimeInMilliseconds("TimeInMilliseconds", 6, tags, 1)

	// Telemetry counts every client call: 6 counts (Count x2, Decr x2,
	// Incr x2) and 4 timings (Timing x2, TimeInMilliseconds x2).
	ts.telemetry.gauge += 2
	ts.telemetry.histogram += 2
	ts.telemetry.distribution += 2
	ts.telemetry.count += 6
	ts.telemetry.set += 2
	ts.telemetry.timing += 4

	if ts.aggregation {
		// 5 distinct contexts: Gauge, Count, Decr, Incr, Set.
		ts.telemetry.aggregated_context += 5
		ts.telemetry.aggregated_gauge += 1
		ts.telemetry.aggregated_count += 3
		ts.telemetry.aggregated_set += 1
	}
	if ts.extendedAggregation {
		// 4 more contexts: Histogram, Distribution, Timing, TimeInMilliseconds.
		ts.telemetry.aggregated_context += 4
		ts.telemetry.aggregated_histogram += 1
		ts.telemetry.aggregated_distribution += 1
		ts.telemetry.aggregated_timing += 2
	}

	finalTags := ts.getFinalTags(tags...)
	containerID := ts.getContainerID()

	return []string{
		ts.namespace + "Gauge:2|g" + finalTags + containerID,
		ts.namespace + "Count:4|c" + finalTags + containerID,
		ts.namespace + "Histogram:3:3|h" + finalTags + containerID,
		ts.namespace + "Distribution:4:4|d" + finalTags + containerID,
		ts.namespace + "Decr:-2|c" + finalTags + containerID,
		ts.namespace + "Incr:2|c" + finalTags + containerID,
		ts.namespace + "Set:value|s" + finalTags + containerID,
		ts.namespace + "Timing:5000.000000:5000.000000|ms" + finalTags + containerID,
		ts.namespace + "TimeInMilliseconds:6.000000:6.000000|ms" + finalTags + containerID,
	}
}
497 | ||
// sendAllType sends every metric type plus an event and a service check,
// and returns all the datagrams the server should receive.
func (ts *testServer) sendAllType(c *Client) []string {
	res := ts.sendAllMetrics(c)
	c.SimpleEvent("hello", "world")
	c.SimpleServiceCheck("hello", Warn)

	ts.telemetry.event += 1
	ts.telemetry.service_check += 1

	// Events and service checks carry only the server-wide tags (no
	// per-call tags were passed above).
	finalTags := ts.getFinalTags()
	containerID := ts.getContainerID()

	return append(
		res,
		"_e{5,5}:hello|world"+finalTags+containerID,
		"_sc|hello|1"+finalTags+containerID,
	)
}
515 | ||
// sendBasicAggregationMetrics sends duplicated gauge/count/set metrics and
// returns the datagrams expected after client-side aggregation: the last
// gauge wins, counts are summed, and identical set values are deduped.
func (ts *testServer) sendBasicAggregationMetrics(client *Client) []string {
	tags := []string{"custom:1", "custom:2"}
	client.Gauge("gauge", 1, tags, 1)
	client.Gauge("gauge", 21, tags, 1)
	client.Count("count", 1, tags, 1)
	client.Count("count", 3, tags, 1)
	client.Set("set", "my_id", tags, 1)
	client.Set("set", "my_id", tags, 1)

	finalTags := ts.getFinalTags(tags...)
	containerID := ts.getContainerID()
	return []string{
		ts.namespace + "set:my_id|s" + finalTags + containerID,
		ts.namespace + "gauge:21|g" + finalTags + containerID,
		ts.namespace + "count:4|c" + finalTags + containerID,
	}
}
533 | ||
// sendExtendedBasicAggregationMetrics sends one metric of each type that
// an extended aggregator handles and returns the expected datagrams
// (single values, so aggregation does not change their payloads).
func (ts *testServer) sendExtendedBasicAggregationMetrics(client *Client) []string {
	tags := []string{"custom:1", "custom:2"}
	client.Gauge("gauge", 1, tags, 1)
	client.Count("count", 2, tags, 1)
	client.Set("set", "3_id", tags, 1)
	client.Histogram("histo", 4, tags, 1)
	client.Distribution("distro", 5, tags, 1)
	client.Timing("timing", 6*time.Second, tags, 1)

	finalTags := ts.getFinalTags(tags...)
	containerID := ts.getContainerID()
	return []string{
		ts.namespace + "gauge:1|g" + finalTags + containerID,
		ts.namespace + "count:2|c" + finalTags + containerID,
		ts.namespace + "set:3_id|s" + finalTags + containerID,
		ts.namespace + "histo:4|h" + finalTags + containerID,
		ts.namespace + "distro:5|d" + finalTags + containerID,
		ts.namespace + "timing:6000.000000|ms" + finalTags + containerID,
	}
}
554 | ||
555 | func patchContainerID(id string) { containerID = id } | |
556 | ||
// resetContainerID clears the cached container ID and re-arms the
// package-level sync.Once so the next lookup runs the detection again.
func resetContainerID() {
	containerID = ""
	initOnce = sync.Once{}
}
0 | 0 | package statsd |
1 | 1 | |
2 | 2 | import ( |
3 | "errors" | |
4 | 3 | "net" |
5 | 4 | "time" |
6 | 5 | ) |
11 | 10 | } |
12 | 11 | |
13 | 12 | // New returns a pointer to a new udpWriter given an addr in the format "hostname:port". |
14 | func newUDPWriter(addr string) (*udpWriter, error) { | |
13 | func newUDPWriter(addr string, _ time.Duration) (*udpWriter, error) { | |
15 | 14 | udpAddr, err := net.ResolveUDPAddr("udp", addr) |
16 | 15 | if err != nil { |
17 | 16 | return nil, err |
24 | 23 | return writer, nil |
25 | 24 | } |
26 | 25 | |
27 | // SetWriteTimeout is not needed for UDP, returns error | |
28 | func (w *udpWriter) SetWriteTimeout(d time.Duration) error { | |
29 | return errors.New("SetWriteTimeout: not supported for UDP connections") | |
30 | } | |
31 | ||
32 | 26 | // Write data to the UDP connection with no error handling |
33 | 27 | func (w *udpWriter) Write(data []byte) (int, error) { |
34 | 28 | return w.conn.Write(data) |
0 | // +build !windows | |
1 | ||
0 | 2 | package statsd |
1 | 3 | |
2 | 4 | import ( |
3 | 5 | "net" |
6 | "sync" | |
4 | 7 | "time" |
5 | 8 | ) |
6 | ||
7 | /* | |
8 | UDSTimeout holds the default timeout for UDS socket writes, as they can get | |
9 | blocking when the receiving buffer is full. | |
10 | */ | |
11 | const defaultUDSTimeout = 1 * time.Millisecond | |
12 | 9 | |
13 | 10 | // udsWriter is an internal class wrapping around management of UDS connection |
14 | 11 | type udsWriter struct { |
18 | 15 | conn net.Conn |
19 | 16 | // write timeout |
20 | 17 | writeTimeout time.Duration |
18 | sync.RWMutex // used to lock conn / writer can replace it | |
21 | 19 | } |
22 | 20 | |
23 | // New returns a pointer to a new udsWriter given a socket file path as addr. | |
24 | func newUdsWriter(addr string) (*udsWriter, error) { | |
21 | // newUDSWriter returns a pointer to a new udsWriter given a socket file path as addr. | |
22 | func newUDSWriter(addr string, writeTimeout time.Duration) (*udsWriter, error) { | |
25 | 23 | udsAddr, err := net.ResolveUnixAddr("unixgram", addr) |
26 | 24 | if err != nil { |
27 | 25 | return nil, err |
28 | 26 | } |
29 | 27 | // Defer connection to first Write |
30 | writer := &udsWriter{addr: udsAddr, conn: nil, writeTimeout: defaultUDSTimeout} | |
28 | writer := &udsWriter{addr: udsAddr, conn: nil, writeTimeout: writeTimeout} | |
31 | 29 | return writer, nil |
32 | } | |
33 | ||
34 | // SetWriteTimeout allows the user to set a custom write timeout | |
35 | func (w *udsWriter) SetWriteTimeout(d time.Duration) error { | |
36 | w.writeTimeout = d | |
37 | return nil | |
38 | 30 | } |
39 | 31 | |
40 | 32 | // Write data to the UDS connection with write timeout and minimal error handling: |
41 | 33 | // create the connection if nil, and destroy it if the statsd server has disconnected |
42 | 34 | func (w *udsWriter) Write(data []byte) (int, error) { |
43 | // Try connecting (first packet or connection lost) | |
44 | if w.conn == nil { | |
45 | conn, err := net.Dial(w.addr.Network(), w.addr.String()) | |
46 | if err != nil { | |
47 | return 0, err | |
48 | } | |
49 | w.conn = conn | |
35 | conn, err := w.ensureConnection() | |
36 | if err != nil { | |
37 | return 0, err | |
50 | 38 | } |
51 | w.conn.SetWriteDeadline(time.Now().Add(w.writeTimeout)) | |
52 | n, e := w.conn.Write(data) | |
53 | if e != nil { | |
39 | ||
40 | conn.SetWriteDeadline(time.Now().Add(w.writeTimeout)) | |
41 | n, e := conn.Write(data) | |
42 | ||
43 | if err, isNetworkErr := e.(net.Error); err != nil && (!isNetworkErr || !err.Temporary()) { | |
54 | 44 | // Statsd server disconnected, retry connecting at next packet |
55 | w.conn = nil | |
45 | w.unsetConnection() | |
56 | 46 | return 0, e |
57 | 47 | } |
58 | 48 | return n, e |
64 | 54 | } |
65 | 55 | return nil |
66 | 56 | } |
57 | ||
58 | func (w *udsWriter) ensureConnection() (net.Conn, error) { | |
59 | // Check if we've already got a socket we can use | |
60 | w.RLock() | |
61 | currentConn := w.conn | |
62 | w.RUnlock() | |
63 | ||
64 | if currentConn != nil { | |
65 | return currentConn, nil | |
66 | } | |
67 | ||
68 | // Looks like we might need to connect - try again with write locking. | |
69 | w.Lock() | |
70 | defer w.Unlock() | |
71 | if w.conn != nil { | |
72 | return w.conn, nil | |
73 | } | |
74 | ||
75 | newConn, err := net.Dial(w.addr.Network(), w.addr.String()) | |
76 | if err != nil { | |
77 | return nil, err | |
78 | } | |
79 | w.conn = newConn | |
80 | return newConn, nil | |
81 | } | |
82 | ||
83 | func (w *udsWriter) unsetConnection() { | |
84 | w.Lock() | |
85 | defer w.Unlock() | |
86 | w.conn = nil | |
87 | } |
0 | // +build !windows | |
1 | ||
2 | package statsd | |
3 | ||
4 | import ( | |
5 | "fmt" | |
6 | "math/rand" | |
7 | "net" | |
8 | "os" | |
9 | "testing" | |
10 | "time" | |
11 | ||
12 | "github.com/stretchr/testify/assert" | |
13 | "github.com/stretchr/testify/require" | |
14 | ) | |
15 | ||
// Seed the global PRNG so each test run picks distinct socket paths.
func init() {
	rand.Seed(time.Now().UnixNano())
}
19 | ||
20 | func TestNewUDSWriter(t *testing.T) { | |
21 | w, err := newUDSWriter("/tmp/test.socket", 100*time.Millisecond) | |
22 | assert.NotNil(t, w) | |
23 | assert.NoError(t, err) | |
24 | } | |
25 | ||
// TestUDSWrite verifies that payloads written through the UDS writer reach
// a listening unixgram socket; the first Write also implicitly dials.
func TestUDSWrite(t *testing.T) {
	// Random socket path so parallel/repeated runs do not collide.
	socketPath := fmt.Sprintf("/tmp/dsd_%d.socket", rand.Int())
	defer os.Remove(socketPath)

	address, err := net.ResolveUnixAddr("unixgram", socketPath)
	require.NoError(t, err)
	conn, err := net.ListenUnixgram("unixgram", address)
	require.NoError(t, err)
	// World-writable so the writer can send regardless of umask.
	err = os.Chmod(socketPath, 0722)
	require.NoError(t, err)

	w, err := newUDSWriter(socketPath, 100*time.Millisecond)
	require.Nil(t, err)
	require.NotNil(t, w)

	// test 2 Write: the first one should setup the connection
	for i := 0; i < 2; i++ {
		n, err := w.Write([]byte("some data"))
		require.NoError(t, err)
		assert.Equal(t, 9, n)

		buffer := make([]byte, 100)
		n, err = conn.Read(buffer)
		require.NoError(t, err)
		assert.Equal(t, "some data", string(buffer[:n]))
	}
}
53 | ||
// TestUDSWriteUnsetConnection verifies the writer transparently redials
// after its cached connection is dropped between writes.
func TestUDSWriteUnsetConnection(t *testing.T) {
	// Random socket path so parallel/repeated runs do not collide.
	socketPath := fmt.Sprintf("/tmp/dsd_%d.socket", rand.Int())
	defer os.Remove(socketPath)

	address, err := net.ResolveUnixAddr("unixgram", socketPath)
	require.NoError(t, err)
	conn, err := net.ListenUnixgram("unixgram", address)
	require.NoError(t, err)
	// World-writable so the writer can send regardless of umask.
	err = os.Chmod(socketPath, 0722)
	require.NoError(t, err)

	w, err := newUDSWriter(socketPath, 100*time.Millisecond)
	require.Nil(t, err)
	require.NotNil(t, w)

	// test 2 Write: the first one should setup the connection
	for i := 0; i < 2; i++ {
		n, err := w.Write([]byte("some data"))
		require.NoError(t, err)
		assert.Equal(t, 9, n)

		buffer := make([]byte, 100)
		n, err = conn.Read(buffer)
		require.NoError(t, err)
		assert.Equal(t, "some data", string(buffer[:n]))

		// Unset connection for the next Read
		w.unsetConnection()
	}
}
0 | // +build windows | |
1 | ||
2 | package statsd | |
3 | ||
4 | import ( | |
5 | "fmt" | |
6 | "io" | |
7 | "time" | |
8 | ) | |
9 | ||
10 | // newUDSWriter is disabled on Windows as Unix sockets are not available. | |
11 | func newUDSWriter(_ string, _ time.Duration) (io.WriteCloser, error) { | |
12 | return nil, fmt.Errorf("Unix socket is not available on Windows") | |
13 | } |
0 | package statsd | |
1 | ||
2 | import ( | |
3 | "math/rand" | |
4 | "sync" | |
5 | ) | |
6 | ||
7 | func shouldSample(rate float64, r *rand.Rand, lock *sync.Mutex) bool { | |
8 | if rate >= 1 { | |
9 | return true | |
10 | } | |
11 | // sources created by rand.NewSource() (ie. w.random) are not thread safe. | |
12 | // TODO: use defer once the lowest Go version we support is 1.14 (defer | |
13 | // has an overhead before that). | |
14 | lock.Lock() | |
15 | if r.Float64() > rate { | |
16 | lock.Unlock() | |
17 | return false | |
18 | } | |
19 | lock.Unlock() | |
20 | return true | |
21 | ||
22 | } |
0 | package statsd | |
1 | ||
2 | import ( | |
3 | "math/rand" | |
4 | "sync" | |
5 | "time" | |
6 | ) | |
7 | ||
// worker serializes metrics into their statsd wire format inside a buffer
// and hands full buffers over to the sender.
type worker struct {
	pool       *bufferPool   // pool of reusable buffers
	buffer     *statsdBuffer // buffer currently being filled; guarded by the embedded Mutex
	sender     *sender       // flushed buffers are queued here for network writes
	random     *rand.Rand    // per-worker PRNG used for sampling decisions
	randomLock sync.Mutex    // guards random: rand.NewSource sources are not thread safe
	sync.Mutex               // guards buffer

	inputMetrics chan metric   // optional input channel, drained by pullMetric
	stop         chan struct{} // receives a value to make pullMetric exit
}
19 | ||
20 | func newWorker(pool *bufferPool, sender *sender) *worker { | |
21 | // Each worker uses its own random source and random lock to prevent | |
22 | // workers in separate goroutines from contending for the lock on the | |
23 | // "math/rand" package-global random source (e.g. calls like | |
24 | // "rand.Float64()" must acquire a shared lock to get the next | |
25 | // pseudorandom number). | |
26 | // Note that calling "time.Now().UnixNano()" repeatedly quickly may return | |
27 | // very similar values. That's fine for seeding the worker-specific random | |
28 | // source because we just need an evenly distributed stream of float values. | |
29 | // Do not use this random source for cryptographic randomness. | |
30 | random := rand.New(rand.NewSource(time.Now().UnixNano())) | |
31 | return &worker{ | |
32 | pool: pool, | |
33 | sender: sender, | |
34 | buffer: pool.borrowBuffer(), | |
35 | random: random, | |
36 | stop: make(chan struct{}), | |
37 | } | |
38 | } | |
39 | ||
// startReceivingMetric switches the worker to channel mode: metrics are
// queued on inputMetrics and drained by a dedicated goroutine.
func (w *worker) startReceivingMetric(bufferSize int) {
	w.inputMetrics = make(chan metric, bufferSize)
	go w.pullMetric()
}
44 | ||
// stopReceivingMetric signals the pullMetric goroutine to exit; the send
// blocks until the goroutine consumes the signal.
func (w *worker) stopReceivingMetric() {
	w.stop <- struct{}{}
}
48 | ||
// pullMetric processes metrics from inputMetrics until a stop signal
// arrives. Runs as a goroutine started by startReceivingMetric.
func (w *worker) pullMetric() {
	for {
		select {
		case m := <-w.inputMetrics:
			w.processMetric(m)
		case <-w.stop:
			return
		}
	}
}
59 | ||
60 | func (w *worker) processMetric(m metric) error { | |
61 | if !shouldSample(m.rate, w.random, &w.randomLock) { | |
62 | return nil | |
63 | } | |
64 | w.Lock() | |
65 | var err error | |
66 | if err = w.writeMetricUnsafe(m); err == errBufferFull { | |
67 | w.flushUnsafe() | |
68 | err = w.writeMetricUnsafe(m) | |
69 | } | |
70 | w.Unlock() | |
71 | return err | |
72 | } | |
73 | ||
// writeAggregatedMetricUnsafe serializes a client-side aggregated metric
// (multiple values packed as "name:v1:v2:...|<sym>|#tags"). When all the
// values do not fit in the current buffer, it flushes and continues with
// the remaining values in a fresh buffer. Caller must hold the worker lock.
func (w *worker) writeAggregatedMetricUnsafe(m metric, metricSymbol []byte, precision int) error {
	// Index into m.fvalues of the first value not yet written.
	globalPos := 0

	// first check how much data we can write to the buffer:
	// +3 + len(metricSymbol) because the message will include '|<metricSymbol>|#' before the tags
	// +1 for the potential line break at the start of the metric
	tagsSize := len(m.stags) + 4 + len(metricSymbol)
	for _, t := range m.globalTags {
		tagsSize += len(t) + 1
	}

	for {
		pos, err := w.buffer.writeAggregated(metricSymbol, m.namespace, m.globalTags, m.name, m.fvalues[globalPos:], m.stags, tagsSize, precision)
		if err == errPartialWrite {
			// We successfully wrote part of the histogram metrics.
			// We flush the current buffer and finish the histogram
			// in a new one.
			w.flushUnsafe()
			globalPos += pos
		} else {
			// Success (err == nil) or a hard error: done either way.
			return err
		}
	}
}
98 | ||
// writeMetricUnsafe dispatches a metric to the buffer serializer matching
// its type. Aggregated histogram-like metrics take the multi-value path;
// timings pass precision 6 while histograms/distributions pass -1 (which
// presumably selects the buffer's default float formatting — defined in
// writeAggregated). Caller must hold the worker lock.
func (w *worker) writeMetricUnsafe(m metric) error {
	switch m.metricType {
	case gauge:
		return w.buffer.writeGauge(m.namespace, m.globalTags, m.name, m.fvalue, m.tags, m.rate)
	case count:
		return w.buffer.writeCount(m.namespace, m.globalTags, m.name, m.ivalue, m.tags, m.rate)
	case histogram:
		return w.buffer.writeHistogram(m.namespace, m.globalTags, m.name, m.fvalue, m.tags, m.rate)
	case distribution:
		return w.buffer.writeDistribution(m.namespace, m.globalTags, m.name, m.fvalue, m.tags, m.rate)
	case set:
		return w.buffer.writeSet(m.namespace, m.globalTags, m.name, m.svalue, m.tags, m.rate)
	case timing:
		return w.buffer.writeTiming(m.namespace, m.globalTags, m.name, m.fvalue, m.tags, m.rate)
	case event:
		return w.buffer.writeEvent(m.evalue, m.globalTags)
	case serviceCheck:
		return w.buffer.writeServiceCheck(m.scvalue, m.globalTags)
	case histogramAggregated:
		return w.writeAggregatedMetricUnsafe(m, histogramSymbol, -1)
	case distributionAggregated:
		return w.writeAggregatedMetricUnsafe(m, distributionSymbol, -1)
	case timingAggregated:
		return w.writeAggregatedMetricUnsafe(m, timingSymbol, 6)
	default:
		// Unknown metric types are silently ignored.
		return nil
	}
}
127 | ||
128 | func (w *worker) flush() { | |
129 | w.Lock() | |
130 | w.flushUnsafe() | |
131 | w.Unlock() | |
132 | } | |
133 | ||
// pause takes the worker lock, blocking metric processing until unpause.
func (w *worker) pause() {
	w.Lock()
}
137 | ||
// unpause releases the worker lock taken by pause.
func (w *worker) unpause() {
	w.Unlock()
}
141 | ||
142 | // flush the current buffer. Lock must be held by caller. | |
143 | // flushed buffer written to the network asynchronously. | |
144 | func (w *worker) flushUnsafe() { | |
145 | if len(w.buffer.bytes()) > 0 { | |
146 | w.sender.send(w.buffer) | |
147 | w.buffer = w.pool.borrowBuffer() | |
148 | } | |
149 | } |
0 | package statsd | |
1 | ||
2 | import ( | |
3 | "fmt" | |
4 | "testing" | |
5 | ||
6 | "github.com/stretchr/testify/assert" | |
7 | ) | |
8 | ||
// TestShouldSample draws many samples at each rate and checks the observed
// sampling frequency stays within 1% of the requested rate.
func TestShouldSample(t *testing.T) {
	rates := []float64{0.01, 0.05, 0.1, 0.25, 0.5, 0.75, 0.9, 0.99, 1.0}
	iterations := 50_000

	for _, rate := range rates {
		rate := rate // Capture range variable.
		t.Run(fmt.Sprintf("Rate %0.2f", rate), func(t *testing.T) {
			t.Parallel()

			// A worker gives us a ready-made random source + lock pair.
			worker := newWorker(newBufferPool(1, 1, 1), nil)
			count := 0
			for i := 0; i < iterations; i++ {
				if shouldSample(rate, worker.random, &worker.randomLock) {
					count++
				}
			}
			assert.InDelta(t, rate, float64(count)/float64(iterations), 0.01)
		})
	}
}
29 | ||
// BenchmarkShouldSample measures sampling-decision throughput with each
// parallel goroutine sharing one worker's random source and lock.
func BenchmarkShouldSample(b *testing.B) {
	b.RunParallel(func(pb *testing.PB) {
		worker := newWorker(newBufferPool(1, 1, 1), nil)
		for pb.Next() {
			shouldSample(0.1, worker.random, &worker.randomLock)
		}
	})
}
38 | ||
39 | func initWorker(bufferSize int) (*bufferPool, *sender, *worker) { | |
40 | pool := newBufferPool(10, bufferSize, 5) | |
41 | // manually create the sender so the sender loop is not started. All we | |
42 | // need is the queue | |
43 | s := &sender{ | |
44 | queue: make(chan *statsdBuffer, 10), | |
45 | pool: pool, | |
46 | } | |
47 | ||
48 | w := newWorker(pool, s) | |
49 | return pool, s, w | |
50 | } | |
51 | ||
52 | func testWorker(t *testing.T, m metric, expectedBuffer string) { | |
53 | _, s, w := initWorker(100) | |
54 | ||
55 | err := w.processMetric(m) | |
56 | assert.Nil(t, err) | |
57 | ||
58 | w.flush() | |
59 | data := <-s.queue | |
60 | assert.Equal(t, expectedBuffer, string(data.buffer)) | |
61 | ||
62 | } | |
63 | ||
// TestWorkerGauge checks the serialized form of a gauge metric ("|g").
func TestWorkerGauge(t *testing.T) {
	testWorker(
		t,
		metric{
			metricType: gauge,
			namespace:  "namespace.",
			globalTags: []string{"globalTags", "globalTags2"},
			name:       "test_gauge",
			fvalue:     21,
			tags:       []string{"tag1", "tag2"},
			rate:       1,
		},
		"namespace.test_gauge:21|g|#globalTags,globalTags2,tag1,tag2\n",
	)
}
79 | ||
// TestWorkerCount checks the serialized form of a count metric ("|c").
func TestWorkerCount(t *testing.T) {
	testWorker(
		t,
		metric{
			metricType: count,
			namespace:  "namespace.",
			globalTags: []string{"globalTags", "globalTags2"},
			name:       "test_count",
			ivalue:     21,
			tags:       []string{"tag1", "tag2"},
			rate:       1,
		},
		"namespace.test_count:21|c|#globalTags,globalTags2,tag1,tag2\n",
	)
}
95 | ||
// TestWorkerHistogram checks the serialized form of a histogram ("|h").
func TestWorkerHistogram(t *testing.T) {
	testWorker(
		t,
		metric{
			metricType: histogram,
			namespace:  "namespace.",
			globalTags: []string{"globalTags", "globalTags2"},
			name:       "test_histogram",
			fvalue:     21,
			tags:       []string{"tag1", "tag2"},
			rate:       1,
		},
		"namespace.test_histogram:21|h|#globalTags,globalTags2,tag1,tag2\n",
	)
}
111 | ||
// TestWorkerDistribution checks the serialized form of a distribution ("|d").
func TestWorkerDistribution(t *testing.T) {
	testWorker(
		t,
		metric{
			metricType: distribution,
			namespace:  "namespace.",
			globalTags: []string{"globalTags", "globalTags2"},
			name:       "test_distribution",
			fvalue:     21,
			tags:       []string{"tag1", "tag2"},
			rate:       1,
		},
		"namespace.test_distribution:21|d|#globalTags,globalTags2,tag1,tag2\n",
	)
}
127 | ||
// TestWorkerSet checks the serialized form of a set metric ("|s"); the
// set value itself may contain ':' and must pass through unescaped.
func TestWorkerSet(t *testing.T) {
	testWorker(
		t,
		metric{
			metricType: set,
			namespace:  "namespace.",
			globalTags: []string{"globalTags", "globalTags2"},
			name:       "test_set",
			svalue:     "value:1",
			tags:       []string{"tag1", "tag2"},
			rate:       1,
		},
		"namespace.test_set:value:1|s|#globalTags,globalTags2,tag1,tag2\n",
	)
}
143 | ||
// TestWorkerTiming checks the serialized form of a timing metric ("|ms")
// and that the float value is rendered with six decimal places.
func TestWorkerTiming(t *testing.T) {
	testWorker(
		t,
		metric{
			metricType: timing,
			namespace:  "namespace.",
			globalTags: []string{"globalTags", "globalTags2"},
			name:       "test_timing",
			fvalue:     1.2,
			tags:       []string{"tag1", "tag2"},
			rate:       1,
		},
		"namespace.test_timing:1.200000|ms|#globalTags,globalTags2,tag1,tag2\n",
	)
}
159 | ||
// TestWorkerHistogramAggregated checks the aggregated-histogram path with
// a single value; note tags come pre-serialized in stags.
func TestWorkerHistogramAggregated(t *testing.T) {
	testWorker(
		t,
		metric{
			metricType: histogramAggregated,
			namespace:  "namespace.",
			globalTags: []string{"globalTags", "globalTags2"},
			name:       "test_histogram",
			fvalues:    []float64{1.2},
			stags:      "tag1,tag2",
			rate:       1,
		},
		"namespace.test_histogram:1.2|h|#globalTags,globalTags2,tag1,tag2\n",
	)
}
175 | ||
// TestWorkerHistogramAggregatedMultiple checks that multiple aggregated
// histogram values pack into one datagram ("v1:v2:..."), and that they
// split across two buffers when the buffer is too small to hold them all.
func TestWorkerHistogramAggregatedMultiple(t *testing.T) {
	_, s, w := initWorker(100)

	m := metric{
		metricType: histogramAggregated,
		namespace:  "namespace.",
		globalTags: []string{"globalTags", "globalTags2"},
		name:       "test_histogram",
		fvalues:    []float64{1.1, 2.2, 3.3, 4.4},
		stags:      "tag1,tag2",
		rate:       1,
	}
	err := w.processMetric(m)
	assert.Nil(t, err)

	w.flush()
	data := <-s.queue
	assert.Equal(t, "namespace.test_histogram:1.1:2.2:3.3:4.4|h|#globalTags,globalTags2,tag1,tag2\n", string(data.buffer))

	// reducing buffer size so not all values fit in one packet
	_, s, w = initWorker(70)

	err = w.processMetric(m)
	assert.Nil(t, err)

	w.flush()
	data = <-s.queue
	assert.Equal(t, "namespace.test_histogram:1.1:2.2|h|#globalTags,globalTags2,tag1,tag2\n", string(data.buffer))
	data = <-s.queue
	assert.Equal(t, "namespace.test_histogram:3.3:4.4|h|#globalTags,globalTags2,tag1,tag2\n", string(data.buffer))
}
207 | ||
// TestWorkerDistributionAggregated checks the aggregated-distribution path
// with a single value.
func TestWorkerDistributionAggregated(t *testing.T) {
	testWorker(
		t,
		metric{
			metricType: distributionAggregated,
			namespace:  "namespace.",
			globalTags: []string{"globalTags", "globalTags2"},
			name:       "test_distribution",
			fvalues:    []float64{1.2},
			stags:      "tag1,tag2",
			rate:       1,
		},
		"namespace.test_distribution:1.2|d|#globalTags,globalTags2,tag1,tag2\n",
	)
}
223 | ||
// TestWorkerDistributionAggregatedMultiple checks that multiple aggregated
// distribution values pack into one datagram, and split across two buffers
// when the buffer is too small to hold them all.
func TestWorkerDistributionAggregatedMultiple(t *testing.T) {
	_, s, w := initWorker(100)

	m := metric{
		metricType: distributionAggregated,
		namespace:  "namespace.",
		globalTags: []string{"globalTags", "globalTags2"},
		name:       "test_distribution",
		fvalues:    []float64{1.1, 2.2, 3.3, 4.4},
		stags:      "tag1,tag2",
		rate:       1,
	}
	err := w.processMetric(m)
	assert.Nil(t, err)

	w.flush()
	data := <-s.queue
	assert.Equal(t, "namespace.test_distribution:1.1:2.2:3.3:4.4|d|#globalTags,globalTags2,tag1,tag2\n", string(data.buffer))

	// reducing buffer size so not all values fit in one packet
	_, s, w = initWorker(72)

	err = w.processMetric(m)
	assert.Nil(t, err)

	w.flush()
	data = <-s.queue
	assert.Equal(t, "namespace.test_distribution:1.1:2.2|d|#globalTags,globalTags2,tag1,tag2\n", string(data.buffer))
	data = <-s.queue
	assert.Equal(t, "namespace.test_distribution:3.3:4.4|d|#globalTags,globalTags2,tag1,tag2\n", string(data.buffer))
}
255 | ||
// TestWorkerMultipleDifferentDistributionAggregated checks that when a
// second aggregated metric only partially fits in the remaining buffer
// space, the written part shares the first buffer and the leftover values
// continue in a fresh one.
func TestWorkerMultipleDifferentDistributionAggregated(t *testing.T) {
	// first metric will fit but not the second one
	_, s, w := initWorker(160)

	m := metric{
		metricType: distributionAggregated,
		namespace:  "namespace.",
		globalTags: []string{"globalTags", "globalTags2"},
		name:       "test_distribution",
		fvalues:    []float64{1.1, 2.2, 3.3, 4.4},
		stags:      "tag1,tag2",
		rate:       1,
	}
	err := w.processMetric(m)
	assert.Nil(t, err)
	m = metric{
		metricType: distributionAggregated,
		namespace:  "namespace.",
		globalTags: []string{"globalTags", "globalTags2"},
		name:       "test_distribution_2",
		fvalues:    []float64{1.1, 2.2, 3.3, 4.4},
		stags:      "tag1,tag2",
		rate:       1,
	}
	err = w.processMetric(m)
	assert.Nil(t, err)

	w.flush()
	data := <-s.queue
	assert.Equal(t, "namespace.test_distribution:1.1:2.2:3.3:4.4|d|#globalTags,globalTags2,tag1,tag2\nnamespace.test_distribution_2:1.1:2.2:3.3|d|#globalTags,globalTags2,tag1,tag2\n", string(data.buffer))
	data = <-s.queue
	assert.Equal(t, "namespace.test_distribution_2:4.4|d|#globalTags,globalTags2,tag1,tag2\n", string(data.buffer))
}
288 | } |