New Upstream Release - golang-github-datadog-datadog-go
Ready changes
Summary
Merged new upstream version: 5.1.1 (was: 5.1.0).
Resulting package
Built on 2022-05-28T03:44 (took 10m25s)
The resulting binary packages can be installed (if you have the apt repository enabled) by running one of:
apt install -t fresh-releases golang-github-datadog-datadog-go-dev
Lintian Result
Diff
diff --git a/.circleci/config.yml b/.circleci/config.yml
new file mode 100644
index 0000000..be8a710
--- /dev/null
+++ b/.circleci/config.yml
@@ -0,0 +1,75 @@
+# We use github actions to test the code on windows and linux amd64. Circleci is used for linux arm64.
+#
+version: 2.1
+
+orbs:
+ os-detect: circleci/os-detect@0.2
+
+executors:
+ linux_arm64:
+ description: "arm64"
+ machine:
+ image: ubuntu-2004:202101-01
+ resource_class: arm.medium
+
+commands:
+ install_go_linux:
+ parameters:
+ version:
+ type: string
+ steps:
+ - os-detect/init # this setup the '$SUDO' variable
+ - run:
+ name: "install Golang linux"
+ command: |
+ if command -v go >/dev/null; then
+ if go version | grep -q -F "go<< parameters.version >> "; then
+ echo "Binary already exists, skipping download."
+ exit 0
+ fi
+ echo "Error different version of Go already installed: '`go version`' when requested was '<< parameters.version >>'"
+
+ $SUDO rm -rf /usr/local/go
+ $SUDO install "--owner=${USER}" -d /usr/local/go
+ fi
+
+ echo "Installing the requested version of Go."
+
+ curl --fail --location -sS "https://dl.google.com/go/go<<parameters.version >>.linux-arm64.tar.gz" \
+ | sudo tar --no-same-owner --strip-components=1 --gunzip -x -C /usr/local/go/
+
+ echo "export PATH=$PATH:/usr/local/go/bin" >> $BASH_ENV
+ $SUDO chown -R "$(whoami):" /usr/local/go
+
+ go version
+
+ run_tests:
+ steps:
+ - checkout
+ - run: go vet ./statsd/...
+ - run: go fmt ./statsd/...
+ - run: go test -v ./statsd/...
+
+jobs:
+ # Those allow us to have the os name in the job name. 'matrix' don't add static parameters to the name in the circleci
+ # UI.
+ tests_arm64:
+ working_directory: /home/circleci/.go_workspace/src/github.com/DataDog/datadog-go
+ environment:
+ GO111MODULE: auto
+ executor: linux_arm64
+ parameters:
+ go-version:
+ type: string
+ steps:
+ - install_go_linux:
+ version: << parameters.go-version >>
+ - run_tests
+
+workflows:
+ all-tests:
+ jobs:
+ - tests_arm64:
+ matrix:
+ parameters:
+ go-version: ["1.13", "1.14", "1.15", "1.16", "1.17"]
diff --git a/.github/CODEOWNERS b/.github/CODEOWNERS
new file mode 100644
index 0000000..e8055ac
--- /dev/null
+++ b/.github/CODEOWNERS
@@ -0,0 +1,7 @@
+# See https://help.github.com/articles/about-codeowners/ for syntax
+# Rules are matched bottom-to-top, so one team can own subdirectories
+# and another team can own the rest of the directory.
+
+
+# Documentation
+*.md @DataDog/baklava
diff --git a/.github/workflows/codeql-analysis.yml b/.github/workflows/codeql-analysis.yml
new file mode 100644
index 0000000..aacce56
--- /dev/null
+++ b/.github/workflows/codeql-analysis.yml
@@ -0,0 +1,46 @@
+name: "CodeQL"
+
+on:
+ push:
+ branches: [ master ]
+ pull_request:
+ # The branches below must be a subset of the branches above
+ branches: [ master ]
+
+jobs:
+ analyze:
+ name: Analyze
+ runs-on: ubuntu-latest
+ permissions:
+ actions: read
+ contents: read
+ security-events: write
+
+ strategy:
+ fail-fast: false
+ matrix:
+ language: [ 'go' ]
+ # CodeQL supports [ 'cpp', 'csharp', 'go', 'java', 'javascript', 'python', 'ruby' ]
+ # Learn more about CodeQL language support at https://git.io/codeql-language-support
+
+ steps:
+ - name: Checkout repository
+ uses: actions/checkout@v2
+
+ # Initializes the CodeQL tools for scanning.
+ - name: Initialize CodeQL
+ uses: github/codeql-action/init@v1
+ with:
+ languages: ${{ matrix.language }}
+ # If you wish to specify custom queries, you can do so here or in a config file.
+ # By default, queries listed here will override any specified in a config file.
+ # Prefix the list here with "+" to use these queries and those in the config file.
+ # queries: ./path/to/local/query, your-org/your-repo/queries@main
+
+ # Autobuild attempts to build any compiled languages (C/C++, C#, or Java).
+ # If this step fails, then you should remove it and run the build manually
+ - name: Autobuild
+ uses: github/codeql-action/autobuild@v1
+
+ - name: Perform CodeQL Analysis
+ uses: github/codeql-action/analyze@v1
diff --git a/.github/workflows/datadog-go.yaml b/.github/workflows/datadog-go.yaml
new file mode 100644
index 0000000..8fb3e4b
--- /dev/null
+++ b/.github/workflows/datadog-go.yaml
@@ -0,0 +1,24 @@
+# We use github actions to test the code on windows and linux amd64. Circleci is used for linux arm64.
+
+name: datadog-go
+on:
+ pull_request:
+
+jobs:
+ native:
+ strategy:
+ matrix:
+ go-version: [ 1.17, 1.16, 1.15, 1.14, 1.13]
+ runs-on: [ ubuntu-latest, windows-latest, macos-latest ]
+ fail-fast: false
+ runs-on: ${{ matrix.runs-on }}
+ steps:
+ - name: Setup go
+ uses: actions/setup-go@v2
+ with:
+ go-version: ${{ matrix.go-version }}
+ - name: Checkout code
+ uses: actions/checkout@v2
+ - run: go vet ./statsd/...
+ - run: go fmt ./statsd/...
+ - run: go test -race -v ./statsd/...
diff --git a/.github/workflows/generate.yaml b/.github/workflows/generate.yaml
new file mode 100644
index 0000000..a597722
--- /dev/null
+++ b/.github/workflows/generate.yaml
@@ -0,0 +1,21 @@
+name: generate-mock
+on:
+ pull_request:
+
+jobs:
+ native:
+ strategy:
+ matrix:
+ runs-on: [ ubuntu-latest, windows-latest]
+ fail-fast: false
+ runs-on: ${{ matrix.runs-on }}
+ steps:
+ - name: Setup go
+ uses: actions/setup-go@v2
+ with:
+ go-version: 1.16
+ - name: Checkout code
+ uses: actions/checkout@v2
+ - run: go install github.com/golang/mock/mockgen
+ - run: go generate statsd/statsd.go
+ - run: git diff --exit-code HEAD
diff --git a/.gitignore b/.gitignore
new file mode 100644
index 0000000..3819313
--- /dev/null
+++ b/.gitignore
@@ -0,0 +1,2 @@
+*.swp
+*.swo
diff --git a/.travis.yml b/.travis.yml
deleted file mode 100644
index 73556a1..0000000
--- a/.travis.yml
+++ /dev/null
@@ -1,11 +0,0 @@
-language: go
-
-go:
- - 1.5
- - 1.6
- - 1.7
- - 1.8
- - 1.9
-
-script:
- - go test -race -v ./...
diff --git a/CHANGELOG.md b/CHANGELOG.md
index 7288d69..8dfb170 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -1,5 +1,267 @@
## Changes
+[//]: # (comment: Don't forget to update statsd/telemetry.go:clientVersionTelemetryTag when releasing a new version)
+
+# 5.1.1 / 2022-05-05
+
+- [BUGFIX] Fix issue where tags of aggregated contexts could be modified after being sampled. See [#258][].
+
+# 5.1.0 / 2022-03-02
+
+* [FEATURE] Adding support for container origin detection. See [#250][].
+* [FEATURE] Adding `IsClosed` method to the client. See [#254][], thanks [@lucassscaravelli][].
+* [FEATURE] Adding a mock for the `Client` interface to ease testing from users. See [#255][].
+* [IMPROVEMENT] Optimize `getContext` and `getContextAndTags` functions. See [#253][], thanks [@martin-sucha][].
+* [IMPROVEMENT] Export error `MessageTooLongError` to catch error when sending message that can't fit in a buffer. See
+ [#252][].
+* [BUGFIX] Add missing `GetTelemetry` from the `Client` Interface. See [#255][].
+
+# 5.0.2 / 2021-11-29
+
+* [BUGFIX] Fix Windows erroneous import. See [#242][], thanks [@programmer04][].
+
+# 5.0.1 / 2021-10-18
+
+* [BUGFIX] Fix Event.Check method: text is no longer required. See [#235][].
+
+# 5.0.0 / 2021-10-01
+
+## Breaking changes
+
+Many field/methods have been removed from the public API of the client to allow for the client internals to evolve
+more easily in the future without breaking the public API of the client.
+
+- New import path for the v5 is `github.com/DataDog/datadog-go/v5/statsd`
+- The project now use go.mod file for its dependencies.
+- `WithDevMode` option has been removed. The extended telemetry enabled by `WithDevMode` is now part of the default
+ telemetry.
+- `WithWriteTimeoutUDS` option has been renamed `WithWriteTimeout` since it also impacts named pipe transport.
+- `SetWriteTimeout` method has been removed in favor of `WithWriteTimeout` option.
+- The following internal fields and methods have been removed from the public API:
+ + `WriterNameUDP`
+ + `WriterNameUDS`
+ + `WriterWindowsPipe`
+ + `TelemetryInterval`
+- Field `Client.Namespace` is now private, please use the `WithNamespace` option.
+- Field `Client.Tags` is now private, please use the `WithTags` option.
+- Method `NewBuffered` has been removed in favor of the `WithMaxMessagesPerPayload()` option.
+ Instead of `statsd.NewBuffered(addr, bufferLength)` please use `statsd.New(addr, statsd.WithMaxMessagesPerPayload(bufferLength))`
+- `Encode` method for `Event` and `ServiceCheck` have been removed.
+- The `Check` method for `Event` and `ServiceCheck` now uses pointer receivers.
+- All `Options` internals are no longer part of the public API. Only the part needed by the client app is left in the
+ public API. This also improves/clarifies the `Options` documentation and usage.
+- `statsdWriter` have been removed from the API, `io.WriteCloser` can now be used instead.
+- `SenderMetrics` and `ClientMetrics` structs as well as `FlushTelemetryMetrics` method have been removed from the
+ public API in favor of the `Telemetry` struct and the `GetTelemetry` method. The client telemetry is now cumulative
+ since the start of the client instead of being reset after being sent to the Agent. See `Telemetry` struct
+ documentation for more information on what each field represents. This allows client apps to take action based on
+ the telemetry (ex: adapting sampling rate based on the number of packets dropped). The telemetry sent to the agent
+ hasn't changed so the same dashboard can be used for V4 and V5 apps.
+- Client side aggregation for Counts, Gauges and Sets is enabled by default. See `WithoutClientSideAggregation()` option
+ to disable it.
+- `WithBufferShardCount` option has been renamed `WithWorkersCount`.
+
+## Notes
+
+- [FEATURE] Adding public method `GetTelemetry` to retrieve the client internal telemetry since the start of the client.
+- [FEATURE] Client side aggregation for Counts, Gauges and Sets is enabled by default.
+ `WithExtendedClientSideAggregation()` for Timings, Histograms and Distributions is still disabled by default. Both
+ features are no longer considered BETA.
+
+# 4.8.3 / 2021-10-27
+
+* [BUGFIX] Fix `Event.Check` method: text is no longer required. See [#237][].
+
+# 4.8.2 / 2021-09-06
+
+* [BETA][BUGFIX] Fix race condition in aggregation where two sample could overwrite each other when sampled for the first time. See [#225][]
+
+# 4.8.1 / 2021-07-09
+
+* [BUGFIX] Prevent telemetry from using the client global namespace. See [#205][]
+* [BETA][BUGFIX] Fix timings having a different precision with and without extended aggregation. See [#204][]
+
+# 4.8.0 / 2021-06-14
+
+* [BETA][IMPROVEMENT] Reduce aggregation default window to 2s to reduce sampling aliasing. See [#199][]
+* [IMPROVEMENT] Automatically add a "\n" after each metric so the agent can determine if a metric is truncated. Per source EOL detection was made available in agent 7.28 with the `dogstatsd_eol_required` setting. See [#198][]
+
+# 4.7.0 / 2021-05-05
+
+* [BETA] Increase the number of workers in the aggregator when using channelMode with extended aggregation to have
+ similar performance than channelMode without aggregation. See [#195][].
+
+# 4.6.1 / 2021-04-30
+
+* [BETA BUGFIX] Fix telemetry with extended aggregation and channelMode. See [#194][].
+
+# 4.6.0 / 2021-04-16
+
+* [BETA] Support sample rate and channel mode for extended aggregation (ie: histograms, distributions and timings). See [#187][].
+
+# 4.5.1 / 2021-03-31
+
+* [BUGFIX] Fix support of UDS and named pipe for DD_AGENT_HOST environment variable. See [#192][].
+
+# 4.5.0 / 2021-03-15
+
+* [IMPROVEMENT] Increase UDS default timeout from 1ms to 100ms. See [#186][].
+* [IMPROVEMENT] Defer connection establishment to first write for Windows Named Pipe. See [#190][].
+
+# 4.4.0 / 2021-02-10
+
+* [BETA BUGFIX] Fix multi-metric aggregation when packing different metrics in the same packet. See [#181][].
+* [FEATURE] Add support for Windows Named Pipes (Windows only). See [#182][] and [#185][].
+
+# 4.3.1 / 2021-01-28
+
+* [BUGFIX] Fix race condition when using sample rate (introduced in 4.3.0). See [#179][].
+
+# 4.3.0 / 2021-01-20
+
+* [BETA] Adding client side aggregation for distributions, histograms and timings. See [#176][].
+* [IMPROVEMENT] Use a worker-specific random source to remove lock contention. See [#178][]. Thanks to [@matthewdale][].
+* [IMPROVEMENT] Update devMode telemetry naming and tagging to ease graphing in Datadog. See [#175][].
+
+# 4.2.0 / 2020-11-02
+
+* [UDS] Use better payload size defaults for UDS connections. See [#171][].
+
+# 4.1.0 / 2020-10-23
+
+[BETA BUGFIX] Ignore sampling rate when client side aggregation is enabled (for Gauge, Count and Set). See [#170][].
+[FEATURE] Adding a new option `WithDevMode()`, to send more telemetry metrics to ease troubleshooting issues. See [#169][].
+
+
+# 4.0.1 / 2020-10-07
+
+### Notes
+
+* [BUGFIX] Fix incomplete manual flush of the sender when the client isn't stopped. See [#163][].
+
+# 4.0.0 / 2020-08-21
+
+### Notes
+
+* [FEATURE] Add new option `WithTelemetryAddr`, to send the telemetry data to a different endpoint. See [#157][].
+* [BUGFIX] Fix race condition in the flush mechanism of the aggregator. See [#166][]. Thanks to [@cyx][].
+
+### Breaking changes
+
+- Dropping support for EOL versions of Golang 1.11 and lower.
+
+# 3.7.2 / 2020-06-16
+
+### Notes
+
+* [BUGFIX] Fix panic on 32bits and ARM when using the telemetry. See [#156][].
+* [BETA BUGFIX] Fix typo in method name to configure the aggregation window interval. See [#154][].
+
+# 3.7.1 / 2020-05-01
+
+### Notes
+
+* [BUGFIX] Fix panic when calling CloneWithExtraOptions with a nil client. See [#148][].
+
+# 3.7.0 / 2020-04-29
+
+### Notes
+
+* [FEATURE] Add new function to clone a Client, so library can inherit and extend options from the main application. See [#147][].
+* [IMPROVEMENT] Auto append a '.' when needed to namespace. See [#145][]. Thanks to [@kamatama41][].
+* [IMPROVEMENT] Add the client global tags to the telemetry tags. See [#143][]. Thanks to [@chrisleavoy][].
+
+# 3.6.0 / 2020-04-21
+
+### Notes
+
+* [IMPROVEMENT] Reduce lock contention by sharding worker by metric name. See [#108][].
+* [FEATURE] Adding a "channel mode" to send metrics to the client, disabled by default. See [#134][].
+* [BUGFIX] Fix metrics not being flushed when the client is closed. See [#144][].
+* [BETA] Adding client side aggregation for Gauge, Count and Set. See [#139][].
+
+# 3.5.0 / 2020-03-17
+
+### Notes
+
+* [IMPROVEMENT] Add support for `DD_ENV`, `DD_SERVICE`, and `DD_VERSION` to set global tags for `env`, `service` and `version`. See [#137][]
+
+# 3.4.1 / 2020-03-10
+
+### Notes
+
+* [BUGFIX] Fix possible deadlock when closing the client. See [#135][]. Thanks to [@danp60][].
+
+# 3.4.0 / 2020-01-15
+
+### Notes
+
+* [IMPROVEMENT] Improve tags for the telemetry. See [#118][].
+* [IMPROVEMENT] Add option to disable the telemetry. See [#117][].
+* [IMPROVEMENT] Add metrics, event and service check count to the telemetry. See [#118][].
+
+# 3.3.1 / 2019-12-13
+
+### Notes
+
+* [BUGFIX] Fix Unix domain socket path extraction. See [#113][].
+* [BUGFIX] Fix an issue with custom writers leading to metric drops. See [#106][].
+* [BUGFIX] Fix an error check in uds.Write leading to unneeded re-connections. See [#115][].
+
+# 3.3.0 / 2019-12-02
+
+### Notes
+
+* [BUGFIX] Close the stop channel when closing a statsd client to avoid leaking. See [#107][].
+
+# 3.2.0 / 2019-10-28
+
+### Notes
+
+* [IMPROVEMENT] Add all `Client` public methods to the `ClientInterface` and `NoOpClient`. See [#100][]. Thanks [@skaji][].
+
+# 3.1.0 / 2019-10-24
+
+### Notes
+
+* [FEATURE] Add a noop client. See [#92][]. Thanks [@goodspark][].
+
+# 3.0.0 / 2019-10-18
+
+### Notes
+
+* [FEATURE] Add a way to configure the maximum size of a single payload (was always 1432, the optimal size for local UDP). See [#91][].
+* [IMPROVEMENT] Various performance improvements. See [#91][].
+* [OTHER] The client now pre-allocates 4MB of memory to queue up metrics. This can be controlled using the [WithBufferPoolSize](https://godoc.org/github.com/DataDog/datadog-go/statsd#WithBufferPoolSize) option.
+
+### Breaking changes
+
+- Sending a metric over UDS won't return an error if we fail to forward the datagram to the agent. We took this decision for two main reasons:
+ - This made the UDS client blocking by default which is not desirable
+ - This design was flawed if you used a buffer as only the call that actually sent the buffer would return an error
+- The `Buffered` option has been removed as the client can only be buffered. If for some reason you need to have only one dogstatsd message per payload you can still use the `WithMaxMessagesPerPayload` option set to 1.
+- The `AsyncUDS` option has been removed as the networking layer is now running in a separate Goroutine.
+
+# 2.3.0 / 2019-10-15
+
+### Notes
+
+ * [IMPROVEMENT] Use an error constant for "nil client" errors. See [#90][]. Thanks [@asf-stripe][].
+
+# 2.2.0 / 2019-04-11
+
+### Notes
+
+ * [FEATURE] UDS: non-blocking implementation. See [#81][].
+ * [FEATURE] Support configuration from standard environment variables. See [#78][].
+ * [FEATURE] Configuration at client creation. See [#82][].
+ * [IMPROVEMENT] UDS: change Mutex to RWMutex for fast already-connected path. See [#84][]. Thanks [@KJTsanaktsidis][].
+ * [IMPROVEMENT] Return error when using on nil client. See [#65][]. Thanks [@Aceeri][].
+ * [IMPROVEMENT] Reduce `Client.format` allocations. See [#53][]. Thanks [@vcabbage][].
+ * [BUGFIX] UDS: add lock to writer for concurrency safety. See [#62][].
+ * [DOCUMENTATION] Document new options, non-blocking client, etc. See [#85][].
+ * [TESTING] Adding go 1.10 and go 1.11 to CI. See [#75][]. Thanks [@thedevsaddam][].
+
# 2.1.0 / 2018-03-30
### Notes
@@ -72,7 +334,72 @@ Below, for reference, the latest improvements made in 07/2016 - 08/2016
[#46]: https://github.com/DataDog/datadog-go/issues/46
[#47]: https://github.com/DataDog/datadog-go/issues/47
[#52]: https://github.com/DataDog/datadog-go/issues/52
+[#53]: https://github.com/DataDog/datadog-go/issues/53
+[#62]: https://github.com/DataDog/datadog-go/issues/62
+[#65]: https://github.com/DataDog/datadog-go/issues/65
+[#75]: https://github.com/DataDog/datadog-go/issues/75
+[#78]: https://github.com/DataDog/datadog-go/issues/78
+[#81]: https://github.com/DataDog/datadog-go/issues/81
+[#82]: https://github.com/DataDog/datadog-go/issues/82
+[#84]: https://github.com/DataDog/datadog-go/issues/84
+[#85]: https://github.com/DataDog/datadog-go/issues/85
+[#90]: https://github.com/DataDog/datadog-go/issues/90
+[#91]: https://github.com/DataDog/datadog-go/issues/91
+[#92]: https://github.com/DataDog/datadog-go/issues/92
+[#100]: https://github.com/DataDog/datadog-go/issues/100
+[#106]: https://github.com/DataDog/datadog-go/issues/106
+[#107]: https://github.com/DataDog/datadog-go/issues/107
+[#113]: https://github.com/DataDog/datadog-go/issues/113
+[#117]: https://github.com/DataDog/datadog-go/issues/117
+[#118]: https://github.com/DataDog/datadog-go/issues/118
+[#115]: https://github.com/DataDog/datadog-go/issues/115
+[#135]: https://github.com/DataDog/datadog-go/issues/135
+[#137]: https://github.com/DataDog/datadog-go/issues/137
+[#108]: https://github.com/DataDog/datadog-go/pull/108
+[#134]: https://github.com/DataDog/datadog-go/pull/134
+[#139]: https://github.com/DataDog/datadog-go/pull/139
+[#143]: https://github.com/DataDog/datadog-go/pull/143
+[#144]: https://github.com/DataDog/datadog-go/pull/144
+[#145]: https://github.com/DataDog/datadog-go/pull/145
+[#147]: https://github.com/DataDog/datadog-go/pull/147
+[#148]: https://github.com/DataDog/datadog-go/pull/148
+[#154]: https://github.com/DataDog/datadog-go/pull/154
+[#156]: https://github.com/DataDog/datadog-go/pull/156
+[#157]: https://github.com/DataDog/datadog-go/pull/157
+[#163]: https://github.com/DataDog/datadog-go/pull/163
+[#169]: https://github.com/DataDog/datadog-go/pull/169
+[#170]: https://github.com/DataDog/datadog-go/pull/170
+[#171]: https://github.com/DataDog/datadog-go/pull/171
+[#175]: https://github.com/DataDog/datadog-go/pull/175
+[#176]: https://github.com/DataDog/datadog-go/pull/176
+[#178]: https://github.com/DataDog/datadog-go/pull/178
+[#179]: https://github.com/DataDog/datadog-go/pull/179
+[#181]: https://github.com/DataDog/datadog-go/pull/181
+[#182]: https://github.com/DataDog/datadog-go/pull/182
+[#185]: https://github.com/DataDog/datadog-go/pull/185
+[#186]: https://github.com/DataDog/datadog-go/pull/186
+[#187]: https://github.com/DataDog/datadog-go/pull/187
+[#190]: https://github.com/DataDog/datadog-go/pull/190
+[#192]: https://github.com/DataDog/datadog-go/pull/192
+[#194]: https://github.com/DataDog/datadog-go/pull/194
+[#195]: https://github.com/DataDog/datadog-go/pull/195
+[#198]: https://github.com/DataDog/datadog-go/pull/198
+[#199]: https://github.com/DataDog/datadog-go/pull/199
+[#204]: https://github.com/DataDog/datadog-go/pull/204
+[#205]: https://github.com/DataDog/datadog-go/pull/205
+[#225]: https://github.com/DataDog/datadog-go/pull/225
+[#235]: https://github.com/DataDog/datadog-go/pull/235
+[#237]: https://github.com/DataDog/datadog-go/pull/237
+[#242]: https://github.com/DataDog/datadog-go/pull/242
+[#250]: https://github.com/DataDog/datadog-go/pull/250
+[#252]: https://github.com/DataDog/datadog-go/pull/252
+[#253]: https://github.com/DataDog/datadog-go/pull/253
+[#254]: https://github.com/DataDog/datadog-go/pull/254
+[#255]: https://github.com/DataDog/datadog-go/pull/255
+[#258]: https://github.com/DataDog/datadog-go/pull/258
+[@Aceeri]: https://github.com/Aceeri
[@Jasrags]: https://github.com/Jasrags
+[@KJTsanaktsidis]: https://github.com/KJTsanaktsidis
[@abtris]: https://github.com/abtris
[@aviau]: https://github.com/aviau
[@colega]: https://github.com/colega
@@ -88,7 +415,20 @@ Below, for reference, the latest improvements made in 07/2016 - 08/2016
[@sschepens]: https://github.com/sschepens
[@tariq1890]: https://github.com/tariq1890
[@theckman]: https://github.com/theckman
+[@thedevsaddam]: https://github.com/thedevsaddam
[@thomas91310]: https://github.com/thomas91310
[@tummychow]: https://github.com/tummychow
+[@vcabbage]: https://github.com/vcabbage
[@victortrac]: https://github.com/victortrac
-[@w-vi]: https://github.com/w-vi
\ No newline at end of file
+[@w-vi]: https://github.com/w-vi
+[@asf-stripe]: https://github.com/asf-stripe
+[@goodspark]: https://github.com/goodspark
+[@skaji]: https://github.com/skaji
+[@danp60]: https://github.com/danp60
+[@kamatama41]: https://github.com/kamatama41
+[@chrisleavoy]: https://github.com/chrisleavoy
+[@cyx]: https://github.com/cyx
+[@matthewdale]: https://github.com/matthewdale
+[@programmer04]: https://github.com/programmer04
+[@martin-sucha]: https://github.com/martin-sucha
+[@lucassscaravelli]: https://github.com/lucassscaravelli
diff --git a/README.md b/README.md
index f73a462..1bc0133 100644
--- a/README.md
+++ b/README.md
@@ -1,33 +1,242 @@
-[![Build Status](https://travis-ci.org/DataDog/datadog-go.svg?branch=master)](https://travis-ci.org/DataDog/datadog-go)
-# Overview
+[![Build Status](https://circleci.com/gh/DataDog/datadog-go.svg?style=svg)](https://app.circleci.com/pipelines/github/DataDog/datadog-go)
-Packages in `datadog-go` provide Go clients for various APIs at [DataDog](http://datadoghq.com).
+# Datadog Go
-## Statsd
-
-[![Godoc](http://img.shields.io/badge/godoc-reference-blue.svg?style=flat)](https://godoc.org/github.com/DataDog/datadog-go/statsd)
+[![Godoc](http://img.shields.io/badge/godoc-reference-blue.svg?style=flat)](https://godoc.org/github.com/DataDog/datadog-go/v5/statsd)
[![license](http://img.shields.io/badge/license-MIT-red.svg?style=flat)](http://opensource.org/licenses/MIT)
-The [statsd](https://github.com/DataDog/datadog-go/tree/master/statsd) package provides a client for
-[dogstatsd](http://docs.datadoghq.com/guides/dogstatsd/):
+`datadog-go` is a library that provides a [DogStatsD](https://docs.datadoghq.com/developers/dogstatsd/?code-lang=go) client in Golang.
+
+Go 1.12+ is officially supported. Older versions might work but are not tested.
+
+The following documentation is available:
+
+* [GoDoc documentation for Datadog Go](http://godoc.org/github.com/DataDog/datadog-go/v5/statsd)
+* [Official Datadog DogStatsD documentation](https://docs.datadoghq.com/developers/dogstatsd/?code-lang=go).
+
+
+<!-- vim-markdown-toc GFM -->
+
+* [New major version](#new-major-version)
+* [Installation](#installation)
+ - [Supported environment variables](#supported-environment-variables)
+ - [Unix Domain Sockets Client](#unix-domain-sockets-client)
+* [Usage](#usage)
+ - [Metrics](#metrics)
+ - [Events](#events)
+ - [Service Checks](#service-checks)
+* [Client side aggregation](#client-side-aggregation)
+ - ["Basic" aggregation](#basic-aggregation)
+ - ["Extended" aggregation](#extended-aggregation)
+* [Performance / Metric drops](#performance--metric-drops)
+ - [Monitoring this client](#monitoring-this-client)
+ - [Tweaking kernel options](#tweaking-kernel-options)
+ + [Unix Domain Sockets](#unix-domain-sockets)
+ - [Maximum packets size in high-throughput scenarios](#maximum-packets-size-in-high-throughput-scenarios)
+* [Development](#development)
+* [License](#license)
+* [Credits](#credits)
+
+<!-- vim-markdown-toc -->
+
+
+## New major version
+
+The new major version `v5` is now the default. All new features will be added to this version and only bugfixes will be
+backported to `v4` (see `v4` branch).
+
+`v5` introduces a number of breaking changes compared to `v4`, see the
+[CHANGELOG](https://github.com/DataDog/datadog-go/blob/master/CHANGELOG.md#500--2021-10-01) for more information.
+
+Note that the import paths for `v5` and `v4` are different:
+- `v5`: github.com/DataDog/datadog-go/v5/statsd
+- `v4`: github.com/DataDog/datadog-go/statsd
+
+When migrating to the `v5` you will need to upgrade your imports.
+
+## Installation
+
+Get the code with:
+
+```shell
+$ go get github.com/DataDog/datadog-go/v5/statsd
+```
+
+Then create a new DogStatsD client:
+
+```go
+package main
+
+import (
+ "log"
+ "github.com/DataDog/datadog-go/v5/statsd"
+)
+
+func main() {
+ statsd, err := statsd.New("127.0.0.1:8125")
+ if err != nil {
+ log.Fatal(err)
+ }
+}
+```
+
+Find a list of all the available options for your DogStatsD Client in the [Datadog-go godoc documentation](https://godoc.org/github.com/DataDog/datadog-go/v5/statsd#Option) or in [Datadog public DogStatsD documentation](https://docs.datadoghq.com/developers/dogstatsd/?code-lang=go#client-instantiation-parameters).
+
+### Supported environment variables
+
+* If the `addr` parameter is empty, the client uses the `DD_AGENT_HOST` environment variables to build a target address.
+ Example: `DD_AGENT_HOST=127.0.0.1:8125` for UDP, `DD_AGENT_HOST=unix:///path/to/socket` for UDS and `DD_AGENT_HOST=\\.\pipe\my_windows_pipe` for Windows named pipe.
+* If the `DD_ENTITY_ID` environment variable is found, its value is injected as a global `dd.internal.entity_id` tag. The Datadog Agent uses this tag to insert container tags into the metrics.
+
+To enable origin detection and set the `DD_ENTITY_ID` environment variable, add the following lines to your application manifest:
+
+```yaml
+env:
+ - name: DD_ENTITY_ID
+ valueFrom:
+ fieldRef:
+ fieldPath: metadata.uid
+```
+
+* `DD_ENV`, `DD_SERVICE`, and `DD_VERSION` can be used by the statsd client to set `{env, service, version}` as global tags for all data emitted.
+
+### Unix Domain Sockets Client
+
+Agent v6+ accepts packets through a Unix Socket datagram connection. Details about the advantages of using UDS over UDP are available in the [DogStatsD Unix Socket documentation](https://docs.datadoghq.com/developers/dogstatsd/unix_socket/). You can use this protocol by giving a `unix:///path/to/dsd.socket` address argument to the `New` constructor.
+
+## Usage
+
+In order to use DogStatsD metrics, events, and Service Checks, the Agent must be [running and available](https://docs.datadoghq.com/developers/dogstatsd/?code-lang=go).
+
+### Metrics
+
+After the client is created, you can start sending custom metrics to Datadog. See the dedicated [Metric Submission: DogStatsD documentation](https://docs.datadoghq.com/metrics/dogstatsd_metrics_submission/?code-lang=go) to see how to submit all supported metric types to Datadog with working code examples:
+
+* [Submit a COUNT metric](https://docs.datadoghq.com/metrics/dogstatsd_metrics_submission/?code-lang=go#count).
+* [Submit a GAUGE metric](https://docs.datadoghq.com/metrics/dogstatsd_metrics_submission/?code-lang=go#gauge).
+* [Submit a SET metric](https://docs.datadoghq.com/metrics/dogstatsd_metrics_submission/?code-lang=go#set)
+* [Submit a HISTOGRAM metric](https://docs.datadoghq.com/metrics/dogstatsd_metrics_submission/?code-lang=go#histogram)
+* [Submit a DISTRIBUTION metric](https://docs.datadoghq.com/metrics/dogstatsd_metrics_submission/?code-lang=go#distribution)
+
+Metric names must only contain ASCII alphanumerics, underscores, and periods. The client will not replace nor check for invalid characters.
+
+Some options are supported when submitting metrics, like [applying a sample rate to your metrics](https://docs.datadoghq.com/metrics/dogstatsd_metrics_submission/?code-lang=go#metric-submission-options) or [tagging your metrics with your custom tags](https://docs.datadoghq.com/metrics/dogstatsd_metrics_submission/?code-lang=go#metric-tagging). Find all the available functions to report metrics [in the Datadog Go client GoDoc documentation](https://godoc.org/github.com/DataDog/datadog-go/v5/statsd#Client).
+
+### Events
+
+After the client is created, you can start sending events to your Datadog Event Stream. See the dedicated [Event Submission: DogStatsD documentation](https://docs.datadoghq.com/developers/events/dogstatsd/?code-lang=go) to see how to submit an event to your Datadog Event Stream.
+
+### Service Checks
+
+After the client is created, you can start sending Service Checks to Datadog. See the dedicated [Service Check Submission: DogStatsD documentation](https://docs.datadoghq.com/developers/service_checks/dogstatsd_service_checks_submission/?code-lang=go) to see how to submit a Service Check to Datadog.
+
+## Client side aggregation
+
+Starting with version `5.0.0` (and `3.6.0` in beta), the client offers aggregation or value packing on the client side.
+
+This feature aims at reducing both the number of packets sent to the Agent and the packet drops in very high throughput
+scenarios.
+
+The aggregation window is 2s by default and can be changed through `WithAggregationInterval()` option. Note that the
+aggregation window on the Agent side is 10s for DogStatsD metrics. So for example, setting an aggregation window of 3s in
+the client will produce a spike in your dashboard every 30 seconds for count metrics (as the third 10s bucket on the
+Agent will receive 4 samples from the client).
+
+Aggregation can be disabled using the `WithoutClientSideAggregation()` option.
+
+The telemetry `datadog.dogstatsd.client.metrics` is unchanged and represents the number of metrics before aggregation.
+New metrics `datadog.dogstatsd.client.aggregated_context` and `datadog.dogstatsd.client.aggregated_context_by_type` have
+been introduced. See the [Monitoring this client](#monitoring-this-client) section.
+
+### "Basic" aggregation
+
+Enabled by default, the client will aggregate `gauge`, `count` and `set`.
+
+This can be disabled with the `WithoutClientSideAggregation()` option.
+
+### "Extended" aggregation
+
+This feature is only compatible with Agent's version >=6.25.0 && <7.0.0 or Agent's versions >=7.25.0.
+
+Disabled by default, the client can also pack multiple values for `histogram`, `distribution` and `timing` in one
+message. Real aggregation is not possible for those types since the Agent also aggregates and two aggregation levels
+would change the final value sent to Datadog.
+
+When this option is enabled, the agent will buffer the metrics by combination of metric name and tags, and send them in the fewest number of messages.
+
+For example, if we sample 3 times the same metric. Instead of sending on the network:
+
+```
+my_distribution_metric:21|d|#all,my,tags
+my_distribution_metric:43.2|d|#all,my,tags
+my_distribution_metric:1657|d|#all,my,tags
+```
+
+The client will send only one message:
+
+```
+my_distribution_metric:21:43.2:1657|d|#all,my,tags
+```
+
+This will greatly reduce network usage and packet drops but will slightly increase the memory and CPU usage of the
+client. Looking at the telemetry metrics `datadog.dogstatsd.client.metrics_by_type` /
+`datadog.dogstatsd.client.aggregated_context_by_type` will show the aggregation ratio for each type. This is an
+interesting data to know how useful extended aggregation is to your app.
+
+This can be enabled with the `WithExtendedClientSideAggregation()` option.
+
+## Performance / Metric drops
+
+### Monitoring this client
+
+This client automatically injects telemetry about itself in the DogStatsD stream.
+Those metrics will not be counted as custom and will not be billed. This feature can be disabled using the `WithoutTelemetry` option.
+
+See [Telemetry documentation](https://docs.datadoghq.com/developers/dogstatsd/high_throughput/?code-lang=go#client-side-telemetry) to learn more about it.
+
+### Tweaking kernel options
+
+In very high throughput environments it is possible to improve performance by changing the values of some kernel options.
+
+#### Unix Domain Sockets
+
+- `sysctl -w net.unix.max_dgram_qlen=X` - Set datagram queue size to X (default value is usually 10).
+- `sysctl -w net.core.wmem_max=X` - Set the max size of the send buffer for all the host sockets.
+
+### Maximum packets size in high-throughput scenarios
+
+In order to have the most efficient use of this library in high-throughput scenarios,
+default values for the maximum packets size have already been set to have the best
+usage of the underlying network.
+However, if you perfectly know your network and you know that a different value for the maximum packets
+size should be used, you can set it with the option `WithMaxBytesPerPayload`. Example:
```go
-import "github.com/DataDog/datadog-go/statsd"
+package main
+
+import (
+ "log"
+ "github.com/DataDog/datadog-go/v5/statsd"
+)
func main() {
- c, err := statsd.New("127.0.0.1:8125")
+ client, err := statsd.New("127.0.0.1:8125", statsd.WithMaxBytesPerPayload(4096))
if err != nil {
log.Fatal(err)
}
- // prefix every metric with the app name
- c.Namespace = "flubber."
- // send the EC2 availability zone as a tag with every metric
- c.Tags = append(c.Tags, "region:us-east-1a")
- err = c.Gauge("request.duration", 1.2, nil, 1)
- // ...
}
```
+## Development
+
+Run the tests with:
+
+ $ go test
+
## License
-All code distributed under the [MIT License](http://opensource.org/licenses/MIT) unless otherwise specified.
+datadog-go is released under the [MIT license](http://www.opensource.org/licenses/mit-license.php).
+
+## Credits
+
+Original code by [ooyala](https://github.com/ooyala/go-dogstatsd).
diff --git a/debian/changelog b/debian/changelog
index d817d6e..2226e4c 100644
--- a/debian/changelog
+++ b/debian/changelog
@@ -1,3 +1,10 @@
+golang-github-datadog-datadog-go (5.1.1-1) UNRELEASED; urgency=low
+
+ * New upstream release.
+ * New upstream release.
+
+ -- Debian Janitor <janitor@jelmer.uk> Sat, 28 May 2022 03:34:40 -0000
+
golang-github-datadog-datadog-go (2.1.0-3) unstable; urgency=medium
[ Debian Janitor ]
diff --git a/example/simple_example.go b/example/simple_example.go
new file mode 100644
index 0000000..39e4607
--- /dev/null
+++ b/example/simple_example.go
@@ -0,0 +1,19 @@
+package main
+
+import (
+ "log"
+
+ "github.com/DataDog/datadog-go/v5/statsd"
+)
+
+func main() {
+ client, err := statsd.New("127.0.0.1:8125",
+ statsd.WithTags([]string{"env:prod", "service:myservice"}),
+ )
+ if err != nil {
+ log.Fatal(err)
+ }
+
+ client.Histogram("my.metrics", 21, []string{"tag1", "tag2:value"}, 1)
+ client.Close()
+}
diff --git a/go.mod b/go.mod
new file mode 100644
index 0000000..3f8a528
--- /dev/null
+++ b/go.mod
@@ -0,0 +1,9 @@
+module github.com/DataDog/datadog-go/v5
+
+go 1.13
+
+require (
+ github.com/Microsoft/go-winio v0.5.0
+ github.com/golang/mock v1.6.0
+ github.com/stretchr/testify v1.7.0
+)
diff --git a/go.sum b/go.sum
new file mode 100644
index 0000000..a9d41d1
--- /dev/null
+++ b/go.sum
@@ -0,0 +1,49 @@
+github.com/Microsoft/go-winio v0.5.0 h1:Elr9Wn+sGKPlkaBvwu4mTrxtmOp3F3yV9qhaHbXGjwU=
+github.com/Microsoft/go-winio v0.5.0/go.mod h1:JPGBdM1cNvN/6ISo+n8V5iA4v8pBzdOpzfwIujj1a84=
+github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
+github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
+github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
+github.com/golang/mock v1.6.0 h1:ErTB+efbowRARo13NNdxyJji2egdxLGQhRaY+DUumQc=
+github.com/golang/mock v1.6.0/go.mod h1:p6yTPP+5HYm5mzsMV8JkE6ZKdX+/wYM6Hr+LicevLPs=
+github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
+github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
+github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
+github.com/sirupsen/logrus v1.7.0/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0=
+github.com/stretchr/objx v0.1.0 h1:4G4v2dO3VZwixGIRoQ5Lfboy6nUhCyYzaqnIAPPhYs4=
+github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
+github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
+github.com/stretchr/testify v1.7.0 h1:nwc3DEeHmmLAfoZucVR881uASk0Mfjw8xYJ99tb5CcY=
+github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
+github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k=
+golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
+golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
+golang.org/x/mod v0.4.2 h1:Gz96sIWK3OalVv/I/qNygP42zyoKp3xptRVCWRFEBvo=
+golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
+golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
+golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
+golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM=
+golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
+golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20191026070338-33540a1f6037/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20210510120138-977fb7262007 h1:gG67DSER+11cZvqIMb8S8bt0vZtiN6xWYARwirrOSfE=
+golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
+golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
+golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
+golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
+golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
+golang.org/x/tools v0.1.1 h1:wGiQel/hW0NnEkJUk8lbzkX2gFJU6PFxf1v5OlCfuOs=
+golang.org/x/tools v0.1.1/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk=
+golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
+golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
+golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1 h1:go1bK/D/BFZV2I8cIQd1NKEZ+0owSTG1fDTci4IqFcE=
+golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
+gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM=
+gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
+gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c h1:dUUwHk2QECo/6vqA44rthZ8ie2QXMNeKRTHCNY2nXvo=
+gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
diff --git a/statsd/README.md b/statsd/README.md
index f68df54..2fc8996 100644
--- a/statsd/README.md
+++ b/statsd/README.md
@@ -2,63 +2,3 @@
Package `statsd` provides a Go [dogstatsd](http://docs.datadoghq.com/guides/dogstatsd/) client. Dogstatsd extends Statsd, adding tags
and histograms.
-
-## Get the code
-
- $ go get github.com/DataDog/datadog-go/statsd
-
-## Usage
-
-```go
-// Create the client
-c, err := statsd.New("127.0.0.1:8125")
-if err != nil {
- log.Fatal(err)
-}
-// Prefix every metric with the app name
-c.Namespace = "flubber."
-// Send the EC2 availability zone as a tag with every metric
-c.Tags = append(c.Tags, "us-east-1a")
-
-// Do some metrics!
-err = c.Gauge("request.queue_depth", 12, nil, 1)
-err = c.Timing("request.duration", duration, nil, 1) // Uses a time.Duration!
-err = c.TimeInMilliseconds("request", 12, nil, 1)
-err = c.Incr("request.count_total", nil, 1)
-err = c.Decr("request.count_total", nil, 1)
-err = c.Count("request.count_total", 2, nil, 1)
-```
-
-## Buffering Client
-
-DogStatsD accepts packets with multiple statsd payloads in them. Using the BufferingClient via `NewBufferingClient` will buffer up commands and send them when the buffer is reached or after 100msec.
-
-## Unix Domain Sockets Client
-
-DogStatsD version 6 accepts packets through a Unix Socket datagram connection. You can use this protocol by giving a
-`unix:///path/to/dsd.socket` addr argument to the `New` or `NewBufferingClient`.
-
-With this protocol, writes can become blocking if the server's receiving buffer is full. Our default behaviour is to
-timeout and drop the packet after 1 ms. You can set a custom timeout duration via the `SetWriteTimeout` method.
-
-The default mode is to pass write errors from the socket to the caller. This includes write errors the library will
-automatically recover from (DogStatsD server not ready yet or is restarting). You can drop these errors and emulate
-the UDP behaviour by setting the `SkipErrors` property to `true`. Please note that packets will be dropped in both modes.
-
-## Development
-
-Run the tests with:
-
- $ go test
-
-## Documentation
-
-Please see: http://godoc.org/github.com/DataDog/datadog-go/statsd
-
-## License
-
-go-dogstatsd is released under the [MIT license](http://www.opensource.org/licenses/mit-license.php).
-
-## Credits
-
-Original code by [ooyala](https://github.com/ooyala/go-dogstatsd).
diff --git a/statsd/aggregator.go b/statsd/aggregator.go
new file mode 100644
index 0000000..65c050e
--- /dev/null
+++ b/statsd/aggregator.go
@@ -0,0 +1,289 @@
+package statsd
+
+import (
+ "strings"
+ "sync"
+ "sync/atomic"
+ "time"
+)
+
+type (
+ countsMap map[string]*countMetric
+ gaugesMap map[string]*gaugeMetric
+ setsMap map[string]*setMetric
+ bufferedMetricMap map[string]*bufferedMetric
+)
+
+type aggregator struct {
+ nbContextGauge uint64
+ nbContextCount uint64
+ nbContextSet uint64
+
+ countsM sync.RWMutex
+ gaugesM sync.RWMutex
+ setsM sync.RWMutex
+
+ gauges gaugesMap
+ counts countsMap
+ sets setsMap
+ histograms bufferedMetricContexts
+ distributions bufferedMetricContexts
+ timings bufferedMetricContexts
+
+ closed chan struct{}
+
+ client *Client
+
+ // aggregator implements channelMode mechanism to receive histograms,
+ // distributions and timings. Since they need sampling they need to
+ // lock for random. When using both channelMode and ExtendedAggregation
+ // we don't want goroutine to fight over the lock.
+ inputMetrics chan metric
+ stopChannelMode chan struct{}
+ wg sync.WaitGroup
+}
+
+func newAggregator(c *Client) *aggregator {
+ return &aggregator{
+ client: c,
+ counts: countsMap{},
+ gauges: gaugesMap{},
+ sets: setsMap{},
+ histograms: newBufferedContexts(newHistogramMetric),
+ distributions: newBufferedContexts(newDistributionMetric),
+ timings: newBufferedContexts(newTimingMetric),
+ closed: make(chan struct{}),
+ stopChannelMode: make(chan struct{}),
+ }
+}
+
+func (a *aggregator) start(flushInterval time.Duration) {
+ ticker := time.NewTicker(flushInterval)
+
+ go func() {
+ for {
+ select {
+ case <-ticker.C:
+ a.flush()
+ case <-a.closed:
+ return
+ }
+ }
+ }()
+}
+
+func (a *aggregator) startReceivingMetric(bufferSize int, nbWorkers int) {
+ a.inputMetrics = make(chan metric, bufferSize)
+ for i := 0; i < nbWorkers; i++ {
+ a.wg.Add(1)
+ go a.pullMetric()
+ }
+}
+
+func (a *aggregator) stopReceivingMetric() {
+ close(a.stopChannelMode)
+ a.wg.Wait()
+}
+
+func (a *aggregator) stop() {
+ a.closed <- struct{}{}
+}
+
+func (a *aggregator) pullMetric() {
+ for {
+ select {
+ case m := <-a.inputMetrics:
+ switch m.metricType {
+ case histogram:
+ a.histogram(m.name, m.fvalue, m.tags, m.rate)
+ case distribution:
+ a.distribution(m.name, m.fvalue, m.tags, m.rate)
+ case timing:
+ a.timing(m.name, m.fvalue, m.tags, m.rate)
+ }
+ case <-a.stopChannelMode:
+ a.wg.Done()
+ return
+ }
+ }
+}
+
+func (a *aggregator) flush() {
+ for _, m := range a.flushMetrics() {
+ a.client.sendBlocking(m)
+ }
+}
+
+func (a *aggregator) flushTelemetryMetrics(t *Telemetry) {
+ if a == nil {
+ // aggregation is disabled
+ return
+ }
+
+ t.AggregationNbContextGauge = atomic.LoadUint64(&a.nbContextGauge)
+ t.AggregationNbContextCount = atomic.LoadUint64(&a.nbContextCount)
+ t.AggregationNbContextSet = atomic.LoadUint64(&a.nbContextSet)
+ t.AggregationNbContextHistogram = a.histograms.getNbContext()
+ t.AggregationNbContextDistribution = a.distributions.getNbContext()
+ t.AggregationNbContextTiming = a.timings.getNbContext()
+}
+
+func (a *aggregator) flushMetrics() []metric {
+ metrics := []metric{}
+
+ // We reset the values to avoid sending 'zero' values for metrics not
+ // sampled during this flush interval
+
+ a.setsM.Lock()
+ sets := a.sets
+ a.sets = setsMap{}
+ a.setsM.Unlock()
+
+ for _, s := range sets {
+ metrics = append(metrics, s.flushUnsafe()...)
+ }
+
+ a.gaugesM.Lock()
+ gauges := a.gauges
+ a.gauges = gaugesMap{}
+ a.gaugesM.Unlock()
+
+ for _, g := range gauges {
+ metrics = append(metrics, g.flushUnsafe())
+ }
+
+ a.countsM.Lock()
+ counts := a.counts
+ a.counts = countsMap{}
+ a.countsM.Unlock()
+
+ for _, c := range counts {
+ metrics = append(metrics, c.flushUnsafe())
+ }
+
+ metrics = a.histograms.flush(metrics)
+ metrics = a.distributions.flush(metrics)
+ metrics = a.timings.flush(metrics)
+
+ atomic.AddUint64(&a.nbContextCount, uint64(len(counts)))
+ atomic.AddUint64(&a.nbContextGauge, uint64(len(gauges)))
+ atomic.AddUint64(&a.nbContextSet, uint64(len(sets)))
+ return metrics
+}
+
+func getContext(name string, tags []string) string {
+ c, _ := getContextAndTags(name, tags)
+ return c
+}
+
+func getContextAndTags(name string, tags []string) (string, string) {
+ if len(tags) == 0 {
+ return name + nameSeparatorSymbol, ""
+ }
+ n := len(name) + len(nameSeparatorSymbol) + len(tagSeparatorSymbol)*(len(tags)-1)
+ for _, s := range tags {
+ n += len(s)
+ }
+
+ var sb strings.Builder
+ sb.Grow(n)
+ sb.WriteString(name)
+ sb.WriteString(nameSeparatorSymbol)
+ sb.WriteString(tags[0])
+ for _, s := range tags[1:] {
+ sb.WriteString(tagSeparatorSymbol)
+ sb.WriteString(s)
+ }
+
+ s := sb.String()
+
+ return s, s[len(name)+len(nameSeparatorSymbol):]
+}
+
+func (a *aggregator) count(name string, value int64, tags []string) error {
+ context := getContext(name, tags)
+ a.countsM.RLock()
+ if count, found := a.counts[context]; found {
+ count.sample(value)
+ a.countsM.RUnlock()
+ return nil
+ }
+ a.countsM.RUnlock()
+
+ a.countsM.Lock()
+ // Check if another goroutine hasn't created the value between the 'RUnlock' and 'Lock'
+ if count, found := a.counts[context]; found {
+ count.sample(value)
+ a.countsM.Unlock()
+ return nil
+ }
+
+ a.counts[context] = newCountMetric(name, value, tags)
+ a.countsM.Unlock()
+ return nil
+}
+
+func (a *aggregator) gauge(name string, value float64, tags []string) error {
+ context := getContext(name, tags)
+ a.gaugesM.RLock()
+ if gauge, found := a.gauges[context]; found {
+ gauge.sample(value)
+ a.gaugesM.RUnlock()
+ return nil
+ }
+ a.gaugesM.RUnlock()
+
+ gauge := newGaugeMetric(name, value, tags)
+
+ a.gaugesM.Lock()
+ // Check if another goroutine hasn't created the value between the 'RUnlock' and 'Lock'
+ if gauge, found := a.gauges[context]; found {
+ gauge.sample(value)
+ a.gaugesM.Unlock()
+ return nil
+ }
+ a.gauges[context] = gauge
+ a.gaugesM.Unlock()
+ return nil
+}
+
+func (a *aggregator) set(name string, value string, tags []string) error {
+ context := getContext(name, tags)
+ a.setsM.RLock()
+ if set, found := a.sets[context]; found {
+ set.sample(value)
+ a.setsM.RUnlock()
+ return nil
+ }
+ a.setsM.RUnlock()
+
+ a.setsM.Lock()
+ // Check if another goroutine hasn't created the value between the 'RUnlock' and 'Lock'
+ if set, found := a.sets[context]; found {
+ set.sample(value)
+ a.setsM.Unlock()
+ return nil
+ }
+ a.sets[context] = newSetMetric(name, value, tags)
+ a.setsM.Unlock()
+ return nil
+}
+
+// Only histograms, distributions and timings are sampled with a rate since we
+// only pack them in one message instead of aggregating them. Discarding the
+// sample rate will have impacts on the CPU and memory usage of the Agent.
+
+// type alias for Client.sendToAggregator
+type bufferedMetricSampleFunc func(name string, value float64, tags []string, rate float64) error
+
+func (a *aggregator) histogram(name string, value float64, tags []string, rate float64) error {
+ return a.histograms.sample(name, value, tags, rate)
+}
+
+func (a *aggregator) distribution(name string, value float64, tags []string, rate float64) error {
+ return a.distributions.sample(name, value, tags, rate)
+}
+
+func (a *aggregator) timing(name string, value float64, tags []string, rate float64) error {
+ return a.timings.sample(name, value, tags, rate)
+}
diff --git a/statsd/aggregator_test.go b/statsd/aggregator_test.go
new file mode 100644
index 0000000..c2e92c4
--- /dev/null
+++ b/statsd/aggregator_test.go
@@ -0,0 +1,284 @@
+package statsd
+
+import (
+ "sort"
+ "strings"
+ "sync"
+ "testing"
+
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
+)
+
+func TestAggregatorSample(t *testing.T) {
+ a := newAggregator(nil)
+
+ tags := []string{"tag1", "tag2"}
+
+ for i := 0; i < 2; i++ {
+ a.gauge("gaugeTest", 21, tags)
+ assert.Len(t, a.gauges, 1)
+ assert.Contains(t, a.gauges, "gaugeTest:tag1,tag2")
+
+ a.count("countTest", 21, tags)
+ assert.Len(t, a.counts, 1)
+ assert.Contains(t, a.counts, "countTest:tag1,tag2")
+
+ a.set("setTest", "value1", tags)
+ assert.Len(t, a.sets, 1)
+ assert.Contains(t, a.sets, "setTest:tag1,tag2")
+
+ a.set("setTest", "value1", tags)
+ assert.Len(t, a.sets, 1)
+ assert.Contains(t, a.sets, "setTest:tag1,tag2")
+
+ a.histogram("histogramTest", 21, tags, 1)
+ assert.Len(t, a.histograms.values, 1)
+ assert.Contains(t, a.histograms.values, "histogramTest:tag1,tag2")
+
+ a.distribution("distributionTest", 21, tags, 1)
+ assert.Len(t, a.distributions.values, 1)
+ assert.Contains(t, a.distributions.values, "distributionTest:tag1,tag2")
+
+ a.timing("timingTest", 21, tags, 1)
+ assert.Len(t, a.timings.values, 1)
+ assert.Contains(t, a.timings.values, "timingTest:tag1,tag2")
+ }
+}
+
+func TestAggregatorFlush(t *testing.T) {
+ a := newAggregator(nil)
+
+ tags := []string{"tag1", "tag2"}
+
+ a.gauge("gaugeTest1", 21, tags)
+ a.gauge("gaugeTest1", 10, tags)
+ a.gauge("gaugeTest2", 15, tags)
+
+ a.count("countTest1", 21, tags)
+ a.count("countTest1", 10, tags)
+ a.count("countTest2", 1, tags)
+
+ a.set("setTest1", "value1", tags)
+ a.set("setTest1", "value1", tags)
+ a.set("setTest1", "value2", tags)
+ a.set("setTest2", "value1", tags)
+
+ a.histogram("histogramTest1", 21, tags, 1)
+ a.histogram("histogramTest1", 22, tags, 1)
+ a.histogram("histogramTest2", 23, tags, 1)
+
+ a.distribution("distributionTest1", 21, tags, 1)
+ a.distribution("distributionTest1", 22, tags, 1)
+ a.distribution("distributionTest2", 23, tags, 1)
+
+ a.timing("timingTest1", 21, tags, 1)
+ a.timing("timingTest1", 22, tags, 1)
+ a.timing("timingTest2", 23, tags, 1)
+
+ metrics := a.flushMetrics()
+
+ assert.Len(t, a.gauges, 0)
+ assert.Len(t, a.counts, 0)
+ assert.Len(t, a.sets, 0)
+ assert.Len(t, a.histograms.values, 0)
+ assert.Len(t, a.distributions.values, 0)
+ assert.Len(t, a.timings.values, 0)
+
+ assert.Len(t, metrics, 13)
+
+ sort.Slice(metrics, func(i, j int) bool {
+ if metrics[i].metricType == metrics[j].metricType {
+ res := strings.Compare(metrics[i].name, metrics[j].name)
+ // this happens for sets
+ if res == 0 {
+ return strings.Compare(metrics[i].svalue, metrics[j].svalue) != 1
+ }
+ return res != 1
+ }
+ return metrics[i].metricType < metrics[j].metricType
+ })
+
+ assert.Equal(t, []metric{
+ metric{
+ metricType: gauge,
+ name: "gaugeTest1",
+ tags: tags,
+ rate: 1,
+ fvalue: float64(10),
+ },
+ metric{
+ metricType: gauge,
+ name: "gaugeTest2",
+ tags: tags,
+ rate: 1,
+ fvalue: float64(15),
+ },
+ metric{
+ metricType: count,
+ name: "countTest1",
+ tags: tags,
+ rate: 1,
+ ivalue: int64(31),
+ },
+ metric{
+ metricType: count,
+ name: "countTest2",
+ tags: tags,
+ rate: 1,
+ ivalue: int64(1),
+ },
+ metric{
+ metricType: histogramAggregated,
+ name: "histogramTest1",
+ stags: strings.Join(tags, tagSeparatorSymbol),
+ rate: 1,
+ fvalues: []float64{21.0, 22.0},
+ },
+ metric{
+ metricType: histogramAggregated,
+ name: "histogramTest2",
+ stags: strings.Join(tags, tagSeparatorSymbol),
+ rate: 1,
+ fvalues: []float64{23.0},
+ },
+ metric{
+ metricType: distributionAggregated,
+ name: "distributionTest1",
+ stags: strings.Join(tags, tagSeparatorSymbol),
+ rate: 1,
+ fvalues: []float64{21.0, 22.0},
+ },
+ metric{
+ metricType: distributionAggregated,
+ name: "distributionTest2",
+ stags: strings.Join(tags, tagSeparatorSymbol),
+ rate: 1,
+ fvalues: []float64{23.0},
+ },
+ metric{
+ metricType: set,
+ name: "setTest1",
+ tags: tags,
+ rate: 1,
+ svalue: "value1",
+ },
+ metric{
+ metricType: set,
+ name: "setTest1",
+ tags: tags,
+ rate: 1,
+ svalue: "value2",
+ },
+ metric{
+ metricType: set,
+ name: "setTest2",
+ tags: tags,
+ rate: 1,
+ svalue: "value1",
+ },
+ metric{
+ metricType: timingAggregated,
+ name: "timingTest1",
+ stags: strings.Join(tags, tagSeparatorSymbol),
+ rate: 1,
+ fvalues: []float64{21.0, 22.0},
+ },
+ metric{
+ metricType: timingAggregated,
+ name: "timingTest2",
+ stags: strings.Join(tags, tagSeparatorSymbol),
+ rate: 1,
+ fvalues: []float64{23.0},
+ },
+ },
+ metrics)
+}
+
+func TestAggregatorFlushConcurrency(t *testing.T) {
+ a := newAggregator(nil)
+
+ var wg sync.WaitGroup
+ wg.Add(10)
+
+ tags := []string{"tag1", "tag2"}
+
+ for i := 0; i < 5; i++ {
+ go func() {
+ defer wg.Done()
+
+ a.gauge("gaugeTest1", 21, tags)
+ a.count("countTest1", 21, tags)
+ a.set("setTest1", "value1", tags)
+ a.histogram("histogramTest1", 21, tags, 1)
+ a.distribution("distributionTest1", 21, tags, 1)
+ a.timing("timingTest1", 21, tags, 1)
+ }()
+ }
+
+ for i := 0; i < 5; i++ {
+ go func() {
+ defer wg.Done()
+
+ a.flushMetrics()
+ }()
+ }
+
+ wg.Wait()
+}
+
+func TestAggregatorTagsCopy(t *testing.T) {
+ a := newAggregator(nil)
+ tags := []string{"tag1", "tag2"}
+
+ a.gauge("gauge", 21, tags)
+ a.count("count", 21, tags)
+ a.set("set", "test", tags)
+
+ tags[0] = "new_tags"
+
+ metrics := a.flushMetrics()
+ require.Len(t, metrics, 3)
+ for _, m := range metrics {
+ assert.Equal(t, []string{"tag1", "tag2"}, m.tags)
+ }
+}
+
+func TestGetContextAndTags(t *testing.T) {
+ tests := []struct {
+ testName string
+ name string
+ tags []string
+ wantContext string
+ wantTags string
+ }{
+ {
+ testName: "no tags",
+ name: "name",
+ tags: nil,
+ wantContext: "name:",
+ wantTags: "",
+ },
+ {
+ testName: "one tag",
+ name: "name",
+ tags: []string{"tag1"},
+ wantContext: "name:tag1",
+ wantTags: "tag1",
+ },
+ {
+ testName: "two tags",
+ name: "name",
+ tags: []string{"tag1", "tag2"},
+ wantContext: "name:tag1,tag2",
+ wantTags: "tag1,tag2",
+ },
+ }
+ for _, test := range tests {
+ t.Run(test.testName, func(t *testing.T) {
+ gotContext, gotTags := getContextAndTags(test.name, test.tags)
+ assert.Equal(t, test.wantContext, gotContext)
+ assert.Equal(t, test.wantTags, gotTags)
+ })
+ }
+}
diff --git a/statsd/benchmark_report_metric_noop_test.go b/statsd/benchmark_report_metric_noop_test.go
new file mode 100644
index 0000000..8e1cabd
--- /dev/null
+++ b/statsd/benchmark_report_metric_noop_test.go
@@ -0,0 +1,7 @@
+// +build !go1.13
+
+package statsd_test
+
+import "testing"
+
+func reportMetric(*testing.B, float64, string) {}
diff --git a/statsd/benchmark_report_metric_test.go b/statsd/benchmark_report_metric_test.go
new file mode 100644
index 0000000..dcf0f52
--- /dev/null
+++ b/statsd/benchmark_report_metric_test.go
@@ -0,0 +1,9 @@
+// +build go1.13
+
+package statsd_test
+
+import "testing"
+
+func reportMetric(b *testing.B, value float64, unit string) {
+ b.ReportMetric(value, unit)
+}
diff --git a/statsd/buffer.go b/statsd/buffer.go
new file mode 100644
index 0000000..0e4ea2b
--- /dev/null
+++ b/statsd/buffer.go
@@ -0,0 +1,195 @@
+package statsd
+
+import (
+ "strconv"
+)
+
+// MessageTooLongError is an error returned when a sample, event or service check is too large once serialized. See
+// WithMaxBytesPerPayload option for more details.
+type MessageTooLongError struct{}
+
+func (e MessageTooLongError) Error() string {
+ return "message too long. See 'WithMaxBytesPerPayload' documentation."
+}
+
+var errBufferFull = MessageTooLongError{}
+
+type partialWriteError string
+
+func (e partialWriteError) Error() string { return string(e) }
+
+const errPartialWrite = partialWriteError("value partially written")
+
+const metricOverhead = 512
+
+// statsdBuffer is a buffer containing statsd messages
+// this struct's methods are NOT safe for concurrent use
+type statsdBuffer struct {
+ buffer []byte
+ maxSize int
+ maxElements int
+ elementCount int
+}
+
+func newStatsdBuffer(maxSize, maxElements int) *statsdBuffer {
+ return &statsdBuffer{
+ buffer: make([]byte, 0, maxSize+metricOverhead), // pre-allocate the needed size + metricOverhead to avoid having Go re-allocate on its own if an element does not fit
+ maxSize: maxSize,
+ maxElements: maxElements,
+ }
+}
+
+func (b *statsdBuffer) writeGauge(namespace string, globalTags []string, name string, value float64, tags []string, rate float64) error {
+ if b.elementCount >= b.maxElements {
+ return errBufferFull
+ }
+ originalBuffer := b.buffer
+ b.buffer = appendGauge(b.buffer, namespace, globalTags, name, value, tags, rate)
+ b.writeSeparator()
+ return b.validateNewElement(originalBuffer)
+}
+
+func (b *statsdBuffer) writeCount(namespace string, globalTags []string, name string, value int64, tags []string, rate float64) error {
+ if b.elementCount >= b.maxElements {
+ return errBufferFull
+ }
+ originalBuffer := b.buffer
+ b.buffer = appendCount(b.buffer, namespace, globalTags, name, value, tags, rate)
+ b.writeSeparator()
+ return b.validateNewElement(originalBuffer)
+}
+
+func (b *statsdBuffer) writeHistogram(namespace string, globalTags []string, name string, value float64, tags []string, rate float64) error {
+ if b.elementCount >= b.maxElements {
+ return errBufferFull
+ }
+ originalBuffer := b.buffer
+ b.buffer = appendHistogram(b.buffer, namespace, globalTags, name, value, tags, rate)
+ b.writeSeparator()
+ return b.validateNewElement(originalBuffer)
+}
+
+// writeAggregated serialized as many values as possible in the current buffer and return the position in values where it stopped.
+func (b *statsdBuffer) writeAggregated(metricSymbol []byte, namespace string, globalTags []string, name string, values []float64, tags string, tagSize int, precision int) (int, error) {
+ if b.elementCount >= b.maxElements {
+ return 0, errBufferFull
+ }
+
+ originalBuffer := b.buffer
+ b.buffer = appendHeader(b.buffer, namespace, name)
+
+ // buffer already full
+ if len(b.buffer)+tagSize > b.maxSize {
+ b.buffer = originalBuffer
+ return 0, errBufferFull
+ }
+
+ // We add as many value as possible
+ var position int
+ for idx, v := range values {
+ previousBuffer := b.buffer
+ if idx != 0 {
+ b.buffer = append(b.buffer, ':')
+ }
+
+ b.buffer = strconv.AppendFloat(b.buffer, v, 'f', precision, 64)
+
+ // Should we stop serializing and switch to another buffer
+ if len(b.buffer)+tagSize > b.maxSize {
+ b.buffer = previousBuffer
+ break
+ }
+ position = idx + 1
+ }
+
+ // we could not add a single value
+ if position == 0 {
+ b.buffer = originalBuffer
+ return 0, errBufferFull
+ }
+
+ b.buffer = append(b.buffer, '|')
+ b.buffer = append(b.buffer, metricSymbol...)
+ b.buffer = appendTagsAggregated(b.buffer, globalTags, tags)
+ b.buffer = appendContainerID(b.buffer)
+ b.writeSeparator()
+ b.elementCount++
+
+ if position != len(values) {
+ return position, errPartialWrite
+ }
+ return position, nil
+
+}
+
+func (b *statsdBuffer) writeDistribution(namespace string, globalTags []string, name string, value float64, tags []string, rate float64) error {
+ if b.elementCount >= b.maxElements {
+ return errBufferFull
+ }
+ originalBuffer := b.buffer
+ b.buffer = appendDistribution(b.buffer, namespace, globalTags, name, value, tags, rate)
+ b.writeSeparator()
+ return b.validateNewElement(originalBuffer)
+}
+
+func (b *statsdBuffer) writeSet(namespace string, globalTags []string, name string, value string, tags []string, rate float64) error {
+ if b.elementCount >= b.maxElements {
+ return errBufferFull
+ }
+ originalBuffer := b.buffer
+ b.buffer = appendSet(b.buffer, namespace, globalTags, name, value, tags, rate)
+ b.writeSeparator()
+ return b.validateNewElement(originalBuffer)
+}
+
+func (b *statsdBuffer) writeTiming(namespace string, globalTags []string, name string, value float64, tags []string, rate float64) error {
+ if b.elementCount >= b.maxElements {
+ return errBufferFull
+ }
+ originalBuffer := b.buffer
+ b.buffer = appendTiming(b.buffer, namespace, globalTags, name, value, tags, rate)
+ b.writeSeparator()
+ return b.validateNewElement(originalBuffer)
+}
+
+func (b *statsdBuffer) writeEvent(event *Event, globalTags []string) error {
+ if b.elementCount >= b.maxElements {
+ return errBufferFull
+ }
+ originalBuffer := b.buffer
+ b.buffer = appendEvent(b.buffer, event, globalTags)
+ b.writeSeparator()
+ return b.validateNewElement(originalBuffer)
+}
+
+func (b *statsdBuffer) writeServiceCheck(serviceCheck *ServiceCheck, globalTags []string) error {
+ if b.elementCount >= b.maxElements {
+ return errBufferFull
+ }
+ originalBuffer := b.buffer
+ b.buffer = appendServiceCheck(b.buffer, serviceCheck, globalTags)
+ b.writeSeparator()
+ return b.validateNewElement(originalBuffer)
+}
+
+func (b *statsdBuffer) validateNewElement(originalBuffer []byte) error {
+ if len(b.buffer) > b.maxSize {
+ b.buffer = originalBuffer
+ return errBufferFull
+ }
+ b.elementCount++
+ return nil
+}
+
+func (b *statsdBuffer) writeSeparator() {
+ b.buffer = append(b.buffer, '\n')
+}
+
+func (b *statsdBuffer) reset() {
+ b.buffer = b.buffer[:0]
+ b.elementCount = 0
+}
+
+func (b *statsdBuffer) bytes() []byte {
+ return b.buffer
+}
diff --git a/statsd/buffer_pool.go b/statsd/buffer_pool.go
new file mode 100644
index 0000000..7a3e3c9
--- /dev/null
+++ b/statsd/buffer_pool.go
@@ -0,0 +1,40 @@
+package statsd
+
+type bufferPool struct {
+ pool chan *statsdBuffer
+ bufferMaxSize int
+ bufferMaxElements int
+}
+
+func newBufferPool(poolSize, bufferMaxSize, bufferMaxElements int) *bufferPool {
+ p := &bufferPool{
+ pool: make(chan *statsdBuffer, poolSize),
+ bufferMaxSize: bufferMaxSize,
+ bufferMaxElements: bufferMaxElements,
+ }
+ for i := 0; i < poolSize; i++ {
+ p.addNewBuffer()
+ }
+ return p
+}
+
+func (p *bufferPool) addNewBuffer() {
+ p.pool <- newStatsdBuffer(p.bufferMaxSize, p.bufferMaxElements)
+}
+
+func (p *bufferPool) borrowBuffer() *statsdBuffer {
+ select {
+ case b := <-p.pool:
+ return b
+ default:
+ return newStatsdBuffer(p.bufferMaxSize, p.bufferMaxElements)
+ }
+}
+
+func (p *bufferPool) returnBuffer(buffer *statsdBuffer) {
+ buffer.reset()
+ select {
+ case p.pool <- buffer:
+ default:
+ }
+}
diff --git a/statsd/buffer_pool_test.go b/statsd/buffer_pool_test.go
new file mode 100644
index 0000000..f150af7
--- /dev/null
+++ b/statsd/buffer_pool_test.go
@@ -0,0 +1,43 @@
+package statsd
+
+import (
+ "testing"
+
+ "github.com/stretchr/testify/assert"
+)
+
+func TestBufferPoolSize(t *testing.T) {
+ bufferPool := newBufferPool(10, 1024, 20)
+
+ assert.Equal(t, 10, cap(bufferPool.pool))
+ assert.Equal(t, 10, len(bufferPool.pool))
+}
+
+func TestBufferPoolBufferCreation(t *testing.T) {
+ bufferPool := newBufferPool(10, 1024, 20)
+ buffer := bufferPool.borrowBuffer()
+
+ assert.Equal(t, 1024, buffer.maxSize)
+ assert.Equal(t, 20, buffer.maxElements)
+}
+
+func TestBufferPoolEmpty(t *testing.T) {
+ bufferPool := newBufferPool(1, 1024, 20)
+ bufferPool.borrowBuffer()
+
+ assert.Equal(t, 0, len(bufferPool.pool))
+ buffer := bufferPool.borrowBuffer()
+ assert.NotNil(t, buffer.bytes())
+}
+
+func TestBufferReturn(t *testing.T) {
+ bufferPool := newBufferPool(1, 1024, 20)
+ buffer := bufferPool.borrowBuffer()
+ buffer.writeCount("", nil, "", 1, nil, 1)
+
+ assert.Equal(t, 0, len(bufferPool.pool))
+ bufferPool.returnBuffer(buffer)
+ assert.Equal(t, 1, len(bufferPool.pool))
+ buffer = bufferPool.borrowBuffer()
+ assert.Equal(t, 0, len(buffer.bytes()))
+}
diff --git a/statsd/buffer_test.go b/statsd/buffer_test.go
new file mode 100644
index 0000000..9096407
--- /dev/null
+++ b/statsd/buffer_test.go
@@ -0,0 +1,252 @@
+package statsd
+
+import (
+ "testing"
+
+ "github.com/stretchr/testify/assert"
+)
+
+func TestBufferGauge(t *testing.T) {
+ buffer := newStatsdBuffer(1024, 1)
+ err := buffer.writeGauge("namespace.", []string{"tag:tag"}, "metric", 1, []string{}, 1)
+ assert.Nil(t, err)
+ assert.Equal(t, "namespace.metric:1|g|#tag:tag\n", string(buffer.bytes()))
+
+ // with a container ID field
+ patchContainerID("container-id")
+ defer resetContainerID()
+
+ buffer = newStatsdBuffer(1024, 1)
+ err = buffer.writeGauge("namespace.", []string{"tag:tag"}, "metric", 1, []string{}, 1)
+ assert.Nil(t, err)
+ assert.Equal(t, "namespace.metric:1|g|#tag:tag|c:container-id\n", string(buffer.bytes()))
+}
+
+func TestBufferCount(t *testing.T) {
+ buffer := newStatsdBuffer(1024, 1)
+ err := buffer.writeCount("namespace.", []string{"tag:tag"}, "metric", 1, []string{}, 1)
+ assert.Nil(t, err)
+ assert.Equal(t, "namespace.metric:1|c|#tag:tag\n", string(buffer.bytes()))
+
+ // with a container ID field
+ patchContainerID("container-id")
+ defer resetContainerID()
+
+ buffer = newStatsdBuffer(1024, 1)
+ err = buffer.writeCount("namespace.", []string{"tag:tag"}, "metric", 1, []string{}, 1)
+ assert.Nil(t, err)
+ assert.Equal(t, "namespace.metric:1|c|#tag:tag|c:container-id\n", string(buffer.bytes()))
+}
+
+func TestBufferHistogram(t *testing.T) {
+ buffer := newStatsdBuffer(1024, 1)
+ err := buffer.writeHistogram("namespace.", []string{"tag:tag"}, "metric", 1, []string{}, 1)
+ assert.Nil(t, err)
+ assert.Equal(t, "namespace.metric:1|h|#tag:tag\n", string(buffer.bytes()))
+
+ // with a container ID field
+ patchContainerID("container-id")
+ defer resetContainerID()
+
+ buffer = newStatsdBuffer(1024, 1)
+ err = buffer.writeHistogram("namespace.", []string{"tag:tag"}, "metric", 1, []string{}, 1)
+ assert.Nil(t, err)
+ assert.Equal(t, "namespace.metric:1|h|#tag:tag|c:container-id\n", string(buffer.bytes()))
+}
+
+func TestBufferDistribution(t *testing.T) {
+ buffer := newStatsdBuffer(1024, 1)
+ err := buffer.writeDistribution("namespace.", []string{"tag:tag"}, "metric", 1, []string{}, 1)
+ assert.Nil(t, err)
+ assert.Equal(t, "namespace.metric:1|d|#tag:tag\n", string(buffer.bytes()))
+
+ // with a container ID field
+ patchContainerID("container-id")
+ defer resetContainerID()
+
+ buffer = newStatsdBuffer(1024, 1)
+ err = buffer.writeDistribution("namespace.", []string{"tag:tag"}, "metric", 1, []string{}, 1)
+ assert.Nil(t, err)
+ assert.Equal(t, "namespace.metric:1|d|#tag:tag|c:container-id\n", string(buffer.bytes()))
+}
+func TestBufferSet(t *testing.T) {
+ buffer := newStatsdBuffer(1024, 1)
+ err := buffer.writeSet("namespace.", []string{"tag:tag"}, "metric", "value", []string{}, 1)
+ assert.Nil(t, err)
+ assert.Equal(t, "namespace.metric:value|s|#tag:tag\n", string(buffer.bytes()))
+
+ // with a container ID field
+ patchContainerID("container-id")
+ defer resetContainerID()
+
+ buffer = newStatsdBuffer(1024, 1)
+ err = buffer.writeSet("namespace.", []string{"tag:tag"}, "metric", "value", []string{}, 1)
+ assert.Nil(t, err)
+ assert.Equal(t, "namespace.metric:value|s|#tag:tag|c:container-id\n", string(buffer.bytes()))
+}
+
+func TestBufferTiming(t *testing.T) {
+ buffer := newStatsdBuffer(1024, 1)
+ err := buffer.writeTiming("namespace.", []string{"tag:tag"}, "metric", 1, []string{}, 1)
+ assert.Nil(t, err)
+ assert.Equal(t, "namespace.metric:1.000000|ms|#tag:tag\n", string(buffer.bytes()))
+
+ // with a container ID field
+ patchContainerID("container-id")
+ defer resetContainerID()
+
+ buffer = newStatsdBuffer(1024, 1)
+ err = buffer.writeTiming("namespace.", []string{"tag:tag"}, "metric", 1, []string{}, 1)
+ assert.Nil(t, err)
+ assert.Equal(t, "namespace.metric:1.000000|ms|#tag:tag|c:container-id\n", string(buffer.bytes()))
+}
+
+func TestBufferEvent(t *testing.T) {
+ buffer := newStatsdBuffer(1024, 1)
+ err := buffer.writeEvent(&Event{Title: "title", Text: "text"}, []string{"tag:tag"})
+ assert.Nil(t, err)
+ assert.Equal(t, "_e{5,4}:title|text|#tag:tag\n", string(buffer.bytes()))
+
+ // with a container ID field
+ patchContainerID("container-id")
+ defer resetContainerID()
+
+ buffer = newStatsdBuffer(1024, 1)
+ err = buffer.writeEvent(&Event{Title: "title", Text: "text"}, []string{"tag:tag"})
+ assert.Nil(t, err)
+ assert.Equal(t, "_e{5,4}:title|text|#tag:tag|c:container-id\n", string(buffer.bytes()))
+}
+
+func TestBufferServiceCheck(t *testing.T) {
+ buffer := newStatsdBuffer(1024, 1)
+ err := buffer.writeServiceCheck(&ServiceCheck{Name: "name", Status: Ok}, []string{"tag:tag"})
+ assert.Nil(t, err)
+ assert.Equal(t, "_sc|name|0|#tag:tag\n", string(buffer.bytes()))
+
+ // with a container ID field
+ patchContainerID("container-id")
+ defer resetContainerID()
+
+ buffer = newStatsdBuffer(1024, 1)
+ err = buffer.writeServiceCheck(&ServiceCheck{Name: "name", Status: Ok}, []string{"tag:tag"})
+ assert.Nil(t, err)
+ assert.Equal(t, "_sc|name|0|#tag:tag|c:container-id\n", string(buffer.bytes()))
+}
+
+func TestBufferFullSize(t *testing.T) {
+ buffer := newStatsdBuffer(30, 10)
+ err := buffer.writeGauge("namespace.", []string{"tag:tag"}, "metric", 1, []string{}, 1)
+ assert.Nil(t, err)
+ assert.Len(t, buffer.bytes(), 30)
+ err = buffer.writeGauge("namespace.", []string{"tag:tag"}, "metric", 1, []string{}, 1)
+ assert.Equal(t, errBufferFull, err)
+}
+
+func TestBufferSeparator(t *testing.T) {
+ buffer := newStatsdBuffer(1024, 10)
+ err := buffer.writeGauge("namespace.", []string{"tag:tag"}, "metric", 1, []string{}, 1)
+ assert.Nil(t, err)
+ err = buffer.writeGauge("namespace.", []string{"tag:tag"}, "metric", 1, []string{}, 1)
+ assert.Nil(t, err)
+ assert.Equal(t, "namespace.metric:1|g|#tag:tag\nnamespace.metric:1|g|#tag:tag\n", string(buffer.bytes()))
+}
+
+func TestBufferAggregated(t *testing.T) {
+ buffer := newStatsdBuffer(1024, 1)
+ pos, err := buffer.writeAggregated([]byte("h"), "namespace.", []string{"tag:tag"}, "metric", []float64{1}, "", 12, -1)
+ assert.Nil(t, err)
+ assert.Equal(t, 1, pos)
+ assert.Equal(t, "namespace.metric:1|h|#tag:tag\n", string(buffer.bytes()))
+
+ buffer = newStatsdBuffer(1024, 1)
+ pos, err = buffer.writeAggregated([]byte("h"), "namespace.", []string{"tag:tag"}, "metric", []float64{1, 2, 3, 4}, "", 12, -1)
+ assert.Nil(t, err)
+ assert.Equal(t, 4, pos)
+ assert.Equal(t, "namespace.metric:1:2:3:4|h|#tag:tag\n", string(buffer.bytes()))
+
+ // max element already used
+ buffer = newStatsdBuffer(1024, 1)
+ buffer.elementCount = 1
+ pos, err = buffer.writeAggregated([]byte("h"), "namespace.", []string{"tag:tag"}, "metric", []float64{1, 2, 3, 4}, "", 12, -1)
+ assert.Equal(t, errBufferFull, err)
+
+ // not enough size to start serializing (tags and header too big)
+ buffer = newStatsdBuffer(4, 1)
+ pos, err = buffer.writeAggregated([]byte("h"), "namespace.", []string{"tag:tag"}, "metric", []float64{1, 2, 3, 4}, "", 12, -1)
+ assert.Equal(t, errBufferFull, err)
+
+ // not enough size to serialize one message
+ buffer = newStatsdBuffer(29, 1)
+ pos, err = buffer.writeAggregated([]byte("h"), "namespace.", []string{"tag:tag"}, "metric", []float64{1, 2, 3, 4}, "", 12, -1)
+ assert.Equal(t, errBufferFull, err)
+
+ // space for only 1 number
+ buffer = newStatsdBuffer(30, 1)
+ pos, err = buffer.writeAggregated([]byte("h"), "namespace.", []string{"tag:tag"}, "metric", []float64{1, 2, 3, 4}, "", 12, -1)
+ assert.Equal(t, errPartialWrite, err)
+ assert.Equal(t, 1, pos)
+ assert.Equal(t, "namespace.metric:1|h|#tag:tag\n", string(buffer.bytes()))
+
+ // first value too big
+ buffer = newStatsdBuffer(30, 1)
+ pos, err = buffer.writeAggregated([]byte("h"), "namespace.", []string{"tag:tag"}, "metric", []float64{12, 2, 3, 4}, "", 12, -1)
+ assert.Equal(t, errBufferFull, err)
+ assert.Equal(t, 0, pos)
+ assert.Equal(t, "", string(buffer.bytes())) // checking that the buffer was reset
+
+ // not enough space left
+ buffer = newStatsdBuffer(40, 1)
+ buffer.buffer = append(buffer.buffer, []byte("abcdefghij")...)
+ pos, err = buffer.writeAggregated([]byte("h"), "namespace.", []string{"tag:tag"}, "metric", []float64{12, 2, 3, 4}, "", 12, -1)
+ assert.Equal(t, errBufferFull, err)
+ assert.Equal(t, 0, pos)
+ assert.Equal(t, "abcdefghij", string(buffer.bytes())) // checking that the buffer was reset
+
+ // space for only 2 numbers
+ buffer = newStatsdBuffer(32, 1)
+ pos, err = buffer.writeAggregated([]byte("h"), "namespace.", []string{"tag:tag"}, "metric", []float64{1, 2, 3, 4}, "", 12, -1)
+ assert.Equal(t, errPartialWrite, err)
+ assert.Equal(t, 2, pos)
+ assert.Equal(t, "namespace.metric:1:2|h|#tag:tag\n", string(buffer.bytes()))
+
+ // with a container ID field
+ patchContainerID("container-id")
+ defer resetContainerID()
+
+ buffer = newStatsdBuffer(1024, 1)
+ pos, err = buffer.writeAggregated([]byte("h"), "namespace.", []string{"tag:tag"}, "metric", []float64{1}, "", 12, -1)
+ assert.Nil(t, err)
+ assert.Equal(t, 1, pos)
+ assert.Equal(t, "namespace.metric:1|h|#tag:tag|c:container-id\n", string(buffer.bytes()))
+}
+
+func TestBufferMaxElement(t *testing.T) {
+ buffer := newStatsdBuffer(1024, 1)
+
+ err := buffer.writeGauge("namespace.", []string{"tag:tag"}, "metric", 1, []string{}, 1)
+ assert.Nil(t, err)
+
+ err = buffer.writeGauge("namespace.", []string{"tag:tag"}, "metric", 1, []string{}, 1)
+ assert.Equal(t, errBufferFull, err)
+
+ err = buffer.writeCount("namespace.", []string{"tag:tag"}, "metric", 1, []string{}, 1)
+ assert.Equal(t, errBufferFull, err)
+
+ err = buffer.writeHistogram("namespace.", []string{"tag:tag"}, "metric", 1, []string{}, 1)
+ assert.Equal(t, errBufferFull, err)
+
+ err = buffer.writeDistribution("namespace.", []string{"tag:tag"}, "metric", 1, []string{}, 1)
+ assert.Equal(t, errBufferFull, err)
+
+ err = buffer.writeSet("namespace.", []string{"tag:tag"}, "metric", "value", []string{}, 1)
+ assert.Equal(t, errBufferFull, err)
+
+ err = buffer.writeTiming("namespace.", []string{"tag:tag"}, "metric", 1, []string{}, 1)
+ assert.Equal(t, errBufferFull, err)
+
+ err = buffer.writeEvent(&Event{Title: "title", Text: "text"}, []string{"tag:tag"})
+ assert.Equal(t, errBufferFull, err)
+
+ err = buffer.writeServiceCheck(&ServiceCheck{Name: "name", Status: Ok}, []string{"tag:tag"})
+ assert.Equal(t, errBufferFull, err)
+}
diff --git a/statsd/buffered_metric_context.go b/statsd/buffered_metric_context.go
new file mode 100644
index 0000000..41404d9
--- /dev/null
+++ b/statsd/buffered_metric_context.go
@@ -0,0 +1,82 @@
+package statsd
+
+import (
+ "math/rand"
+ "sync"
+ "sync/atomic"
+ "time"
+)
+
+// bufferedMetricContexts represent the contexts for Histograms, Distributions
+// and Timing. Since those 3 metric types behave the same way and are sampled
+// with the same type they're represented by the same class.
+type bufferedMetricContexts struct {
+ nbContext uint64
+ mutex sync.RWMutex
+ values bufferedMetricMap
+ newMetric func(string, float64, string) *bufferedMetric
+
+ // Each bufferedMetricContexts uses its own random source and random
+ // lock to prevent goroutines from contending for the lock on the
+ // "math/rand" package-global random source (e.g. calls like
+ // "rand.Float64()" must acquire a shared lock to get the next
+ // pseudorandom number).
+ random *rand.Rand
+ randomLock sync.Mutex
+}
+
+func newBufferedContexts(newMetric func(string, float64, string) *bufferedMetric) bufferedMetricContexts {
+ return bufferedMetricContexts{
+ values: bufferedMetricMap{},
+ newMetric: newMetric,
+ // Note that calling "time.Now().UnixNano()" repeatedly quickly may return
+ // very similar values. That's fine for seeding the worker-specific random
+ // source because we just need an evenly distributed stream of float values.
+ // Do not use this random source for cryptographic randomness.
+ random: rand.New(rand.NewSource(time.Now().UnixNano())),
+ }
+}
+
+func (bc *bufferedMetricContexts) flush(metrics []metric) []metric {
+ bc.mutex.Lock()
+ values := bc.values
+ bc.values = bufferedMetricMap{}
+ bc.mutex.Unlock()
+
+ for _, d := range values {
+ metrics = append(metrics, d.flushUnsafe())
+ }
+ atomic.AddUint64(&bc.nbContext, uint64(len(values)))
+ return metrics
+}
+
+func (bc *bufferedMetricContexts) sample(name string, value float64, tags []string, rate float64) error {
+ if !shouldSample(rate, bc.random, &bc.randomLock) {
+ return nil
+ }
+
+ context, stringTags := getContextAndTags(name, tags)
+
+ bc.mutex.RLock()
+ if v, found := bc.values[context]; found {
+ v.sample(value)
+ bc.mutex.RUnlock()
+ return nil
+ }
+ bc.mutex.RUnlock()
+
+ bc.mutex.Lock()
+ // Check if another goroutine hasn't created the value between the 'RUnlock' and 'Lock'
+ if v, found := bc.values[context]; found {
+ v.sample(value)
+ bc.mutex.Unlock()
+ return nil
+ }
+ bc.values[context] = bc.newMetric(name, value, stringTags)
+ bc.mutex.Unlock()
+ return nil
+}
+
+func (bc *bufferedMetricContexts) getNbContext() uint64 {
+ return atomic.LoadUint64(&bc.nbContext)
+}
diff --git a/statsd/container.go b/statsd/container.go
new file mode 100644
index 0000000..b2331e8
--- /dev/null
+++ b/statsd/container.go
@@ -0,0 +1,82 @@
+package statsd
+
+import (
+ "bufio"
+ "fmt"
+ "io"
+ "os"
+ "regexp"
+ "sync"
+)
+
+const (
+ // cgroupPath is the path to the cgroup file where we can find the container id if one exists.
+ cgroupPath = "/proc/self/cgroup"
+)
+
+const (
+ uuidSource = "[0-9a-f]{8}[-_][0-9a-f]{4}[-_][0-9a-f]{4}[-_][0-9a-f]{4}[-_][0-9a-f]{12}"
+ containerSource = "[0-9a-f]{64}"
+ taskSource = "[0-9a-f]{32}-\\d+"
+)
+
+var (
+ // expLine matches a line in the /proc/self/cgroup file. It has a submatch for the last element (path), which contains the container ID.
+ expLine = regexp.MustCompile(`^\d+:[^:]*:(.+)$`)
+
+ // expContainerID matches container IDs and sources. Source: https://github.com/Qard/container-info/blob/master/index.js
+ expContainerID = regexp.MustCompile(fmt.Sprintf(`(%s|%s|%s)(?:.scope)?$`, uuidSource, containerSource, taskSource))
+
+ // containerID holds the container ID.
+ containerID = ""
+)
+
+// parseContainerID finds the first container ID reading from r and returns it.
+func parseContainerID(r io.Reader) string {
+ scn := bufio.NewScanner(r)
+ for scn.Scan() {
+ path := expLine.FindStringSubmatch(scn.Text())
+ if len(path) != 2 {
+ // invalid entry, continue
+ continue
+ }
+ if parts := expContainerID.FindStringSubmatch(path[1]); len(parts) == 2 {
+ return parts[1]
+ }
+ }
+ return ""
+}
+
+// readContainerID attempts to return the container ID from the provided file path or empty on failure.
+func readContainerID(fpath string) string {
+ f, err := os.Open(fpath)
+ if err != nil {
+ return ""
+ }
+ defer f.Close()
+ return parseContainerID(f)
+}
+
+// getContainerID returns the container ID configured at the client creation
+// It can either be auto-discovered with origin detection or provided by the user.
+// User-defined container ID is prioritized.
+func getContainerID() string {
+ return containerID
+}
+
+var initOnce sync.Once
+
+// initContainerID initializes the container ID.
+// It can either be provided by the user or read from cgroups.
+func initContainerID(userProvidedID string, cgroupFallback bool) {
+ initOnce.Do(func() {
+ if userProvidedID != "" {
+ containerID = userProvidedID
+ return
+ }
+
+ if cgroupFallback {
+ containerID = readContainerID(cgroupPath)
+ }
+ })
+}
diff --git a/statsd/container_test.go b/statsd/container_test.go
new file mode 100644
index 0000000..fe49dfb
--- /dev/null
+++ b/statsd/container_test.go
@@ -0,0 +1,58 @@
+package statsd
+
+import (
+ "io"
+ "io/ioutil"
+ "os"
+ "strings"
+ "testing"
+
+ "github.com/stretchr/testify/assert"
+)
+
+func TestParseContainerID(t *testing.T) {
+ for input, expectedResult := range map[string]string{
+ `other_line
+10:hugetlb:/kubepods/burstable/podfd52ef25-a87d-11e9-9423-0800271a638e/8c046cb0b72cd4c99f51b5591cd5b095967f58ee003710a45280c28ee1a9c7fa
+9:cpuset:/kubepods/burstable/podfd52ef25-a87d-11e9-9423-0800271a638e/8c046cb0b72cd4c99f51b5591cd5b095967f58ee003710a45280c28ee1a9c7fa
+8:pids:/kubepods/burstable/podfd52ef25-a87d-11e9-9423-0800271a638e/8c046cb0b72cd4c99f51b5591cd5b095967f58ee003710a45280c28ee1a9c7fa
+7:freezer:/kubepods/burstable/podfd52ef25-a87d-11e9-9423-0800271a638e/8c046cb0b72cd4c99f51b5591cd5b095967f58ee003710a45280c28ee1a9c7fa
+6:cpu,cpuacct:/kubepods/burstable/podfd52ef25-a87d-11e9-9423-0800271a638e/8c046cb0b72cd4c99f51b5591cd5b095967f58ee003710a45280c28ee1a9c7fa
+5:perf_event:/kubepods/burstable/podfd52ef25-a87d-11e9-9423-0800271a638e/8c046cb0b72cd4c99f51b5591cd5b095967f58ee003710a45280c28ee1a9c7fa
+4:blkio:/kubepods/burstable/podfd52ef25-a87d-11e9-9423-0800271a638e/8c046cb0b72cd4c99f51b5591cd5b095967f58ee003710a45280c28ee1a9c7fa
+3:devices:/kubepods/burstable/podfd52ef25-a87d-11e9-9423-0800271a638e/8c046cb0b72cd4c99f51b5591cd5b095967f58ee003710a45280c28ee1a9c7fa
+2:net_cls,net_prio:/kubepods/burstable/podfd52ef25-a87d-11e9-9423-0800271a638e/8c046cb0b72cd4c99f51b5591cd5b095967f58ee003710a45280c28ee1a9c7fa`: "8c046cb0b72cd4c99f51b5591cd5b095967f58ee003710a45280c28ee1a9c7fa",
+ "10:hugetlb:/kubepods/burstable/podfd52ef25-a87d-11e9-9423-0800271a638e/8c046cb0b72cd4c99f51b5591cd5b095967f58ee003710a45280c28ee1a9c7fa": "8c046cb0b72cd4c99f51b5591cd5b095967f58ee003710a45280c28ee1a9c7fa",
+ "10:hugetlb:/kubepods": "",
+ "11:hugetlb:/ecs/55091c13-b8cf-4801-b527-f4601742204d/432624d2150b349fe35ba397284dea788c2bf66b885d14dfc1569b01890ca7da": "432624d2150b349fe35ba397284dea788c2bf66b885d14dfc1569b01890ca7da",
+ "1:name=systemd:/docker/34dc0b5e626f2c5c4c5170e34b10e7654ce36f0fcd532739f4445baabea03376": "34dc0b5e626f2c5c4c5170e34b10e7654ce36f0fcd532739f4445baabea03376",
+ "1:name=systemd:/uuid/34dc0b5e-626f-2c5c-4c51-70e34b10e765": "34dc0b5e-626f-2c5c-4c51-70e34b10e765",
+ "1:name=systemd:/ecs/34dc0b5e626f2c5c4c5170e34b10e765-1234567890": "34dc0b5e626f2c5c4c5170e34b10e765-1234567890",
+ "1:name=systemd:/docker/34dc0b5e626f2c5c4c5170e34b10e7654ce36f0fcd532739f4445baabea03376.scope": "34dc0b5e626f2c5c4c5170e34b10e7654ce36f0fcd532739f4445baabea03376",
+ `1:name=systemd:/nope
+2:pids:/docker/34dc0b5e626f2c5c4c5170e34b10e7654ce36f0fcd532739f4445baabea03376
+3:cpu:/invalid`: "34dc0b5e626f2c5c4c5170e34b10e7654ce36f0fcd532739f4445baabea03376",
+ } {
+ id := parseContainerID(strings.NewReader(input))
+ assert.Equal(t, expectedResult, id)
+ }
+}
+
+func TestReadContainerID(t *testing.T) {
+ cid := "8c046cb0b72cd4c99f51b5591cd5b095967f58ee003710a45280c28ee1a9c7fa"
+ cgroupContents := "10:hugetlb:/kubepods/burstable/podfd52ef25-a87d-11e9-9423-0800271a638e/" + cid
+
+ tmpFile, err := ioutil.TempFile(os.TempDir(), "fake-cgroup-")
+ assert.NoError(t, err)
+
+ defer os.Remove(tmpFile.Name())
+
+ _, err = io.WriteString(tmpFile, cgroupContents)
+ assert.NoError(t, err)
+
+ err = tmpFile.Close()
+ assert.NoError(t, err)
+
+ actualCID := readContainerID(tmpFile.Name())
+ assert.Equal(t, cid, actualCID)
+}
diff --git a/statsd/end_to_end_udp_test.go b/statsd/end_to_end_udp_test.go
new file mode 100644
index 0000000..fc05147
--- /dev/null
+++ b/statsd/end_to_end_udp_test.go
@@ -0,0 +1,348 @@
+package statsd
+
+import (
+ "os"
+ "sort"
+ "testing"
+ "time"
+
+ "github.com/stretchr/testify/assert"
+)
+
+func TestPipelineWithGlobalTags(t *testing.T) {
+ ts, client := newClientAndTestServer(t,
+ "udp",
+ "localhost:8765",
+ []string{"tag1", "tag2"},
+ WithTags([]string{"tag1", "tag2"}),
+ )
+
+ ts.sendAllAndAssert(t, client)
+}
+
+func TestKownEnvTags(t *testing.T) {
+ entityIDEnvName := "DD_ENTITY_ID"
+ ddEnvName := "DD_ENV"
+ ddServiceName := "DD_SERVICE"
+ ddVersionName := "DD_VERSION"
+
+ defer func() { os.Unsetenv(entityIDEnvName) }()
+ defer func() { os.Unsetenv(ddEnvName) }()
+ defer func() { os.Unsetenv(ddServiceName) }()
+ defer func() { os.Unsetenv(ddVersionName) }()
+
+ os.Setenv(entityIDEnvName, "test_id")
+ os.Setenv(ddEnvName, "test_env")
+ os.Setenv(ddServiceName, "test_service")
+ os.Setenv(ddVersionName, "test_version")
+
+ expectedTags := []string{"dd.internal.entity_id:test_id", "env:test_env", "service:test_service", "version:test_version"}
+ ts, client := newClientAndTestServer(t,
+ "udp",
+ "localhost:8765",
+ expectedTags,
+ )
+
+ sort.Strings(client.tags)
+ assert.Equal(t, expectedTags, client.tags)
+ ts.sendAllAndAssert(t, client)
+}
+
+func TestKnownEnvTagsWithCustomTags(t *testing.T) {
+ entityIDEnvName := "DD_ENTITY_ID"
+ ddEnvName := "DD_ENV"
+ ddServiceName := "DD_SERVICE"
+ ddVersionName := "DD_VERSION"
+
+ defer func() { os.Unsetenv(entityIDEnvName) }()
+ defer func() { os.Unsetenv(ddEnvName) }()
+ defer func() { os.Unsetenv(ddServiceName) }()
+ defer func() { os.Unsetenv(ddVersionName) }()
+
+ os.Setenv(entityIDEnvName, "test_id")
+ os.Setenv(ddEnvName, "test_env")
+ os.Setenv(ddServiceName, "test_service")
+ os.Setenv(ddVersionName, "test_version")
+
+ expectedTags := []string{"tag1", "tag2", "dd.internal.entity_id:test_id", "env:test_env",
+ "service:test_service", "version:test_version"}
+ ts, client := newClientAndTestServer(t,
+ "udp",
+ "localhost:8765",
+ expectedTags,
+ WithTags([]string{"tag1", "tag2"}),
+ )
+
+ ts.sendAllAndAssert(t, client)
+
+ sort.Strings(expectedTags)
+ sort.Strings(client.tags)
+ assert.Equal(t, expectedTags, client.tags)
+}
+
+func TestKnownEnvTagsEmptyString(t *testing.T) {
+ entityIDEnvName := "DD_ENTITY_ID"
+ ddEnvName := "DD_ENV"
+ ddServiceName := "DD_SERVICE"
+ ddVersionName := "DD_VERSION"
+
+ defer func() { os.Unsetenv(entityIDEnvName) }()
+ defer func() { os.Unsetenv(ddEnvName) }()
+ defer func() { os.Unsetenv(ddServiceName) }()
+ defer func() { os.Unsetenv(ddVersionName) }()
+
+ os.Setenv(entityIDEnvName, "")
+ os.Setenv(ddEnvName, "")
+ os.Setenv(ddServiceName, "")
+ os.Setenv(ddVersionName, "")
+
+ ts, client := newClientAndTestServer(t,
+ "udp",
+ "localhost:8765",
+ nil,
+ )
+
+ assert.Len(t, client.tags, 0)
+ ts.sendAllAndAssert(t, client)
+}
+
+func TestContainerIDWithEntityID(t *testing.T) {
+ resetContainerID()
+
+ entityIDEnvName := "DD_ENTITY_ID"
+ defer func() { os.Unsetenv(entityIDEnvName) }()
+ os.Setenv(entityIDEnvName, "pod-uid")
+
+ expectedTags := []string{"dd.internal.entity_id:pod-uid"}
+ ts, client := newClientAndTestServer(t,
+ "udp",
+ "localhost:8765",
+ expectedTags,
+ WithContainerID("fake-container-id"),
+ )
+
+ sort.Strings(client.tags)
+ assert.Equal(t, expectedTags, client.tags)
+ ts.assertContainerID(t, "")
+ ts.sendAllAndAssert(t, client)
+}
+
+func TestContainerIDWithoutEntityID(t *testing.T) {
+ resetContainerID()
+ os.Unsetenv("DD_ENTITY_ID")
+
+ ts, client := newClientAndTestServer(t,
+ "udp",
+ "localhost:8765",
+ []string{},
+ WithContainerID("fake-container-id"),
+ )
+
+ ts.assertContainerID(t, "fake-container-id")
+ ts.sendAllAndAssert(t, client)
+}
+
+func TestOriginDetectionDisabled(t *testing.T) {
+ resetContainerID()
+ os.Unsetenv("DD_ENTITY_ID")
+
+ originDetectionEnvName := "DD_ORIGIN_DETECTION_ENABLED"
+ defer func() { os.Unsetenv(originDetectionEnvName) }()
+ os.Setenv(originDetectionEnvName, "false")
+
+ ts, client := newClientAndTestServer(t,
+ "udp",
+ "localhost:8765",
+ []string{},
+ )
+
+ ts.assertContainerID(t, "")
+ ts.sendAllAndAssert(t, client)
+}
+
+func TestOriginDetectionEnabledWithEntityID(t *testing.T) {
+ resetContainerID()
+
+ entityIDEnvName := "DD_ENTITY_ID"
+ defer func() { os.Unsetenv(entityIDEnvName) }()
+ os.Setenv(entityIDEnvName, "pod-uid")
+
+ originDetectionEnvName := "DD_ORIGIN_DETECTION_ENABLED"
+ defer func() { os.Unsetenv(originDetectionEnvName) }()
+ os.Setenv(originDetectionEnvName, "true")
+
+ expectedTags := []string{"dd.internal.entity_id:pod-uid"}
+ ts, client := newClientAndTestServer(t,
+ "udp",
+ "localhost:8765",
+ expectedTags,
+ WithContainerID("fake-container-id"),
+ )
+
+ sort.Strings(client.tags)
+ assert.Equal(t, expectedTags, client.tags)
+ ts.assertContainerID(t, "")
+ ts.sendAllAndAssert(t, client)
+}
+
+func TestPipelineWithGlobalTagsAndEnv(t *testing.T) {
+ orig := os.Getenv("DD_ENV")
+ os.Setenv("DD_ENV", "test")
+ defer os.Setenv("DD_ENV", orig)
+
+ ts, client := newClientAndTestServer(t,
+ "udp",
+ "localhost:8765",
+ []string{"tag1", "tag2", "env:test"},
+ WithTags([]string{"tag1", "tag2"}),
+ )
+
+ ts.sendAllAndAssert(t, client)
+}
+
+func TestTelemetryAllOptions(t *testing.T) {
+ orig := os.Getenv("DD_ENV")
+ os.Setenv("DD_ENV", "test")
+ defer os.Setenv("DD_ENV", orig)
+
+ ts, client := newClientAndTestServer(t,
+ "udp",
+ "localhost:8765",
+ []string{"tag1", "tag2", "env:test"},
+ WithExtendedClientSideAggregation(),
+ WithTags([]string{"tag1", "tag2"}),
+ WithNamespace("test_namespace"),
+ )
+
+ ts.sendAllAndAssert(t, client)
+}
+
+type testCase struct {
+ opt []Option
+ testFunc func(*testing.T, *testServer, *Client)
+}
+
+func getTestMap() map[string]testCase {
+ return map[string]testCase{
+ "Default": testCase{
+ []Option{},
+ func(t *testing.T, ts *testServer, client *Client) {
+ ts.sendAllAndAssert(t, client)
+ },
+ },
+ "Default without aggregation": testCase{
+ []Option{
+ WithoutClientSideAggregation(),
+ },
+ func(t *testing.T, ts *testServer, client *Client) {
+ ts.sendAllAndAssert(t, client)
+ },
+ },
+ "With namespace": testCase{
+ []Option{
+ WithNamespace("test_namespace"),
+ },
+ func(t *testing.T, ts *testServer, client *Client) {
+ ts.sendAllAndAssert(t, client)
+ },
+ },
+ "With namespace dot": testCase{
+ []Option{
+ WithNamespace("test_namespace."),
+ },
+ func(t *testing.T, ts *testServer, client *Client) {
+ ts.sendAllAndAssert(t, client)
+ },
+ },
+ "With max messages per payload": testCase{
+ []Option{
+ WithMaxMessagesPerPayload(5),
+ // Make sure we hit the maxMessagesPerPayload before hitting the flush timeout
+ WithBufferFlushInterval(3 * time.Second),
+ WithWorkersCount(1),
+ },
+ func(t *testing.T, ts *testServer, client *Client) {
+ ts.sendAllAndAssert(t, client)
+ // We send 4 non aggregated metrics, 1 service_check and 1 event. So 2 reads (5 items per
+ // payload). Then we flush the aggregator that will send 5 metrics, so 1 read. Finally,
+ // the telemetry is 18 metrics flushed at a different time so 4 more payloads for a
+ // total of 8 reads on the network
+ ts.assertNbRead(t, 8)
+ },
+ },
+ "With max messages per payload + WithoutClientSideAggregation": testCase{
+ []Option{
+ WithMaxMessagesPerPayload(5),
+ // Make sure we hit the maxMessagesPerPayload before hitting the flush timeout
+ WithBufferFlushInterval(3 * time.Second),
+ WithoutClientSideAggregation(),
+ WithWorkersCount(1),
+ },
+ func(t *testing.T, ts *testServer, client *Client) {
+ ts.sendAllAndAssert(t, client)
+ // We send 9 non aggregated metrics, 1 service_check and 1 event. So 3 reads (5 items
+ // per payload). Then the telemetry is 18 metrics flushed at a different time so 4 more
+ // payloads for a total of 7 reads on the network
+ ts.assertNbRead(t, 7)
+ },
+ },
+ "ChannelMode without client side aggregation": testCase{
+ []Option{
+ WithoutClientSideAggregation(),
+ WithChannelMode(),
+ },
+ func(t *testing.T, ts *testServer, client *Client) {
+ ts.sendAllAndAssert(t, client)
+ },
+ },
+ "Basic client side aggregation": testCase{
+ []Option{},
+ func(t *testing.T, ts *testServer, client *Client) {
+ expectedMetrics := ts.sendAllMetricsForBasicAggregation(client)
+ ts.assert(t, client, expectedMetrics)
+ },
+ },
+ "Extended client side aggregation": testCase{
+ []Option{
+ WithExtendedClientSideAggregation(),
+ },
+ func(t *testing.T, ts *testServer, client *Client) {
+ expectedMetrics := ts.sendAllMetricsForExtendedAggregation(client)
+ ts.assert(t, client, expectedMetrics)
+ },
+ },
+ "Basic client side aggregation + ChannelMode": testCase{
+ []Option{
+ WithChannelMode(),
+ },
+ func(t *testing.T, ts *testServer, client *Client) {
+ expectedMetrics := ts.sendAllMetricsForBasicAggregation(client)
+ ts.assert(t, client, expectedMetrics)
+ },
+ },
+ "Extended client side aggregation + ChannelMode": testCase{
+ []Option{
+ WithExtendedClientSideAggregation(),
+ WithChannelMode(),
+ },
+ func(t *testing.T, ts *testServer, client *Client) {
+ expectedMetrics := ts.sendAllMetricsForExtendedAggregation(client)
+ ts.assert(t, client, expectedMetrics)
+ },
+ },
+ }
+}
+
+func TestFullPipelineUDP(t *testing.T) {
+ for testName, c := range getTestMap() {
+ t.Run(testName, func(t *testing.T) {
+ ts, client := newClientAndTestServer(t,
+ "udp",
+ "localhost:8765",
+ nil,
+ c.opt...,
+ )
+ c.testFunc(t, ts, client)
+ })
+ }
+}
diff --git a/statsd/end_to_end_uds_test.go b/statsd/end_to_end_uds_test.go
new file mode 100644
index 0000000..be4f4b9
--- /dev/null
+++ b/statsd/end_to_end_uds_test.go
@@ -0,0 +1,26 @@
+// +build !windows
+
+package statsd
+
+import (
+ "fmt"
+ "math/rand"
+ "os"
+ "testing"
+)
+
+func TestFullPipelineUDS(t *testing.T) {
+ for testName, c := range getTestMap() {
+ socketPath := fmt.Sprintf("/tmp/dsd_%d.socket", rand.Int())
+ t.Run(testName, func(t *testing.T) {
+ ts, client := newClientAndTestServer(t,
+ "uds",
+ "unix://"+socketPath,
+ nil,
+ c.opt...,
+ )
+ c.testFunc(t, ts, client)
+ })
+ os.Remove(socketPath)
+ }
+}
diff --git a/statsd/event.go b/statsd/event.go
new file mode 100644
index 0000000..a2ca4fa
--- /dev/null
+++ b/statsd/event.go
@@ -0,0 +1,75 @@
+package statsd
+
+import (
+ "fmt"
+ "time"
+)
+
+// Events support
+// EventAlertType and EventAlertPriority became exported types after this issue was submitted: https://github.com/DataDog/datadog-go/issues/41
+// The reason why they got exported is so that client code can directly use the types.
+
+// EventAlertType is the alert type for events
+type EventAlertType string
+
+const (
+ // Info is the "info" AlertType for events
+ Info EventAlertType = "info"
+ // Error is the "error" AlertType for events
+ Error EventAlertType = "error"
+ // Warning is the "warning" AlertType for events
+ Warning EventAlertType = "warning"
+ // Success is the "success" AlertType for events
+ Success EventAlertType = "success"
+)
+
+// EventPriority is the event priority for events
+type EventPriority string
+
+const (
+ // Normal is the "normal" Priority for events
+ Normal EventPriority = "normal"
+ // Low is the "low" Priority for events
+ Low EventPriority = "low"
+)
+
+// An Event is an object that can be posted to your DataDog event stream.
+type Event struct {
+ // Title of the event. Required.
+ Title string
+ // Text is the description of the event.
+ Text string
+ // Timestamp is a timestamp for the event. If not provided, the dogstatsd
+ // server will set this to the current time.
+ Timestamp time.Time
+ // Hostname for the event.
+ Hostname string
+ // AggregationKey groups this event with others of the same key.
+ AggregationKey string
+ // Priority of the event. Can be statsd.Low or statsd.Normal.
+ Priority EventPriority
+ // SourceTypeName is a source type for the event.
+ SourceTypeName string
+ // AlertType can be statsd.Info, statsd.Error, statsd.Warning, or statsd.Success.
+ // If absent, the default value applied by the dogstatsd server is Info.
+ AlertType EventAlertType
+ // Tags for the event.
+ Tags []string
+}
+
+// NewEvent creates a new event with the given title and text. Error checking
+// against these values is done at send-time, or upon running e.Check.
+func NewEvent(title, text string) *Event {
+ return &Event{
+ Title: title,
+ Text: text,
+ }
+}
+
+// Check verifies that an event is valid.
+func (e *Event) Check() error {
+ if len(e.Title) == 0 {
+ return fmt.Errorf("statsd.Event title is required")
+ }
+ return nil
+}
diff --git a/statsd/event_test.go b/statsd/event_test.go
new file mode 100644
index 0000000..43bb712
--- /dev/null
+++ b/statsd/event_test.go
@@ -0,0 +1,85 @@
+package statsd
+
+import (
+ "testing"
+
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
+)
+
// encodeEvent validates e and serializes it (with no global tags) so tests
// can compare the raw datagram string.
func encodeEvent(e *Event) (string, error) {
	err := e.Check()
	if err != nil {
		return "", err
	}
	var buffer []byte
	buffer = appendEvent(buffer, e, nil)
	return string(buffer), nil
}

// TestEventEncode checks the wire format for a matrix of events, including
// "\n" escaping in the text and newline stripping inside tags.
func TestEventEncode(t *testing.T) {
	matrix := []struct {
		event   *Event
		encoded string
	}{
		{
			NewEvent("Hello", "Something happened to my event"),
			`_e{5,30}:Hello|Something happened to my event`,
		}, {
			&Event{Title: "hi", Text: "okay", AggregationKey: "foo"},
			`_e{2,4}:hi|okay|k:foo`,
		}, {
			&Event{Title: "hi", Text: "okay", AggregationKey: "foo", AlertType: Info},
			`_e{2,4}:hi|okay|k:foo|t:info`,
		}, {
			&Event{Title: "hi", Text: "w/e", AlertType: Error, Priority: Normal},
			`_e{2,3}:hi|w/e|p:normal|t:error`,
		}, {
			&Event{Title: "hi", Text: "uh", Tags: []string{"host:foo", "app:bar"}},
			`_e{2,2}:hi|uh|#host:foo,app:bar`,
		}, {
			&Event{Title: "hi", Text: "line1\nline2", Tags: []string{"hello\nworld"}},
			`_e{2,12}:hi|line1\nline2|#helloworld`,
		},
	}

	for _, m := range matrix {
		r, err := encodeEvent(m.event)
		require.NoError(t, err)
		// NOTE(review): testify's assert.Equal signature is (t, expected,
		// actual); the arguments here are swapped, which only affects the
		// wording of failure messages, not pass/fail.
		assert.Equal(t, r, m.encoded)
	}
}

func TestNewEventTitleMissing(t *testing.T) {
	e := NewEvent("", "hi")
	_, err := encodeEvent(e)
	require.Error(t, err)
	assert.Equal(t, "statsd.Event title is required", err.Error())
}

func TestNewEvent(t *testing.T) {
	e := NewEvent("hello", "world")
	e.Tags = []string{"tag1", "tag2"}
	eventEncoded, err := encodeEvent(e)
	require.NoError(t, err)
	assert.Equal(t, "_e{5,5}:hello|world|#tag1,tag2", eventEncoded)
	assert.Len(t, e.Tags, 2)
}

func TestNewEventTagsAppend(t *testing.T) {
	e := NewEvent("hello", "world")
	e.Tags = append(e.Tags, "tag1", "tag2")
	eventEncoded, err := encodeEvent(e)
	require.NoError(t, err)
	assert.Equal(t, "_e{5,5}:hello|world|#tag1,tag2", eventEncoded)
	assert.Len(t, e.Tags, 2)
}

// Empty text is legal: the length header records 0 and the text field is empty.
func TestNewEventEmptyText(t *testing.T) {
	e := NewEvent("hello", "")
	e.Tags = append(e.Tags, "tag1", "tag2")
	eventEncoded, err := encodeEvent(e)
	require.NoError(t, err)
	assert.Equal(t, "_e{5,0}:hello||#tag1,tag2", eventEncoded)
	assert.Len(t, e.Tags, 2)
}
diff --git a/statsd/fnv1a.go b/statsd/fnv1a.go
new file mode 100644
index 0000000..03dc8a0
--- /dev/null
+++ b/statsd/fnv1a.go
@@ -0,0 +1,39 @@
+package statsd
+
const (
	// FNV-1a parameters for the 32-bit variant.
	offset32 = uint32(2166136261)
	prime32  = uint32(16777619)

	// init32 is what 32 bits hash values should be initialized with.
	init32 = offset32
)

// hashString32 returns the 32-bit FNV-1a hash of s.
func hashString32(s string) uint32 {
	return addString32(init32, s)
}

// addString32 folds the bytes of s into the running FNV-1a hash h and
// returns the updated hash. The main loop is unrolled eight bytes at a
// time; the remaining tail is consumed one character at a time.
func addString32(h uint32, s string) uint32 {
	// Largest multiple of 8 that fits in len(s).
	aligned := len(s) - len(s)%8

	pos := 0
	for pos < aligned {
		h = (h ^ uint32(s[pos])) * prime32
		h = (h ^ uint32(s[pos+1])) * prime32
		h = (h ^ uint32(s[pos+2])) * prime32
		h = (h ^ uint32(s[pos+3])) * prime32
		h = (h ^ uint32(s[pos+4])) * prime32
		h = (h ^ uint32(s[pos+5])) * prime32
		h = (h ^ uint32(s[pos+6])) * prime32
		h = (h ^ uint32(s[pos+7])) * prime32
		pos += 8
	}

	for _, c := range s[pos:] {
		h = (h ^ uint32(c)) * prime32
	}

	return h
}
diff --git a/statsd/format.go b/statsd/format.go
new file mode 100644
index 0000000..6e05ad5
--- /dev/null
+++ b/statsd/format.go
@@ -0,0 +1,272 @@
+package statsd
+
+import (
+ "strconv"
+ "strings"
+)
+
// Wire-format fragments. The metric-type symbols are kept as []byte so they
// can be appended to the output buffer without conversion; the separators
// are strings because they are appended with the `...` string form.
var (
	gaugeSymbol        = []byte("g")
	countSymbol        = []byte("c")
	histogramSymbol    = []byte("h")
	distributionSymbol = []byte("d")
	setSymbol          = []byte("s")
	timingSymbol       = []byte("ms")
	tagSeparatorSymbol = ","
	// nameSeparatorSymbol is not referenced in this file; presumably used
	// elsewhere in the package — TODO confirm.
	nameSeparatorSymbol = ":"
)
+
// appendHeader appends "<namespace><name>:" to buffer. The namespace is
// expected to already carry any trailing separator (callers pass e.g.
// "namespace.").
func appendHeader(buffer []byte, namespace string, name string) []byte {
	// Appending an empty namespace is a no-op, so the previous
	// `if namespace != ""` guard was redundant and has been dropped.
	buffer = append(buffer, namespace...)
	buffer = append(buffer, name...)
	return append(buffer, ':')
}
+
// appendRate appends the "|@<rate>" sample-rate suffix. A rate of 1 or more
// (always sampled) produces no suffix; the float is rendered in its
// shortest exact form.
func appendRate(buffer []byte, rate float64) []byte {
	if rate >= 1 {
		return buffer
	}
	buffer = append(buffer, "|@"...)
	return strconv.AppendFloat(buffer, rate, 'f', -1, 64)
}
+
// appendWithoutNewlines appends s to buffer with every '\n' byte removed.
// The dogstatsd protocol is newline-delimited, so stray newlines in tags
// would corrupt the datagram.
func appendWithoutNewlines(buffer []byte, s string) []byte {
	// Common case: no newline at all, copy the string wholesale.
	if strings.IndexByte(s, '\n') < 0 {
		return append(buffer, s...)
	}

	for i := 0; i < len(s); i++ {
		if c := s[i]; c != '\n' {
			buffer = append(buffer, c)
		}
	}
	return buffer
}
+
+func appendTags(buffer []byte, globalTags []string, tags []string) []byte {
+ if len(globalTags) == 0 && len(tags) == 0 {
+ return buffer
+ }
+ buffer = append(buffer, "|#"...)
+ firstTag := true
+
+ for _, tag := range globalTags {
+ if !firstTag {
+ buffer = append(buffer, tagSeparatorSymbol...)
+ }
+ buffer = appendWithoutNewlines(buffer, tag)
+ firstTag = false
+ }
+ for _, tag := range tags {
+ if !firstTag {
+ buffer = append(buffer, tagSeparatorSymbol...)
+ }
+ buffer = appendWithoutNewlines(buffer, tag)
+ firstTag = false
+ }
+ return buffer
+}
+
+func appendTagsAggregated(buffer []byte, globalTags []string, tags string) []byte {
+ if len(globalTags) == 0 && tags == "" {
+ return buffer
+ }
+
+ buffer = append(buffer, "|#"...)
+ firstTag := true
+
+ for _, tag := range globalTags {
+ if !firstTag {
+ buffer = append(buffer, tagSeparatorSymbol...)
+ }
+ buffer = appendWithoutNewlines(buffer, tag)
+ firstTag = false
+ }
+ if tags != "" {
+ if !firstTag {
+ buffer = append(buffer, tagSeparatorSymbol...)
+ }
+ buffer = appendWithoutNewlines(buffer, tags)
+ }
+ return buffer
+}
+
// appendFloatMetric serializes one float-valued metric in the dogstatsd
// wire format: <namespace><name>:<value>|<type>[|@rate][|#tags][|c:id].
// precision is forwarded to strconv.AppendFloat (-1 means shortest form).
func appendFloatMetric(buffer []byte, typeSymbol []byte, namespace string, globalTags []string, name string, value float64, tags []string, rate float64, precision int) []byte {
	buffer = appendHeader(buffer, namespace, name)
	buffer = strconv.AppendFloat(buffer, value, 'f', precision, 64)
	buffer = append(buffer, '|')
	buffer = append(buffer, typeSymbol...)
	buffer = appendRate(buffer, rate)
	buffer = appendTags(buffer, globalTags, tags)
	buffer = appendContainerID(buffer)
	return buffer
}
+
// appendIntegerMetric is the integer-valued counterpart of
// appendFloatMetric; the field order of the datagram is identical.
func appendIntegerMetric(buffer []byte, typeSymbol []byte, namespace string, globalTags []string, name string, value int64, tags []string, rate float64) []byte {
	buffer = appendHeader(buffer, namespace, name)
	buffer = strconv.AppendInt(buffer, value, 10)
	buffer = append(buffer, '|')
	buffer = append(buffer, typeSymbol...)
	buffer = appendRate(buffer, rate)
	buffer = appendTags(buffer, globalTags, tags)
	buffer = appendContainerID(buffer)
	return buffer
}
+
// appendStringMetric is the string-valued counterpart of appendFloatMetric
// (used for sets). The value is written verbatim, without escaping.
func appendStringMetric(buffer []byte, typeSymbol []byte, namespace string, globalTags []string, name string, value string, tags []string, rate float64) []byte {
	buffer = appendHeader(buffer, namespace, name)
	buffer = append(buffer, value...)
	buffer = append(buffer, '|')
	buffer = append(buffer, typeSymbol...)
	buffer = appendRate(buffer, rate)
	buffer = appendTags(buffer, globalTags, tags)
	buffer = appendContainerID(buffer)
	return buffer
}
+
// appendGauge serializes a gauge ("g") with shortest-form float formatting.
func appendGauge(buffer []byte, namespace string, globalTags []string, name string, value float64, tags []string, rate float64) []byte {
	return appendFloatMetric(buffer, gaugeSymbol, namespace, globalTags, name, value, tags, rate, -1)
}

// appendCount serializes a count ("c").
func appendCount(buffer []byte, namespace string, globalTags []string, name string, value int64, tags []string, rate float64) []byte {
	return appendIntegerMetric(buffer, countSymbol, namespace, globalTags, name, value, tags, rate)
}

// appendHistogram serializes a histogram ("h") with shortest-form floats.
func appendHistogram(buffer []byte, namespace string, globalTags []string, name string, value float64, tags []string, rate float64) []byte {
	return appendFloatMetric(buffer, histogramSymbol, namespace, globalTags, name, value, tags, rate, -1)
}

// appendDistribution serializes a distribution ("d") with shortest-form floats.
func appendDistribution(buffer []byte, namespace string, globalTags []string, name string, value float64, tags []string, rate float64) []byte {
	return appendFloatMetric(buffer, distributionSymbol, namespace, globalTags, name, value, tags, rate, -1)
}

// appendSet serializes one member of a set ("s").
func appendSet(buffer []byte, namespace string, globalTags []string, name string, value string, tags []string, rate float64) []byte {
	return appendStringMetric(buffer, setSymbol, namespace, globalTags, name, value, tags, rate)
}

// appendTiming serializes a timing ("ms") with a fixed 6 decimal places.
func appendTiming(buffer []byte, namespace string, globalTags []string, name string, value float64, tags []string, rate float64) []byte {
	return appendFloatMetric(buffer, timingSymbol, namespace, globalTags, name, value, tags, rate, 6)
}
+
// escapedEventTextLen returns the length text will have once every '\n' is
// escaped to the two-character sequence `\n` (each newline adds one byte).
func escapedEventTextLen(text string) int {
	total := len(text)
	for i := 0; i < len(text); i++ {
		if text[i] == '\n' {
			total++
		}
	}
	return total
}
+
// appendEscapedEventText appends text to buffer, replacing every '\n' byte
// with the literal two-character sequence `\n` as required by the event
// wire format.
func appendEscapedEventText(buffer []byte, text string) []byte {
	for i := 0; i < len(text); i++ {
		if c := text[i]; c == '\n' {
			buffer = append(buffer, '\\', 'n')
		} else {
			buffer = append(buffer, c)
		}
	}
	return buffer
}
+
// appendEvent serializes event in the dogstatsd event wire format:
//
//	_e{<title len>,<escaped text len>}:<title>|<text>[|d:ts][|h:host]
//	  [|k:aggKey][|p:priority][|s:sourceType][|t:alertType][|#tags][|c:id]
//
// Newlines in the text are escaped to `\n` and the length header counts the
// escaped form; the title is written unescaped.
func appendEvent(buffer []byte, event *Event, globalTags []string) []byte {
	escapedTextLen := escapedEventTextLen(event.Text)

	buffer = append(buffer, "_e{"...)
	buffer = strconv.AppendInt(buffer, int64(len(event.Title)), 10)
	buffer = append(buffer, tagSeparatorSymbol...)
	buffer = strconv.AppendInt(buffer, int64(escapedTextLen), 10)
	buffer = append(buffer, "}:"...)
	buffer = append(buffer, event.Title...)
	buffer = append(buffer, '|')
	// Only run the escaping pass when the text actually contains newlines.
	if escapedTextLen != len(event.Text) {
		buffer = appendEscapedEventText(buffer, event.Text)
	} else {
		buffer = append(buffer, event.Text...)
	}

	// Optional fields below are emitted only when set; their order is fixed.
	if !event.Timestamp.IsZero() {
		buffer = append(buffer, "|d:"...)
		buffer = strconv.AppendInt(buffer, int64(event.Timestamp.Unix()), 10)
	}

	if len(event.Hostname) != 0 {
		buffer = append(buffer, "|h:"...)
		buffer = append(buffer, event.Hostname...)
	}

	if len(event.AggregationKey) != 0 {
		buffer = append(buffer, "|k:"...)
		buffer = append(buffer, event.AggregationKey...)
	}

	if len(event.Priority) != 0 {
		buffer = append(buffer, "|p:"...)
		buffer = append(buffer, event.Priority...)
	}

	if len(event.SourceTypeName) != 0 {
		buffer = append(buffer, "|s:"...)
		buffer = append(buffer, event.SourceTypeName...)
	}

	if len(event.AlertType) != 0 {
		buffer = append(buffer, "|t:"...)
		buffer = append(buffer, string(event.AlertType)...)
	}

	buffer = appendTags(buffer, globalTags, event.Tags)
	buffer = appendContainerID(buffer)
	return buffer
}
+
// appendEscapedServiceCheckText appends text to buffer, escaping the two
// sequences that are ambiguous inside a service-check message: '\n' becomes
// `\n`, and "m:" becomes `m\:` (so it cannot be read as a field marker).
// When "m:" is escaped both input bytes are consumed in one step.
func appendEscapedServiceCheckText(buffer []byte, text string) []byte {
	i := 0
	for i < len(text) {
		c := text[i]
		switch {
		case c == '\n':
			buffer = append(buffer, `\n`...)
		case c == 'm' && i+1 < len(text) && text[i+1] == ':':
			buffer = append(buffer, `m\:`...)
			i++ // skip the ':' we just emitted
		default:
			buffer = append(buffer, c)
		}
		i++
	}
	return buffer
}
+
// appendServiceCheck serializes serviceCheck in the dogstatsd wire format:
//
//	_sc|<name>|<status>[|d:ts][|h:host][|#tags][|m:message][|c:id]
//
// The message is escaped (newlines and "m:" sequences) and is emitted after
// the tags. Status is written as its integer value.
func appendServiceCheck(buffer []byte, serviceCheck *ServiceCheck, globalTags []string) []byte {
	buffer = append(buffer, "_sc|"...)
	buffer = append(buffer, serviceCheck.Name...)
	buffer = append(buffer, '|')
	buffer = strconv.AppendInt(buffer, int64(serviceCheck.Status), 10)

	if !serviceCheck.Timestamp.IsZero() {
		buffer = append(buffer, "|d:"...)
		buffer = strconv.AppendInt(buffer, int64(serviceCheck.Timestamp.Unix()), 10)
	}

	if len(serviceCheck.Hostname) != 0 {
		buffer = append(buffer, "|h:"...)
		buffer = append(buffer, serviceCheck.Hostname...)
	}

	buffer = appendTags(buffer, globalTags, serviceCheck.Tags)

	if len(serviceCheck.Message) != 0 {
		buffer = append(buffer, "|m:"...)
		buffer = appendEscapedServiceCheckText(buffer, serviceCheck.Message)
	}

	buffer = appendContainerID(buffer)
	return buffer
}
+
// appendSeparator appends the newline that separates consecutive datagrams
// inside a single buffered payload.
func appendSeparator(buffer []byte) []byte {
	return append(buffer, "\n"...)
}
+
// appendContainerID appends "|c:<id>" when getContainerID (defined
// elsewhere in this package) returns a non-empty container ID; otherwise
// buffer is returned unchanged.
func appendContainerID(buffer []byte) []byte {
	if containerID := getContainerID(); len(containerID) > 0 {
		buffer = append(buffer, "|c:"...)
		buffer = append(buffer, containerID...)
	}
	return buffer
}
diff --git a/statsd/format_benchmark_test.go b/statsd/format_benchmark_test.go
new file mode 100644
index 0000000..62dbbba
--- /dev/null
+++ b/statsd/format_benchmark_test.go
@@ -0,0 +1,54 @@
+package statsd
+
+import (
+ "fmt"
+ "testing"
+ "time"
+)
+
// payloadSink keeps benchmark results alive so the compiler cannot
// optimize the serialization away.
var payloadSink []byte

// benchmarkFormat serializes one of each datagram type per iteration,
// reusing the same backing array (payloadSink[:0]) to measure formatting
// cost without allocation noise. Note the generated tags deliberately end
// in "\n", which also exercises the newline-stripping path.
func benchmarkFormat(b *testing.B, tagsNumber int) {
	payloadSink = make([]byte, 0, 1024*8)
	var tags []string
	for i := 0; i < tagsNumber; i++ {
		tags = append(tags, fmt.Sprintf("tag%d:tag%d\n", i, i))
	}
	event := &Event{
		Title:          "EvenTitle",
		Text:           "EventText",
		Timestamp:      time.Date(2016, time.August, 15, 0, 0, 0, 0, time.UTC),
		Hostname:       "hostname",
		AggregationKey: "aggregationKey",
		Priority:       "priority",
		SourceTypeName: "SourceTypeName",
		AlertType:      "alertType",
		Tags:           tags,
	}
	serviceCheck := &ServiceCheck{
		Name:      "service.check",
		Status:    Ok,
		Timestamp: time.Date(2016, time.August, 15, 0, 0, 0, 0, time.UTC),
		Hostname:  "hostname",
		Message:   "message",
		Tags:      []string{"tag1:tag1"},
	}
	b.ResetTimer()
	for n := 0; n < b.N; n++ {
		payloadSink = appendGauge(payloadSink[:0], "namespace", []string{}, "metric", 1, tags, 0.1)
		payloadSink = appendCount(payloadSink[:0], "namespace", []string{}, "metric", 1, tags, 0.1)
		payloadSink = appendHistogram(payloadSink[:0], "namespace", []string{}, "metric", 1, tags, 0.1)
		payloadSink = appendDistribution(payloadSink[:0], "namespace", []string{}, "metric", 1, tags, 0.1)
		payloadSink = appendSet(payloadSink[:0], "namespace", []string{}, "metric", "setelement", tags, 0.1)
		payloadSink = appendTiming(payloadSink[:0], "namespace", []string{}, "metric", 1, tags, 0.1)
		payloadSink = appendEvent(payloadSink[:0], event, []string{})
		payloadSink = appendServiceCheck(payloadSink[:0], serviceCheck, []string{})
	}
}

// Benchmarks across representative tag counts.
func BenchmarkFormat0(b *testing.B) { benchmarkFormat(b, 0) }
func BenchmarkFormat1(b *testing.B) { benchmarkFormat(b, 1) }
func BenchmarkFormat5(b *testing.B) { benchmarkFormat(b, 5) }
func BenchmarkFormat10(b *testing.B) { benchmarkFormat(b, 10) }
func BenchmarkFormat50(b *testing.B) { benchmarkFormat(b, 50) }
func BenchmarkFormat100(b *testing.B) { benchmarkFormat(b, 100) }
diff --git a/statsd/format_test.go b/statsd/format_test.go
new file mode 100644
index 0000000..a65d7b5
--- /dev/null
+++ b/statsd/format_test.go
@@ -0,0 +1,335 @@
+package statsd
+
+import (
+ "testing"
+ "time"
+
+ "github.com/stretchr/testify/assert"
+)
+
// Tag-suffix serialization: global tags come first, then per-call tags; no
// suffix at all when both are empty.
func TestFormatAppendTags(t *testing.T) {
	var buffer []byte
	buffer = appendTags(buffer, []string{"global:tag"}, []string{"tag:tag", "tag2:tag2"})
	assert.Equal(t, `|#global:tag,tag:tag,tag2:tag2`, string(buffer))

	var buffer2 []byte
	buffer2 = appendTags(buffer2, []string{"global:tag"}, nil)
	assert.Equal(t, `|#global:tag`, string(buffer2))

	var buffer3 []byte
	buffer3 = appendTags(buffer3, nil, []string{"tag:tag", "tag2:tag2"})
	assert.Equal(t, `|#tag:tag,tag2:tag2`, string(buffer3))

	var buffer4 []byte
	buffer4 = appendTags(buffer4, nil, nil)
	assert.Equal(t, "", string(buffer4))
}

// Same cases for the variant taking the per-call tags as one pre-joined string.
func TestFormatAppendTagsAggregated(t *testing.T) {
	var buffer []byte
	buffer = appendTagsAggregated(buffer, []string{"global:tag"}, "tag:tag,tag2:tag2")
	assert.Equal(t, `|#global:tag,tag:tag,tag2:tag2`, string(buffer))

	var buffer2 []byte
	buffer2 = appendTagsAggregated(buffer2, []string{"global:tag"}, "")
	assert.Equal(t, `|#global:tag`, string(buffer2))

	var buffer3 []byte
	buffer3 = appendTagsAggregated(buffer3, nil, "tag:tag,tag2:tag2")
	assert.Equal(t, `|#tag:tag,tag2:tag2`, string(buffer3))

	var buffer4 []byte
	buffer4 = appendTagsAggregated(buffer4, nil, "")
	assert.Equal(t, "", string(buffer4))
}
+
// One test per metric type, then edge cases around tags, sample rate, nil
// inputs, and newline stripping.
func TestFormatAppendGauge(t *testing.T) {
	var buffer []byte
	buffer = appendGauge(buffer, "namespace.", []string{"global:tag"}, "gauge", 1., []string{"tag:tag"}, 1)
	assert.Equal(t, `namespace.gauge:1|g|#global:tag,tag:tag`, string(buffer))
}

func TestFormatAppendCount(t *testing.T) {
	var buffer []byte
	buffer = appendCount(buffer, "namespace.", []string{"global:tag"}, "count", 2, []string{"tag:tag"}, 1)
	assert.Equal(t, `namespace.count:2|c|#global:tag,tag:tag`, string(buffer))
}

func TestFormatAppendHistogram(t *testing.T) {
	var buffer []byte
	buffer = appendHistogram(buffer, "namespace.", []string{"global:tag"}, "histogram", 3., []string{"tag:tag"}, 1)
	assert.Equal(t, `namespace.histogram:3|h|#global:tag,tag:tag`, string(buffer))
}

func TestFormatAppendDistribution(t *testing.T) {
	var buffer []byte
	buffer = appendDistribution(buffer, "namespace.", []string{"global:tag"}, "distribution", 4., []string{"tag:tag"}, 1)
	assert.Equal(t, `namespace.distribution:4|d|#global:tag,tag:tag`, string(buffer))
}

func TestFormatAppendSet(t *testing.T) {
	var buffer []byte
	buffer = appendSet(buffer, "namespace.", []string{"global:tag"}, "set", "five", []string{"tag:tag"}, 1)
	assert.Equal(t, `namespace.set:five|s|#global:tag,tag:tag`, string(buffer))
}

// Timing uses a fixed 6-decimal float rendering, unlike the other types.
func TestFormatAppendTiming(t *testing.T) {
	var buffer []byte
	buffer = appendTiming(buffer, "namespace.", []string{"global:tag"}, "timing", 6., []string{"tag:tag"}, 1)
	assert.Equal(t, `namespace.timing:6.000000|ms|#global:tag,tag:tag`, string(buffer))
}

func TestFormatNoTag(t *testing.T) {
	var buffer []byte
	buffer = appendGauge(buffer, "", []string{}, "gauge", 1., []string{}, 1)
	assert.Equal(t, `gauge:1|g`, string(buffer))
}

func TestFormatOneTag(t *testing.T) {
	var buffer []byte
	buffer = appendGauge(buffer, "", []string{}, "gauge", 1., []string{"tag1:tag1"}, 1)
	assert.Equal(t, `gauge:1|g|#tag1:tag1`, string(buffer))
}

func TestFormatTwoTag(t *testing.T) {
	var buffer []byte
	buffer = appendGauge(buffer, "", []string{}, "metric", 1., []string{"tag1:tag1", "tag2:tag2"}, 1)
	assert.Equal(t, `metric:1|g|#tag1:tag1,tag2:tag2`, string(buffer))
}

func TestFormatRate(t *testing.T) {
	var buffer []byte
	buffer = appendGauge(buffer, "", []string{}, "metric", 1., []string{}, 0.1)
	assert.Equal(t, `metric:1|g|@0.1`, string(buffer))
}

// The rate field must precede the tag field.
func TestFormatRateAndTag(t *testing.T) {
	var buffer []byte
	buffer = appendGauge(buffer, "", []string{}, "metric", 1., []string{"tag1:tag1"}, 0.1)
	assert.Equal(t, `metric:1|g|@0.1|#tag1:tag1`, string(buffer))
}

func TestFormatNil(t *testing.T) {
	var buffer []byte
	buffer = appendGauge(buffer, "", nil, "metric", 1., nil, 1)
	assert.Equal(t, `metric:1|g`, string(buffer))
}

// Newlines embedded in tags are silently dropped during serialization.
func TestFormatTagRemoveNewLines(t *testing.T) {
	var buffer []byte
	buffer = appendGauge(buffer, "", []string{"tag\n:d\nog\n"}, "metric", 1., []string{"\ntag\n:d\nog2\n"}, 0.1)
	assert.Equal(t, `metric:1|g|@0.1|#tag:dog,tag:dog2`, string(buffer))
}
+
// Event serialization: each optional field in isolation, then all combined,
// then the degenerate empty event.
func TestFormatEvent(t *testing.T) {
	var buffer []byte
	buffer = appendEvent(buffer, &Event{
		Title: "EvenTitle",
		Text:  "EventText",
	}, []string{})
	assert.Equal(t, `_e{9,9}:EvenTitle|EventText`, string(buffer))
}

// Newlines in the text are escaped to `\n` and counted in the length header.
func TestFormatEventEscapeText(t *testing.T) {
	var buffer []byte
	buffer = appendEvent(buffer, &Event{
		Title: "EvenTitle",
		Text:  "\nEventText\nLine2\n\nLine4\n",
	}, []string{})
	assert.Equal(t, `_e{9,29}:EvenTitle|\nEventText\nLine2\n\nLine4\n`, string(buffer))
}

func TestFormatEventTimeStamp(t *testing.T) {
	var buffer []byte
	buffer = appendEvent(buffer, &Event{
		Title:     "EvenTitle",
		Text:      "EventText",
		Timestamp: time.Date(2016, time.August, 15, 0, 0, 0, 0, time.UTC),
	}, []string{})
	assert.Equal(t, `_e{9,9}:EvenTitle|EventText|d:1471219200`, string(buffer))
}

func TestFormatEventHostname(t *testing.T) {
	var buffer []byte
	buffer = appendEvent(buffer, &Event{
		Title:    "EvenTitle",
		Text:     "EventText",
		Hostname: "hostname",
	}, []string{})
	assert.Equal(t, `_e{9,9}:EvenTitle|EventText|h:hostname`, string(buffer))
}

func TestFormatEventAggregationKey(t *testing.T) {
	var buffer []byte
	buffer = appendEvent(buffer, &Event{
		Title:          "EvenTitle",
		Text:           "EventText",
		AggregationKey: "aggregationKey",
	}, []string{})
	assert.Equal(t, `_e{9,9}:EvenTitle|EventText|k:aggregationKey`, string(buffer))
}

func TestFormatEventPriority(t *testing.T) {
	var buffer []byte
	buffer = appendEvent(buffer, &Event{
		Title:    "EvenTitle",
		Text:     "EventText",
		Priority: "priority",
	}, []string{})
	assert.Equal(t, `_e{9,9}:EvenTitle|EventText|p:priority`, string(buffer))
}

func TestFormatEventSourceTypeName(t *testing.T) {
	var buffer []byte
	buffer = appendEvent(buffer, &Event{
		Title:          "EvenTitle",
		Text:           "EventText",
		SourceTypeName: "sourceTypeName",
	}, []string{})
	assert.Equal(t, `_e{9,9}:EvenTitle|EventText|s:sourceTypeName`, string(buffer))
}

func TestFormatEventAlertType(t *testing.T) {
	var buffer []byte
	buffer = appendEvent(buffer, &Event{
		Title:     "EvenTitle",
		Text:      "EventText",
		AlertType: "alertType",
	}, []string{})
	assert.Equal(t, `_e{9,9}:EvenTitle|EventText|t:alertType`, string(buffer))
}

func TestFormatEventOneTag(t *testing.T) {
	var buffer []byte
	buffer = appendEvent(buffer, &Event{
		Title: "EvenTitle",
		Text:  "EventText",
	}, []string{"tag:test"})
	assert.Equal(t, `_e{9,9}:EvenTitle|EventText|#tag:test`, string(buffer))
}

// Global tags are serialized before the event's own tags.
func TestFormatEventTwoTag(t *testing.T) {
	var buffer []byte
	buffer = appendEvent(buffer, &Event{
		Title: "EvenTitle",
		Text:  "EventText",
		Tags:  []string{"tag1:test"},
	}, []string{"tag2:test"})
	assert.Equal(t, `_e{9,9}:EvenTitle|EventText|#tag2:test,tag1:test`, string(buffer))
}

func TestFormatEventAllOptions(t *testing.T) {
	var buffer []byte
	buffer = appendEvent(buffer, &Event{
		Title:          "EvenTitle",
		Text:           "EventText",
		Timestamp:      time.Date(2016, time.August, 15, 0, 0, 0, 0, time.UTC),
		Hostname:       "hostname",
		AggregationKey: "aggregationKey",
		Priority:       "priority",
		SourceTypeName: "SourceTypeName",
		AlertType:      "alertType",
		Tags:           []string{"tag:normal"},
	}, []string{"tag:global"})
	assert.Equal(t, `_e{9,9}:EvenTitle|EventText|d:1471219200|h:hostname|k:aggregationKey|p:priority|s:SourceTypeName|t:alertType|#tag:global,tag:normal`, string(buffer))
}

func TestFormatEventNil(t *testing.T) {
	var buffer []byte
	buffer = appendEvent(buffer, &Event{}, []string{})
	assert.Equal(t, `_e{0,0}:|`, string(buffer))
}
+
// Service-check serialization: each optional field in isolation, message
// escaping, all fields combined, and the zero-value check.
func TestFormatServiceCheck(t *testing.T) {
	var buffer []byte
	buffer = appendServiceCheck(buffer, &ServiceCheck{
		Name:   "service.check",
		Status: Ok,
	}, []string{})
	assert.Equal(t, `_sc|service.check|0`, string(buffer))
}

// Both '\n' and the "m:" field marker are escaped inside the message.
func TestFormatServiceCheckEscape(t *testing.T) {
	var buffer []byte
	buffer = appendServiceCheck(buffer, &ServiceCheck{
		Name:    "service.check",
		Status:  Ok,
		Message: "\n\nmessagem:hello...\n\nm:aa\nm:m",
	}, []string{})
	assert.Equal(t, `_sc|service.check|0|m:\n\nmessagem\:hello...\n\nm\:aa\nm\:m`, string(buffer))
}

func TestFormatServiceCheckTimestamp(t *testing.T) {
	var buffer []byte
	buffer = appendServiceCheck(buffer, &ServiceCheck{
		Name:      "service.check",
		Status:    Ok,
		Timestamp: time.Date(2016, time.August, 15, 0, 0, 0, 0, time.UTC),
	}, []string{})
	assert.Equal(t, `_sc|service.check|0|d:1471219200`, string(buffer))
}

func TestFormatServiceCheckHostname(t *testing.T) {
	var buffer []byte
	buffer = appendServiceCheck(buffer, &ServiceCheck{
		Name:     "service.check",
		Status:   Ok,
		Hostname: "hostname",
	}, []string{})
	assert.Equal(t, `_sc|service.check|0|h:hostname`, string(buffer))
}

func TestFormatServiceCheckMessage(t *testing.T) {
	var buffer []byte
	buffer = appendServiceCheck(buffer, &ServiceCheck{
		Name:    "service.check",
		Status:  Ok,
		Message: "message",
	}, []string{})
	assert.Equal(t, `_sc|service.check|0|m:message`, string(buffer))
}

func TestFormatServiceCheckOneTag(t *testing.T) {
	var buffer []byte
	buffer = appendServiceCheck(buffer, &ServiceCheck{
		Name:   "service.check",
		Status: Ok,
		Tags:   []string{"tag:tag"},
	}, []string{})
	assert.Equal(t, `_sc|service.check|0|#tag:tag`, string(buffer))
}

// Global tags are serialized before the check's own tags.
func TestFormatServiceCheckTwoTag(t *testing.T) {
	var buffer []byte
	buffer = appendServiceCheck(buffer, &ServiceCheck{
		Name:   "service.check",
		Status: Ok,
		Tags:   []string{"tag1:tag1"},
	}, []string{"tag2:tag2"})
	assert.Equal(t, `_sc|service.check|0|#tag2:tag2,tag1:tag1`, string(buffer))
}

// Unlike events, the message field is emitted AFTER the tags.
func TestFormatServiceCheckAllOptions(t *testing.T) {
	var buffer []byte
	buffer = appendServiceCheck(buffer, &ServiceCheck{
		Name:      "service.check",
		Status:    Ok,
		Timestamp: time.Date(2016, time.August, 15, 0, 0, 0, 0, time.UTC),
		Hostname:  "hostname",
		Message:   "message",
		Tags:      []string{"tag1:tag1"},
	}, []string{"tag2:tag2"})
	assert.Equal(t, `_sc|service.check|0|d:1471219200|h:hostname|#tag2:tag2,tag1:tag1|m:message`, string(buffer))
}

func TestFormatServiceCheckNil(t *testing.T) {
	var buffer []byte
	buffer = appendServiceCheck(buffer, &ServiceCheck{}, nil)
	assert.Equal(t, `_sc||0`, string(buffer))
}

func TestFormatSeparator(t *testing.T) {
	var buffer []byte
	buffer = appendSeparator(buffer)
	assert.Equal(t, "\n", string(buffer))
}
diff --git a/statsd/metrics.go b/statsd/metrics.go
new file mode 100644
index 0000000..82f11ac
--- /dev/null
+++ b/statsd/metrics.go
@@ -0,0 +1,181 @@
+package statsd
+
+import (
+ "math"
+ "sync"
+ "sync/atomic"
+)
+
/*
These are the metric types that can be aggregated on the client side:
 - Gauge
 - Count
 - Set
*/
+
// countMetric aggregates a client-side count: successive samples are summed
// and flushed as a single "c" metric.
type countMetric struct {
	value int64
	name  string
	tags  []string
}

// newCountMetric creates a count seeded with value. tags goes through
// copySlice — presumably a defensive copy; confirm in its definition.
func newCountMetric(name string, value int64, tags []string) *countMetric {
	return &countMetric{
		value: value,
		name:  name,
		tags:  copySlice(tags),
	}
}

// sample atomically adds v to the running total; safe for concurrent use.
func (c *countMetric) sample(v int64) {
	atomic.AddInt64(&c.value, v)
}

// flushUnsafe snapshots the metric. As the name warns, c.value is read
// without synchronization; the caller must ensure no concurrent sample calls.
func (c *countMetric) flushUnsafe() metric {
	return metric{
		metricType: count,
		name:       c.name,
		tags:       c.tags,
		rate:       1,
		ivalue:     c.value,
	}
}
+
// Gauge

// gaugeMetric aggregates a client-side gauge: each sample overwrites the
// previous value (last write wins). The float is stored as its IEEE-754 bit
// pattern in a uint64 so updates can use the atomic package.
type gaugeMetric struct {
	value uint64
	name  string
	tags  []string
}

func newGaugeMetric(name string, value float64, tags []string) *gaugeMetric {
	return &gaugeMetric{
		value: math.Float64bits(value),
		name:  name,
		tags:  copySlice(tags),
	}
}

// sample atomically replaces the current value; safe for concurrent use.
func (g *gaugeMetric) sample(v float64) {
	atomic.StoreUint64(&g.value, math.Float64bits(v))
}

// flushUnsafe snapshots the gauge. g.value is read without synchronization;
// the caller must ensure no concurrent sample calls.
func (g *gaugeMetric) flushUnsafe() metric {
	return metric{
		metricType: gauge,
		name:       g.name,
		tags:       g.tags,
		rate:       1,
		fvalue:     math.Float64frombits(g.value),
	}
}
+
// Set

// setMetric aggregates a client-side set: it records every distinct string
// sampled since the last flush. sample is guarded by the embedded mutex.
type setMetric struct {
	data map[string]struct{}
	name string
	tags []string
	sync.Mutex
}

// newSetMetric creates a set already containing value.
func newSetMetric(name string, value string, tags []string) *setMetric {
	set := &setMetric{
		data: map[string]struct{}{},
		name: name,
		tags: copySlice(tags),
	}
	set.data[value] = struct{}{}
	return set
}

func (s *setMetric) sample(v string) {
	s.Lock()
	defer s.Unlock()
	s.data[v] = struct{}{}
}

// Sets are aggregated on the agent side too. We flush the keys so a set from
// multiple applications can be correctly aggregated on the agent side.
// (Unsafe: s.data is iterated without taking the mutex, so the caller must
// ensure no concurrent sample calls; map iteration order — and therefore
// the order of the returned metrics — is unspecified.)
func (s *setMetric) flushUnsafe() []metric {
	if len(s.data) == 0 {
		return nil
	}

	metrics := make([]metric, len(s.data))
	i := 0
	for value := range s.data {
		metrics[i] = metric{
			metricType: set,
			name:       s.name,
			tags:       s.tags,
			rate:       1,
			svalue:     value,
		}
		i++
	}
	return metrics
}
+
// Histograms, Distributions and Timings

// bufferedMetric accumulates every sampled value (no client-side reduction);
// the whole series is handed over at flush time. sample is guarded by the
// embedded mutex; flushUnsafe is not.
type bufferedMetric struct {
	sync.Mutex

	data []float64
	name string
	// Histograms and Distributions store tags as one string since we need
	// to compute its size multiple times when serializing.
	tags  string
	mtype metricType
}

func (s *bufferedMetric) sample(v float64) {
	s.Lock()
	defer s.Unlock()
	s.data = append(s.data, v)
}

// flushUnsafe snapshots the accumulated values. s.data is read without
// synchronization; the caller must ensure no concurrent sample calls.
func (s *bufferedMetric) flushUnsafe() metric {
	return metric{
		metricType: s.mtype,
		name:       s.name,
		stags:      s.tags,
		rate:       1,
		fvalues:    s.data,
	}
}

// histogramMetric is a bufferedMetric flushed with the aggregated-histogram type.
type histogramMetric = bufferedMetric

func newHistogramMetric(name string, value float64, stringTags string) *histogramMetric {
	return &histogramMetric{
		data:  []float64{value},
		name:  name,
		tags:  stringTags,
		mtype: histogramAggregated,
	}
}

// distributionMetric is a bufferedMetric flushed with the aggregated-distribution type.
type distributionMetric = bufferedMetric

func newDistributionMetric(name string, value float64, stringTags string) *distributionMetric {
	return &distributionMetric{
		data:  []float64{value},
		name:  name,
		tags:  stringTags,
		mtype: distributionAggregated,
	}
}

// timingMetric is a bufferedMetric flushed with the aggregated-timing type.
type timingMetric = bufferedMetric

func newTimingMetric(name string, value float64, stringTags string) *timingMetric {
	return &timingMetric{
		data:  []float64{value},
		name:  name,
		tags:  stringTags,
		mtype: timingAggregated,
	}
}
diff --git a/statsd/metrics_test.go b/statsd/metrics_test.go
new file mode 100644
index 0000000..41bb743
--- /dev/null
+++ b/statsd/metrics_test.go
@@ -0,0 +1,232 @@
+package statsd
+
+import (
+ "math"
+ "sort"
+ "strings"
+ "testing"
+
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
+)
+
+func TestNewCountMetric(t *testing.T) {
+ c := newCountMetric("test", 21, []string{"tag1", "tag2"})
+ assert.Equal(t, c.value, int64(21))
+ assert.Equal(t, c.name, "test")
+ assert.Equal(t, c.tags, []string{"tag1", "tag2"})
+}
+
+func TestCountMetricSample(t *testing.T) {
+ c := newCountMetric("test", 21, []string{"tag1", "tag2"})
+ c.sample(12)
+ assert.Equal(t, c.value, int64(33))
+ assert.Equal(t, c.name, "test")
+ assert.Equal(t, c.tags, []string{"tag1", "tag2"})
+}
+
+func TestFlushUnsafeCountMetricSample(t *testing.T) {
+ c := newCountMetric("test", 21, []string{"tag1", "tag2"})
+ m := c.flushUnsafe()
+ assert.Equal(t, m.metricType, count)
+ assert.Equal(t, m.ivalue, int64(21))
+ assert.Equal(t, m.name, "test")
+ assert.Equal(t, m.tags, []string{"tag1", "tag2"})
+
+ c.sample(12)
+ m = c.flushUnsafe()
+ assert.Equal(t, m.metricType, count)
+ assert.Equal(t, m.ivalue, int64(33))
+ assert.Equal(t, m.name, "test")
+ assert.Equal(t, m.tags, []string{"tag1", "tag2"})
+}
+
+func TestNewGaugeMetric(t *testing.T) {
+ g := newGaugeMetric("test", 21, []string{"tag1", "tag2"})
+ assert.Equal(t, math.Float64frombits(g.value), float64(21))
+ assert.Equal(t, g.name, "test")
+ assert.Equal(t, g.tags, []string{"tag1", "tag2"})
+}
+
+func TestGaugeMetricSample(t *testing.T) {
+ g := newGaugeMetric("test", 21, []string{"tag1", "tag2"})
+ g.sample(12)
+ assert.Equal(t, math.Float64frombits(g.value), float64(12))
+ assert.Equal(t, g.name, "test")
+ assert.Equal(t, g.tags, []string{"tag1", "tag2"})
+}
+
+func TestFlushUnsafeGaugeMetricSample(t *testing.T) {
+ g := newGaugeMetric("test", 21, []string{"tag1", "tag2"})
+ m := g.flushUnsafe()
+ assert.Equal(t, m.metricType, gauge)
+ assert.Equal(t, m.fvalue, float64(21))
+ assert.Equal(t, m.name, "test")
+ assert.Equal(t, m.tags, []string{"tag1", "tag2"})
+
+ g.sample(12)
+ m = g.flushUnsafe()
+ assert.Equal(t, m.metricType, gauge)
+ assert.Equal(t, m.fvalue, float64(12))
+ assert.Equal(t, m.name, "test")
+ assert.Equal(t, m.tags, []string{"tag1", "tag2"})
+}
+
+func TestNewSetMetric(t *testing.T) {
+ s := newSetMetric("test", "value1", []string{"tag1", "tag2"})
+ assert.Equal(t, s.data, map[string]struct{}{"value1": struct{}{}})
+ assert.Equal(t, s.name, "test")
+ assert.Equal(t, s.tags, []string{"tag1", "tag2"})
+}
+
+func TestSetMetricSample(t *testing.T) {
+ s := newSetMetric("test", "value1", []string{"tag1", "tag2"})
+ s.sample("value2")
+ assert.Equal(t, s.data, map[string]struct{}{"value1": struct{}{}, "value2": struct{}{}})
+ assert.Equal(t, s.name, "test")
+ assert.Equal(t, s.tags, []string{"tag1", "tag2"})
+}
+
+func TestFlushUnsafeSetMetricSample(t *testing.T) {
+ s := newSetMetric("test", "value1", []string{"tag1", "tag2"})
+ m := s.flushUnsafe()
+
+ require.Len(t, m, 1)
+
+ assert.Equal(t, m[0].metricType, set)
+ assert.Equal(t, m[0].svalue, "value1")
+ assert.Equal(t, m[0].name, "test")
+ assert.Equal(t, m[0].tags, []string{"tag1", "tag2"})
+
+ s.sample("value1")
+ s.sample("value2")
+ m = s.flushUnsafe()
+
+ sort.Slice(m, func(i, j int) bool {
+ return strings.Compare(m[i].svalue, m[j].svalue) != 1
+ })
+
+ require.Len(t, m, 2)
+ assert.Equal(t, m[0].metricType, set)
+ assert.Equal(t, m[0].svalue, "value1")
+ assert.Equal(t, m[0].name, "test")
+ assert.Equal(t, m[0].tags, []string{"tag1", "tag2"})
+ assert.Equal(t, m[1].metricType, set)
+ assert.Equal(t, m[1].svalue, "value2")
+ assert.Equal(t, m[1].name, "test")
+ assert.Equal(t, m[1].tags, []string{"tag1", "tag2"})
+}
+
+func TestNewHistogramMetric(t *testing.T) {
+ s := newHistogramMetric("test", 1.0, "tag1,tag2")
+ assert.Equal(t, s.data, []float64{1.0})
+ assert.Equal(t, s.name, "test")
+ assert.Equal(t, s.tags, "tag1,tag2")
+ assert.Equal(t, s.mtype, histogramAggregated)
+}
+
+func TestHistogramMetricSample(t *testing.T) {
+ s := newHistogramMetric("test", 1.0, "tag1,tag2")
+ s.sample(123.45)
+ assert.Equal(t, s.data, []float64{1.0, 123.45})
+ assert.Equal(t, s.name, "test")
+ assert.Equal(t, s.tags, "tag1,tag2")
+ assert.Equal(t, s.mtype, histogramAggregated)
+}
+
+func TestFlushUnsafeHistogramMetricSample(t *testing.T) {
+ s := newHistogramMetric("test", 1.0, "tag1,tag2")
+ m := s.flushUnsafe()
+
+ assert.Equal(t, m.metricType, histogramAggregated)
+ assert.Equal(t, m.fvalues, []float64{1.0})
+ assert.Equal(t, m.name, "test")
+ assert.Equal(t, m.stags, "tag1,tag2")
+ assert.Nil(t, m.tags)
+
+ s.sample(21)
+ s.sample(123.45)
+ m = s.flushUnsafe()
+
+ assert.Equal(t, m.metricType, histogramAggregated)
+ assert.Equal(t, m.fvalues, []float64{1.0, 21.0, 123.45})
+ assert.Equal(t, m.name, "test")
+ assert.Equal(t, m.stags, "tag1,tag2")
+ assert.Nil(t, m.tags)
+}
+
+func TestNewDistributionMetric(t *testing.T) {
+ s := newDistributionMetric("test", 1.0, "tag1,tag2")
+ assert.Equal(t, s.data, []float64{1.0})
+ assert.Equal(t, s.name, "test")
+ assert.Equal(t, s.tags, "tag1,tag2")
+ assert.Equal(t, s.mtype, distributionAggregated)
+}
+
+func TestDistributionMetricSample(t *testing.T) {
+ s := newDistributionMetric("test", 1.0, "tag1,tag2")
+ s.sample(123.45)
+ assert.Equal(t, s.data, []float64{1.0, 123.45})
+ assert.Equal(t, s.name, "test")
+ assert.Equal(t, s.tags, "tag1,tag2")
+ assert.Equal(t, s.mtype, distributionAggregated)
+}
+
+func TestFlushUnsafeDistributionMetricSample(t *testing.T) {
+ s := newDistributionMetric("test", 1.0, "tag1,tag2")
+ m := s.flushUnsafe()
+
+ assert.Equal(t, m.metricType, distributionAggregated)
+ assert.Equal(t, m.fvalues, []float64{1.0})
+ assert.Equal(t, m.name, "test")
+ assert.Equal(t, m.stags, "tag1,tag2")
+ assert.Nil(t, m.tags)
+
+ s.sample(21)
+ s.sample(123.45)
+ m = s.flushUnsafe()
+
+ assert.Equal(t, m.metricType, distributionAggregated)
+ assert.Equal(t, m.fvalues, []float64{1.0, 21.0, 123.45})
+ assert.Equal(t, m.name, "test")
+ assert.Equal(t, m.stags, "tag1,tag2")
+ assert.Nil(t, m.tags)
+}
+
+func TestNewTimingMetric(t *testing.T) {
+ s := newTimingMetric("test", 1.0, "tag1,tag2")
+ assert.Equal(t, s.data, []float64{1.0})
+ assert.Equal(t, s.name, "test")
+ assert.Equal(t, s.tags, "tag1,tag2")
+ assert.Equal(t, s.mtype, timingAggregated)
+}
+
+func TestTimingMetricSample(t *testing.T) {
+ s := newTimingMetric("test", 1.0, "tag1,tag2")
+ s.sample(123.45)
+ assert.Equal(t, s.data, []float64{1.0, 123.45})
+ assert.Equal(t, s.name, "test")
+ assert.Equal(t, s.tags, "tag1,tag2")
+ assert.Equal(t, s.mtype, timingAggregated)
+}
+
+func TestFlushUnsafeTimingMetricSample(t *testing.T) {
+ s := newTimingMetric("test", 1.0, "tag1,tag2")
+ m := s.flushUnsafe()
+
+ assert.Equal(t, m.metricType, timingAggregated)
+ assert.Equal(t, m.fvalues, []float64{1.0})
+ assert.Equal(t, m.name, "test")
+ assert.Equal(t, m.stags, "tag1,tag2")
+ assert.Nil(t, m.tags)
+
+ s.sample(21)
+ s.sample(123.45)
+ m = s.flushUnsafe()
+
+ assert.Equal(t, m.metricType, timingAggregated)
+ assert.Equal(t, m.fvalues, []float64{1.0, 21.0, 123.45})
+ assert.Equal(t, m.name, "test")
+ assert.Equal(t, m.stags, "tag1,tag2")
+ assert.Nil(t, m.tags)
+}
diff --git a/statsd/mocks/statsd.go b/statsd/mocks/statsd.go
new file mode 100644
index 0000000..80135c7
--- /dev/null
+++ b/statsd/mocks/statsd.go
@@ -0,0 +1,274 @@
+// Code generated by MockGen. DO NOT EDIT.
+// Source: statsd.go
+
+// Package mock_statsd is a generated GoMock package.
+package mock_statsd
+
+import (
+ reflect "reflect"
+ time "time"
+
+ statsd "github.com/DataDog/datadog-go/v5/statsd"
+ gomock "github.com/golang/mock/gomock"
+)
+
+// MockClientInterface is a mock of ClientInterface interface.
+type MockClientInterface struct {
+ ctrl *gomock.Controller
+ recorder *MockClientInterfaceMockRecorder
+}
+
+// MockClientInterfaceMockRecorder is the mock recorder for MockClientInterface.
+type MockClientInterfaceMockRecorder struct {
+ mock *MockClientInterface
+}
+
+// NewMockClientInterface creates a new mock instance.
+func NewMockClientInterface(ctrl *gomock.Controller) *MockClientInterface {
+ mock := &MockClientInterface{ctrl: ctrl}
+ mock.recorder = &MockClientInterfaceMockRecorder{mock}
+ return mock
+}
+
+// EXPECT returns an object that allows the caller to indicate expected use.
+func (m *MockClientInterface) EXPECT() *MockClientInterfaceMockRecorder {
+ return m.recorder
+}
+
+// Close mocks base method.
+func (m *MockClientInterface) Close() error {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "Close")
+ ret0, _ := ret[0].(error)
+ return ret0
+}
+
+// Close indicates an expected call of Close.
+func (mr *MockClientInterfaceMockRecorder) Close() *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Close", reflect.TypeOf((*MockClientInterface)(nil).Close))
+}
+
+// Count mocks base method.
+func (m *MockClientInterface) Count(name string, value int64, tags []string, rate float64) error {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "Count", name, value, tags, rate)
+ ret0, _ := ret[0].(error)
+ return ret0
+}
+
+// Count indicates an expected call of Count.
+func (mr *MockClientInterfaceMockRecorder) Count(name, value, tags, rate interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Count", reflect.TypeOf((*MockClientInterface)(nil).Count), name, value, tags, rate)
+}
+
+// Decr mocks base method.
+func (m *MockClientInterface) Decr(name string, tags []string, rate float64) error {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "Decr", name, tags, rate)
+ ret0, _ := ret[0].(error)
+ return ret0
+}
+
+// Decr indicates an expected call of Decr.
+func (mr *MockClientInterfaceMockRecorder) Decr(name, tags, rate interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Decr", reflect.TypeOf((*MockClientInterface)(nil).Decr), name, tags, rate)
+}
+
+// Distribution mocks base method.
+func (m *MockClientInterface) Distribution(name string, value float64, tags []string, rate float64) error {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "Distribution", name, value, tags, rate)
+ ret0, _ := ret[0].(error)
+ return ret0
+}
+
+// Distribution indicates an expected call of Distribution.
+func (mr *MockClientInterfaceMockRecorder) Distribution(name, value, tags, rate interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Distribution", reflect.TypeOf((*MockClientInterface)(nil).Distribution), name, value, tags, rate)
+}
+
+// Event mocks base method.
+func (m *MockClientInterface) Event(e *statsd.Event) error {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "Event", e)
+ ret0, _ := ret[0].(error)
+ return ret0
+}
+
+// Event indicates an expected call of Event.
+func (mr *MockClientInterfaceMockRecorder) Event(e interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Event", reflect.TypeOf((*MockClientInterface)(nil).Event), e)
+}
+
+// Flush mocks base method.
+func (m *MockClientInterface) Flush() error {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "Flush")
+ ret0, _ := ret[0].(error)
+ return ret0
+}
+
+// Flush indicates an expected call of Flush.
+func (mr *MockClientInterfaceMockRecorder) Flush() *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Flush", reflect.TypeOf((*MockClientInterface)(nil).Flush))
+}
+
+// Gauge mocks base method.
+func (m *MockClientInterface) Gauge(name string, value float64, tags []string, rate float64) error {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "Gauge", name, value, tags, rate)
+ ret0, _ := ret[0].(error)
+ return ret0
+}
+
+// Gauge indicates an expected call of Gauge.
+func (mr *MockClientInterfaceMockRecorder) Gauge(name, value, tags, rate interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Gauge", reflect.TypeOf((*MockClientInterface)(nil).Gauge), name, value, tags, rate)
+}
+
+// GetTelemetry mocks base method.
+func (m *MockClientInterface) GetTelemetry() statsd.Telemetry {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "GetTelemetry")
+ ret0, _ := ret[0].(statsd.Telemetry)
+ return ret0
+}
+
+// GetTelemetry indicates an expected call of GetTelemetry.
+func (mr *MockClientInterfaceMockRecorder) GetTelemetry() *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetTelemetry", reflect.TypeOf((*MockClientInterface)(nil).GetTelemetry))
+}
+
+// Histogram mocks base method.
+func (m *MockClientInterface) Histogram(name string, value float64, tags []string, rate float64) error {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "Histogram", name, value, tags, rate)
+ ret0, _ := ret[0].(error)
+ return ret0
+}
+
+// Histogram indicates an expected call of Histogram.
+func (mr *MockClientInterfaceMockRecorder) Histogram(name, value, tags, rate interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Histogram", reflect.TypeOf((*MockClientInterface)(nil).Histogram), name, value, tags, rate)
+}
+
+// Incr mocks base method.
+func (m *MockClientInterface) Incr(name string, tags []string, rate float64) error {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "Incr", name, tags, rate)
+ ret0, _ := ret[0].(error)
+ return ret0
+}
+
+// Incr indicates an expected call of Incr.
+func (mr *MockClientInterfaceMockRecorder) Incr(name, tags, rate interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Incr", reflect.TypeOf((*MockClientInterface)(nil).Incr), name, tags, rate)
+}
+
+// IsClosed mocks base method.
+func (m *MockClientInterface) IsClosed() bool {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "IsClosed")
+ ret0, _ := ret[0].(bool)
+ return ret0
+}
+
+// IsClosed indicates an expected call of IsClosed.
+func (mr *MockClientInterfaceMockRecorder) IsClosed() *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "IsClosed", reflect.TypeOf((*MockClientInterface)(nil).IsClosed))
+}
+
+// ServiceCheck mocks base method.
+func (m *MockClientInterface) ServiceCheck(sc *statsd.ServiceCheck) error {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "ServiceCheck", sc)
+ ret0, _ := ret[0].(error)
+ return ret0
+}
+
+// ServiceCheck indicates an expected call of ServiceCheck.
+func (mr *MockClientInterfaceMockRecorder) ServiceCheck(sc interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ServiceCheck", reflect.TypeOf((*MockClientInterface)(nil).ServiceCheck), sc)
+}
+
+// Set mocks base method.
+func (m *MockClientInterface) Set(name, value string, tags []string, rate float64) error {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "Set", name, value, tags, rate)
+ ret0, _ := ret[0].(error)
+ return ret0
+}
+
+// Set indicates an expected call of Set.
+func (mr *MockClientInterfaceMockRecorder) Set(name, value, tags, rate interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Set", reflect.TypeOf((*MockClientInterface)(nil).Set), name, value, tags, rate)
+}
+
+// SimpleEvent mocks base method.
+func (m *MockClientInterface) SimpleEvent(title, text string) error {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "SimpleEvent", title, text)
+ ret0, _ := ret[0].(error)
+ return ret0
+}
+
+// SimpleEvent indicates an expected call of SimpleEvent.
+func (mr *MockClientInterfaceMockRecorder) SimpleEvent(title, text interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SimpleEvent", reflect.TypeOf((*MockClientInterface)(nil).SimpleEvent), title, text)
+}
+
+// SimpleServiceCheck mocks base method.
+func (m *MockClientInterface) SimpleServiceCheck(name string, status statsd.ServiceCheckStatus) error {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "SimpleServiceCheck", name, status)
+ ret0, _ := ret[0].(error)
+ return ret0
+}
+
+// SimpleServiceCheck indicates an expected call of SimpleServiceCheck.
+func (mr *MockClientInterfaceMockRecorder) SimpleServiceCheck(name, status interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SimpleServiceCheck", reflect.TypeOf((*MockClientInterface)(nil).SimpleServiceCheck), name, status)
+}
+
+// TimeInMilliseconds mocks base method.
+func (m *MockClientInterface) TimeInMilliseconds(name string, value float64, tags []string, rate float64) error {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "TimeInMilliseconds", name, value, tags, rate)
+ ret0, _ := ret[0].(error)
+ return ret0
+}
+
+// TimeInMilliseconds indicates an expected call of TimeInMilliseconds.
+func (mr *MockClientInterfaceMockRecorder) TimeInMilliseconds(name, value, tags, rate interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "TimeInMilliseconds", reflect.TypeOf((*MockClientInterface)(nil).TimeInMilliseconds), name, value, tags, rate)
+}
+
+// Timing mocks base method.
+func (m *MockClientInterface) Timing(name string, value time.Duration, tags []string, rate float64) error {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "Timing", name, value, tags, rate)
+ ret0, _ := ret[0].(error)
+ return ret0
+}
+
+// Timing indicates an expected call of Timing.
+func (mr *MockClientInterfaceMockRecorder) Timing(name, value, tags, rate interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Timing", reflect.TypeOf((*MockClientInterface)(nil).Timing), name, value, tags, rate)
+}
diff --git a/statsd/noop.go b/statsd/noop.go
new file mode 100644
index 0000000..5c09398
--- /dev/null
+++ b/statsd/noop.go
@@ -0,0 +1,96 @@
+package statsd
+
+import "time"
+
+// NoOpClient is a statsd client that does nothing. Can be useful in testing
+// situations for library users.
+type NoOpClient struct{}
+
+// Gauge does nothing and returns nil
+func (n *NoOpClient) Gauge(name string, value float64, tags []string, rate float64) error {
+ return nil
+}
+
+// Count does nothing and returns nil
+func (n *NoOpClient) Count(name string, value int64, tags []string, rate float64) error {
+ return nil
+}
+
+// Histogram does nothing and returns nil
+func (n *NoOpClient) Histogram(name string, value float64, tags []string, rate float64) error {
+ return nil
+}
+
+// Distribution does nothing and returns nil
+func (n *NoOpClient) Distribution(name string, value float64, tags []string, rate float64) error {
+ return nil
+}
+
+// Decr does nothing and returns nil
+func (n *NoOpClient) Decr(name string, tags []string, rate float64) error {
+ return nil
+}
+
+// Incr does nothing and returns nil
+func (n *NoOpClient) Incr(name string, tags []string, rate float64) error {
+ return nil
+}
+
+// Set does nothing and returns nil
+func (n *NoOpClient) Set(name string, value string, tags []string, rate float64) error {
+ return nil
+}
+
+// Timing does nothing and returns nil
+func (n *NoOpClient) Timing(name string, value time.Duration, tags []string, rate float64) error {
+ return nil
+}
+
+// TimeInMilliseconds does nothing and returns nil
+func (n *NoOpClient) TimeInMilliseconds(name string, value float64, tags []string, rate float64) error {
+ return nil
+}
+
+// Event does nothing and returns nil
+func (n *NoOpClient) Event(e *Event) error {
+ return nil
+}
+
+// SimpleEvent does nothing and returns nil
+func (n *NoOpClient) SimpleEvent(title, text string) error {
+ return nil
+}
+
+// ServiceCheck does nothing and returns nil
+func (n *NoOpClient) ServiceCheck(sc *ServiceCheck) error {
+ return nil
+}
+
+// SimpleServiceCheck does nothing and returns nil
+func (n *NoOpClient) SimpleServiceCheck(name string, status ServiceCheckStatus) error {
+ return nil
+}
+
+// Close does nothing and returns nil
+func (n *NoOpClient) Close() error {
+ return nil
+}
+
+// Flush does nothing and returns nil
+func (n *NoOpClient) Flush() error {
+ return nil
+}
+
+// IsClosed does nothing and returns false
+func (n *NoOpClient) IsClosed() bool {
+ return false
+}
+
+// GetTelemetry does nothing and returns an empty Telemetry
+func (n *NoOpClient) GetTelemetry() Telemetry {
+ return Telemetry{}
+}
+
+// Verify that NoOpClient implements the ClientInterface.
+// https://golang.org/doc/faq#guarantee_satisfies_interface
+var _ ClientInterface = &NoOpClient{}
diff --git a/statsd/noop_test.go b/statsd/noop_test.go
new file mode 100644
index 0000000..2aaf013
--- /dev/null
+++ b/statsd/noop_test.go
@@ -0,0 +1,30 @@
+package statsd
+
+import (
+ "testing"
+ "time"
+
+ "github.com/stretchr/testify/assert"
+)
+
+func TestNoOpClient(t *testing.T) {
+ a := assert.New(t)
+ c := NoOpClient{}
+ tags := []string{"a:b"}
+
+ a.Nil(c.Gauge("asd", 123.4, tags, 56.0))
+ a.Nil(c.Count("asd", 1234, tags, 56.0))
+ a.Nil(c.Histogram("asd", 12.34, tags, 56.0))
+ a.Nil(c.Distribution("asd", 1.234, tags, 56.0))
+ a.Nil(c.Decr("asd", tags, 56.0))
+ a.Nil(c.Incr("asd", tags, 56.0))
+ a.Nil(c.Set("asd", "asd", tags, 56.0))
+ a.Nil(c.Timing("asd", time.Second, tags, 56.0))
+ a.Nil(c.TimeInMilliseconds("asd", 1234.5, tags, 56.0))
+ a.Nil(c.Event(nil))
+ a.Nil(c.SimpleEvent("asd", "zxc"))
+ a.Nil(c.ServiceCheck(nil))
+ a.Nil(c.SimpleServiceCheck("asd", Ok))
+ a.Nil(c.Close())
+ a.Nil(c.Flush())
+}
diff --git a/statsd/options.go b/statsd/options.go
new file mode 100644
index 0000000..0728a97
--- /dev/null
+++ b/statsd/options.go
@@ -0,0 +1,348 @@
+package statsd
+
+import (
+ "fmt"
+ "math"
+ "strings"
+ "time"
+)
+
+var (
+ defaultNamespace = ""
+ defaultTags = []string{}
+ defaultMaxBytesPerPayload = 0
+ defaultMaxMessagesPerPayload = math.MaxInt32
+ defaultBufferPoolSize = 0
+ defaultBufferFlushInterval = 100 * time.Millisecond
+ defaultWorkerCount = 32
+ defaultSenderQueueSize = 0
+ defaultWriteTimeout = 100 * time.Millisecond
+ defaultTelemetry = true
+ defaultReceivingMode = mutexMode
+ defaultChannelModeBufferSize = 4096
+ defaultAggregationFlushInterval = 2 * time.Second
+ defaultAggregation = true
+ defaultExtendedAggregation = false
+ defaultOriginDetection = true
+)
+
+// Options contains the configuration options for a client.
+type Options struct {
+ namespace string
+ tags []string
+ maxBytesPerPayload int
+ maxMessagesPerPayload int
+ bufferPoolSize int
+ bufferFlushInterval time.Duration
+ workersCount int
+ senderQueueSize int
+ writeTimeout time.Duration
+ telemetry bool
+ receiveMode receivingMode
+ channelModeBufferSize int
+ aggregationFlushInterval time.Duration
+ aggregation bool
+ extendedAggregation bool
+ telemetryAddr string
+ originDetection bool
+ containerID string
+}
+
+func resolveOptions(options []Option) (*Options, error) {
+ o := &Options{
+ namespace: defaultNamespace,
+ tags: defaultTags,
+ maxBytesPerPayload: defaultMaxBytesPerPayload,
+ maxMessagesPerPayload: defaultMaxMessagesPerPayload,
+ bufferPoolSize: defaultBufferPoolSize,
+ bufferFlushInterval: defaultBufferFlushInterval,
+ workersCount: defaultWorkerCount,
+ senderQueueSize: defaultSenderQueueSize,
+ writeTimeout: defaultWriteTimeout,
+ telemetry: defaultTelemetry,
+ receiveMode: defaultReceivingMode,
+ channelModeBufferSize: defaultChannelModeBufferSize,
+ aggregationFlushInterval: defaultAggregationFlushInterval,
+ aggregation: defaultAggregation,
+ extendedAggregation: defaultExtendedAggregation,
+ originDetection: defaultOriginDetection,
+ }
+
+ for _, option := range options {
+ err := option(o)
+ if err != nil {
+ return nil, err
+ }
+ }
+
+ return o, nil
+}
+
+// Option is a client option. Can return an error if validation fails.
+type Option func(*Options) error
+
+// WithNamespace sets a string to be prepended to all metric, event and service check names.
+//
+// A '.' will automatically be added after the namespace if needed. For example a metrics 'test' with a namespace 'prod'
+// will produce a final metric named 'prod.test'.
+func WithNamespace(namespace string) Option {
+ return func(o *Options) error {
+ if strings.HasSuffix(namespace, ".") {
+ o.namespace = namespace
+ } else {
+ o.namespace = namespace + "."
+ }
+ return nil
+ }
+}
+
+// WithTags sets global tags to be applied to every metrics, events and service checks.
+func WithTags(tags []string) Option {
+ return func(o *Options) error {
+ o.tags = tags
+ return nil
+ }
+}
+
+// WithMaxMessagesPerPayload sets the maximum number of metrics, events and/or service checks that a single payload can
+// contain.
+//
+// The default is 'math.MaxInt32' which will most likely let the WithMaxBytesPerPayload option take precedence. This
+// option can be set to `1` to create an unbuffered client (each metric/event/service check will be sent in its own
+// payload to the agent).
+func WithMaxMessagesPerPayload(maxMessagesPerPayload int) Option {
+ return func(o *Options) error {
+ o.maxMessagesPerPayload = maxMessagesPerPayload
+ return nil
+ }
+}
+
+// WithMaxBytesPerPayload sets the maximum number of bytes a single payload can contain. Each sample, event and service
+// check must be lower than this value once serialized or an `MessageTooLongError` is returned.
+//
+// The default value 0 which will set the option to the optimal size for the transport protocol used: 1432 for UDP and
+// named pipe and 8192 for UDS. Those values offer the best performances.
+// Be careful when changing this option, see
+// https://docs.datadoghq.com/developers/dogstatsd/high_throughput/#ensure-proper-packet-sizes.
+func WithMaxBytesPerPayload(MaxBytesPerPayload int) Option {
+ return func(o *Options) error {
+ o.maxBytesPerPayload = MaxBytesPerPayload
+ return nil
+ }
+}
+
+// WithBufferPoolSize sets the size of the pool of buffers used to serialize metrics, events and service_checks.
+//
+// The default, 0, will set the option to the optimal size for the transport protocol used: 2048 for UDP and named pipe
+// and 512 for UDS.
+func WithBufferPoolSize(bufferPoolSize int) Option {
+ return func(o *Options) error {
+ o.bufferPoolSize = bufferPoolSize
+ return nil
+ }
+}
+
+// WithBufferFlushInterval sets the interval after which the current buffer is flushed.
+//
+// Buffers are used to serialize data; they're flushed either when full (see WithMaxBytesPerPayload) or when they've
+// been open for longer than this interval.
+//
+// With apps sending a high number of metrics/events/service_checks the interval rarely times out. But with slow sending
+// apps increasing this value will reduce the number of payload sent on the wire as more data is serialized in the same
+// payload.
+//
+// Default is 100ms
+func WithBufferFlushInterval(bufferFlushInterval time.Duration) Option {
+ return func(o *Options) error {
+ o.bufferFlushInterval = bufferFlushInterval
+ return nil
+ }
+}
+
+// WithWorkersCount sets the number of workers that will be used to serialized data.
+//
+// Those workers allow the use of multiple buffers at the same time (see WithBufferPoolSize) to reduce lock contention.
+//
+// Default is 32.
+func WithWorkersCount(workersCount int) Option {
+ return func(o *Options) error {
+ if workersCount < 1 {
+ return fmt.Errorf("workersCount must be a positive integer")
+ }
+ o.workersCount = workersCount
+ return nil
+ }
+}
+
+// WithSenderQueueSize sets the size of the sender queue in number of buffers.
+//
+// After data has been serialized in a buffer they're pushed to a queue that the sender will consume and then send
+// each one to the agent.
+//
+// The default value 0 will set the option to the optimal size for the transport protocol used: 2048 for UDP and named
+// pipe and 512 for UDS.
+func WithSenderQueueSize(senderQueueSize int) Option {
+ return func(o *Options) error {
+ o.senderQueueSize = senderQueueSize
+ return nil
+ }
+}
+
+// WithWriteTimeout sets the timeout for network communication with the Agent, after this interval a payload is
+// dropped. This is only used for UDS and named pipes connection.
+func WithWriteTimeout(writeTimeout time.Duration) Option {
+ return func(o *Options) error {
+ o.writeTimeout = writeTimeout
+ return nil
+ }
+}
+
+// WithChannelMode make the client use channels to receive metrics
+//
+// This determines how the client receive metrics from the app (for example when calling the `Gauge()` method).
+// The client will either drop the metrics if its buffers are full (WithChannelMode option) or block the caller until the
+// metric can be handled (WithMutexMode option). By default the client use mutexes.
+//
+// WithChannelMode uses a channel (see WithChannelModeBufferSize to configure its size) to receive metrics and drops metrics if
+// the channel is full. Sending metrics in this mode is much slower than WithMutexMode (because of the channel), but will not
+// block the application. This mode is made for application using many goroutines, sending the same metrics, at a very
+// high volume. The goal is to not slow down the application at the cost of dropping metrics and having a lower max
+// throughput.
+func WithChannelMode() Option {
+ return func(o *Options) error {
+ o.receiveMode = channelMode
+ return nil
+ }
+}
+
+// WithMutexMode will use mutexes to receive metrics from the app through the API.
+//
+// This determines how the client receive metrics from the app (for example when calling the `Gauge()` method).
+// The client will either drop the metrics if its buffers are full (WithChannelMode option) or block the caller until the
+// metric can be handled (WithMutexMode option). By default the client use mutexes.
+//
+// WithMutexMode uses mutexes to receive metrics which is much faster than channels but can cause some lock contention
+// when used with a high number of goroutines sending the same metrics. Mutexes are sharded based on the metrics name
+// which limits mutex contention when multiple goroutines send different metrics (see WithWorkersCount). This is the
+// default behavior which will produce the best throughput.
+func WithMutexMode() Option {
+ return func(o *Options) error {
+ o.receiveMode = mutexMode
+ return nil
+ }
+}
+
+// WithChannelModeBufferSize sets the size of the channel holding incoming metrics when WithChannelMode is used.
+func WithChannelModeBufferSize(bufferSize int) Option {
+ return func(o *Options) error {
+ o.channelModeBufferSize = bufferSize
+ return nil
+ }
+}
+
+// WithAggregationInterval sets the interval at which aggregated metrics are flushed. See WithClientSideAggregation and
+// WithExtendedClientSideAggregation for more.
+//
+// The default interval is 2s. The interval must divide the Agent reporting period (default=10s) evenly to reduce "aliasing"
+// that can cause values to appear irregular/spiky.
+//
+// For example a 3s aggregation interval will create spikes in the final graph: an application sending a count metric
+// that increments at a constant 1000 times per second will appear noisy with an interval of 3s. This is because
+// client-side aggregation would report every 3 seconds, while the agent is reporting every 10 seconds. This means in
+// each agent bucket, the values are: 9000, 9000, 12000.
+func WithAggregationInterval(interval time.Duration) Option {
+ return func(o *Options) error {
+ o.aggregationFlushInterval = interval
+ return nil
+ }
+}
+
+// WithClientSideAggregation enables client side aggregation for Gauges, Counts and Sets.
+func WithClientSideAggregation() Option {
+ return func(o *Options) error {
+ o.aggregation = true
+ return nil
+ }
+}
+
+// WithoutClientSideAggregation disables client side aggregation.
+func WithoutClientSideAggregation() Option {
+ return func(o *Options) error {
+ o.aggregation = false
+ o.extendedAggregation = false
+ return nil
+ }
+}
+
+// WithExtendedClientSideAggregation enables client side aggregation for all types. This feature is only compatible with
+// Agent's version >=6.25.0 && <7.0.0 or Agent's versions >=7.25.0.
+func WithExtendedClientSideAggregation() Option {
+ return func(o *Options) error {
+ o.aggregation = true
+ o.extendedAggregation = true
+ return nil
+ }
+}
+
+// WithoutTelemetry disables the client telemetry.
+//
+// More on this here: https://docs.datadoghq.com/developers/dogstatsd/high_throughput/#client-side-telemetry
+func WithoutTelemetry() Option {
+ return func(o *Options) error {
+ o.telemetry = false
+ return nil
+ }
+}
+
+// WithTelemetryAddr sets a different address for telemetry metrics. By default the same address as the client is used
+// for telemetry.
+//
+// More on this here: https://docs.datadoghq.com/developers/dogstatsd/high_throughput/#client-side-telemetry
+func WithTelemetryAddr(addr string) Option {
+ return func(o *Options) error {
+ o.telemetryAddr = addr
+ return nil
+ }
+}
+
+// WithoutOriginDetection disables the client origin detection.
+// When enabled, the client tries to discover its container ID and sends it to the Agent
+// to enrich the metrics with container tags.
+// Origin detection can also be disabled by configuring the environment variable DD_ORIGIN_DETECTION_ENABLED=false
+// The client tries to read the container ID by parsing the file /proc/self/cgroup, this is not supported on Windows.
+// The client prioritizes the value passed via DD_ENTITY_ID (if set) over the container ID.
+//
+// More on this here: https://docs.datadoghq.com/developers/dogstatsd/?tab=kubernetes#origin-detection-over-udp
+func WithoutOriginDetection() Option {
+ return func(o *Options) error {
+ o.originDetection = false
+ return nil
+ }
+}
+
+// WithOriginDetection enables the client origin detection.
+// This feature requires Datadog Agent version >=6.35.0 && <7.0.0 or Agent versions >=7.35.0.
+// When enabled, the client tries to discover its container ID and sends it to the Agent
+// to enrich the metrics with container tags.
+// Origin detection can be disabled by configuring the environment variable DD_ORIGIN_DETECTION_ENABLED=false
+// The client tries to read the container ID by parsing the file /proc/self/cgroup, this is not supported on Windows.
+// The client prioritizes the value passed via DD_ENTITY_ID (if set) over the container ID.
+//
+// More on this here: https://docs.datadoghq.com/developers/dogstatsd/?tab=kubernetes#origin-detection-over-udp
+func WithOriginDetection() Option {
+ return func(o *Options) error {
+ o.originDetection = true
+ return nil
+ }
+}
+
+// WithContainerID allows passing the container ID, this will be used by the Agent to enrich metrics with container tags.
+// This feature requires Datadog Agent version >=6.35.0 && <7.0.0 or Agent versions >=7.35.0.
+// When configured, the provided container ID is prioritized over the container ID discovered via Origin Detection.
+// The client prioritizes the value passed via DD_ENTITY_ID (if set) over the container ID.
+func WithContainerID(id string) Option {
+ return func(o *Options) error {
+ o.containerID = id
+ return nil
+ }
+}
diff --git a/statsd/options_test.go b/statsd/options_test.go
new file mode 100644
index 0000000..da42716
--- /dev/null
+++ b/statsd/options_test.go
@@ -0,0 +1,115 @@
+package statsd
+
+import (
+ "testing"
+ "time"
+
+ "github.com/stretchr/testify/assert"
+)
+
+func TestDefaultOptions(t *testing.T) {
+ options, err := resolveOptions([]Option{})
+
+ assert.NoError(t, err)
+ assert.Equal(t, options.namespace, defaultNamespace)
+ assert.Equal(t, options.tags, defaultTags)
+ assert.Equal(t, options.maxBytesPerPayload, defaultMaxBytesPerPayload)
+ assert.Equal(t, options.maxMessagesPerPayload, defaultMaxMessagesPerPayload)
+ assert.Equal(t, options.bufferPoolSize, defaultBufferPoolSize)
+ assert.Equal(t, options.bufferFlushInterval, defaultBufferFlushInterval)
+ assert.Equal(t, options.workersCount, defaultWorkerCount)
+ assert.Equal(t, options.senderQueueSize, defaultSenderQueueSize)
+ assert.Equal(t, options.writeTimeout, defaultWriteTimeout)
+ assert.Equal(t, options.telemetry, defaultTelemetry)
+ assert.Equal(t, options.receiveMode, defaultReceivingMode)
+ assert.Equal(t, options.channelModeBufferSize, defaultChannelModeBufferSize)
+ assert.Equal(t, options.aggregationFlushInterval, defaultAggregationFlushInterval)
+ assert.Equal(t, options.aggregation, defaultAggregation)
+ assert.Equal(t, options.extendedAggregation, defaultExtendedAggregation)
+ assert.Zero(t, options.telemetryAddr)
+}
+
+func TestOptions(t *testing.T) {
+ testNamespace := "datadog."
+ testTags := []string{"rocks"}
+ testMaxBytesPerPayload := 2048
+ testMaxMessagePerPayload := 1024
+ testBufferPoolSize := 32
+ testBufferFlushInterval := 48 * time.Second
+ testBufferShardCount := 28
+ testSenderQueueSize := 64
+ testWriteTimeout := 1 * time.Minute
+ testChannelBufferSize := 500
+ testAggregationWindow := 10 * time.Second
+ testTelemetryAddr := "localhost:1234"
+
+ options, err := resolveOptions([]Option{
+ WithNamespace(testNamespace),
+ WithTags(testTags),
+ WithMaxBytesPerPayload(testMaxBytesPerPayload),
+ WithMaxMessagesPerPayload(testMaxMessagePerPayload),
+ WithBufferPoolSize(testBufferPoolSize),
+ WithBufferFlushInterval(testBufferFlushInterval),
+ WithWorkersCount(testBufferShardCount),
+ WithSenderQueueSize(testSenderQueueSize),
+ WithWriteTimeout(testWriteTimeout),
+ WithoutTelemetry(),
+ WithChannelMode(),
+ WithChannelModeBufferSize(testChannelBufferSize),
+ WithAggregationInterval(testAggregationWindow),
+ WithClientSideAggregation(),
+ WithTelemetryAddr(testTelemetryAddr),
+ })
+
+ assert.NoError(t, err)
+ assert.Equal(t, options.namespace, testNamespace)
+ assert.Equal(t, options.tags, testTags)
+ assert.Equal(t, options.maxBytesPerPayload, testMaxBytesPerPayload)
+ assert.Equal(t, options.maxMessagesPerPayload, testMaxMessagePerPayload)
+ assert.Equal(t, options.bufferPoolSize, testBufferPoolSize)
+ assert.Equal(t, options.bufferFlushInterval, testBufferFlushInterval)
+ assert.Equal(t, options.workersCount, testBufferShardCount)
+ assert.Equal(t, options.senderQueueSize, testSenderQueueSize)
+ assert.Equal(t, options.writeTimeout, testWriteTimeout)
+ assert.Equal(t, options.telemetry, false)
+ assert.Equal(t, options.receiveMode, channelMode)
+ assert.Equal(t, options.channelModeBufferSize, testChannelBufferSize)
+ assert.Equal(t, options.aggregationFlushInterval, testAggregationWindow)
+ assert.Equal(t, options.aggregation, true)
+ assert.Equal(t, options.extendedAggregation, false)
+ assert.Equal(t, options.telemetryAddr, testTelemetryAddr)
+}
+
+func TestExtendedAggregation(t *testing.T) {
+ options, err := resolveOptions([]Option{
+ WithoutClientSideAggregation(),
+ WithExtendedClientSideAggregation(),
+ })
+
+ assert.NoError(t, err)
+ assert.Equal(t, options.aggregation, true)
+ assert.Equal(t, options.extendedAggregation, true)
+}
+
+func TestResetOptions(t *testing.T) {
+ options, err := resolveOptions([]Option{
+ WithChannelMode(),
+ WithMutexMode(),
+ WithoutClientSideAggregation(),
+ })
+
+ assert.NoError(t, err)
+ assert.Equal(t, options.receiveMode, mutexMode)
+ assert.Equal(t, options.aggregation, false)
+ assert.Equal(t, options.extendedAggregation, false)
+}
+func TestOptionsNamespaceWithoutDot(t *testing.T) {
+ testNamespace := "datadog"
+
+ options, err := resolveOptions([]Option{
+ WithNamespace(testNamespace),
+ })
+
+ assert.NoError(t, err)
+ assert.Equal(t, options.namespace, testNamespace+".")
+}
diff --git a/statsd/pipe.go b/statsd/pipe.go
new file mode 100644
index 0000000..84c38e9
--- /dev/null
+++ b/statsd/pipe.go
@@ -0,0 +1,13 @@
+// +build !windows
+
+package statsd
+
+import (
+ "errors"
+ "io"
+ "time"
+)
+
+func newWindowsPipeWriter(pipepath string, writeTimeout time.Duration) (io.WriteCloser, error) {
+ return nil, errors.New("Windows Named Pipes are only supported on Windows")
+}
diff --git a/statsd/pipe_windows.go b/statsd/pipe_windows.go
new file mode 100644
index 0000000..5ab60f0
--- /dev/null
+++ b/statsd/pipe_windows.go
@@ -0,0 +1,75 @@
+// +build windows
+
+package statsd
+
+import (
+ "net"
+ "sync"
+ "time"
+
+ "github.com/Microsoft/go-winio"
+)
+
+type pipeWriter struct {
+ mu sync.RWMutex
+ conn net.Conn
+ timeout time.Duration
+ pipepath string
+}
+
+func (p *pipeWriter) Write(data []byte) (n int, err error) {
+ conn, err := p.ensureConnection()
+ if err != nil {
+ return 0, err
+ }
+
+ p.mu.RLock()
+ conn.SetWriteDeadline(time.Now().Add(p.timeout))
+ p.mu.RUnlock()
+
+ n, err = conn.Write(data)
+ if err != nil {
+ if e, ok := err.(net.Error); !ok || !e.Temporary() {
+ // disconnected; retry again on next attempt
+ p.mu.Lock()
+ p.conn = nil
+ p.mu.Unlock()
+ }
+ }
+ return n, err
+}
+
+func (p *pipeWriter) ensureConnection() (net.Conn, error) {
+ p.mu.RLock()
+ conn := p.conn
+ p.mu.RUnlock()
+ if conn != nil {
+ return conn, nil
+ }
+
+ // looks like we might need to connect - try again with write locking.
+ p.mu.Lock()
+ defer p.mu.Unlock()
+ if p.conn != nil {
+ return p.conn, nil
+ }
+ newconn, err := winio.DialPipe(p.pipepath, nil)
+ if err != nil {
+ return nil, err
+ }
+ p.conn = newconn
+ return newconn, nil
+}
+
+func (p *pipeWriter) Close() error {
+ return p.conn.Close()
+}
+
+func newWindowsPipeWriter(pipepath string, writeTimeout time.Duration) (*pipeWriter, error) {
+ // Defer connection establishment to first write
+ return &pipeWriter{
+ conn: nil,
+ timeout: writeTimeout,
+ pipepath: pipepath,
+ }, nil
+}
diff --git a/statsd/pipe_windows_test.go b/statsd/pipe_windows_test.go
new file mode 100644
index 0000000..1efbf2b
--- /dev/null
+++ b/statsd/pipe_windows_test.go
@@ -0,0 +1,134 @@
+// +build windows
+
+package statsd
+
+import (
+ "io/ioutil"
+ "net"
+ "os"
+ "testing"
+ "time"
+
+ "github.com/Microsoft/go-winio"
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
+)
+
+func createNamedPipe(t *testing.T) (string, *os.File, net.Listener) {
+ f, err := ioutil.TempFile("", "test-pipe-")
+ require.Nil(t, err)
+
+ pipepath := WindowsPipeAddressPrefix + f.Name()
+ ln, err := winio.ListenPipe(pipepath, &winio.PipeConfig{
+ SecurityDescriptor: "D:AI(A;;GA;;;WD)",
+ InputBufferSize: 1_000_000,
+ })
+ if err != nil {
+ os.Remove(f.Name())
+ t.Fatal(err)
+ }
+ return pipepath, f, ln
+}
+
+// acceptOne accepts one single connection from ln, reads 512 bytes from it
+// and sends it to the out channel, afterwards closing the connection.
+func acceptOne(t *testing.T, ln net.Listener, out chan string) {
+ conn, err := ln.Accept()
+ require.Nil(t, err)
+
+ buf := make([]byte, 512)
+ n, err := conn.Read(buf)
+ require.Nil(t, err)
+
+ conn.Close()
+ out <- string(buf[:n])
+}
+
+func TestPipeWriter(t *testing.T) {
+ pipepath, f, ln := createNamedPipe(t)
+ defer os.Remove(f.Name())
+
+ out := make(chan string)
+ go acceptOne(t, ln, out)
+
+ client, err := New(pipepath)
+ require.Nil(t, err)
+
+ err = client.Gauge("metric", 1, []string{"key:val"}, 1)
+ require.Nil(t, err)
+
+ got := <-out
+ assert.Equal(t, got, "metric:1|g|#key:val\n")
+}
+
+func TestPipeWriterEnv(t *testing.T) {
+ pipepath, f, ln := createNamedPipe(t)
+ defer os.Remove(f.Name())
+
+ out := make(chan string)
+ go acceptOne(t, ln, out)
+
+ os.Setenv(agentHostEnvVarName, pipepath)
+ defer os.Unsetenv(agentHostEnvVarName)
+
+ client, err := New("")
+ require.Nil(t, err)
+
+ err = client.Gauge("metric", 1, []string{"key:val"}, 1)
+ require.Nil(t, err)
+
+ got := <-out
+ assert.Equal(t, got, "metric:1|g|#key:val\n")
+}
+
+func TestPipeWriterReconnect(t *testing.T) {
+ pipepath, f, ln := createNamedPipe(t)
+ defer os.Remove(f.Name())
+
+ out := make(chan string)
+ go acceptOne(t, ln, out)
+
+ client, err := New(pipepath)
+ require.Nil(t, err)
+
+ // first attempt works, then connection closes
+ err = client.Gauge("metric", 1, []string{"key:val"}, 1)
+ require.Nil(t, err, "Failed to send gauge: %s", err)
+
+ timeout := time.After(5 * time.Second)
+ select {
+ case got := <-out:
+ assert.Equal(t, got, "metric:1|g|#key:val\n")
+ case <-timeout:
+ t.Fatal("timeout receiving the first metric")
+ }
+
+ // second attempt fails by attempting the same connection
+ go acceptOne(t, ln, out)
+ err = client.Gauge("metric", 2, []string{"key:val"}, 1)
+ require.Nil(t, err, "Failed to send second gauge: %s", err)
+
+ timeout = time.After(100 * time.Millisecond)
+ select {
+ case <-out:
+ t.Fatal("Second attempt should have timed out")
+ case <-timeout:
+ // ok
+ }
+
+ // subsequent attempts succeed with new connection
+ for n := 0; n < 3; n++ {
+ err = client.Gauge("metric", 3, []string{"key:val"}, 1)
+ require.Nil(t, err, "Failed to send second gauge: %s", err)
+
+ timeout = time.After(3 * time.Second)
+ select {
+ case got := <-out:
+ assert.Equal(t, got, "metric:3|g|#key:val\n")
+ return
+ case <-timeout:
+ continue
+ }
+ }
+ t.Fatal("failed to reconnect")
+}
diff --git a/statsd/sender.go b/statsd/sender.go
new file mode 100644
index 0000000..500d53c
--- /dev/null
+++ b/statsd/sender.go
@@ -0,0 +1,111 @@
+package statsd
+
+import (
+ "io"
+ "sync/atomic"
+)
+
+// senderTelemetry contains telemetry about the health of the sender
+type senderTelemetry struct {
+ totalPayloadsSent uint64
+ totalPayloadsDroppedQueueFull uint64
+ totalPayloadsDroppedWriter uint64
+ totalBytesSent uint64
+ totalBytesDroppedQueueFull uint64
+ totalBytesDroppedWriter uint64
+}
+
+type sender struct {
+ transport io.WriteCloser
+ pool *bufferPool
+ queue chan *statsdBuffer
+ telemetry *senderTelemetry
+ stop chan struct{}
+ flushSignal chan struct{}
+}
+
+func newSender(transport io.WriteCloser, queueSize int, pool *bufferPool) *sender {
+ sender := &sender{
+ transport: transport,
+ pool: pool,
+ queue: make(chan *statsdBuffer, queueSize),
+ telemetry: &senderTelemetry{},
+ stop: make(chan struct{}),
+ flushSignal: make(chan struct{}),
+ }
+
+ go sender.sendLoop()
+ return sender
+}
+
+func (s *sender) send(buffer *statsdBuffer) {
+ select {
+ case s.queue <- buffer:
+ default:
+ atomic.AddUint64(&s.telemetry.totalPayloadsDroppedQueueFull, 1)
+ atomic.AddUint64(&s.telemetry.totalBytesDroppedQueueFull, uint64(len(buffer.bytes())))
+ s.pool.returnBuffer(buffer)
+ }
+}
+
+func (s *sender) write(buffer *statsdBuffer) {
+ _, err := s.transport.Write(buffer.bytes())
+ if err != nil {
+ atomic.AddUint64(&s.telemetry.totalPayloadsDroppedWriter, 1)
+ atomic.AddUint64(&s.telemetry.totalBytesDroppedWriter, uint64(len(buffer.bytes())))
+ } else {
+ atomic.AddUint64(&s.telemetry.totalPayloadsSent, 1)
+ atomic.AddUint64(&s.telemetry.totalBytesSent, uint64(len(buffer.bytes())))
+ }
+ s.pool.returnBuffer(buffer)
+}
+
+func (s *sender) flushTelemetryMetrics(t *Telemetry) {
+ t.TotalPayloadsSent = atomic.LoadUint64(&s.telemetry.totalPayloadsSent)
+ t.TotalPayloadsDroppedQueueFull = atomic.LoadUint64(&s.telemetry.totalPayloadsDroppedQueueFull)
+ t.TotalPayloadsDroppedWriter = atomic.LoadUint64(&s.telemetry.totalPayloadsDroppedWriter)
+
+ t.TotalBytesSent = atomic.LoadUint64(&s.telemetry.totalBytesSent)
+ t.TotalBytesDroppedQueueFull = atomic.LoadUint64(&s.telemetry.totalBytesDroppedQueueFull)
+ t.TotalBytesDroppedWriter = atomic.LoadUint64(&s.telemetry.totalBytesDroppedWriter)
+}
+
+func (s *sender) sendLoop() {
+ defer close(s.stop)
+ for {
+ select {
+ case buffer := <-s.queue:
+ s.write(buffer)
+ case <-s.stop:
+ return
+ case <-s.flushSignal:
+ // At that point we know that the workers are paused (the statsd client
+ // will pause them before calling sender.flush()).
+ // So we can fully flush the input queue
+ s.flushInputQueue()
+ s.flushSignal <- struct{}{}
+ }
+ }
+}
+
+func (s *sender) flushInputQueue() {
+ for {
+ select {
+ case buffer := <-s.queue:
+ s.write(buffer)
+ default:
+ return
+ }
+ }
+}
+func (s *sender) flush() {
+ s.flushSignal <- struct{}{}
+ <-s.flushSignal
+}
+
+func (s *sender) close() error {
+ s.stop <- struct{}{}
+ <-s.stop
+ s.flushInputQueue()
+ return s.transport.Close()
+}
diff --git a/statsd/sender_test.go b/statsd/sender_test.go
new file mode 100644
index 0000000..f2efef5
--- /dev/null
+++ b/statsd/sender_test.go
@@ -0,0 +1,99 @@
+package statsd
+
+import (
+ "fmt"
+ "testing"
+
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/mock"
+)
+
+type mockedWriter struct {
+ mock.Mock
+}
+
+func (w *mockedWriter) Write(data []byte) (n int, err error) {
+ args := w.Called(data)
+ return args.Int(0), args.Error(1)
+}
+
+func (w *mockedWriter) Close() error {
+ args := w.Called()
+ return args.Error(0)
+}
+
+func TestSender(t *testing.T) {
+ writer := new(mockedWriter)
+ writer.On("Write", mock.Anything).Return(1, nil)
+ writer.On("Close").Return(nil)
+ pool := newBufferPool(10, 1024, 1)
+ sender := newSender(writer, 10, pool)
+ buffer := pool.borrowBuffer()
+ buffer.writeSeparator() // add some dummy data
+
+ sender.send(buffer)
+
+ err := sender.close()
+ assert.Nil(t, err)
+ writer.AssertCalled(t, "Write", []byte("\n"))
+ assert.Equal(t, 10, len(pool.pool))
+
+ assert.Equal(t, uint64(1), sender.telemetry.totalPayloadsSent)
+ assert.Equal(t, uint64(0), sender.telemetry.totalPayloadsDroppedQueueFull)
+ assert.Equal(t, uint64(0), sender.telemetry.totalPayloadsDroppedWriter)
+ assert.Equal(t, uint64(1), sender.telemetry.totalBytesSent)
+ assert.Equal(t, uint64(0), sender.telemetry.totalBytesDroppedQueueFull)
+ assert.Equal(t, uint64(0), sender.telemetry.totalBytesDroppedWriter)
+
+}
+
+func TestSenderBufferFullTelemetry(t *testing.T) {
+ writer := new(mockedWriter)
+ writer.On("Write", mock.Anything).Return(0, nil)
+ writer.On("Close").Return(nil)
+
+ // a sender with a queue of 1 message
+ pool := newBufferPool(10, 1024, 1)
+ sender := newSender(writer, 0, pool)
+
+ // close the sender to prevent it from consuming the queue
+ sender.close()
+
+ // fill the queue to its max
+ buffer := pool.borrowBuffer()
+ buffer.writeSeparator() // add some dummy data
+ sender.send(buffer)
+
+ assert.Equal(t, uint64(0), sender.telemetry.totalPayloadsSent)
+ assert.Equal(t, uint64(1), sender.telemetry.totalPayloadsDroppedQueueFull)
+ assert.Equal(t, uint64(0), sender.telemetry.totalPayloadsDroppedWriter)
+
+ assert.Equal(t, uint64(0), sender.telemetry.totalBytesSent)
+ assert.Equal(t, uint64(1), sender.telemetry.totalBytesDroppedQueueFull)
+ assert.Equal(t, uint64(0), sender.telemetry.totalBytesDroppedWriter)
+}
+
+func TestSenderWriteError(t *testing.T) {
+ writer := new(mockedWriter)
+ writer.On("Write", mock.Anything).Return(1, fmt.Errorf("some write error"))
+ writer.On("Close").Return(nil)
+ pool := newBufferPool(10, 1024, 1)
+ sender := newSender(writer, 10, pool)
+ buffer := pool.borrowBuffer()
+ buffer.writeSeparator() // add some dummy data
+
+ sender.send(buffer)
+
+ err := sender.close()
+ assert.Nil(t, err)
+ writer.AssertCalled(t, "Write", []byte("\n"))
+ assert.Equal(t, 10, len(pool.pool))
+
+ assert.Equal(t, uint64(0), sender.telemetry.totalPayloadsSent)
+ assert.Equal(t, uint64(0), sender.telemetry.totalPayloadsDroppedQueueFull)
+ assert.Equal(t, uint64(1), sender.telemetry.totalPayloadsDroppedWriter)
+
+ assert.Equal(t, uint64(0), sender.telemetry.totalBytesSent)
+ assert.Equal(t, uint64(0), sender.telemetry.totalBytesDroppedQueueFull)
+ assert.Equal(t, uint64(1), sender.telemetry.totalBytesDroppedWriter)
+}
diff --git a/statsd/service_check.go b/statsd/service_check.go
new file mode 100644
index 0000000..e285046
--- /dev/null
+++ b/statsd/service_check.go
@@ -0,0 +1,57 @@
+package statsd
+
+import (
+ "fmt"
+ "time"
+)
+
+// ServiceCheckStatus support
+type ServiceCheckStatus byte
+
+const (
+ // Ok is the "ok" ServiceCheck status
+ Ok ServiceCheckStatus = 0
+ // Warn is the "warning" ServiceCheck status
+ Warn ServiceCheckStatus = 1
+ // Critical is the "critical" ServiceCheck status
+ Critical ServiceCheckStatus = 2
+ // Unknown is the "unknown" ServiceCheck status
+ Unknown ServiceCheckStatus = 3
+)
+
+// A ServiceCheck is an object that contains the status of a Datadog service check.
+type ServiceCheck struct {
+ // Name of the service check. Required.
+ Name string
+ // Status of service check. Required.
+ Status ServiceCheckStatus
+ // Timestamp is a timestamp for the serviceCheck. If not provided, the dogstatsd
+ // server will set this to the current time.
+ Timestamp time.Time
+ // Hostname for the serviceCheck.
+ Hostname string
+ // A message describing the current state of the serviceCheck.
+ Message string
+ // Tags for the serviceCheck.
+ Tags []string
+}
+
+// NewServiceCheck creates a new serviceCheck with the given name and status. Error checking
+// against these values is done at send-time, or upon running sc.Check.
+func NewServiceCheck(name string, status ServiceCheckStatus) *ServiceCheck {
+ return &ServiceCheck{
+ Name: name,
+ Status: status,
+ }
+}
+
+// Check verifies that a service check is valid: it must have a non-empty
+// name and a status within the defined range (Ok..Unknown, i.e. 0..3).
+func (sc *ServiceCheck) Check() error {
+	if len(sc.Name) == 0 {
+		return fmt.Errorf("statsd.ServiceCheck name is required")
+	}
+	if byte(sc.Status) > 3 { // byte is unsigned, so "< 0" can never hold
+		return fmt.Errorf("statsd.ServiceCheck status has invalid value")
+	}
+	return nil
+}
diff --git a/statsd/service_check_test.go b/statsd/service_check_test.go
new file mode 100644
index 0000000..b89bfd6
--- /dev/null
+++ b/statsd/service_check_test.go
@@ -0,0 +1,96 @@
+package statsd
+
+import (
+ "testing"
+
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
+)
+
+func encodeSC(sc *ServiceCheck) (string, error) {
+ err := sc.Check()
+ if err != nil {
+ return "", err
+ }
+ var buffer []byte
+ buffer = appendServiceCheck(buffer, sc, nil)
+ return string(buffer), nil
+}
+
+func TestServiceChecks(t *testing.T) {
+ matrix := []struct {
+ serviceCheck *ServiceCheck
+ expectedEncode string
+ }{
+ {
+ NewServiceCheck("DataCatService", Ok),
+ `_sc|DataCatService|0`,
+ }, {
+ NewServiceCheck("DataCatService", Warn),
+ `_sc|DataCatService|1`,
+ }, {
+ NewServiceCheck("DataCatService", Critical),
+ `_sc|DataCatService|2`,
+ }, {
+ NewServiceCheck("DataCatService", Unknown),
+ `_sc|DataCatService|3`,
+ }, {
+ &ServiceCheck{Name: "DataCatService", Status: Ok, Hostname: "DataStation.Cat"},
+ `_sc|DataCatService|0|h:DataStation.Cat`,
+ }, {
+ &ServiceCheck{Name: "DataCatService", Status: Ok, Hostname: "DataStation.Cat", Message: "Here goes valuable message"},
+ `_sc|DataCatService|0|h:DataStation.Cat|m:Here goes valuable message`,
+ }, {
+ &ServiceCheck{Name: "DataCatService", Status: Ok, Hostname: "DataStation.Cat", Message: "Here are some cyrillic chars: к л м н о п р с т у ф х ц ч ш"},
+ `_sc|DataCatService|0|h:DataStation.Cat|m:Here are some cyrillic chars: к л м н о п р с т у ф х ц ч ш`,
+ }, {
+ &ServiceCheck{Name: "DataCatService", Status: Ok, Hostname: "DataStation.Cat", Message: "Here goes valuable message", Tags: []string{"host:foo", "app:bar"}},
+ `_sc|DataCatService|0|h:DataStation.Cat|#host:foo,app:bar|m:Here goes valuable message`,
+ }, {
+ &ServiceCheck{Name: "DataCatService", Status: Ok, Hostname: "DataStation.Cat", Message: "Here goes \n that should be escaped", Tags: []string{"host:foo", "app:b\nar"}},
+ `_sc|DataCatService|0|h:DataStation.Cat|#host:foo,app:bar|m:Here goes \n that should be escaped`,
+ }, {
+ &ServiceCheck{Name: "DataCatService", Status: Ok, Hostname: "DataStation.Cat", Message: "Here goes m: that should be escaped", Tags: []string{"host:foo", "app:bar"}},
+ `_sc|DataCatService|0|h:DataStation.Cat|#host:foo,app:bar|m:Here goes m\: that should be escaped`,
+ },
+ }
+
+ for _, m := range matrix {
+ scEncoded, err := encodeSC(m.serviceCheck)
+ require.NoError(t, err)
+ assert.Equal(t, m.expectedEncode, scEncoded)
+ }
+
+}
+
+func TestNameMissing(t *testing.T) {
+ sc := NewServiceCheck("", Ok)
+ _, err := encodeSC(sc)
+ require.Error(t, err)
+ assert.Equal(t, "statsd.ServiceCheck name is required", err.Error())
+}
+
+func TestUnknownStatus(t *testing.T) {
+ sc := NewServiceCheck("sc", ServiceCheckStatus(5))
+ _, err := encodeSC(sc)
+ require.Error(t, err)
+ assert.Equal(t, "statsd.ServiceCheck status has invalid value", err.Error())
+}
+
+func TestNewServiceCheckWithTags(t *testing.T) {
+ sc := NewServiceCheck("hello", Warn)
+ sc.Tags = []string{"tag1", "tag2"}
+ s, err := encodeSC(sc)
+ require.NoError(t, err)
+ assert.Equal(t, "_sc|hello|1|#tag1,tag2", s)
+ assert.Len(t, sc.Tags, 2)
+}
+
+func TestNewServiceCheckWithTagsAppend(t *testing.T) {
+ sc := NewServiceCheck("hello", Warn)
+ sc.Tags = append(sc.Tags, "tag1", "tag2")
+ s, err := encodeSC(sc)
+ require.NoError(t, err)
+ assert.Equal(t, "_sc|hello|1|#tag1,tag2", s)
+ assert.Len(t, sc.Tags, 2)
+}
diff --git a/statsd/statsd.go b/statsd/statsd.go
index 0ae4dac..b1bcce0 100644
--- a/statsd/statsd.go
+++ b/statsd/statsd.go
@@ -6,45 +6,31 @@ adding tags and histograms and pushing upstream to Datadog.
Refer to http://docs.datadoghq.com/guides/dogstatsd/ for information about DogStatsD.
-Example Usage:
-
- // Create the client
- c, err := statsd.New("127.0.0.1:8125")
- if err != nil {
- log.Fatal(err)
- }
- // Prefix every metric with the app name
- c.Namespace = "flubber."
- // Send the EC2 availability zone as a tag with every metric
- c.Tags = append(c.Tags, "us-east-1a")
- err = c.Gauge("request.duration", 1.2, nil, 1)
-
statsd is based on go-statsd-client.
*/
package statsd
+//go:generate mockgen -source=statsd.go -destination=mocks/statsd.go
+
import (
- "bytes"
"errors"
"fmt"
"io"
- "math/rand"
+ "os"
"strconv"
"strings"
"sync"
+ "sync/atomic"
"time"
)
/*
-OptimalPayloadSize defines the optimal payload size for a UDP datagram, 1432 bytes
+OptimalUDPPayloadSize defines the optimal payload size for a UDP datagram, 1432 bytes
is optimal for regular networks with an MTU of 1500 so datagrams don't get
fragmented. It's generally recommended not to fragment UDP datagrams as losing
a single fragment will cause the entire datagram to be lost.
-
-This can be increased if your network has a greater MTU or you don't mind UDP
-datagrams getting fragmented. The practical limit is MaxUDPPayloadSize
*/
-const OptimalPayloadSize = 1432
+const OptimalUDPPayloadSize = 1432
/*
MaxUDPPayloadSize defines the maximum payload size for a UDP datagram.
@@ -54,6 +40,20 @@ any number greater than that will see frames being cut out.
*/
const MaxUDPPayloadSize = 65467
+// DefaultUDPBufferPoolSize is the default size of the buffer pool for UDP clients.
+const DefaultUDPBufferPoolSize = 2048
+
+// DefaultUDSBufferPoolSize is the default size of the buffer pool for UDS clients.
+const DefaultUDSBufferPoolSize = 512
+
+/*
+DefaultMaxAgentPayloadSize is the default maximum payload size the agent
+can receive. This can be adjusted by changing dogstatsd_buffer_size in the
+agent configuration file datadog.yaml. This is also used as the optimal payload size
+for UDS datagrams.
+*/
+const DefaultMaxAgentPayloadSize = 8192
+
/*
UnixAddressPrefix holds the prefix to use to enable Unix Domain Socket
traffic instead of UDP.
@@ -61,307 +61,553 @@ traffic instead of UDP.
const UnixAddressPrefix = "unix://"
/*
-Stat suffixes
+WindowsPipeAddressPrefix holds the prefix to use to enable Windows Named Pipes
+traffic instead of UDP.
*/
-var (
- gaugeSuffix = []byte("|g")
- countSuffix = []byte("|c")
- histogramSuffix = []byte("|h")
- distributionSuffix = []byte("|d")
- decrSuffix = []byte("-1|c")
- incrSuffix = []byte("1|c")
- setSuffix = []byte("|s")
- timingSuffix = []byte("|ms")
+const WindowsPipeAddressPrefix = `\\.\pipe\`
+
+const (
+ agentHostEnvVarName = "DD_AGENT_HOST"
+ agentPortEnvVarName = "DD_DOGSTATSD_PORT"
+ defaultUDPPort = "8125"
+)
+
+const (
+ // ddEntityID specifies client-side user-specified entity ID injection.
+ // This env var can be set to the Pod UID on Kubernetes via the downward API.
+ // Docs: https://docs.datadoghq.com/developers/dogstatsd/?tab=kubernetes#origin-detection-over-udp
+ ddEntityID = "DD_ENTITY_ID"
+
+ // ddEntityIDTag specifies the tag name for the client-side entity ID injection
+ // The Agent expects this tag to contain a non-prefixed Kubernetes Pod UID.
+ ddEntityIDTag = "dd.internal.entity_id"
+
+ // originDetectionEnabled specifies the env var to enable/disable sending the container ID field.
+ originDetectionEnabled = "DD_ORIGIN_DETECTION_ENABLED"
+)
+
+/*
+ddEnvTagsMapping is a mapping of each "DD_" prefixed environment variable
+to a specific tag name. We use a slice to keep the order and simplify tests.
+*/
+var ddEnvTagsMapping = []struct{ envName, tagName string }{
+ {ddEntityID, ddEntityIDTag}, // Client-side entity ID injection for container tagging.
+ {"DD_ENV", "env"}, // The name of the env in which the service runs.
+ {"DD_SERVICE", "service"}, // The name of the running service.
+ {"DD_VERSION", "version"}, // The current version of the running service.
+}
+
+type metricType int
+
+const (
+ gauge metricType = iota
+ count
+ histogram
+ histogramAggregated
+ distribution
+ distributionAggregated
+ set
+ timing
+ timingAggregated
+ event
+ serviceCheck
+)
+
+type receivingMode int
+
+const (
+ mutexMode receivingMode = iota
+ channelMode
+)
+
+const (
+ writerNameUDP string = "udp"
+ writerNameUDS string = "uds"
+ writerWindowsPipe string = "pipe"
)
-// A statsdWriter offers a standard interface regardless of the underlying
-// protocol. For now UDS and UPD writers are available.
-type statsdWriter interface {
- Write(data []byte) (n int, err error)
- SetWriteTimeout(time.Duration) error
+type metric struct {
+ metricType metricType
+ namespace string
+ globalTags []string
+ name string
+ fvalue float64
+ fvalues []float64
+ ivalue int64
+ svalue string
+ evalue *Event
+ scvalue *ServiceCheck
+ tags []string
+ stags string
+ rate float64
+}
+
+type noClientErr string
+
+// ErrNoClient is returned if statsd reporting methods are invoked on
+// a nil client.
+const ErrNoClient = noClientErr("statsd client is nil")
+
+func (e noClientErr) Error() string {
+ return string(e)
+}
+
+// ClientInterface is an interface that exposes the common client functions for the
+// purpose of being able to provide a no-op client or even mocking. This can aid
+// downstream users' with their testing.
+type ClientInterface interface {
+ // Gauge measures the value of a metric at a particular time.
+ Gauge(name string, value float64, tags []string, rate float64) error
+
+ // Count tracks how many times something happened per second.
+ Count(name string, value int64, tags []string, rate float64) error
+
+ // Histogram tracks the statistical distribution of a set of values on each host.
+ Histogram(name string, value float64, tags []string, rate float64) error
+
+ // Distribution tracks the statistical distribution of a set of values across your infrastructure.
+ Distribution(name string, value float64, tags []string, rate float64) error
+
+ // Decr is just Count of -1
+ Decr(name string, tags []string, rate float64) error
+
+ // Incr is just Count of 1
+ Incr(name string, tags []string, rate float64) error
+
+ // Set counts the number of unique elements in a group.
+ Set(name string, value string, tags []string, rate float64) error
+
+ // Timing sends timing information, it is an alias for TimeInMilliseconds
+ Timing(name string, value time.Duration, tags []string, rate float64) error
+
+ // TimeInMilliseconds sends timing information in milliseconds.
+ // It is flushed by statsd with percentiles, mean and other info (https://github.com/etsy/statsd/blob/master/docs/metric_types.md#timing)
+ TimeInMilliseconds(name string, value float64, tags []string, rate float64) error
+
+ // Event sends the provided Event.
+ Event(e *Event) error
+
+ // SimpleEvent sends an event with the provided title and text.
+ SimpleEvent(title, text string) error
+
+ // ServiceCheck sends the provided ServiceCheck.
+ ServiceCheck(sc *ServiceCheck) error
+
+	// SimpleServiceCheck sends a serviceCheck with the provided name and status.
+ SimpleServiceCheck(name string, status ServiceCheckStatus) error
+
+ // Close the client connection.
Close() error
+
+ // Flush forces a flush of all the queued dogstatsd payloads.
+ Flush() error
+
+ // IsClosed returns if the client has been closed.
+ IsClosed() bool
+
+ // GetTelemetry return the telemetry metrics for the client since it started.
+ GetTelemetry() Telemetry
}
// A Client is a handle for sending messages to dogstatsd. It is safe to
// use one Client from multiple goroutines simultaneously.
type Client struct {
- // Writer handles the underlying networking protocol
- writer statsdWriter
- // Namespace to prepend to all statsd calls
- Namespace string
- // Tags are global tags to be added to every statsd call
- Tags []string
- // skipErrors turns off error passing and allows UDS to emulate UDP behaviour
- SkipErrors bool
- // BufferLength is the length of the buffer in commands.
- bufferLength int
- flushTime time.Duration
- commands []string
- buffer bytes.Buffer
- stop chan struct{}
- sync.Mutex
-}
-
-// New returns a pointer to a new Client given an addr in the format "hostname:port" or
-// "unix:///path/to/socket".
-func New(addr string) (*Client, error) {
- if strings.HasPrefix(addr, UnixAddressPrefix) {
- w, err := newUdsWriter(addr[len(UnixAddressPrefix)-1:])
- if err != nil {
- return nil, err
+ // Sender handles the underlying networking protocol
+ sender *sender
+ // namespace to prepend to all statsd calls
+ namespace string
+ // tags are global tags to be added to every statsd call
+ tags []string
+ flushTime time.Duration
+ telemetry *statsdTelemetry
+ telemetryClient *telemetryClient
+ stop chan struct{}
+ wg sync.WaitGroup
+ workers []*worker
+ closerLock sync.Mutex
+ workersMode receivingMode
+ aggregatorMode receivingMode
+ agg *aggregator
+ aggExtended *aggregator
+ options []Option
+ addrOption string
+ isClosed bool
+}
+
+// statsdTelemetry contains telemetry metrics about the client
+type statsdTelemetry struct {
+ totalMetricsGauge uint64
+ totalMetricsCount uint64
+ totalMetricsHistogram uint64
+ totalMetricsDistribution uint64
+ totalMetricsSet uint64
+ totalMetricsTiming uint64
+ totalEvents uint64
+ totalServiceChecks uint64
+ totalDroppedOnReceive uint64
+}
+
+// Verify that Client implements the ClientInterface.
+// https://golang.org/doc/faq#guarantee_satisfies_interface
+var _ ClientInterface = &Client{}
+
+func resolveAddr(addr string) string {
+ envPort := ""
+ if addr == "" {
+ addr = os.Getenv(agentHostEnvVarName)
+ envPort = os.Getenv(agentPortEnvVarName)
+ }
+
+ if addr == "" {
+ return ""
+ }
+
+ if !strings.HasPrefix(addr, WindowsPipeAddressPrefix) && !strings.HasPrefix(addr, UnixAddressPrefix) {
+ if !strings.Contains(addr, ":") {
+ if envPort != "" {
+ addr = fmt.Sprintf("%s:%s", addr, envPort)
+ } else {
+ addr = fmt.Sprintf("%s:%s", addr, defaultUDPPort)
+ }
}
- return NewWithWriter(w)
}
- w, err := newUDPWriter(addr)
- if err != nil {
- return nil, err
- }
- return NewWithWriter(w)
+ return addr
}
-// NewWithWriter creates a new Client with given writer. Writer is a
-// io.WriteCloser + SetWriteTimeout(time.Duration) error
-func NewWithWriter(w statsdWriter) (*Client, error) {
- client := &Client{writer: w, SkipErrors: false}
- return client, nil
+func createWriter(addr string, writeTimeout time.Duration) (io.WriteCloser, string, error) {
+ addr = resolveAddr(addr)
+ if addr == "" {
+ return nil, "", errors.New("No address passed and autodetection from environment failed")
+ }
+
+ switch {
+ case strings.HasPrefix(addr, WindowsPipeAddressPrefix):
+ w, err := newWindowsPipeWriter(addr, writeTimeout)
+ return w, writerWindowsPipe, err
+ case strings.HasPrefix(addr, UnixAddressPrefix):
+ w, err := newUDSWriter(addr[len(UnixAddressPrefix):], writeTimeout)
+ return w, writerNameUDS, err
+ default:
+ w, err := newUDPWriter(addr, writeTimeout)
+ return w, writerNameUDP, err
+ }
}
-// NewBuffered returns a Client that buffers its output and sends it in chunks.
-// Buflen is the length of the buffer in number of commands.
-func NewBuffered(addr string, buflen int) (*Client, error) {
- client, err := New(addr)
+// New returns a pointer to a new Client given an addr in the format "hostname:port" for UDP,
+// "unix:///path/to/socket" for UDS or "\\.\pipe\path\to\pipe" for Windows Named Pipes.
+func New(addr string, options ...Option) (*Client, error) {
+ o, err := resolveOptions(options)
if err != nil {
return nil, err
}
- client.bufferLength = buflen
- client.commands = make([]string, 0, buflen)
- client.flushTime = time.Millisecond * 100
- client.stop = make(chan struct{}, 1)
- go client.watch()
- return client, nil
-}
-// format a message from its name, value, tags and rate. Also adds global
-// namespace and tags.
-func (c *Client) format(name string, value interface{}, suffix []byte, tags []string, rate float64) string {
- var buf bytes.Buffer
- if c.Namespace != "" {
- buf.WriteString(c.Namespace)
+ w, writerType, err := createWriter(addr, o.writeTimeout)
+ if err != nil {
+ return nil, err
}
- buf.WriteString(name)
- buf.WriteString(":")
-
- switch val := value.(type) {
- case float64:
- buf.Write(strconv.AppendFloat([]byte{}, val, 'f', 6, 64))
- case int64:
- buf.Write(strconv.AppendInt([]byte{}, val, 10))
-
- case string:
- buf.WriteString(val)
-
- default:
- // do nothing
+ client, err := newWithWriter(w, o, writerType)
+ if err == nil {
+ client.options = append(client.options, options...)
+ client.addrOption = addr
}
- buf.Write(suffix)
+ return client, err
+}
- if rate < 1 {
- buf.WriteString(`|@`)
- buf.WriteString(strconv.FormatFloat(rate, 'f', -1, 64))
+// NewWithWriter creates a new Client with given writer. Writer is a
+// io.WriteCloser
+func NewWithWriter(w io.WriteCloser, options ...Option) (*Client, error) {
+ o, err := resolveOptions(options)
+ if err != nil {
+ return nil, err
}
-
- writeTagString(&buf, c.Tags, tags)
-
- return buf.String()
+ return newWithWriter(w, o, "custom")
}
-// SetWriteTimeout allows the user to set a custom UDS write timeout. Not supported for UDP.
-func (c *Client) SetWriteTimeout(d time.Duration) error {
+// CloneWithExtraOptions create a new Client with extra options
+func CloneWithExtraOptions(c *Client, options ...Option) (*Client, error) {
if c == nil {
- return nil
+ return nil, ErrNoClient
}
- return c.writer.SetWriteTimeout(d)
+
+ if c.addrOption == "" {
+ return nil, fmt.Errorf("can't clone client with no addrOption")
+ }
+ opt := append(c.options, options...)
+ return New(c.addrOption, opt...)
}
-func (c *Client) watch() {
- ticker := time.NewTicker(c.flushTime)
+func newWithWriter(w io.WriteCloser, o *Options, writerName string) (*Client, error) {
+ c := Client{
+ namespace: o.namespace,
+ tags: o.tags,
+ telemetry: &statsdTelemetry{},
+ }
- for {
- select {
- case <-ticker.C:
- c.Lock()
- if len(c.commands) > 0 {
- // FIXME: eating error here
- c.flushLocked()
+ hasEntityID := false
+ // Inject values of DD_* environment variables as global tags.
+ for _, mapping := range ddEnvTagsMapping {
+ if value := os.Getenv(mapping.envName); value != "" {
+ if mapping.envName == ddEntityID {
+ hasEntityID = true
}
- c.Unlock()
- case <-c.stop:
- ticker.Stop()
- return
+ c.tags = append(c.tags, fmt.Sprintf("%s:%s", mapping.tagName, value))
}
}
-}
-func (c *Client) append(cmd string) error {
- c.Lock()
- defer c.Unlock()
- c.commands = append(c.commands, cmd)
- // if we should flush, lets do it
- if len(c.commands) == c.bufferLength {
- if err := c.flushLocked(); err != nil {
- return err
+ if !hasEntityID {
+ initContainerID(o.containerID, isOriginDetectionEnabled(o, hasEntityID))
+ }
+
+ if o.maxBytesPerPayload == 0 {
+ if writerName == writerNameUDS {
+ o.maxBytesPerPayload = DefaultMaxAgentPayloadSize
+ } else {
+ o.maxBytesPerPayload = OptimalUDPPayloadSize
+ }
+ }
+ if o.bufferPoolSize == 0 {
+ if writerName == writerNameUDS {
+ o.bufferPoolSize = DefaultUDSBufferPoolSize
+ } else {
+ o.bufferPoolSize = DefaultUDPBufferPoolSize
+ }
+ }
+ if o.senderQueueSize == 0 {
+ if writerName == writerNameUDS {
+ o.senderQueueSize = DefaultUDSBufferPoolSize
+ } else {
+ o.senderQueueSize = DefaultUDPBufferPoolSize
}
}
- return nil
-}
-func (c *Client) joinMaxSize(cmds []string, sep string, maxSize int) ([][]byte, []int) {
- c.buffer.Reset() //clear buffer
+ bufferPool := newBufferPool(o.bufferPoolSize, o.maxBytesPerPayload, o.maxMessagesPerPayload)
+ c.sender = newSender(w, o.senderQueueSize, bufferPool)
+ c.aggregatorMode = o.receiveMode
- var frames [][]byte
- var ncmds []int
- sepBytes := []byte(sep)
- sepLen := len(sep)
+ c.workersMode = o.receiveMode
+ // channelMode at the worker level is not enabled when
+ // ExtendedAggregation is enabled, since the user app will not directly
+ // use the workers (the aggregator sits between the app and the
+ // workers).
+ if o.extendedAggregation {
+ c.workersMode = mutexMode
+ }
- elem := 0
- for _, cmd := range cmds {
- needed := len(cmd)
+ if o.aggregation || o.extendedAggregation {
+ c.agg = newAggregator(&c)
+ c.agg.start(o.aggregationFlushInterval)
- if elem != 0 {
- needed = needed + sepLen
- }
+ if o.extendedAggregation {
+ c.aggExtended = c.agg
- if c.buffer.Len()+needed <= maxSize {
- if elem != 0 {
- c.buffer.Write(sepBytes)
+ if c.aggregatorMode == channelMode {
+ c.agg.startReceivingMetric(o.channelModeBufferSize, o.workersCount)
}
- c.buffer.WriteString(cmd)
- elem++
- } else {
- frames = append(frames, copyAndResetBuffer(&c.buffer))
- ncmds = append(ncmds, elem)
- // if cmd is bigger than maxSize it will get flushed on next loop
- c.buffer.WriteString(cmd)
- elem = 1
}
}
- //add whatever is left! if there's actually something
- if c.buffer.Len() > 0 {
- frames = append(frames, copyAndResetBuffer(&c.buffer))
- ncmds = append(ncmds, elem)
+ for i := 0; i < o.workersCount; i++ {
+ w := newWorker(bufferPool, c.sender)
+ c.workers = append(c.workers, w)
+
+ if c.workersMode == channelMode {
+ w.startReceivingMetric(o.channelModeBufferSize)
+ }
+ }
+
+ c.flushTime = o.bufferFlushInterval
+ c.stop = make(chan struct{}, 1)
+
+ c.wg.Add(1)
+ go func() {
+ defer c.wg.Done()
+ c.watch()
+ }()
+
+ if o.telemetry {
+ if o.telemetryAddr == "" {
+ c.telemetryClient = newTelemetryClient(&c, writerName, c.agg != nil)
+ } else {
+ var err error
+ c.telemetryClient, err = newTelemetryClientWithCustomAddr(&c, writerName, o.telemetryAddr, c.agg != nil, bufferPool, o.writeTimeout)
+ if err != nil {
+ return nil, err
+ }
+ }
+ c.telemetryClient.run(&c.wg, c.stop)
}
- return frames, ncmds
+ return &c, nil
}
-func copyAndResetBuffer(buf *bytes.Buffer) []byte {
- tmpBuf := make([]byte, buf.Len())
- copy(tmpBuf, buf.Bytes())
- buf.Reset()
- return tmpBuf
+func (c *Client) watch() {
+ ticker := time.NewTicker(c.flushTime)
+
+ for {
+ select {
+ case <-ticker.C:
+ for _, w := range c.workers {
+ w.flush()
+ }
+ case <-c.stop:
+ ticker.Stop()
+ return
+ }
+ }
}
-// Flush forces a flush of the pending commands in the buffer
+// Flush forces a flush of all the queued dogstatsd payloads. This method is
+// blocking and will not return until everything is sent through the network.
+// In mutexMode, this will also block sampling new data to the client while the
+// workers and sender are flushed.
func (c *Client) Flush() error {
if c == nil {
- return nil
+ return ErrNoClient
}
- c.Lock()
- defer c.Unlock()
- return c.flushLocked()
-}
-
-// flush the commands in the buffer. Lock must be held by caller.
-func (c *Client) flushLocked() error {
- frames, flushable := c.joinMaxSize(c.commands, "\n", OptimalPayloadSize)
- var err error
- cmdsFlushed := 0
- for i, data := range frames {
- _, e := c.writer.Write(data)
- if e != nil {
- err = e
- break
- }
- cmdsFlushed += flushable[i]
+ if c.agg != nil {
+ c.agg.flush()
}
-
- // clear the slice with a slice op, doesn't realloc
- if cmdsFlushed == len(c.commands) {
- c.commands = c.commands[:0]
- } else {
- //this case will cause a future realloc...
- // drop problematic command though (sorry).
- c.commands = c.commands[cmdsFlushed+1:]
+ for _, w := range c.workers {
+ w.pause()
+ defer w.unpause()
+ w.flushUnsafe()
}
- return err
+ // Now that the workers are paused the sender can flush the queue between
+ // workers and senders
+ c.sender.flush()
+ return nil
}
-func (c *Client) sendMsg(msg string) error {
- // return an error if message is bigger than MaxUDPPayloadSize
- if len(msg) > MaxUDPPayloadSize {
- return errors.New("message size exceeds MaxUDPPayloadSize")
- }
+// IsClosed returns if the client has been closed.
+func (c *Client) IsClosed() bool {
+ c.closerLock.Lock()
+ defer c.closerLock.Unlock()
+ return c.isClosed
+}
- // if this client is buffered, then we'll just append this
- if c.bufferLength > 0 {
- return c.append(msg)
- }
+func (c *Client) flushTelemetryMetrics(t *Telemetry) {
+ t.TotalMetricsGauge = atomic.LoadUint64(&c.telemetry.totalMetricsGauge)
+ t.TotalMetricsCount = atomic.LoadUint64(&c.telemetry.totalMetricsCount)
+ t.TotalMetricsSet = atomic.LoadUint64(&c.telemetry.totalMetricsSet)
+ t.TotalMetricsHistogram = atomic.LoadUint64(&c.telemetry.totalMetricsHistogram)
+ t.TotalMetricsDistribution = atomic.LoadUint64(&c.telemetry.totalMetricsDistribution)
+ t.TotalMetricsTiming = atomic.LoadUint64(&c.telemetry.totalMetricsTiming)
+ t.TotalEvents = atomic.LoadUint64(&c.telemetry.totalEvents)
+ t.TotalServiceChecks = atomic.LoadUint64(&c.telemetry.totalServiceChecks)
+ t.TotalDroppedOnReceive = atomic.LoadUint64(&c.telemetry.totalDroppedOnReceive)
+}
- _, err := c.writer.Write([]byte(msg))
+// GetTelemetry returns the telemetry metrics for the client since it started.
+func (c *Client) GetTelemetry() Telemetry {
+ return c.telemetryClient.getTelemetry()
+}
+
+func (c *Client) send(m metric) error {
+ h := hashString32(m.name)
+ worker := c.workers[h%uint32(len(c.workers))]
- if c.SkipErrors {
+ if c.workersMode == channelMode {
+ select {
+ case worker.inputMetrics <- m:
+ default:
+ atomic.AddUint64(&c.telemetry.totalDroppedOnReceive, 1)
+ }
return nil
}
- return err
+ return worker.processMetric(m)
}
-// send handles sampling and sends the message over UDP. It also adds global namespace prefixes and tags.
-func (c *Client) send(name string, value interface{}, suffix []byte, tags []string, rate float64) error {
- if c == nil {
- return nil
- }
- if rate < 1 && rand.Float64() > rate {
+// sendBlocking is used by the aggregator to inject aggregated metrics.
+func (c *Client) sendBlocking(m metric) error {
+ m.globalTags = c.tags
+ m.namespace = c.namespace
+
+ h := hashString32(m.name)
+ worker := c.workers[h%uint32(len(c.workers))]
+ return worker.processMetric(m)
+}
+
+func (c *Client) sendToAggregator(mType metricType, name string, value float64, tags []string, rate float64, f bufferedMetricSampleFunc) error {
+ if c.aggregatorMode == channelMode {
+ select {
+ case c.aggExtended.inputMetrics <- metric{metricType: mType, name: name, fvalue: value, tags: tags, rate: rate}:
+ default:
+ atomic.AddUint64(&c.telemetry.totalDroppedOnReceive, 1)
+ }
return nil
}
- data := c.format(name, value, suffix, tags, rate)
- return c.sendMsg(data)
+ return f(name, value, tags, rate)
}
// Gauge measures the value of a metric at a particular time.
func (c *Client) Gauge(name string, value float64, tags []string, rate float64) error {
- return c.send(name, value, gaugeSuffix, tags, rate)
+ if c == nil {
+ return ErrNoClient
+ }
+ atomic.AddUint64(&c.telemetry.totalMetricsGauge, 1)
+ if c.agg != nil {
+ return c.agg.gauge(name, value, tags)
+ }
+ return c.send(metric{metricType: gauge, name: name, fvalue: value, tags: tags, rate: rate, globalTags: c.tags, namespace: c.namespace})
}
// Count tracks how many times something happened per second.
func (c *Client) Count(name string, value int64, tags []string, rate float64) error {
- return c.send(name, value, countSuffix, tags, rate)
+ if c == nil {
+ return ErrNoClient
+ }
+ atomic.AddUint64(&c.telemetry.totalMetricsCount, 1)
+ if c.agg != nil {
+ return c.agg.count(name, value, tags)
+ }
+ return c.send(metric{metricType: count, name: name, ivalue: value, tags: tags, rate: rate, globalTags: c.tags, namespace: c.namespace})
}
// Histogram tracks the statistical distribution of a set of values on each host.
func (c *Client) Histogram(name string, value float64, tags []string, rate float64) error {
- return c.send(name, value, histogramSuffix, tags, rate)
+ if c == nil {
+ return ErrNoClient
+ }
+ atomic.AddUint64(&c.telemetry.totalMetricsHistogram, 1)
+ if c.aggExtended != nil {
+ return c.sendToAggregator(histogram, name, value, tags, rate, c.aggExtended.histogram)
+ }
+ return c.send(metric{metricType: histogram, name: name, fvalue: value, tags: tags, rate: rate, globalTags: c.tags, namespace: c.namespace})
}
// Distribution tracks the statistical distribution of a set of values across your infrastructure.
func (c *Client) Distribution(name string, value float64, tags []string, rate float64) error {
- return c.send(name, value, distributionSuffix, tags, rate)
+ if c == nil {
+ return ErrNoClient
+ }
+ atomic.AddUint64(&c.telemetry.totalMetricsDistribution, 1)
+ if c.aggExtended != nil {
+ return c.sendToAggregator(distribution, name, value, tags, rate, c.aggExtended.distribution)
+ }
+ return c.send(metric{metricType: distribution, name: name, fvalue: value, tags: tags, rate: rate, globalTags: c.tags, namespace: c.namespace})
}
// Decr is just Count of -1
func (c *Client) Decr(name string, tags []string, rate float64) error {
- return c.send(name, nil, decrSuffix, tags, rate)
+ return c.Count(name, -1, tags, rate)
}
// Incr is just Count of 1
func (c *Client) Incr(name string, tags []string, rate float64) error {
- return c.send(name, nil, incrSuffix, tags, rate)
+ return c.Count(name, 1, tags, rate)
}
// Set counts the number of unique elements in a group.
func (c *Client) Set(name string, value string, tags []string, rate float64) error {
- return c.send(name, value, setSuffix, tags, rate)
+ if c == nil {
+ return ErrNoClient
+ }
+ atomic.AddUint64(&c.telemetry.totalMetricsSet, 1)
+ if c.agg != nil {
+ return c.agg.set(name, value, tags)
+ }
+ return c.send(metric{metricType: set, name: name, svalue: value, tags: tags, rate: rate, globalTags: c.tags, namespace: c.namespace})
}
// Timing sends timing information, it is an alias for TimeInMilliseconds
@@ -372,19 +618,23 @@ func (c *Client) Timing(name string, value time.Duration, tags []string, rate fl
// TimeInMilliseconds sends timing information in milliseconds.
// It is flushed by statsd with percentiles, mean and other info (https://github.com/etsy/statsd/blob/master/docs/metric_types.md#timing)
func (c *Client) TimeInMilliseconds(name string, value float64, tags []string, rate float64) error {
- return c.send(name, value, timingSuffix, tags, rate)
+ if c == nil {
+ return ErrNoClient
+ }
+ atomic.AddUint64(&c.telemetry.totalMetricsTiming, 1)
+ if c.aggExtended != nil {
+ return c.sendToAggregator(timing, name, value, tags, rate, c.aggExtended.timing)
+ }
+ return c.send(metric{metricType: timing, name: name, fvalue: value, tags: tags, rate: rate, globalTags: c.tags, namespace: c.namespace})
}
// Event sends the provided Event.
func (c *Client) Event(e *Event) error {
if c == nil {
- return nil
- }
- stat, err := e.Encode(c.Tags...)
- if err != nil {
- return err
+ return ErrNoClient
}
- return c.sendMsg(stat)
+ atomic.AddUint64(&c.telemetry.totalEvents, 1)
+ return c.send(metric{metricType: event, evalue: e, rate: 1, globalTags: c.tags, namespace: c.namespace})
}
// SimpleEvent sends an event with the provided title and text.
@@ -396,13 +646,10 @@ func (c *Client) SimpleEvent(title, text string) error {
// ServiceCheck sends the provided ServiceCheck.
func (c *Client) ServiceCheck(sc *ServiceCheck) error {
if c == nil {
- return nil
- }
- stat, err := sc.Encode(c.Tags...)
- if err != nil {
- return err
+ return ErrNoClient
}
- return c.sendMsg(stat)
+ atomic.AddUint64(&c.telemetry.totalServiceChecks, 1)
+ return c.send(metric{metricType: serviceCheck, scvalue: sc, rate: 1, globalTags: c.tags, namespace: c.namespace})
}
// SimpleServiceCheck sends an serviceCheck with the provided name and status.
@@ -414,267 +661,76 @@ func (c *Client) SimpleServiceCheck(name string, status ServiceCheckStatus) erro
// Close the client connection.
func (c *Client) Close() error {
if c == nil {
- return nil
- }
- select {
- case c.stop <- struct{}{}:
- default:
+ return ErrNoClient
}
- // if this client is buffered, flush before closing the writer
- if c.bufferLength > 0 {
- if err := c.Flush(); err != nil {
- return err
- }
- }
+ // Acquire closer lock to ensure only one thread can close the stop channel
+ c.closerLock.Lock()
+ defer c.closerLock.Unlock()
- return c.writer.Close()
-}
-
-// Events support
-// EventAlertType and EventAlertPriority became exported types after this issue was submitted: https://github.com/DataDog/datadog-go/issues/41
-// The reason why they got exported is so that client code can directly use the types.
-
-// EventAlertType is the alert type for events
-type EventAlertType string
-
-const (
- // Info is the "info" AlertType for events
- Info EventAlertType = "info"
- // Error is the "error" AlertType for events
- Error EventAlertType = "error"
- // Warning is the "warning" AlertType for events
- Warning EventAlertType = "warning"
- // Success is the "success" AlertType for events
- Success EventAlertType = "success"
-)
-
-// EventPriority is the event priority for events
-type EventPriority string
-
-const (
- // Normal is the "normal" Priority for events
- Normal EventPriority = "normal"
- // Low is the "low" Priority for events
- Low EventPriority = "low"
-)
-
-// An Event is an object that can be posted to your DataDog event stream.
-type Event struct {
- // Title of the event. Required.
- Title string
- // Text is the description of the event. Required.
- Text string
- // Timestamp is a timestamp for the event. If not provided, the dogstatsd
- // server will set this to the current time.
- Timestamp time.Time
- // Hostname for the event.
- Hostname string
- // AggregationKey groups this event with others of the same key.
- AggregationKey string
- // Priority of the event. Can be statsd.Low or statsd.Normal.
- Priority EventPriority
- // SourceTypeName is a source type for the event.
- SourceTypeName string
- // AlertType can be statsd.Info, statsd.Error, statsd.Warning, or statsd.Success.
- // If absent, the default value applied by the dogstatsd server is Info.
- AlertType EventAlertType
- // Tags for the event.
- Tags []string
-}
-
-// NewEvent creates a new event with the given title and text. Error checking
-// against these values is done at send-time, or upon running e.Check.
-func NewEvent(title, text string) *Event {
- return &Event{
- Title: title,
- Text: text,
- }
-}
-
-// Check verifies that an event is valid.
-func (e Event) Check() error {
- if len(e.Title) == 0 {
- return fmt.Errorf("statsd.Event title is required")
- }
- if len(e.Text) == 0 {
- return fmt.Errorf("statsd.Event text is required")
- }
- return nil
-}
-
-// Encode returns the dogstatsd wire protocol representation for an event.
-// Tags may be passed which will be added to the encoded output but not to
-// the Event's list of tags, eg. for default tags.
-func (e Event) Encode(tags ...string) (string, error) {
- err := e.Check()
- if err != nil {
- return "", err
- }
- text := e.escapedText()
-
- var buffer bytes.Buffer
- buffer.WriteString("_e{")
- buffer.WriteString(strconv.FormatInt(int64(len(e.Title)), 10))
- buffer.WriteRune(',')
- buffer.WriteString(strconv.FormatInt(int64(len(text)), 10))
- buffer.WriteString("}:")
- buffer.WriteString(e.Title)
- buffer.WriteRune('|')
- buffer.WriteString(text)
-
- if !e.Timestamp.IsZero() {
- buffer.WriteString("|d:")
- buffer.WriteString(strconv.FormatInt(int64(e.Timestamp.Unix()), 10))
- }
-
- if len(e.Hostname) != 0 {
- buffer.WriteString("|h:")
- buffer.WriteString(e.Hostname)
+ if c.isClosed {
+ return nil
}
- if len(e.AggregationKey) != 0 {
- buffer.WriteString("|k:")
- buffer.WriteString(e.AggregationKey)
-
+ // Notify all other threads that they should stop
+ select {
+ case <-c.stop:
+ return nil
+ default:
}
+ close(c.stop)
- if len(e.Priority) != 0 {
- buffer.WriteString("|p:")
- buffer.WriteString(string(e.Priority))
+ if c.workersMode == channelMode {
+ for _, w := range c.workers {
+ w.stopReceivingMetric()
+ }
}
- if len(e.SourceTypeName) != 0 {
- buffer.WriteString("|s:")
- buffer.WriteString(e.SourceTypeName)
+ // flush the aggregator first
+ if c.agg != nil {
+ if c.aggExtended != nil && c.aggregatorMode == channelMode {
+ c.agg.stopReceivingMetric()
+ }
+ c.agg.stop()
}
- if len(e.AlertType) != 0 {
- buffer.WriteString("|t:")
- buffer.WriteString(string(e.AlertType))
- }
+ // Wait for the threads to stop
+ c.wg.Wait()
- writeTagString(&buffer, tags, e.Tags)
+ c.Flush()
- return buffer.String(), nil
+ c.isClosed = true
+ return c.sender.close()
}
-// ServiceCheckStatus support
-type ServiceCheckStatus byte
-
-const (
- // Ok is the "ok" ServiceCheck status
- Ok ServiceCheckStatus = 0
- // Warn is the "warning" ServiceCheck status
- Warn ServiceCheckStatus = 1
- // Critical is the "critical" ServiceCheck status
- Critical ServiceCheckStatus = 2
- // Unknown is the "unknown" ServiceCheck status
- Unknown ServiceCheckStatus = 3
-)
-
-// An ServiceCheck is an object that contains status of DataDog service check.
-type ServiceCheck struct {
- // Name of the service check. Required.
- Name string
- // Status of service check. Required.
- Status ServiceCheckStatus
- // Timestamp is a timestamp for the serviceCheck. If not provided, the dogstatsd
- // server will set this to the current time.
- Timestamp time.Time
- // Hostname for the serviceCheck.
- Hostname string
- // A message describing the current state of the serviceCheck.
- Message string
- // Tags for the serviceCheck.
- Tags []string
-}
-
-// NewServiceCheck creates a new serviceCheck with the given name and status. Error checking
-// against these values is done at send-time, or upon running sc.Check.
-func NewServiceCheck(name string, status ServiceCheckStatus) *ServiceCheck {
- return &ServiceCheck{
- Name: name,
- Status: status,
+// isOriginDetectionEnabled returns whether the clients should fill the container field.
+//
+// If DD_ENTITY_ID is set, we don't send the container ID
+// If a user-defined container ID is provided, we don't ignore origin detection
+// as dd.internal.entity_id is prioritized over the container field for backward compatibility.
+// If DD_ENTITY_ID is not set, we try to fill the container field automatically unless
+// DD_ORIGIN_DETECTION_ENABLED is explicitly set to false.
+func isOriginDetectionEnabled(o *Options, hasEntityID bool) bool {
+ if !o.originDetection || hasEntityID || o.containerID != "" {
+ // originDetection is explicitly disabled
+ // or DD_ENTITY_ID was found
+ // or a user-defined container ID was provided
+ return false
}
-}
-// Check verifies that an event is valid.
-func (sc ServiceCheck) Check() error {
- if len(sc.Name) == 0 {
- return fmt.Errorf("statsd.ServiceCheck name is required")
- }
- if byte(sc.Status) < 0 || byte(sc.Status) > 3 {
- return fmt.Errorf("statsd.ServiceCheck status has invalid value")
+ envVarValue := os.Getenv(originDetectionEnabled)
+ if envVarValue == "" {
+ // DD_ORIGIN_DETECTION_ENABLED is not set
+ // default to true
+ return true
}
- return nil
-}
-// Encode returns the dogstatsd wire protocol representation for an serviceCheck.
-// Tags may be passed which will be added to the encoded output but not to
-// the Event's list of tags, eg. for default tags.
-func (sc ServiceCheck) Encode(tags ...string) (string, error) {
- err := sc.Check()
+ enabled, err := strconv.ParseBool(envVarValue)
if err != nil {
- return "", err
- }
- message := sc.escapedMessage()
-
- var buffer bytes.Buffer
- buffer.WriteString("_sc|")
- buffer.WriteString(sc.Name)
- buffer.WriteRune('|')
- buffer.WriteString(strconv.FormatInt(int64(sc.Status), 10))
-
- if !sc.Timestamp.IsZero() {
- buffer.WriteString("|d:")
- buffer.WriteString(strconv.FormatInt(int64(sc.Timestamp.Unix()), 10))
- }
-
- if len(sc.Hostname) != 0 {
- buffer.WriteString("|h:")
- buffer.WriteString(sc.Hostname)
+ // Error due to an unsupported DD_ORIGIN_DETECTION_ENABLED value
+ // default to true
+ return true
}
- writeTagString(&buffer, tags, sc.Tags)
-
- if len(message) != 0 {
- buffer.WriteString("|m:")
- buffer.WriteString(message)
- }
-
- return buffer.String(), nil
-}
-
-func (e Event) escapedText() string {
- return strings.Replace(e.Text, "\n", "\\n", -1)
-}
-
-func (sc ServiceCheck) escapedMessage() string {
- msg := strings.Replace(sc.Message, "\n", "\\n", -1)
- return strings.Replace(msg, "m:", `m\:`, -1)
-}
-
-func removeNewlines(str string) string {
- return strings.Replace(str, "\n", "", -1)
-}
-
-func writeTagString(w io.Writer, tagList1, tagList2 []string) {
- // the tag lists may be shared with other callers, so we cannot modify
- // them in any way (which means we cannot append to them either)
- // therefore we must make an entirely separate copy just for this call
- totalLen := len(tagList1) + len(tagList2)
- if totalLen == 0 {
- return
- }
- tags := make([]string, 0, totalLen)
- tags = append(tags, tagList1...)
- tags = append(tags, tagList2...)
-
- io.WriteString(w, "|#")
- io.WriteString(w, removeNewlines(tags[0]))
- for _, tag := range tags[1:] {
- io.WriteString(w, ",")
- io.WriteString(w, removeNewlines(tag))
- }
+ return enabled
}
diff --git a/statsd/statsd_benchmark_test.go b/statsd/statsd_benchmark_test.go
index b3ad979..b3143ba 100644
--- a/statsd/statsd_benchmark_test.go
+++ b/statsd/statsd_benchmark_test.go
@@ -1,68 +1,202 @@
-package statsd
+package statsd_test
import (
"fmt"
- "strconv"
+ "io"
+ "log"
+ "net"
+ "os"
+ "sync/atomic"
"testing"
+
+ "github.com/DataDog/datadog-go/v5/statsd"
)
-var statBytes []byte
-var stat string
-
-// Results:
-// BenchmarkStatBuildGauge_Sprintf-8 500 45699958 ns/op
-// BenchmarkStatBuildGauge_Concat-8 1000 23452863 ns/op
-// BenchmarkStatBuildGauge_BytesAppend-8 1000 21705121 ns/op
-func BenchmarkStatBuildGauge_Sprintf(b *testing.B) {
- for n := 0; n < b.N; n++ {
- for x := 0; x < 100000; x++ {
- stat = fmt.Sprintf("%f|g", 3.14159)
+const writerNameUDP = "udp"
+const writerNameUDS = "uds"
+
+func setupUDSClientServer(b *testing.B, options []statsd.Option) (*statsd.Client, net.Listener) {
+ sockAddr := "/tmp/test.sock"
+ if err := os.RemoveAll(sockAddr); err != nil {
+ log.Fatal(err)
+ }
+ conn, err := net.Listen("unix", sockAddr)
+ if err != nil {
+ log.Fatal("listen error:", err)
+ }
+ go func() {
+ for {
+ _, err := conn.Accept()
+ if err != nil {
+ return
+ }
}
+ }()
+ client, err := statsd.New("unix://"+sockAddr, options...)
+ if err != nil {
+ b.Error(err)
}
+ return client, conn
}
-func BenchmarkStatBuildGauge_Concat(b *testing.B) {
- for n := 0; n < b.N; n++ {
- for x := 0; x < 100000; x++ {
- stat = strconv.FormatFloat(3.14159, 'f', -1, 64) + "|g"
- }
+func setupUDPClientServer(b *testing.B, options []statsd.Option) (*statsd.Client, *net.UDPConn) {
+ addr, err := net.ResolveUDPAddr("udp", ":0")
+ if err != nil {
+ b.Error(err)
+ }
+ conn, err := net.ListenUDP("udp", addr)
+ if err != nil {
+ b.Error(err)
}
+
+ client, err := statsd.New(conn.LocalAddr().String(), options...)
+ if err != nil {
+ b.Error(err)
+ }
+ return client, conn
}
-func BenchmarkStatBuildGauge_BytesAppend(b *testing.B) {
- suffix := []byte("|g")
+func setupClient(b *testing.B, transport string, extraOptions []statsd.Option) (*statsd.Client, io.Closer) {
+ options := []statsd.Option{statsd.WithMaxMessagesPerPayload(1024)}
+ options = append(options, extraOptions...)
- for n := 0; n < b.N; n++ {
- for x := 0; x < 100000; x++ {
- statBytes = []byte{}
- statBytes = append(strconv.AppendFloat(statBytes, 3.14159, 'f', -1, 64), suffix...)
- }
+ if transport == writerNameUDP {
+ return setupUDPClientServer(b, options)
}
+ return setupUDSClientServer(b, options)
}
-func BenchmarkStatBuildCount_Sprintf(b *testing.B) {
- for n := 0; n < b.N; n++ {
- for x := 0; x < 100000; x++ {
- stat = fmt.Sprintf("%d|c", 314)
+func benchmarkStatsdDifferentMetrics(b *testing.B, transport string, extraOptions ...statsd.Option) {
+ client, conn := setupClient(b, transport, extraOptions)
+ defer conn.Close()
+
+ n := int32(0)
+ b.ResetTimer()
+
+ b.RunParallel(func(pb *testing.PB) {
+ testNumber := atomic.AddInt32(&n, 1)
+ name := fmt.Sprintf("test.metric%d", testNumber)
+ for pb.Next() {
+ client.Gauge(name, 1, []string{"tag:tag"}, 1)
}
- }
+ })
+ client.Flush()
+ t := client.GetTelemetry()
+ reportMetric(b, float64(t.TotalDroppedOnReceive)/float64(t.TotalMetrics)*100, "%_dropRate")
+
+ b.StopTimer()
+ client.Close()
}
-func BenchmarkStatBuildCount_Concat(b *testing.B) {
- for n := 0; n < b.N; n++ {
- for x := 0; x < 100000; x++ {
- stat = strconv.FormatInt(314, 10) + "|c"
+func benchmarkStatsdSameMetrics(b *testing.B, transport string, extraOptions ...statsd.Option) {
+ client, conn := setupClient(b, transport, extraOptions)
+ defer conn.Close()
+
+ b.ResetTimer()
+
+ b.RunParallel(func(pb *testing.PB) {
+ for pb.Next() {
+ client.Gauge("test.metric", 1, []string{"tag:tag"}, 1)
}
- }
+ })
+ client.Flush()
+ t := client.GetTelemetry()
+ reportMetric(b, float64(t.TotalDroppedOnReceive)/float64(t.TotalMetrics)*100, "%_dropRate")
+
+ b.StopTimer()
+ client.Close()
}
-func BenchmarkStatBuildCount_BytesAppend(b *testing.B) {
- suffix := []byte("|c")
+/*
+UDP with the same metric
+*/
- for n := 0; n < b.N; n++ {
- for x := 0; x < 100000; x++ {
- statBytes = []byte{}
- statBytes = append(strconv.AppendInt(statBytes, 314, 10), suffix...)
- }
- }
+// blocking + no aggregation
+func BenchmarkStatsdUDPSameMetricMutex(b *testing.B) {
+ benchmarkStatsdSameMetrics(b, writerNameUDP, statsd.WithMutexMode(), statsd.WithoutClientSideAggregation())
+}
+
+// dropping + no aggregation
+func BenchmarkStatsdUDPSameMetricChannel(b *testing.B) {
+ benchmarkStatsdSameMetrics(b, writerNameUDP, statsd.WithChannelMode(), statsd.WithoutClientSideAggregation())
+}
+
+// blocking + aggregation
+func BenchmarkStatsdUDPSameMetricMutexAggregation(b *testing.B) {
+ benchmarkStatsdSameMetrics(b, writerNameUDP, statsd.WithMutexMode(), statsd.WithClientSideAggregation())
+}
+
+// dropping + aggregation
+func BenchmarkStatsdUDPSameMetricChannelAggregation(b *testing.B) {
+ benchmarkStatsdSameMetrics(b, writerNameUDP, statsd.WithChannelMode(), statsd.WithClientSideAggregation())
+}
+
+/*
+UDP with different metrics
+*/
+
+// blocking + no aggregation
+func BenchmarkStatsdUDPDifferentMetricMutex(b *testing.B) {
+ benchmarkStatsdDifferentMetrics(b, writerNameUDP, statsd.WithMutexMode(), statsd.WithoutClientSideAggregation())
+}
+
+// dropping + no aggregation
+func BenchmarkStatsdUDPDifferentMetricChannel(b *testing.B) {
+ benchmarkStatsdDifferentMetrics(b, writerNameUDP, statsd.WithChannelMode(), statsd.WithoutClientSideAggregation())
+}
+
+// blocking + aggregation
+func BenchmarkStatsdUDPDifferentMetricMutexAggregation(b *testing.B) {
+ benchmarkStatsdDifferentMetrics(b, writerNameUDP, statsd.WithMutexMode(), statsd.WithClientSideAggregation())
+}
+
+// dropping + aggregation
+func BenchmarkStatsdUDPDifferentMetricChannelAggregation(b *testing.B) {
+ benchmarkStatsdDifferentMetrics(b, writerNameUDP, statsd.WithChannelMode(), statsd.WithClientSideAggregation())
+}
+
+/*
+UDS with the same metric
+*/
+// blocking + no aggregation
+func BenchmarkStatsdUDSSameMetricMutex(b *testing.B) {
+ benchmarkStatsdSameMetrics(b, writerNameUDS, statsd.WithMutexMode(), statsd.WithoutClientSideAggregation())
+}
+
+// dropping + no aggregation
+func BenchmarkStatsdUDSSameMetricChannel(b *testing.B) {
+ benchmarkStatsdSameMetrics(b, writerNameUDS, statsd.WithChannelMode(), statsd.WithoutClientSideAggregation())
+}
+
+// blocking + aggregation
+func BenchmarkStatsdUDSSameMetricMutexAggregation(b *testing.B) {
+ benchmarkStatsdSameMetrics(b, writerNameUDS, statsd.WithMutexMode(), statsd.WithClientSideAggregation())
+}
+
+// dropping + aggregation
+func BenchmarkStatsdUDSSameMetricChannelAggregation(b *testing.B) {
+ benchmarkStatsdSameMetrics(b, writerNameUDS, statsd.WithChannelMode(), statsd.WithClientSideAggregation())
+}
+
+/*
+UDS with different metrics
+*/
+// blocking + no aggregation
+func BenchmarkStatsdUDSDifferentMetricMutex(b *testing.B) {
+ benchmarkStatsdDifferentMetrics(b, writerNameUDS, statsd.WithMutexMode(), statsd.WithoutClientSideAggregation())
+}
+
+// dropping + no aggregation
+func BenchmarkStatsdUDSDifferentMetricChannel(b *testing.B) {
+ benchmarkStatsdDifferentMetrics(b, writerNameUDS, statsd.WithChannelMode(), statsd.WithoutClientSideAggregation())
+}
+
+// blocking + aggregation
+func BenchmarkStatsdUDSDifferentMetricMutexAggregation(b *testing.B) {
+ benchmarkStatsdDifferentMetrics(b, writerNameUDS, statsd.WithMutexMode(), statsd.WithClientSideAggregation())
+}
+
+// dropping + aggregation
+func BenchmarkStatsdUDSDifferentMetricChannelAggregation(b *testing.B) {
+ benchmarkStatsdDifferentMetrics(b, writerNameUDS, statsd.WithChannelMode(), statsd.WithClientSideAggregation())
}
diff --git a/statsd/statsd_test.go b/statsd/statsd_test.go
index 0f2c141..d2d24fb 100644
--- a/statsd/statsd_test.go
+++ b/statsd/statsd_test.go
@@ -1,46 +1,16 @@
-// Copyright 2013 Ooyala, Inc.
-
package statsd
import (
- "bytes"
"fmt"
- "io"
- "io/ioutil"
- "net"
"os"
- "path/filepath"
- "reflect"
- "strconv"
"strings"
+ "sync"
"testing"
"time"
-)
-var dogstatsdTests = []struct {
- GlobalNamespace string
- GlobalTags []string
- Method string
- Metric string
- Value interface{}
- Tags []string
- Rate float64
- Expected string
-}{
- {"", nil, "Gauge", "test.gauge", 1.0, nil, 1.0, "test.gauge:1.000000|g"},
- {"", nil, "Gauge", "test.gauge", 1.0, nil, 0.999999, "test.gauge:1.000000|g|@0.999999"},
- {"", nil, "Gauge", "test.gauge", 1.0, []string{"tagA"}, 1.0, "test.gauge:1.000000|g|#tagA"},
- {"", nil, "Gauge", "test.gauge", 1.0, []string{"tagA", "tagB"}, 1.0, "test.gauge:1.000000|g|#tagA,tagB"},
- {"", nil, "Gauge", "test.gauge", 1.0, []string{"tagA"}, 0.999999, "test.gauge:1.000000|g|@0.999999|#tagA"},
- {"", nil, "Count", "test.count", int64(1), []string{"tagA"}, 1.0, "test.count:1|c|#tagA"},
- {"", nil, "Count", "test.count", int64(-1), []string{"tagA"}, 1.0, "test.count:-1|c|#tagA"},
- {"", nil, "Histogram", "test.histogram", 2.3, []string{"tagA"}, 1.0, "test.histogram:2.300000|h|#tagA"},
- {"", nil, "Distribution", "test.distribution", 2.3, []string{"tagA"}, 1.0, "test.distribution:2.300000|d|#tagA"},
- {"", nil, "Set", "test.set", "uuid", []string{"tagA"}, 1.0, "test.set:uuid|s|#tagA"},
- {"flubber.", nil, "Set", "test.set", "uuid", []string{"tagA"}, 1.0, "flubber.test.set:uuid|s|#tagA"},
- {"", []string{"tagC"}, "Set", "test.set", "uuid", []string{"tagA"}, 1.0, "test.set:uuid|s|#tagC,tagA"},
- {"", nil, "Count", "test.count", int64(1), []string{"hello\nworld"}, 1.0, "test.count:1|c|#helloworld"},
-}
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
+)
func assertNotPanics(t *testing.T, f func()) {
defer func() {
@@ -51,907 +21,342 @@ func assertNotPanics(t *testing.T, f func()) {
f()
}
-func TestClientUDP(t *testing.T) {
- addr := "localhost:1201"
- udpAddr, err := net.ResolveUDPAddr("udp", addr)
- if err != nil {
- t.Fatal(err)
- }
-
- server, err := net.ListenUDP("udp", udpAddr)
- if err != nil {
- t.Fatal(err)
+func TestNilError(t *testing.T) {
+ var c *Client
+ tests := []func() error{
+ func() error { return c.Flush() },
+ func() error { return c.Close() },
+ func() error { return c.Count("", 0, nil, 1) },
+ func() error { return c.Incr("", nil, 1) },
+ func() error { return c.Decr("", nil, 1) },
+ func() error { return c.Histogram("", 0, nil, 1) },
+ func() error { return c.Distribution("", 0, nil, 1) },
+ func() error { return c.Gauge("", 0, nil, 1) },
+ func() error { return c.Set("", "", nil, 1) },
+ func() error { return c.Timing("", time.Second, nil, 1) },
+ func() error { return c.TimeInMilliseconds("", 1, nil, 1) },
+ func() error { return c.Event(NewEvent("", "")) },
+ func() error { return c.SimpleEvent("", "") },
+ func() error { return c.ServiceCheck(NewServiceCheck("", Ok)) },
+ func() error { return c.SimpleServiceCheck("", Ok) },
+ func() error {
+ _, err := CloneWithExtraOptions(nil, WithChannelMode())
+ return err
+ },
}
- defer server.Close()
-
- client, err := New(addr)
- if err != nil {
- t.Fatal(err)
+ for i, f := range tests {
+ var err error
+ assertNotPanics(t, func() { err = f() })
+ if err != ErrNoClient {
+ t.Errorf("Test case %d: expected ErrNoClient, got %#v", i, err)
+ }
}
+}
- clientTest(t, server, client)
+func TestDoubleClosePanic(t *testing.T) {
+ c, err := New("localhost:8125")
+ assert.NoError(t, err)
+ c.Close()
+ c.Close()
}
type statsdWriterWrapper struct {
- io.WriteCloser
+ data []string
}
-func (statsdWriterWrapper) SetWriteTimeout(time.Duration) error {
+func (s *statsdWriterWrapper) Close() error {
return nil
}
-func TestClientWithConn(t *testing.T) {
- server, conn, err := os.Pipe()
- if err != nil {
- t.Fatal(err)
- }
-
- client, err := NewWithWriter(statsdWriterWrapper{conn})
- if err != nil {
- t.Fatal(err)
- }
-
- clientTest(t, server, client)
-}
-
-func clientTest(t *testing.T, server io.Reader, client *Client) {
- for _, tt := range dogstatsdTests {
- client.Namespace = tt.GlobalNamespace
- client.Tags = tt.GlobalTags
- method := reflect.ValueOf(client).MethodByName(tt.Method)
- e := method.Call([]reflect.Value{
- reflect.ValueOf(tt.Metric),
- reflect.ValueOf(tt.Value),
- reflect.ValueOf(tt.Tags),
- reflect.ValueOf(tt.Rate)})[0]
- errInter := e.Interface()
- if errInter != nil {
- t.Fatal(errInter.(error))
- }
-
- bytes := make([]byte, 1024)
- n, err := server.Read(bytes)
- if err != nil {
- t.Fatal(err)
- }
- message := bytes[:n]
- if string(message) != tt.Expected {
- t.Errorf("Expected: %s. Actual: %s", tt.Expected, string(message))
- }
- }
-}
-
-func TestClientUDS(t *testing.T) {
- dir, err := ioutil.TempDir("", "socket")
- if err != nil {
- t.Fatal(err)
- }
- defer os.RemoveAll(dir) // clean up
-
- addr := filepath.Join(dir, "dsd.socket")
-
- udsAddr, err := net.ResolveUnixAddr("unixgram", addr)
- if err != nil {
- t.Fatal(err)
- }
-
- server, err := net.ListenUnixgram("unixgram", udsAddr)
- if err != nil {
- t.Fatal(err)
- }
- defer server.Close()
-
- addrParts := []string{UnixAddressPrefix, addr}
- client, err := New(strings.Join(addrParts, ""))
- if err != nil {
- t.Fatal(err)
- }
-
- for _, tt := range dogstatsdTests {
- client.Namespace = tt.GlobalNamespace
- client.Tags = tt.GlobalTags
- method := reflect.ValueOf(client).MethodByName(tt.Method)
- e := method.Call([]reflect.Value{
- reflect.ValueOf(tt.Metric),
- reflect.ValueOf(tt.Value),
- reflect.ValueOf(tt.Tags),
- reflect.ValueOf(tt.Rate)})[0]
- errInter := e.Interface()
- if errInter != nil {
- t.Fatal(errInter.(error))
- }
-
- bytes := make([]byte, 1024)
- n, err := server.Read(bytes)
- if err != nil {
- t.Fatal(err)
- }
- message := bytes[:n]
- if string(message) != tt.Expected {
- t.Errorf("Expected: %s. Actual: %s", tt.Expected, string(message))
+func (s *statsdWriterWrapper) Write(p []byte) (n int, err error) {
+ for _, m := range strings.Split(string(p), "\n") {
+ if m != "" {
+ s.data = append(s.data, m)
}
}
+ return len(p), nil
}
-func TestClientUDSClose(t *testing.T) {
- dir, err := ioutil.TempDir("", "socket")
- if err != nil {
- t.Fatal(err)
- }
- defer os.RemoveAll(dir) // clean up
+func TestNewWithWriter(t *testing.T) {
+ w := statsdWriterWrapper{}
+ client, err := NewWithWriter(&w, WithoutTelemetry())
+ require.Nil(t, err)
- addr := filepath.Join(dir, "dsd.socket")
+ ts := &testServer{}
+ expected := ts.sendAllType(client)
+ client.Close()
- addrParts := []string{UnixAddressPrefix, addr}
- client, err := New(strings.Join(addrParts, ""))
- if err != nil {
- t.Fatal(err)
- }
-
- assertNotPanics(t, func() { client.Close() })
+ ts.assertMetric(t, w.data, expected)
}
-func TestBufferedClient(t *testing.T) {
- addr := "localhost:1201"
- udpAddr, err := net.ResolveUDPAddr("udp", addr)
- if err != nil {
- t.Fatal(err)
- }
-
- server, err := net.ListenUDP("udp", udpAddr)
- if err != nil {
- t.Fatal(err)
- }
- defer server.Close()
-
- bufferLength := 9
- client, err := NewBuffered(addr, bufferLength)
- if err != nil {
- t.Fatal(err)
- }
-
- client.Namespace = "foo."
- client.Tags = []string{"dd:2"}
-
- dur, _ := time.ParseDuration("123us")
-
- client.Incr("ic", nil, 1)
- client.Decr("dc", nil, 1)
- client.Count("cc", 1, nil, 1)
- client.Gauge("gg", 10, nil, 1)
- client.Histogram("hh", 1, nil, 1)
- client.Distribution("dd", 1, nil, 1)
- client.Timing("tt", dur, nil, 1)
- client.Set("ss", "ss", nil, 1)
-
- if len(client.commands) != (bufferLength - 1) {
- t.Errorf("Expected client to have buffered %d commands, but found %d\n", (bufferLength - 1), len(client.commands))
- }
-
- client.Set("ss", "xx", nil, 1)
- client.Lock()
- err = client.flushLocked()
- client.Unlock()
- if err != nil {
- t.Errorf("Error sending: %s", err)
- }
-
- if len(client.commands) != 0 {
- t.Errorf("Expecting send to flush commands, but found %d\n", len(client.commands))
- }
-
- buffer := make([]byte, 4096)
- n, err := io.ReadAtLeast(server, buffer, 1)
- result := string(buffer[:n])
-
- if err != nil {
- t.Error(err)
- }
-
- expected := []string{
- `foo.ic:1|c|#dd:2`,
- `foo.dc:-1|c|#dd:2`,
- `foo.cc:1|c|#dd:2`,
- `foo.gg:10.000000|g|#dd:2`,
- `foo.hh:1.000000|h|#dd:2`,
- `foo.dd:1.000000|d|#dd:2`,
- `foo.tt:0.123000|ms|#dd:2`,
- `foo.ss:ss|s|#dd:2`,
- `foo.ss:xx|s|#dd:2`,
- }
-
- for i, res := range strings.Split(result, "\n") {
- if res != expected[i] {
- t.Errorf("Got `%s`, expected `%s`", res, expected[i])
- }
- }
-
- client.Event(&Event{Title: "title1", Text: "text1", Priority: Normal, AlertType: Success, Tags: []string{"tagg"}})
- client.SimpleEvent("event1", "text1")
-
- if len(client.commands) != 2 {
- t.Errorf("Expected to find %d commands, but found %d\n", 2, len(client.commands))
- }
-
- client.Lock()
- err = client.flushLocked()
- client.Unlock()
-
- if err != nil {
- t.Errorf("Error sending: %s", err)
+// TestConcurrentSend sends various metric types in separate goroutines to
+// trigger any possible data races. It is intended to be run with the data race
+// detector enabled.
+func TestConcurrentSend(t *testing.T) {
+ tests := []struct {
+ description string
+ clientOptions []Option
+ }{
+ {
+ description: "Client with default options",
+ clientOptions: []Option{},
+ },
+ {
+ description: "Client with mutex mode enabled",
+ clientOptions: []Option{WithMutexMode()},
+ },
+ {
+ description: "Client with channel mode enabled",
+ clientOptions: []Option{WithChannelMode()},
+ },
}
- if len(client.commands) != 0 {
- t.Errorf("Expecting send to flush commands, but found %d\n", len(client.commands))
- }
+ for _, test := range tests {
+ test := test // Capture range variable.
+ t.Run(test.description, func(t *testing.T) {
+ t.Parallel()
- buffer = make([]byte, 1024)
- n, err = io.ReadAtLeast(server, buffer, 1)
- result = string(buffer[:n])
+ client, err := New("localhost:9876", test.clientOptions...)
+ require.Nil(t, err, fmt.Sprintf("failed to create client: %s", err))
- if err != nil {
- t.Error(err)
- }
+ var wg sync.WaitGroup
+ wg.Add(1)
+ go func() {
+ client.Gauge("name", 1, []string{"tag"}, 0.1)
+ wg.Done()
+ }()
- if n == 0 {
- t.Errorf("Read 0 bytes but expected more.")
- }
+ wg.Add(1)
+ go func() {
+ client.Count("name", 1, []string{"tag"}, 0.1)
+ wg.Done()
+ }()
- expected = []string{
- `_e{6,5}:title1|text1|p:normal|t:success|#dd:2,tagg`,
- `_e{6,5}:event1|text1|#dd:2`,
- }
+ wg.Add(1)
+ go func() {
+ client.Timing("name", 1, []string{"tag"}, 0.1)
+ wg.Done()
+ }()
- for i, res := range strings.Split(result, "\n") {
- if res != expected[i] {
- t.Errorf("Got `%s`, expected `%s`", res, expected[i])
- }
+ wg.Wait()
+ err = client.Close()
+ require.Nil(t, err, fmt.Sprintf("failed to close client: %s", err))
+ })
}
-
}
-func TestBufferedClientBackground(t *testing.T) {
- addr := "localhost:1201"
- udpAddr, err := net.ResolveUDPAddr("udp", addr)
- if err != nil {
- t.Fatal(err)
- }
-
- server, err := net.ListenUDP("udp", udpAddr)
- if err != nil {
- t.Fatal(err)
- }
- defer server.Close()
-
- bufferLength := 5
- client, err := NewBuffered(addr, bufferLength)
- if err != nil {
- t.Fatal(err)
- }
- defer client.Close()
-
- client.Namespace = "foo."
- client.Tags = []string{"dd:2"}
-
- client.Count("cc", 1, nil, 1)
- client.Gauge("gg", 10, nil, 1)
- client.Histogram("hh", 1, nil, 1)
- client.Distribution("dd", 1, nil, 1)
- client.Set("ss", "ss", nil, 1)
- client.Set("ss", "xx", nil, 1)
-
- time.Sleep(client.flushTime * 2)
- client.Lock()
- if len(client.commands) != 0 {
- t.Errorf("Watch goroutine should have flushed commands, but found %d\n", len(client.commands))
- }
- client.Unlock()
-}
-
-func TestBufferedClientFlush(t *testing.T) {
- addr := "localhost:1201"
- udpAddr, err := net.ResolveUDPAddr("udp", addr)
- if err != nil {
- t.Fatal(err)
- }
-
- server, err := net.ListenUDP("udp", udpAddr)
- if err != nil {
- t.Fatal(err)
- }
- defer server.Close()
-
- bufferLength := 5
- client, err := NewBuffered(addr, bufferLength)
- if err != nil {
- t.Fatal(err)
- }
- defer client.Close()
-
- client.Namespace = "foo."
- client.Tags = []string{"dd:2"}
-
- client.Count("cc", 1, nil, 1)
- client.Gauge("gg", 10, nil, 1)
- client.Histogram("hh", 1, nil, 1)
- client.Distribution("dd", 1, nil, 1)
- client.Set("ss", "ss", nil, 1)
- client.Set("ss", "xx", nil, 1)
-
- client.Flush()
-
- client.Lock()
- if len(client.commands) != 0 {
- t.Errorf("Flush should have flushed commands, but found %d\n", len(client.commands))
- }
- client.Unlock()
+// TestCloseRace close the client multiple times in separate goroutines to
+// trigger any possible data races. It is intended to be run with the data race
+// detector enabled.
+func TestCloseRace(t *testing.T) {
+ c, err := New("localhost:8125")
+ assert.NoError(t, err)
+ start := make(chan struct{})
+ var wg sync.WaitGroup
+ for j := 0; j < 100; j++ {
+ wg.Add(1)
+ go func() {
+ defer wg.Done()
+ <-start
+ c.Close()
+ }()
+ }
+ close(start)
+ wg.Wait()
}
-func TestJoinMaxSize(t *testing.T) {
- c := Client{}
- elements := []string{"abc", "abcd", "ab", "xyz", "foobaz", "x", "wwxxyyzz"}
- res, n := c.joinMaxSize(elements, " ", 8)
-
- if len(res) != len(n) && len(res) != 4 {
- t.Errorf("Was expecting 4 frames to flush but got: %v - %v", n, res)
- }
- if n[0] != 2 {
- t.Errorf("Was expecting 2 elements in first frame but got: %v", n[0])
- }
- if string(res[0]) != "abc abcd" {
- t.Errorf("Join should have returned \"abc abcd\" in frame, but found: %s", res[0])
- }
- if n[1] != 2 {
- t.Errorf("Was expecting 2 elements in second frame but got: %v - %v", n[1], n)
- }
- if string(res[1]) != "ab xyz" {
- t.Errorf("Join should have returned \"ab xyz\" in frame, but found: %s", res[1])
- }
- if n[2] != 2 {
- t.Errorf("Was expecting 2 elements in third frame but got: %v - %v", n[2], n)
- }
- if string(res[2]) != "foobaz x" {
- t.Errorf("Join should have returned \"foobaz x\" in frame, but found: %s", res[2])
- }
- if n[3] != 1 {
- t.Errorf("Was expecting 1 element in fourth frame but got: %v - %v", n[3], n)
- }
- if string(res[3]) != "wwxxyyzz" {
- t.Errorf("Join should have returned \"wwxxyyzz\" in frame, but found: %s", res[3])
- }
-
- res, n = c.joinMaxSize(elements, " ", 11)
-
- if len(res) != len(n) && len(res) != 3 {
- t.Errorf("Was expecting 3 frames to flush but got: %v - %v", n, res)
- }
- if n[0] != 3 {
- t.Errorf("Was expecting 3 elements in first frame but got: %v", n[0])
- }
- if string(res[0]) != "abc abcd ab" {
- t.Errorf("Join should have returned \"abc abcd ab\" in frame, but got: %s", res[0])
- }
- if n[1] != 2 {
- t.Errorf("Was expecting 2 elements in second frame but got: %v", n[1])
- }
- if string(res[1]) != "xyz foobaz" {
- t.Errorf("Join should have returned \"xyz foobaz\" in frame, but got: %s", res[1])
- }
- if n[2] != 2 {
- t.Errorf("Was expecting 2 elements in third frame but got: %v", n[2])
- }
- if string(res[2]) != "x wwxxyyzz" {
- t.Errorf("Join should have returned \"x wwxxyyzz\" in frame, but got: %s", res[2])
- }
-
- res, n = c.joinMaxSize(elements, " ", 8)
-
- if len(res) != len(n) && len(res) != 7 {
- t.Errorf("Was expecting 7 frames to flush but got: %v - %v", n, res)
- }
- if n[0] != 1 {
- t.Errorf("Separator is long, expected a single element in frame but got: %d - %v", n[0], res)
- }
- if string(res[0]) != "abc" {
- t.Errorf("Join should have returned \"abc\" in first frame, but got: %s", res)
- }
- if n[1] != 1 {
- t.Errorf("Separator is long, expected a single element in frame but got: %d - %v", n[1], res)
- }
- if string(res[1]) != "abcd" {
- t.Errorf("Join should have returned \"abcd\" in second frame, but got: %s", res[1])
- }
- if n[2] != 1 {
- t.Errorf("Separator is long, expected a single element in third frame but got: %d - %v", n[2], res)
- }
- if string(res[2]) != "ab" {
- t.Errorf("Join should have returned \"ab\" in third frame, but got: %s", res[2])
- }
- if n[3] != 1 {
- t.Errorf("Separator is long, expected a single element in fourth frame but got: %d - %v", n[3], res)
- }
- if string(res[3]) != "xyz" {
- t.Errorf("Join should have returned \"xyz\" in fourth frame, but got: %s", res[3])
- }
- if n[4] != 1 {
- t.Errorf("Separator is long, expected a single element in fifth frame but got: %d - %v", n[4], res)
- }
- if string(res[4]) != "foobaz" {
- t.Errorf("Join should have returned \"foobaz\" in fifth frame, but got: %s", res[4])
- }
- if n[5] != 1 {
- t.Errorf("Separator is long, expected a single element in sixth frame but got: %d - %v", n[5], res)
- }
- if string(res[5]) != "x" {
- t.Errorf("Join should have returned \"x\" in sixth frame, but got: %s", res[5])
- }
- if n[6] != 1 {
- t.Errorf("Separator is long, expected a single element in seventh frame but got: %d - %v", n[6], res)
- }
- if string(res[6]) != "wwxxyyzz" {
- t.Errorf("Join should have returned \"wwxxyyzz\" in seventh frame, but got: %s", res[6])
- }
+func TestCloseWithClientAlreadyClosed(t *testing.T) {
+ c, err := New("localhost:8125")
+ assert.NoError(t, err)
+ assert.False(t, c.IsClosed())
- res, n = c.joinMaxSize(elements[4:], " ", 6)
- if len(res) != len(n) && len(res) != 3 {
- t.Errorf("Was expecting 3 frames to flush but got: %v - %v", n, res)
+ assert.NoError(t, c.Close())
+ assert.True(t, c.IsClosed())
- }
- if n[0] != 1 {
- t.Errorf("Element should just fit in frame - expected single element in frame: %d - %v", n[0], res)
- }
- if string(res[0]) != "foobaz" {
- t.Errorf("Join should have returned \"foobaz\" in first frame, but got: %s", res[0])
- }
- if n[1] != 1 {
- t.Errorf("Single element expected in frame, but got. %d - %v", n[1], res)
- }
- if string(res[1]) != "x" {
- t.Errorf("Join should' have returned \"x\" in second frame, but got: %s", res[1])
- }
- if n[2] != 1 {
- t.Errorf("Even though element is greater then max size we still try to send it. %d - %v", n[2], res)
- }
- if string(res[2]) != "wwxxyyzz" {
- t.Errorf("Join should have returned \"wwxxyyzz\" in third frame, but got: %s", res[2])
- }
+ assert.NoError(t, c.Close())
+ assert.True(t, c.IsClosed())
}
-func TestSendMsgUDP(t *testing.T) {
- addr := "localhost:1201"
- udpAddr, err := net.ResolveUDPAddr("udp", addr)
- if err != nil {
- t.Fatal(err)
- }
-
- server, err := net.ListenUDP("udp", udpAddr)
- if err != nil {
- t.Fatal(err)
- }
- defer server.Close()
-
- client, err := New(addr)
- if err != nil {
- t.Fatal(err)
- }
-
- err = client.sendMsg(strings.Repeat("x", MaxUDPPayloadSize+1))
- if err == nil {
- t.Error("Expected error to be returned if message size is bigger than MaxUDPPayloadSize")
- }
-
- message := "test message"
-
- err = client.sendMsg(message)
- if err != nil {
- t.Errorf("Expected no error to be returned if message size is smaller or equal to MaxUDPPayloadSize, got: %s", err.Error())
- }
-
- buffer := make([]byte, MaxUDPPayloadSize+1)
- n, err := io.ReadAtLeast(server, buffer, 1)
-
- if err != nil {
- t.Fatalf("Expected no error to be returned reading the buffer, got: %s", err.Error())
- }
-
- if n != len(message) {
- t.Fatalf("Failed to read full message from buffer. Got size `%d` expected `%d`", n, MaxUDPPayloadSize)
- }
-
- if string(buffer[:n]) != message {
- t.Fatalf("The received message did not match what we expect.")
- }
-
- client, err = NewBuffered(addr, 1)
- if err != nil {
- t.Fatal(err)
- }
-
- err = client.sendMsg(strings.Repeat("x", MaxUDPPayloadSize+1))
- if err == nil {
- t.Error("Expected error to be returned if message size is bigger than MaxUDPPayloadSize")
- }
-
- err = client.sendMsg(message)
- if err != nil {
- t.Errorf("Expected no error to be returned if message size is smaller or equal to MaxUDPPayloadSize, got: %s", err.Error())
- }
-
- client.Lock()
- err = client.flushLocked()
- client.Unlock()
+func TestIsClosed(t *testing.T) {
+ c, err := New("localhost:8125")
+ assert.NoError(t, err)
+ assert.False(t, c.IsClosed())
- if err != nil {
- t.Fatalf("Expected no error to be returned flushing the client, got: %s", err.Error())
- }
-
- buffer = make([]byte, MaxUDPPayloadSize+1)
- n, err = io.ReadAtLeast(server, buffer, 1)
-
- if err != nil {
- t.Fatalf("Expected no error to be returned reading the buffer, got: %s", err.Error())
- }
-
- if n != len(message) {
- t.Fatalf("Failed to read full message from buffer. Got size `%d` expected `%d`", n, MaxUDPPayloadSize)
- }
-
- if string(buffer[:n]) != message {
- t.Fatalf("The received message did not match what we expect.")
- }
+ assert.NoError(t, c.Close())
+ assert.True(t, c.IsClosed())
}
-func TestSendUDSErrors(t *testing.T) {
- dir, err := ioutil.TempDir("", "socket")
- if err != nil {
- t.Fatal(err)
- }
- defer os.RemoveAll(dir) // clean up
+func TestCloneWithExtraOptions(t *testing.T) {
+ client, err := New("localhost:1201", WithTags([]string{"tag1", "tag2"}))
+ require.Nil(t, err, fmt.Sprintf("failed to create client: %s", err))
- message := "test message"
+ assert.Equal(t, client.tags, []string{"tag1", "tag2"})
+ assert.Equal(t, client.namespace, "")
+ assert.Equal(t, client.workersMode, mutexMode)
+ assert.Equal(t, "localhost:1201", client.addrOption)
+ assert.Len(t, client.options, 1)
- addr := filepath.Join(dir, "dsd.socket")
- udsAddr, err := net.ResolveUnixAddr("unixgram", addr)
- if err != nil {
- t.Fatal(err)
- }
-
- addrParts := []string{UnixAddressPrefix, addr}
- client, err := New(strings.Join(addrParts, ""))
- if err != nil {
- t.Fatal(err)
- }
-
- // Server not listening yet
- err = client.sendMsg(message)
- if err == nil || !strings.HasSuffix(err.Error(), "no such file or directory") {
- t.Errorf("Expected error \"no such file or directory\", got: %s", err.Error())
- }
+ cloneClient, err := CloneWithExtraOptions(client, WithNamespace("test"), WithChannelMode())
+ require.Nil(t, err, fmt.Sprintf("failed to clone client: %s", err))
- // Start server and send packet
- server, err := net.ListenUnixgram("unixgram", udsAddr)
- if err != nil {
- t.Fatal(err)
- }
- err = client.sendMsg(message)
- if err != nil {
- t.Errorf("Expected no error to be returned when server is listening, got: %s", err.Error())
- }
- bytes := make([]byte, 1024)
- n, err := server.Read(bytes)
- if err != nil {
- t.Fatal(err)
- }
- if string(bytes[:n]) != message {
- t.Errorf("Expected: %s. Actual: %s", string(message), string(bytes))
- }
-
- // close server and send packet
- server.Close()
- os.Remove(addr)
- err = client.sendMsg(message)
- if err == nil {
- t.Error("Expected an error, got nil")
- }
-
- // Restart server and send packet
- server, err = net.ListenUnixgram("unixgram", udsAddr)
- if err != nil {
- t.Fatal(err)
- }
- time.Sleep(100 * time.Millisecond)
- defer server.Close()
- err = client.sendMsg(message)
- if err != nil {
- t.Errorf("Expected no error to be returned when server is listening, got: %s", err.Error())
- }
-
- bytes = make([]byte, 1024)
- n, err = server.Read(bytes)
- if err != nil {
- t.Fatal(err)
- }
- if string(bytes[:n]) != message {
- t.Errorf("Expected: %s. Actual: %s", string(message), string(bytes))
- }
+ assert.Equal(t, cloneClient.tags, []string{"tag1", "tag2"})
+ assert.Equal(t, cloneClient.namespace, "test.")
+ assert.Equal(t, cloneClient.workersMode, channelMode)
+ assert.Equal(t, "localhost:1201", cloneClient.addrOption)
+ assert.Len(t, cloneClient.options, 3)
}
-func TestSendUDSIgnoreErrors(t *testing.T) {
- client, err := New("unix:///invalid")
- if err != nil {
- t.Fatal(err)
- }
-
- // Default mode throws error
- err = client.sendMsg("message")
- if err == nil || !strings.HasSuffix(err.Error(), "no such file or directory") {
- t.Errorf("Expected error \"connect: no such file or directory\", got: %s", err.Error())
- }
-
- // Skip errors
- client.SkipErrors = true
- err = client.sendMsg("message")
- if err != nil {
- t.Errorf("Expected no error to be returned when in skip errors mode, got: %s", err.Error())
+func TestResolveAddressFromEnvironment(t *testing.T) {
+ hostInitialValue, hostInitiallySet := os.LookupEnv(agentHostEnvVarName)
+ if hostInitiallySet {
+ defer os.Setenv(agentHostEnvVarName, hostInitialValue)
+ } else {
+ defer os.Unsetenv(agentHostEnvVarName)
+ }
+ portInitialValue, portInitiallySet := os.LookupEnv(agentPortEnvVarName)
+ if portInitiallySet {
+ defer os.Setenv(agentPortEnvVarName, portInitialValue)
+ } else {
+ defer os.Unsetenv(agentPortEnvVarName)
+ }
+
+ for _, tc := range []struct {
+ name string
+ addrParam string
+ hostEnv string
+ portEnv string
+ expectedAddr string
+ }{
+ {"UPD Nominal case", "127.0.0.1:1234", "", "", "127.0.0.1:1234"},
+ {"UPD Parameter overrides environment", "127.0.0.1:8125", "10.12.16.9", "1234", "127.0.0.1:8125"},
+ {"UPD Host and port passed as env", "", "10.12.16.9", "1234", "10.12.16.9:1234"},
+ {"UPD Host env, default port", "", "10.12.16.9", "", "10.12.16.9:8125"},
+ {"UPD Host passed, ignore env port", "10.12.16.9", "", "1234", "10.12.16.9:8125"},
+
+ {"UDS socket passed", "unix://test/path.socket", "", "", "unix://test/path.socket"},
+ {"UDS socket env", "", "unix://test/path.socket", "", "unix://test/path.socket"},
+ {"UDS socket env with port", "", "unix://test/path.socket", "8125", "unix://test/path.socket"},
+
+ {"Pipe passed", "\\\\.\\pipe\\my_pipe", "", "", "\\\\.\\pipe\\my_pipe"},
+ {"Pipe env", "", "\\\\.\\pipe\\my_pipe", "", "\\\\.\\pipe\\my_pipe"},
+ {"Pipe env with port", "", "\\\\.\\pipe\\my_pipe", "8125", "\\\\.\\pipe\\my_pipe"},
+
+ {"No autodetection failed", "", "", "", ""},
+ } {
+ t.Run(tc.name, func(t *testing.T) {
+ os.Setenv(agentHostEnvVarName, tc.hostEnv)
+ os.Setenv(agentPortEnvVarName, tc.portEnv)
+
+ addr := resolveAddr(tc.addrParam)
+ assert.Equal(t, tc.expectedAddr, addr)
+ })
}
}
-func TestNilSafe(t *testing.T) {
- var c *Client
- assertNotPanics(t, func() { c.SetWriteTimeout(0) })
- assertNotPanics(t, func() { c.Flush() })
- assertNotPanics(t, func() { c.Close() })
- assertNotPanics(t, func() { c.Count("", 0, nil, 1) })
- assertNotPanics(t, func() { c.Histogram("", 0, nil, 1) })
- assertNotPanics(t, func() { c.Distribution("", 0, nil, 1) })
- assertNotPanics(t, func() { c.Gauge("", 0, nil, 1) })
- assertNotPanics(t, func() { c.Set("", "", nil, 1) })
- assertNotPanics(t, func() {
- c.send("", "", []byte(""), nil, 1)
- })
- assertNotPanics(t, func() { c.Event(NewEvent("", "")) })
- assertNotPanics(t, func() { c.SimpleEvent("", "") })
- assertNotPanics(t, func() { c.ServiceCheck(NewServiceCheck("", Ok)) })
- assertNotPanics(t, func() { c.SimpleServiceCheck("", Ok) })
+func TestGetTelemetry(t *testing.T) {
+ ts, client := newClientAndTestServer(t,
+ "udp",
+ "localhost:8765",
+ nil,
+ WithExtendedClientSideAggregation(),
+ )
+
+ ts.sendAllAndAssert(t, client)
+ tlm := client.GetTelemetry()
+
+ assert.Equal(t, uint64(9), tlm.TotalMetrics, "telmetry TotalMetrics was wrong")
+ assert.Equal(t, uint64(1), tlm.TotalMetricsGauge, "telmetry TotalMetricsGauge was wrong")
+ assert.Equal(t, uint64(3), tlm.TotalMetricsCount, "telmetry TotalMetricsCount was wrong")
+ assert.Equal(t, uint64(1), tlm.TotalMetricsHistogram, "telmetry TotalMetricsHistogram was wrong")
+ assert.Equal(t, uint64(1), tlm.TotalMetricsDistribution, "telmetry TotalMetricsDistribution was wrong")
+ assert.Equal(t, uint64(1), tlm.TotalMetricsSet, "telmetry TotalMetricsSet was wrong")
+ assert.Equal(t, uint64(2), tlm.TotalMetricsTiming, "telmetry TotalMetricsTiming was wrong")
+ assert.Equal(t, uint64(1), tlm.TotalEvents, "telmetry TotalEvents was wrong")
+ assert.Equal(t, uint64(1), tlm.TotalServiceChecks, "telmetry TotalServiceChecks was wrong")
+ assert.Equal(t, uint64(0), tlm.TotalDroppedOnReceive, "telmetry TotalDroppedOnReceive was wrong")
+ assert.Equal(t, uint64(22), tlm.TotalPayloadsSent, "telmetry TotalPayloadsSent was wrong")
+ assert.Equal(t, uint64(0), tlm.TotalPayloadsDropped, "telmetry TotalPayloadsDropped was wrong")
+ assert.Equal(t, uint64(0), tlm.TotalPayloadsDroppedWriter, "telmetry TotalPayloadsDroppedWriter was wrong")
+ assert.Equal(t, uint64(0), tlm.TotalPayloadsDroppedQueueFull, "telmetry TotalPayloadsDroppedQueueFull was wrong")
+ assert.Equal(t, uint64(3112), tlm.TotalBytesSent, "telmetry TotalBytesSent was wrong")
+ assert.Equal(t, uint64(0), tlm.TotalBytesDropped, "telmetry TotalBytesDropped was wrong")
+ assert.Equal(t, uint64(0), tlm.TotalBytesDroppedWriter, "telmetry TotalBytesDroppedWriter was wrong")
+ assert.Equal(t, uint64(0), tlm.TotalBytesDroppedQueueFull, "telmetry TotalBytesDroppedQueueFull was wrong")
+ assert.Equal(t, uint64(9), tlm.AggregationNbContext, "telmetry AggregationNbContext was wrong")
+ assert.Equal(t, uint64(1), tlm.AggregationNbContextGauge, "telmetry AggregationNbContextGauge was wrong")
+ assert.Equal(t, uint64(3), tlm.AggregationNbContextCount, "telmetry AggregationNbContextCount was wrong")
+ assert.Equal(t, uint64(1), tlm.AggregationNbContextSet, "telmetry AggregationNbContextSet was wrong")
+ assert.Equal(t, uint64(1), tlm.AggregationNbContextHistogram, "telmetry AggregationNbContextHistogram was wrong")
+ assert.Equal(t, uint64(1), tlm.AggregationNbContextDistribution, "telmetry AggregationNbContextDistribution was wrong")
+ assert.Equal(t, uint64(2), tlm.AggregationNbContextTiming, "telmetry AggregationNbContextTiming was wrong")
}
-func TestEvents(t *testing.T) {
- matrix := []struct {
- event *Event
- encoded string
+func Test_isOriginDetectionEnabled(t *testing.T) {
+ tests := []struct {
+ name string
+ o *Options
+ hasEntityID bool
+ configEnvVarValue string
+ want bool
}{
{
- NewEvent("Hello", "Something happened to my event"),
- `_e{5,30}:Hello|Something happened to my event`,
- }, {
- &Event{Title: "hi", Text: "okay", AggregationKey: "foo"},
- `_e{2,4}:hi|okay|k:foo`,
- }, {
- &Event{Title: "hi", Text: "okay", AggregationKey: "foo", AlertType: Info},
- `_e{2,4}:hi|okay|k:foo|t:info`,
- }, {
- &Event{Title: "hi", Text: "w/e", AlertType: Error, Priority: Normal},
- `_e{2,3}:hi|w/e|p:normal|t:error`,
- }, {
- &Event{Title: "hi", Text: "uh", Tags: []string{"host:foo", "app:bar"}},
- `_e{2,2}:hi|uh|#host:foo,app:bar`,
- }, {
- &Event{Title: "hi", Text: "line1\nline2", Tags: []string{"hello\nworld"}},
- `_e{2,12}:hi|line1\nline2|#helloworld`,
+ name: "nominal case",
+ o: &Options{originDetection: defaultOriginDetection},
+ hasEntityID: false,
+ configEnvVarValue: "",
+ want: true,
},
- }
-
- for _, m := range matrix {
- r, err := m.event.Encode()
- if err != nil {
- t.Errorf("Error encoding: %s\n", err)
- continue
- }
- if r != m.encoded {
- t.Errorf("Expected `%s`, got `%s`\n", m.encoded, r)
- }
- }
-
- e := NewEvent("", "hi")
- if _, err := e.Encode(); err == nil {
- t.Errorf("Expected error on empty Title.")
- }
-
- e = NewEvent("hi", "")
- if _, err := e.Encode(); err == nil {
- t.Errorf("Expected error on empty Text.")
- }
-
- e = NewEvent("hello", "world")
- s, err := e.Encode("tag1", "tag2")
- if err != nil {
- t.Error(err)
- }
- expected := "_e{5,5}:hello|world|#tag1,tag2"
- if s != expected {
- t.Errorf("Expected %s, got %s", expected, s)
- }
- if len(e.Tags) != 0 {
- t.Errorf("Modified event in place illegally.")
- }
-}
-
-func TestServiceChecks(t *testing.T) {
- matrix := []struct {
- serviceCheck *ServiceCheck
- encoded string
- }{
{
- NewServiceCheck("DataCatService", Ok),
- `_sc|DataCatService|0`,
- }, {
- NewServiceCheck("DataCatService", Warn),
- `_sc|DataCatService|1`,
- }, {
- NewServiceCheck("DataCatService", Critical),
- `_sc|DataCatService|2`,
- }, {
- NewServiceCheck("DataCatService", Unknown),
- `_sc|DataCatService|3`,
- }, {
- &ServiceCheck{Name: "DataCatService", Status: Ok, Hostname: "DataStation.Cat"},
- `_sc|DataCatService|0|h:DataStation.Cat`,
- }, {
- &ServiceCheck{Name: "DataCatService", Status: Ok, Hostname: "DataStation.Cat", Message: "Here goes valuable message"},
- `_sc|DataCatService|0|h:DataStation.Cat|m:Here goes valuable message`,
- }, {
- &ServiceCheck{Name: "DataCatService", Status: Ok, Hostname: "DataStation.Cat", Message: "Here are some cyrillic chars: к л м н о п р с т у ф х ц ч ш"},
- `_sc|DataCatService|0|h:DataStation.Cat|m:Here are some cyrillic chars: к л м н о п р с т у ф х ц ч ш`,
- }, {
- &ServiceCheck{Name: "DataCatService", Status: Ok, Hostname: "DataStation.Cat", Message: "Here goes valuable message", Tags: []string{"host:foo", "app:bar"}},
- `_sc|DataCatService|0|h:DataStation.Cat|#host:foo,app:bar|m:Here goes valuable message`,
- }, {
- &ServiceCheck{Name: "DataCatService", Status: Ok, Hostname: "DataStation.Cat", Message: "Here goes \n that should be escaped", Tags: []string{"host:foo", "app:b\nar"}},
- `_sc|DataCatService|0|h:DataStation.Cat|#host:foo,app:bar|m:Here goes \n that should be escaped`,
- }, {
- &ServiceCheck{Name: "DataCatService", Status: Ok, Hostname: "DataStation.Cat", Message: "Here goes m: that should be escaped", Tags: []string{"host:foo", "app:bar"}},
- `_sc|DataCatService|0|h:DataStation.Cat|#host:foo,app:bar|m:Here goes m\: that should be escaped`,
+ name: "has entity ID",
+ o: &Options{originDetection: defaultOriginDetection},
+ hasEntityID: true,
+ configEnvVarValue: "",
+ want: false,
+ },
+ {
+ name: "has user-provided container ID",
+ o: &Options{containerID: "user-provided"},
+ hasEntityID: true,
+ configEnvVarValue: "",
+ want: false,
+ },
+ {
+ name: "originDetection option disabled",
+ o: &Options{originDetection: false},
+ hasEntityID: false,
+ configEnvVarValue: "",
+ want: false,
+ },
+ {
+ name: "DD_ORIGIN_DETECTION_ENABLED=false",
+ o: &Options{originDetection: defaultOriginDetection},
+ hasEntityID: false,
+ configEnvVarValue: "false",
+ want: false,
+ },
+ {
+ name: "invalid DD_ORIGIN_DETECTION_ENABLED value",
+ o: &Options{originDetection: defaultOriginDetection},
+ hasEntityID: false,
+ configEnvVarValue: "invalid",
+ want: true,
},
}
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+ os.Setenv("DD_ORIGIN_DETECTION_ENABLED", tt.configEnvVarValue)
+ defer os.Unsetenv("DD_ORIGIN_DETECTION_ENABLED")
- for _, m := range matrix {
- r, err := m.serviceCheck.Encode()
- if err != nil {
- t.Errorf("Error encoding: %s\n", err)
- continue
- }
- if r != m.encoded {
- t.Errorf("Expected `%s`, got `%s`\n", m.encoded, r)
- }
- }
-
- sc := NewServiceCheck("", Ok)
- if _, err := sc.Encode(); err == nil {
- t.Errorf("Expected error on empty Name.")
- }
-
- sc = NewServiceCheck("sc", ServiceCheckStatus(5))
- if _, err := sc.Encode(); err == nil {
- t.Errorf("Expected error on invalid status value.")
- }
-
- sc = NewServiceCheck("hello", Warn)
- s, err := sc.Encode("tag1", "tag2")
- if err != nil {
- t.Error(err)
- }
- expected := "_sc|hello|1|#tag1,tag2"
- if s != expected {
- t.Errorf("Expected %s, got %s", expected, s)
- }
- if len(sc.Tags) != 0 {
- t.Errorf("Modified serviceCheck in place illegally.")
- }
-}
-
-func TestFlushOnClose(t *testing.T) {
- client, err := NewBuffered("localhost:1201", 64)
- if err != nil {
- t.Fatal(err)
- }
- // stop the flushing mechanism so we can test the buffer without interferences
- client.stop <- struct{}{}
-
- message := "test message"
-
- err = client.sendMsg(message)
- if err != nil {
- t.Fatal(err)
- }
-
- if len(client.commands) != 1 {
- t.Errorf("Commands buffer should contain 1 item, got %d", len(client.commands))
- }
-
- err = client.Close()
- if err != nil {
- t.Fatal(err)
- }
-
- if len(client.commands) != 0 {
- t.Errorf("Commands buffer should be empty, got %d", len(client.commands))
- }
-}
-
-// These benchmarks show that using different format options:
-// v1: sprintf-ing together a bunch of intermediate strings is 4-5x faster
-// v2: some use of buffer
-// v3: removing sprintf from stat generation and pushing stat building into format
-func BenchmarkFormatV3(b *testing.B) {
- b.StopTimer()
- c := &Client{}
- c.Namespace = "foo.bar."
- c.Tags = []string{"app:foo", "host:bar"}
- b.StartTimer()
- for i := 0; i < b.N; i++ {
- c.format("system.cpu.idle", 10, gaugeSuffix, []string{"foo"}, 1)
- c.format("system.cpu.load", 0.1, gaugeSuffix, nil, 0.9)
- }
-}
-
-func BenchmarkFormatV1(b *testing.B) {
- b.StopTimer()
- c := &Client{}
- c.Namespace = "foo.bar."
- c.Tags = []string{"app:foo", "host:bar"}
- b.StartTimer()
- for i := 0; i < b.N; i++ {
- c.formatV1("system.cpu.idle", 10, []string{"foo"}, 1)
- c.formatV1("system.cpu.load", 0.1, nil, 0.9)
- }
-}
-
-// V1 formatting function, added to client for tests
-func (c *Client) formatV1(name string, value float64, tags []string, rate float64) string {
- valueAsString := fmt.Sprintf("%f|g", value)
- if rate < 1 {
- valueAsString = fmt.Sprintf("%s|@%f", valueAsString, rate)
- }
- if c.Namespace != "" {
- name = fmt.Sprintf("%s%s", c.Namespace, name)
- }
-
- tags = append(c.Tags, tags...)
- if len(tags) > 0 {
- valueAsString = fmt.Sprintf("%s|#%s", valueAsString, strings.Join(tags, ","))
- }
-
- return fmt.Sprintf("%s:%s", name, valueAsString)
-
-}
-
-func BenchmarkFormatV2(b *testing.B) {
- b.StopTimer()
- c := &Client{}
- c.Namespace = "foo.bar."
- c.Tags = []string{"app:foo", "host:bar"}
- b.StartTimer()
- for i := 0; i < b.N; i++ {
- c.formatV2("system.cpu.idle", 10, []string{"foo"}, 1)
- c.formatV2("system.cpu.load", 0.1, nil, 0.9)
+ assert.Equal(t, tt.want, isOriginDetectionEnabled(tt.o, tt.hasEntityID))
+ })
}
}
-// V2 formatting function, added to client for tests
-func (c *Client) formatV2(name string, value float64, tags []string, rate float64) string {
- var buf bytes.Buffer
- if c.Namespace != "" {
- buf.WriteString(c.Namespace)
- }
- buf.WriteString(name)
- buf.WriteString(":")
- buf.WriteString(fmt.Sprintf("%f|g", value))
- if rate < 1 {
- buf.WriteString(`|@`)
- buf.WriteString(strconv.FormatFloat(rate, 'f', -1, 64))
- }
-
- writeTagString(&buf, c.Tags, tags)
+func TestMessageTooLongError(t *testing.T) {
+ client, err := New("localhost:8765", WithMaxBytesPerPayload(10), WithoutClientSideAggregation())
+ require.NoError(t, err)
- return buf.String()
+ err = client.Gauge("fake_name_", 21, nil, 1)
+ require.Error(t, err)
+ assert.IsType(t, MessageTooLongError{}, err)
}
diff --git a/statsd/telemetry.go b/statsd/telemetry.go
new file mode 100644
index 0000000..b3825e8
--- /dev/null
+++ b/statsd/telemetry.go
@@ -0,0 +1,274 @@
+package statsd
+
+import (
+ "fmt"
+ "sync"
+ "time"
+)
+
+/*
+telemetryInterval is the interval at which telemetry will be sent by the client.
+*/
+const telemetryInterval = 10 * time.Second
+
+/*
+clientTelemetryTag is a tag identifying this specific client.
+*/
+var clientTelemetryTag = "client:go"
+
+/*
+clientVersionTelemetryTag is a tag identifying this specific client version.
+*/
+var clientVersionTelemetryTag = "client_version:5.1.1"
+
+// Telemetry represents internal metrics about the client behavior since it started.
+type Telemetry struct {
+ //
+ // Those are produced by the 'Client'
+ //
+
+ // TotalMetrics is the total number of metrics sent by the client before aggregation and sampling.
+ TotalMetrics uint64
+ // TotalMetricsGauge is the total number of gauges sent by the client before aggregation and sampling.
+ TotalMetricsGauge uint64
+ // TotalMetricsCount is the total number of counts sent by the client before aggregation and sampling.
+ TotalMetricsCount uint64
+ // TotalMetricsHistogram is the total number of histograms sent by the client before aggregation and sampling.
+ TotalMetricsHistogram uint64
+ // TotalMetricsDistribution is the total number of distributions sent by the client before aggregation and
+ // sampling.
+ TotalMetricsDistribution uint64
+ // TotalMetricsSet is the total number of sets sent by the client before aggregation and sampling.
+ TotalMetricsSet uint64
+ // TotalMetricsTiming is the total number of timings sent by the client before aggregation and sampling.
+ TotalMetricsTiming uint64
+ // TotalEvents is the total number of events sent by the client before aggregation and sampling.
+ TotalEvents uint64
+ // TotalServiceChecks is the total number of service_checks sent by the client before aggregation and sampling.
+ TotalServiceChecks uint64
+
+ // TotalDroppedOnReceive is the total number metrics/event/service_checks dropped when using ChannelMode (see
+ // WithChannelMode option).
+ TotalDroppedOnReceive uint64
+
+ //
+ // Those are produced by the 'sender'
+ //
+
+ // TotalPayloadsSent is the total number of payload (packet on the network) succesfully sent by the client. When
+ // using UDP we don't know if packet dropped or not, so all packet are considered as succesfully sent.
+ TotalPayloadsSent uint64
+ // TotalPayloadsDropped is the total number of payload dropped by the client. This includes all cause of dropped
+ // (TotalPayloadsDroppedQueueFull and TotalPayloadsDroppedWriter). When using UDP This won't includes the
+ // network dropped.
+ TotalPayloadsDropped uint64
+ // TotalPayloadsDroppedWriter is the total number of payload dropped by the writer (when using UDS or named
+ // pipe) due to network timeout or error.
+ TotalPayloadsDroppedWriter uint64
+ // TotalPayloadsDroppedQueueFull is the total number of payload dropped internally because the queue of payloads
+ // waiting to be sent on the wire is full. This means the client is generating more metrics than can be sent on
+ // the wire. If your app sends metrics in batch look at WithSenderQueueSize option to increase the queue size.
+ TotalPayloadsDroppedQueueFull uint64
+
+ // TotalBytesSent is the total number of bytes succesfully sent by the client. When using UDP we don't know if
+ // packet dropped or not, so all packet are considered as succesfully sent.
+ TotalBytesSent uint64
+ // TotalBytesDropped is the total number of bytes dropped by the client. This includes all cause of dropped
+ // (TotalBytesDroppedQueueFull and TotalBytesDroppedWriter). When using UDP This
+ // won't includes the network dropped.
+ TotalBytesDropped uint64
+ // TotalBytesDroppedWriter is the total number of bytes dropped by the writer (when using UDS or named pipe) due
+ // to network timeout or error.
+ TotalBytesDroppedWriter uint64
+ // TotalBytesDroppedQueueFull is the total number of bytes dropped internally because the queue of payloads
+ // waiting to be sent on the wire is full. This means the client is generating more metrics than can be sent on
+ // the wire. If your app sends metrics in batch look at WithSenderQueueSize option to increase the queue size.
+ TotalBytesDroppedQueueFull uint64
+
+ //
+ // Those are produced by the 'aggregator'
+ //
+
+ // AggregationNbContext is the total number of contexts flushed by the aggregator when either
+ // WithClientSideAggregation or WithExtendedClientSideAggregation options are enabled.
+ AggregationNbContext uint64
+ // AggregationNbContextGauge is the total number of contexts for gauges flushed by the aggregator when either
+ // WithClientSideAggregation or WithExtendedClientSideAggregation options are enabled.
+ AggregationNbContextGauge uint64
+ // AggregationNbContextCount is the total number of contexts for counts flushed by the aggregator when either
+ // WithClientSideAggregation or WithExtendedClientSideAggregation options are enabled.
+ AggregationNbContextCount uint64
+ // AggregationNbContextSet is the total number of contexts for sets flushed by the aggregator when either
+ // WithClientSideAggregation or WithExtendedClientSideAggregation options are enabled.
+ AggregationNbContextSet uint64
+ // AggregationNbContextHistogram is the total number of contexts for histograms flushed by the aggregator when either
+ // WithClientSideAggregation or WithExtendedClientSideAggregation options are enabled.
+ AggregationNbContextHistogram uint64
+ // AggregationNbContextDistribution is the total number of contexts for distributions flushed by the aggregator when either
+ // WithClientSideAggregation or WithExtendedClientSideAggregation options are enabled.
+ AggregationNbContextDistribution uint64
+ // AggregationNbContextTiming is the total number of contexts for timings flushed by the aggregator when either
+ // WithClientSideAggregation or WithExtendedClientSideAggregation options are enabled.
+ AggregationNbContextTiming uint64
+}
+
+type telemetryClient struct {
+ c *Client
+ tags []string
+ aggEnabled bool // is aggregation enabled and should we sent aggregation telemetry.
+ tagsByType map[metricType][]string
+ sender *sender
+ worker *worker
+ lastSample Telemetry // The previous sample of telemetry sent
+}
+
+func newTelemetryClient(c *Client, transport string, aggregationEnabled bool) *telemetryClient {
+ t := &telemetryClient{
+ c: c,
+ tags: append(c.tags, clientTelemetryTag, clientVersionTelemetryTag, "client_transport:"+transport),
+ aggEnabled: aggregationEnabled,
+ tagsByType: map[metricType][]string{},
+ }
+
+ t.tagsByType[gauge] = append(append([]string{}, t.tags...), "metrics_type:gauge")
+ t.tagsByType[count] = append(append([]string{}, t.tags...), "metrics_type:count")
+ t.tagsByType[set] = append(append([]string{}, t.tags...), "metrics_type:set")
+ t.tagsByType[timing] = append(append([]string{}, t.tags...), "metrics_type:timing")
+ t.tagsByType[histogram] = append(append([]string{}, t.tags...), "metrics_type:histogram")
+ t.tagsByType[distribution] = append(append([]string{}, t.tags...), "metrics_type:distribution")
+ return t
+}
+
+func newTelemetryClientWithCustomAddr(c *Client, transport string, telemetryAddr string, aggregationEnabled bool, pool *bufferPool, writeTimeout time.Duration) (*telemetryClient, error) {
+ telemetryWriter, _, err := createWriter(telemetryAddr, writeTimeout)
+ if err != nil {
+ return nil, fmt.Errorf("Could not resolve telemetry address: %v", err)
+ }
+
+ t := newTelemetryClient(c, transport, aggregationEnabled)
+
+ // Creating a custom sender/worker with 1 worker in mutex mode for the
+ // telemetry that share the same bufferPool.
+ // FIXME due to performance pitfall, we're always using UDP defaults
+ // even for UDS.
+ t.sender = newSender(telemetryWriter, DefaultUDPBufferPoolSize, pool)
+ t.worker = newWorker(pool, t.sender)
+ return t, nil
+}
+
+func (t *telemetryClient) run(wg *sync.WaitGroup, stop chan struct{}) {
+ wg.Add(1)
+ go func() {
+ defer wg.Done()
+ ticker := time.NewTicker(telemetryInterval)
+ for {
+ select {
+ case <-ticker.C:
+ t.sendTelemetry()
+ case <-stop:
+ ticker.Stop()
+ if t.sender != nil {
+ t.sender.close()
+ }
+ return
+ }
+ }
+ }()
+}
+
+func (t *telemetryClient) sendTelemetry() {
+ for _, m := range t.flush() {
+ if t.worker != nil {
+ t.worker.processMetric(m)
+ } else {
+ t.c.send(m)
+ }
+ }
+
+ if t.worker != nil {
+ t.worker.flush()
+ }
+}
+
+func (t *telemetryClient) getTelemetry() Telemetry {
+ if t == nil {
+ // telemetry was disabled through the WithoutTelemetry option
+ return Telemetry{}
+ }
+
+ tlm := Telemetry{}
+ t.c.flushTelemetryMetrics(&tlm)
+ t.c.sender.flushTelemetryMetrics(&tlm)
+ t.c.agg.flushTelemetryMetrics(&tlm)
+
+ tlm.TotalMetrics = tlm.TotalMetricsGauge +
+ tlm.TotalMetricsCount +
+ tlm.TotalMetricsSet +
+ tlm.TotalMetricsHistogram +
+ tlm.TotalMetricsDistribution +
+ tlm.TotalMetricsTiming
+
+ tlm.TotalPayloadsDropped = tlm.TotalPayloadsDroppedQueueFull + tlm.TotalPayloadsDroppedWriter
+ tlm.TotalBytesDropped = tlm.TotalBytesDroppedQueueFull + tlm.TotalBytesDroppedWriter
+
+ if t.aggEnabled {
+ tlm.AggregationNbContext = tlm.AggregationNbContextGauge +
+ tlm.AggregationNbContextCount +
+ tlm.AggregationNbContextSet +
+ tlm.AggregationNbContextHistogram +
+ tlm.AggregationNbContextDistribution +
+ tlm.AggregationNbContextTiming
+ }
+ return tlm
+}
+
+// flushTelemetry returns Telemetry metrics to be flushed. It's its own function to ease testing.
+func (t *telemetryClient) flush() []metric {
+ m := []metric{}
+
+ // same as Count but without global namespace
+ telemetryCount := func(name string, value int64, tags []string) {
+ m = append(m, metric{metricType: count, name: name, ivalue: value, tags: tags, rate: 1})
+ }
+
+ tlm := t.getTelemetry()
+
+ // We send the diff between now and the previous telemetry flush. This keep the same telemetry behavior from V4
+ // so users dashboard's aren't broken when upgrading to V5. It also allow to graph on the same dashboard a mix
+ // of V4 and V5 apps.
+ telemetryCount("datadog.dogstatsd.client.metrics", int64(tlm.TotalMetrics-t.lastSample.TotalMetrics), t.tags)
+ telemetryCount("datadog.dogstatsd.client.metrics_by_type", int64(tlm.TotalMetricsGauge-t.lastSample.TotalMetricsGauge), t.tagsByType[gauge])
+ telemetryCount("datadog.dogstatsd.client.metrics_by_type", int64(tlm.TotalMetricsCount-t.lastSample.TotalMetricsCount), t.tagsByType[count])
+ telemetryCount("datadog.dogstatsd.client.metrics_by_type", int64(tlm.TotalMetricsHistogram-t.lastSample.TotalMetricsHistogram), t.tagsByType[histogram])
+ telemetryCount("datadog.dogstatsd.client.metrics_by_type", int64(tlm.TotalMetricsDistribution-t.lastSample.TotalMetricsDistribution), t.tagsByType[distribution])
+ telemetryCount("datadog.dogstatsd.client.metrics_by_type", int64(tlm.TotalMetricsSet-t.lastSample.TotalMetricsSet), t.tagsByType[set])
+ telemetryCount("datadog.dogstatsd.client.metrics_by_type", int64(tlm.TotalMetricsTiming-t.lastSample.TotalMetricsTiming), t.tagsByType[timing])
+ telemetryCount("datadog.dogstatsd.client.events", int64(tlm.TotalEvents-t.lastSample.TotalEvents), t.tags)
+ telemetryCount("datadog.dogstatsd.client.service_checks", int64(tlm.TotalServiceChecks-t.lastSample.TotalServiceChecks), t.tags)
+
+ telemetryCount("datadog.dogstatsd.client.metric_dropped_on_receive", int64(tlm.TotalDroppedOnReceive-t.lastSample.TotalDroppedOnReceive), t.tags)
+
+ telemetryCount("datadog.dogstatsd.client.packets_sent", int64(tlm.TotalPayloadsSent-t.lastSample.TotalPayloadsSent), t.tags)
+ telemetryCount("datadog.dogstatsd.client.packets_dropped", int64(tlm.TotalPayloadsDropped-t.lastSample.TotalPayloadsDropped), t.tags)
+ telemetryCount("datadog.dogstatsd.client.packets_dropped_queue", int64(tlm.TotalPayloadsDroppedQueueFull-t.lastSample.TotalPayloadsDroppedQueueFull), t.tags)
+ telemetryCount("datadog.dogstatsd.client.packets_dropped_writer", int64(tlm.TotalPayloadsDroppedWriter-t.lastSample.TotalPayloadsDroppedWriter), t.tags)
+
+ telemetryCount("datadog.dogstatsd.client.bytes_dropped", int64(tlm.TotalBytesDropped-t.lastSample.TotalBytesDropped), t.tags)
+ telemetryCount("datadog.dogstatsd.client.bytes_sent", int64(tlm.TotalBytesSent-t.lastSample.TotalBytesSent), t.tags)
+ telemetryCount("datadog.dogstatsd.client.bytes_dropped_queue", int64(tlm.TotalBytesDroppedQueueFull-t.lastSample.TotalBytesDroppedQueueFull), t.tags)
+ telemetryCount("datadog.dogstatsd.client.bytes_dropped_writer", int64(tlm.TotalBytesDroppedWriter-t.lastSample.TotalBytesDroppedWriter), t.tags)
+
+ if t.aggEnabled {
+ telemetryCount("datadog.dogstatsd.client.aggregated_context", int64(tlm.AggregationNbContext-t.lastSample.AggregationNbContext), t.tags)
+ telemetryCount("datadog.dogstatsd.client.aggregated_context_by_type", int64(tlm.AggregationNbContextGauge-t.lastSample.AggregationNbContextGauge), t.tagsByType[gauge])
+ telemetryCount("datadog.dogstatsd.client.aggregated_context_by_type", int64(tlm.AggregationNbContextSet-t.lastSample.AggregationNbContextSet), t.tagsByType[set])
+ telemetryCount("datadog.dogstatsd.client.aggregated_context_by_type", int64(tlm.AggregationNbContextCount-t.lastSample.AggregationNbContextCount), t.tagsByType[count])
+ telemetryCount("datadog.dogstatsd.client.aggregated_context_by_type", int64(tlm.AggregationNbContextHistogram-t.lastSample.AggregationNbContextHistogram), t.tagsByType[histogram])
+ telemetryCount("datadog.dogstatsd.client.aggregated_context_by_type", int64(tlm.AggregationNbContextDistribution-t.lastSample.AggregationNbContextDistribution), t.tagsByType[distribution])
+ telemetryCount("datadog.dogstatsd.client.aggregated_context_by_type", int64(tlm.AggregationNbContextTiming-t.lastSample.AggregationNbContextTiming), t.tagsByType[timing])
+ }
+
+ t.lastSample = tlm
+
+ return m
+}
diff --git a/statsd/telemetry_test.go b/statsd/telemetry_test.go
new file mode 100644
index 0000000..d5bc4ca
--- /dev/null
+++ b/statsd/telemetry_test.go
@@ -0,0 +1,96 @@
+package statsd
+
+import (
+ "fmt"
+ "io"
+ "net"
+ "sort"
+ "strings"
+ "testing"
+ "time"
+
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
+)
+
+//
+// Most of the behavior of the telemetry is tested in the end_to_end_test.go file
+//
+
+func TestTelemetryCustomAddr(t *testing.T) {
+ telAddr := "localhost:8764"
+ ts, client := newClientAndTestServer(t,
+ "udp",
+ "localhost:8765",
+ nil,
+ WithTelemetryAddr(telAddr),
+ WithNamespace("test_namespace"),
+ )
+
+ udpAddr, err := net.ResolveUDPAddr("udp", telAddr)
+ require.Nil(t, err, fmt.Sprintf("could not resolve udp '%s': %s", telAddr, err))
+ server, err := net.ListenUDP("udp", udpAddr)
+ require.Nil(t, err, fmt.Sprintf("could not listen to UDP addr: %s", err))
+ defer server.Close()
+
+ expectedResult := []string{
+ "datadog.dogstatsd.client.metrics:9|c|#client:go," + clientVersionTelemetryTag + ",client_transport:udp",
+ "datadog.dogstatsd.client.metrics_by_type:1|c|#client:go," + clientVersionTelemetryTag + ",client_transport:udp,metrics_type:gauge",
+ "datadog.dogstatsd.client.metrics_by_type:3|c|#client:go," + clientVersionTelemetryTag + ",client_transport:udp,metrics_type:count",
+ "datadog.dogstatsd.client.metrics_by_type:1|c|#client:go," + clientVersionTelemetryTag + ",client_transport:udp,metrics_type:histogram",
+ "datadog.dogstatsd.client.metrics_by_type:1|c|#client:go," + clientVersionTelemetryTag + ",client_transport:udp,metrics_type:distribution",
+ "datadog.dogstatsd.client.metrics_by_type:1|c|#client:go," + clientVersionTelemetryTag + ",client_transport:udp,metrics_type:set",
+ "datadog.dogstatsd.client.metrics_by_type:2|c|#client:go," + clientVersionTelemetryTag + ",client_transport:udp,metrics_type:timing",
+ "datadog.dogstatsd.client.events:1|c|#client:go," + clientVersionTelemetryTag + ",client_transport:udp",
+ "datadog.dogstatsd.client.service_checks:1|c|#client:go," + clientVersionTelemetryTag + ",client_transport:udp",
+ "datadog.dogstatsd.client.metric_dropped_on_receive:0|c|#client:go," + clientVersionTelemetryTag + ",client_transport:udp",
+ "datadog.dogstatsd.client.packets_sent:10|c|#client:go," + clientVersionTelemetryTag + ",client_transport:udp",
+ "datadog.dogstatsd.client.bytes_sent:473|c|#client:go," + clientVersionTelemetryTag + ",client_transport:udp",
+ "datadog.dogstatsd.client.packets_dropped:0|c|#client:go," + clientVersionTelemetryTag + ",client_transport:udp",
+ "datadog.dogstatsd.client.bytes_dropped:0|c|#client:go," + clientVersionTelemetryTag + ",client_transport:udp",
+ "datadog.dogstatsd.client.packets_dropped_queue:0|c|#client:go," + clientVersionTelemetryTag + ",client_transport:udp",
+ "datadog.dogstatsd.client.bytes_dropped_queue:0|c|#client:go," + clientVersionTelemetryTag + ",client_transport:udp",
+ "datadog.dogstatsd.client.packets_dropped_writer:0|c|#client:go," + clientVersionTelemetryTag + ",client_transport:udp",
+ "datadog.dogstatsd.client.bytes_dropped_writer:0|c|#client:go," + clientVersionTelemetryTag + ",client_transport:udp",
+ "datadog.dogstatsd.client.aggregated_context:5|c|#client:go," + clientVersionTelemetryTag + ",client_transport:udp",
+ "datadog.dogstatsd.client.aggregated_context_by_type:0|c|#client:go," + clientVersionTelemetryTag + ",client_transport:udp,metrics_type:distribution",
+ "datadog.dogstatsd.client.aggregated_context_by_type:0|c|#client:go," + clientVersionTelemetryTag + ",client_transport:udp,metrics_type:histogram",
+ "datadog.dogstatsd.client.aggregated_context_by_type:0|c|#client:go," + clientVersionTelemetryTag + ",client_transport:udp,metrics_type:timing",
+ "datadog.dogstatsd.client.aggregated_context_by_type:1|c|#client:go," + clientVersionTelemetryTag + ",client_transport:udp,metrics_type:gauge",
+ "datadog.dogstatsd.client.aggregated_context_by_type:1|c|#client:go," + clientVersionTelemetryTag + ",client_transport:udp,metrics_type:set",
+ "datadog.dogstatsd.client.aggregated_context_by_type:3|c|#client:go," + clientVersionTelemetryTag + ",client_transport:udp,metrics_type:count",
+ }
+ expectedSize := 0
+ for _, s := range expectedResult {
+ expectedSize += len(s)
+ }
+ sort.Strings(expectedResult)
+
+ readDone := make(chan struct{})
+ buffer := make([]byte, 10000)
+ n := 0
+ go func() {
+ n, _ = io.ReadAtLeast(server, buffer, expectedSize)
+ close(readDone)
+ }()
+
+ ts.sendAllType(client)
+ client.Flush()
+ client.telemetryClient.sendTelemetry()
+
+ select {
+ case <-readDone:
+ case <-time.After(2 * time.Second):
+ require.Fail(t, "No data was flush on Close")
+ }
+
+ result := []string{}
+ for _, s := range strings.Split(string(buffer[:n]), "\n") {
+ if s != "" {
+ result = append(result, s)
+ }
+ }
+ sort.Strings(result)
+
+ assert.Equal(t, expectedResult, result)
+}
diff --git a/statsd/test_helpers_test.go b/statsd/test_helpers_test.go
new file mode 100644
index 0000000..4af5787
--- /dev/null
+++ b/statsd/test_helpers_test.go
@@ -0,0 +1,561 @@
+package statsd
+
+import (
+ "fmt"
+ "io"
+ "net"
+ "os"
+ "sort"
+ "strings"
+ "sync"
+ "testing"
+ "time"
+
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
+)
+
+type testTelemetryData struct {
+ gauge int
+ count int
+ histogram int
+ distribution int
+ set int
+ timing int
+ event int
+ service_check int
+
+ aggregated_context int
+ aggregated_gauge int
+ aggregated_set int
+ aggregated_count int
+ aggregated_histogram int
+ aggregated_distribution int
+ aggregated_timing int
+
+ metric_dropped_on_receive int
+ packets_sent int
+ packets_dropped int
+ packets_dropped_queue int
+ packets_dropped_writer int
+ bytes_sent int
+ bytes_dropped int
+ bytes_dropped_queue int
+ bytes_dropped_writer int
+}
+
+// testServer acts as a fake server and keeps track of what was sent to a client. This allows end-to-end testing of the
+// dogstatsd client
+type testServer struct {
+ sync.Mutex
+
+ conn io.ReadCloser
+ data []string
+ errors []string
+ readData []string
+ proto string
+ addr string
+ stopped chan struct{}
+ tags string
+ namespace string
+ containerID string
+
+ aggregation bool
+ extendedAggregation bool
+ telemetry testTelemetryData
+ telemetryEnabled bool
+}
+
+func newClientAndTestServer(t *testing.T, proto string, addr string, tags []string, options ...Option) (*testServer, *Client) {
+
+ opt, err := resolveOptions(options)
+ require.NoError(t, err)
+
+ ts := &testServer{
+ proto: proto,
+ data: []string{},
+ addr: addr,
+ stopped: make(chan struct{}),
+ aggregation: opt.aggregation,
+ extendedAggregation: opt.extendedAggregation,
+ telemetryEnabled: opt.telemetry,
+ telemetry: testTelemetryData{},
+ namespace: opt.namespace,
+ }
+
+ if tags != nil {
+ ts.tags = strings.Join(tags, ",")
+ }
+
+ switch proto {
+ case "udp":
+ udpAddr, err := net.ResolveUDPAddr("udp", addr)
+ require.NoError(t, err)
+
+ conn, err := net.ListenUDP("udp", udpAddr)
+ require.NoError(t, err)
+ ts.conn = conn
+ case "uds":
+ socketPath := addr[7:]
+ address, err := net.ResolveUnixAddr("unixgram", socketPath)
+ require.NoError(t, err)
+ conn, err := net.ListenUnixgram("unixgram", address)
+ require.NoError(t, err)
+ err = os.Chmod(socketPath, 0722)
+ require.NoError(t, err)
+ ts.conn = conn
+ default:
+ require.FailNow(t, "unknown proto '%s'", proto)
+ }
+
+ client, err := New(addr, options...)
+ require.NoError(t, err)
+
+ ts.containerID = getContainerID()
+
+ go ts.start()
+ return ts, client
+}
+
+func (ts *testServer) start() {
+ buffer := make([]byte, 2048)
+ for {
+ n, err := ts.conn.Read(buffer)
+ if err != nil {
+ // connection has been closed
+ if strings.HasSuffix(err.Error(), " use of closed network connection") {
+ return
+ }
+ ts.errors = append(ts.errors, err.Error())
+ continue
+ }
+ readData := string(buffer[:n])
+ if n != 0 {
+ ts.readData = append(ts.readData, readData)
+ }
+
+ payload := strings.Split(readData, "\n")
+ ts.Lock()
+ for _, s := range payload {
+ if s != "" {
+ ts.data = append(ts.data, s)
+ }
+ }
+ ts.Unlock()
+ }
+}
+
+func (ts *testServer) assertMetric(t *testing.T, received []string, expected []string) {
+ sort.Strings(expected)
+ sort.Strings(received)
+
+ assert.Equal(t, len(expected), len(received), fmt.Sprintf("expected %d metrics but got actual %d", len(expected), len(received)))
+
+ if os.Getenv("PRINT_METRICS") != "" && len(expected) != len(received) {
+ fmt.Printf("received:\n")
+ for _, m := range received {
+ fmt.Printf(" %s\n", m)
+ }
+
+ fmt.Printf("\nexpected:\n")
+ for _, m := range expected {
+ fmt.Printf(" %s\n", m)
+ }
+ }
+
+ min := len(received)
+ if len(expected) < min {
+ min = len(expected)
+ }
+
+ for idx := 0; idx < min; idx++ {
+ if strings.HasPrefix(expected[idx], "datadog.dogstatsd.client.bytes_sent") {
+ continue
+ }
+ if strings.HasPrefix(expected[idx], "datadog.dogstatsd.client.packets_sent") {
+ continue
+ }
+ assert.Equal(t, expected[idx], received[idx])
+ }
+}
+
+func (ts *testServer) stop() {
+ ts.conn.Close()
+ close(ts.stopped)
+}
+
+func (ts *testServer) wait(t *testing.T, nbExpectedMetric int, timeout int, waitForTelemetry bool) {
+ start := time.Now()
+ for {
+ ts.Lock()
+ if nbExpectedMetric <= len(ts.data) {
+ ts.Unlock()
+ return
+ } else if time.Now().Sub(start) > time.Duration(timeout)*time.Second {
+ ts.Unlock()
+ require.FailNowf(t, "timeout while waiting for metrics", "%d metrics expected but only %d were received after %s\n", nbExpectedMetric, len(ts.data), time.Now().Sub(start))
+ return
+ }
+ ts.Unlock()
+ time.Sleep(100 * time.Millisecond)
+ }
+}
+
+func (ts *testServer) assertNbRead(t *testing.T, expectedNbRead int) {
+ errorMsg := ""
+ for idx, s := range ts.readData {
+ errorMsg += fmt.Sprintf("read %d:\n%s\n\n", idx, s)
+ }
+ assert.Equal(t, expectedNbRead, len(ts.readData), "expected %d read but got %d:\n%s", expectedNbRead, len(ts.readData), errorMsg)
+}
+
+// meta helper: take a list of expected metrics and assert
+func (ts *testServer) assert(t *testing.T, client *Client, expectedMetrics []string) {
+ // First wait for all the metrics to be sent. This is important when using channel mode + aggregation as we
+ // don't know when all the metrics will be fully aggregated
+ ts.wait(t, len(expectedMetrics), 5, false)
+
+ if ts.telemetryEnabled {
+ // Now that all the metrics have been handled we can flush the telemetry before the default interval of
+ // 10s
+ client.telemetryClient.sendTelemetry()
+ expectedMetrics = append(expectedMetrics, ts.getTelemetry()...)
+ // Wait for the telemetry to arrive
+ ts.wait(t, len(expectedMetrics), 5, true)
+ }
+
+ client.Close()
+ ts.stop()
+ received := ts.getData()
+ ts.assertMetric(t, received, expectedMetrics)
+ assert.Empty(t, ts.errors)
+}
+
+func (ts *testServer) assertContainerID(t *testing.T, expected string) {
+ assert.Equal(t, expected, ts.containerID)
+}
+
+// meta helper: most test send all types and then assert
+func (ts *testServer) sendAllAndAssert(t *testing.T, client *Client) {
+ expectedMetrics := ts.sendAllType(client)
+ ts.assert(t, client, expectedMetrics)
+}
+
+func (ts *testServer) getData() []string {
+ ts.Lock()
+ defer ts.Unlock()
+
+ data := make([]string, len(ts.data))
+ copy(data, ts.data)
+ return data
+}
+
+func (ts *testServer) getTelemetry() []string {
+ ts.Lock()
+ defer ts.Unlock()
+
+ tags := ts.getFinalTelemetryTags()
+
+ totalMetrics := ts.telemetry.gauge +
+ ts.telemetry.count +
+ ts.telemetry.histogram +
+ ts.telemetry.distribution +
+ ts.telemetry.set +
+ ts.telemetry.timing
+
+ containerID := ts.getContainerID()
+
+ metrics := []string{
+ fmt.Sprintf("datadog.dogstatsd.client.metrics:%d|c%s", totalMetrics, tags) + containerID,
+ fmt.Sprintf("datadog.dogstatsd.client.events:%d|c%s", ts.telemetry.event, tags) + containerID,
+ fmt.Sprintf("datadog.dogstatsd.client.service_checks:%d|c%s", ts.telemetry.service_check, tags) + containerID,
+ fmt.Sprintf("datadog.dogstatsd.client.metric_dropped_on_receive:%d|c%s", ts.telemetry.metric_dropped_on_receive, tags) + containerID,
+ fmt.Sprintf("datadog.dogstatsd.client.packets_sent:%d|c%s", ts.telemetry.packets_sent, tags) + containerID,
+ fmt.Sprintf("datadog.dogstatsd.client.packets_dropped:%d|c%s", ts.telemetry.packets_dropped, tags) + containerID,
+ fmt.Sprintf("datadog.dogstatsd.client.packets_dropped_queue:%d|c%s", ts.telemetry.packets_dropped_queue, tags) + containerID,
+ fmt.Sprintf("datadog.dogstatsd.client.packets_dropped_writer:%d|c%s", ts.telemetry.packets_dropped_writer, tags) + containerID,
+ fmt.Sprintf("datadog.dogstatsd.client.bytes_sent:%d|c%s", ts.telemetry.bytes_sent, tags) + containerID,
+ fmt.Sprintf("datadog.dogstatsd.client.bytes_dropped:%d|c%s", ts.telemetry.bytes_dropped, tags) + containerID,
+ fmt.Sprintf("datadog.dogstatsd.client.bytes_dropped_queue:%d|c%s", ts.telemetry.bytes_dropped_queue, tags) + containerID,
+ fmt.Sprintf("datadog.dogstatsd.client.bytes_dropped_writer:%d|c%s", ts.telemetry.bytes_dropped_writer, tags) + containerID,
+ fmt.Sprintf("datadog.dogstatsd.client.metrics_by_type:%d|c%s,metrics_type:gauge", ts.telemetry.gauge, tags) + containerID,
+ fmt.Sprintf("datadog.dogstatsd.client.metrics_by_type:%d|c%s,metrics_type:count", ts.telemetry.count, tags) + containerID,
+ fmt.Sprintf("datadog.dogstatsd.client.metrics_by_type:%d|c%s,metrics_type:histogram", ts.telemetry.histogram, tags) + containerID,
+ fmt.Sprintf("datadog.dogstatsd.client.metrics_by_type:%d|c%s,metrics_type:distribution", ts.telemetry.distribution, tags) + containerID,
+ fmt.Sprintf("datadog.dogstatsd.client.metrics_by_type:%d|c%s,metrics_type:set", ts.telemetry.set, tags) + containerID,
+ fmt.Sprintf("datadog.dogstatsd.client.metrics_by_type:%d|c%s,metrics_type:timing", ts.telemetry.timing, tags) + containerID,
+ }
+
+ if ts.aggregation {
+ metrics = append(metrics, []string{
+ fmt.Sprintf("datadog.dogstatsd.client.aggregated_context:%d|c%s", ts.telemetry.aggregated_context, tags) + containerID,
+ fmt.Sprintf("datadog.dogstatsd.client.aggregated_context_by_type:%d|c%s,metrics_type:gauge", ts.telemetry.aggregated_gauge, tags) + containerID,
+ fmt.Sprintf("datadog.dogstatsd.client.aggregated_context_by_type:%d|c%s,metrics_type:count", ts.telemetry.aggregated_count, tags) + containerID,
+ fmt.Sprintf("datadog.dogstatsd.client.aggregated_context_by_type:%d|c%s,metrics_type:set", ts.telemetry.aggregated_set, tags) + containerID,
+ fmt.Sprintf("datadog.dogstatsd.client.aggregated_context_by_type:%d|c%s,metrics_type:distribution", ts.telemetry.aggregated_distribution, tags) + containerID,
+ fmt.Sprintf("datadog.dogstatsd.client.aggregated_context_by_type:%d|c%s,metrics_type:histogram", ts.telemetry.aggregated_histogram, tags) + containerID,
+ fmt.Sprintf("datadog.dogstatsd.client.aggregated_context_by_type:%d|c%s,metrics_type:timing", ts.telemetry.aggregated_timing, tags) + containerID,
+ }...)
+ }
+ return metrics
+}
+
+// Default testing scenarios
+
+func (ts *testServer) getFinalTags(t ...string) string {
+ if t == nil && ts.tags == "" {
+ return ""
+ }
+
+ res := "|#"
+ if ts.tags != "" {
+ res += ts.tags
+ }
+
+ if t != nil {
+ if ts.tags != "" {
+ res += ","
+ }
+ res += strings.Join(t, ",")
+ }
+ return res
+}
+
+func (ts *testServer) getContainerID() string {
+ if ts.containerID == "" {
+ return ""
+ }
+ return "|c:" + ts.containerID
+}
+
+func (ts *testServer) getFinalTelemetryTags() string {
+ base := "|#"
+ if ts.tags != "" {
+ base += ts.tags + ","
+ }
+ return base + strings.Join(
+ []string{clientTelemetryTag, clientVersionTelemetryTag, "client_transport:" + ts.proto},
+ ",")
+}
+
+func (ts *testServer) sendAllMetrics(c *Client) []string {
+ tags := []string{"custom:1", "custom:2"}
+ c.Gauge("Gauge", 1, tags, 1)
+ c.Count("Count", 2, tags, 1)
+ c.Histogram("Histogram", 3, tags, 1)
+ c.Distribution("Distribution", 4, tags, 1)
+ c.Decr("Decr", tags, 1)
+ c.Incr("Incr", tags, 1)
+ c.Set("Set", "value", tags, 1)
+ c.Timing("Timing", 5*time.Second, tags, 1)
+ c.TimeInMilliseconds("TimeInMilliseconds", 6, tags, 1)
+
+ ts.telemetry.gauge += 1
+ ts.telemetry.histogram += 1
+ ts.telemetry.distribution += 1
+ ts.telemetry.count += 3
+ ts.telemetry.set += 1
+ ts.telemetry.timing += 2
+
+ if ts.aggregation {
+ ts.telemetry.aggregated_context += 5
+ ts.telemetry.aggregated_gauge += 1
+ ts.telemetry.aggregated_count += 3
+ ts.telemetry.aggregated_set += 1
+ }
+ if ts.extendedAggregation {
+ ts.telemetry.aggregated_context += 4
+ ts.telemetry.aggregated_histogram += 1
+ ts.telemetry.aggregated_distribution += 1
+ ts.telemetry.aggregated_timing += 2
+ }
+
+ finalTags := ts.getFinalTags(tags...)
+ containerID := ts.getContainerID()
+
+ return []string{
+ ts.namespace + "Gauge:1|g" + finalTags + containerID,
+ ts.namespace + "Count:2|c" + finalTags + containerID,
+ ts.namespace + "Histogram:3|h" + finalTags + containerID,
+ ts.namespace + "Distribution:4|d" + finalTags + containerID,
+ ts.namespace + "Decr:-1|c" + finalTags + containerID,
+ ts.namespace + "Incr:1|c" + finalTags + containerID,
+ ts.namespace + "Set:value|s" + finalTags + containerID,
+ ts.namespace + "Timing:5000.000000|ms" + finalTags + containerID,
+ ts.namespace + "TimeInMilliseconds:6.000000|ms" + finalTags + containerID,
+ }
+}
+
+func (ts *testServer) sendAllMetricsForBasicAggregation(c *Client) []string {
+ tags := []string{"custom:1", "custom:2"}
+ c.Gauge("Gauge", 1, tags, 1)
+ c.Gauge("Gauge", 2, tags, 1)
+ c.Count("Count", 2, tags, 1)
+ c.Count("Count", 2, tags, 1)
+ c.Histogram("Histogram", 3, tags, 1)
+ c.Distribution("Distribution", 4, tags, 1)
+ c.Decr("Decr", tags, 1)
+ c.Decr("Decr", tags, 1)
+ c.Incr("Incr", tags, 1)
+ c.Incr("Incr", tags, 1)
+ c.Set("Set", "value", tags, 1)
+ c.Set("Set", "value", tags, 1)
+ c.Timing("Timing", 5*time.Second, tags, 1)
+ c.TimeInMilliseconds("TimeInMilliseconds", 6, tags, 1)
+
+ ts.telemetry.gauge += 2
+ ts.telemetry.histogram += 1
+ ts.telemetry.distribution += 1
+ ts.telemetry.count += 6
+ ts.telemetry.set += 2
+ ts.telemetry.timing += 2
+
+ if ts.aggregation {
+ ts.telemetry.aggregated_context += 5
+ ts.telemetry.aggregated_gauge += 1
+ ts.telemetry.aggregated_count += 3
+ ts.telemetry.aggregated_set += 1
+ }
+ if ts.extendedAggregation {
+ ts.telemetry.aggregated_context += 4
+ ts.telemetry.aggregated_histogram += 1
+ ts.telemetry.aggregated_distribution += 1
+ ts.telemetry.aggregated_timing += 2
+ }
+
+ finalTags := ts.getFinalTags(tags...)
+ containerID := ts.getContainerID()
+
+ return []string{
+ ts.namespace + "Gauge:2|g" + finalTags + containerID,
+ ts.namespace + "Count:4|c" + finalTags + containerID,
+ ts.namespace + "Histogram:3|h" + finalTags + containerID,
+ ts.namespace + "Distribution:4|d" + finalTags + containerID,
+ ts.namespace + "Decr:-2|c" + finalTags + containerID,
+ ts.namespace + "Incr:2|c" + finalTags + containerID,
+ ts.namespace + "Set:value|s" + finalTags + containerID,
+ ts.namespace + "Timing:5000.000000|ms" + finalTags + containerID,
+ ts.namespace + "TimeInMilliseconds:6.000000|ms" + finalTags + containerID,
+ }
+}
+
+func (ts *testServer) sendAllMetricsForExtendedAggregation(c *Client) []string {
+ tags := []string{"custom:1", "custom:2"}
+ c.Gauge("Gauge", 1, tags, 1)
+ c.Gauge("Gauge", 2, tags, 1)
+ c.Count("Count", 2, tags, 1)
+ c.Count("Count", 2, tags, 1)
+ c.Histogram("Histogram", 3, tags, 1)
+ c.Histogram("Histogram", 3, tags, 1)
+ c.Distribution("Distribution", 4, tags, 1)
+ c.Distribution("Distribution", 4, tags, 1)
+ c.Decr("Decr", tags, 1)
+ c.Decr("Decr", tags, 1)
+ c.Incr("Incr", tags, 1)
+ c.Incr("Incr", tags, 1)
+ c.Set("Set", "value", tags, 1)
+ c.Set("Set", "value", tags, 1)
+ c.Timing("Timing", 5*time.Second, tags, 1)
+ c.Timing("Timing", 5*time.Second, tags, 1)
+ c.TimeInMilliseconds("TimeInMilliseconds", 6, tags, 1)
+ c.TimeInMilliseconds("TimeInMilliseconds", 6, tags, 1)
+
+ ts.telemetry.gauge += 2
+ ts.telemetry.histogram += 2
+ ts.telemetry.distribution += 2
+ ts.telemetry.count += 6
+ ts.telemetry.set += 2
+ ts.telemetry.timing += 4
+
+ if ts.aggregation {
+ ts.telemetry.aggregated_context += 5
+ ts.telemetry.aggregated_gauge += 1
+ ts.telemetry.aggregated_count += 3
+ ts.telemetry.aggregated_set += 1
+ }
+ if ts.extendedAggregation {
+ ts.telemetry.aggregated_context += 4
+ ts.telemetry.aggregated_histogram += 1
+ ts.telemetry.aggregated_distribution += 1
+ ts.telemetry.aggregated_timing += 2
+ }
+
+ finalTags := ts.getFinalTags(tags...)
+ containerID := ts.getContainerID()
+
+ return []string{
+ ts.namespace + "Gauge:2|g" + finalTags + containerID,
+ ts.namespace + "Count:4|c" + finalTags + containerID,
+ ts.namespace + "Histogram:3:3|h" + finalTags + containerID,
+ ts.namespace + "Distribution:4:4|d" + finalTags + containerID,
+ ts.namespace + "Decr:-2|c" + finalTags + containerID,
+ ts.namespace + "Incr:2|c" + finalTags + containerID,
+ ts.namespace + "Set:value|s" + finalTags + containerID,
+ ts.namespace + "Timing:5000.000000:5000.000000|ms" + finalTags + containerID,
+ ts.namespace + "TimeInMilliseconds:6.000000:6.000000|ms" + finalTags + containerID,
+ }
+}
+
+func (ts *testServer) sendAllType(c *Client) []string {
+ res := ts.sendAllMetrics(c)
+ c.SimpleEvent("hello", "world")
+ c.SimpleServiceCheck("hello", Warn)
+
+ ts.telemetry.event += 1
+ ts.telemetry.service_check += 1
+
+ finalTags := ts.getFinalTags()
+ containerID := ts.getContainerID()
+
+ return append(
+ res,
+ "_e{5,5}:hello|world"+finalTags+containerID,
+ "_sc|hello|1"+finalTags+containerID,
+ )
+}
+
+func (ts *testServer) sendBasicAggregationMetrics(client *Client) []string {
+ tags := []string{"custom:1", "custom:2"}
+ client.Gauge("gauge", 1, tags, 1)
+ client.Gauge("gauge", 21, tags, 1)
+ client.Count("count", 1, tags, 1)
+ client.Count("count", 3, tags, 1)
+ client.Set("set", "my_id", tags, 1)
+ client.Set("set", "my_id", tags, 1)
+
+ finalTags := ts.getFinalTags(tags...)
+ containerID := ts.getContainerID()
+ return []string{
+ ts.namespace + "set:my_id|s" + finalTags + containerID,
+ ts.namespace + "gauge:21|g" + finalTags + containerID,
+ ts.namespace + "count:4|c" + finalTags + containerID,
+ }
+}
+
+func (ts *testServer) sendExtendedBasicAggregationMetrics(client *Client) []string {
+ tags := []string{"custom:1", "custom:2"}
+ client.Gauge("gauge", 1, tags, 1)
+ client.Count("count", 2, tags, 1)
+ client.Set("set", "3_id", tags, 1)
+ client.Histogram("histo", 4, tags, 1)
+ client.Distribution("distro", 5, tags, 1)
+ client.Timing("timing", 6*time.Second, tags, 1)
+
+ finalTags := ts.getFinalTags(tags...)
+ containerID := ts.getContainerID()
+ return []string{
+ ts.namespace + "gauge:1|g" + finalTags + containerID,
+ ts.namespace + "count:2|c" + finalTags + containerID,
+ ts.namespace + "set:3_id|s" + finalTags + containerID,
+ ts.namespace + "histo:4|h" + finalTags + containerID,
+ ts.namespace + "distro:5|d" + finalTags + containerID,
+ ts.namespace + "timing:6000.000000|ms" + finalTags + containerID,
+ }
+}
+
+func patchContainerID(id string) { containerID = id }
+
+func resetContainerID() {
+ containerID = ""
+ initOnce = sync.Once{}
+}
diff --git a/statsd/udp.go b/statsd/udp.go
index 8af522c..e2922a9 100644
--- a/statsd/udp.go
+++ b/statsd/udp.go
@@ -1,7 +1,6 @@
package statsd
import (
- "errors"
"net"
"time"
)
@@ -12,7 +11,7 @@ type udpWriter struct {
}
// New returns a pointer to a new udpWriter given an addr in the format "hostname:port".
-func newUDPWriter(addr string) (*udpWriter, error) {
+func newUDPWriter(addr string, _ time.Duration) (*udpWriter, error) {
udpAddr, err := net.ResolveUDPAddr("udp", addr)
if err != nil {
return nil, err
@@ -25,11 +24,6 @@ func newUDPWriter(addr string) (*udpWriter, error) {
return writer, nil
}
-// SetWriteTimeout is not needed for UDP, returns error
-func (w *udpWriter) SetWriteTimeout(d time.Duration) error {
- return errors.New("SetWriteTimeout: not supported for UDP connections")
-}
-
// Write data to the UDP connection with no error handling
func (w *udpWriter) Write(data []byte) (int, error) {
return w.conn.Write(data)
diff --git a/statsd/uds.go b/statsd/uds.go
index 31154ab..fa5f591 100644
--- a/statsd/uds.go
+++ b/statsd/uds.go
@@ -1,16 +1,13 @@
+// +build !windows
+
package statsd
import (
"net"
+ "sync"
"time"
)
-/*
-UDSTimeout holds the default timeout for UDS socket writes, as they can get
-blocking when the receiving buffer is full.
-*/
-const defaultUDSTimeout = 1 * time.Millisecond
-
// udsWriter is an internal class wrapping around management of UDS connection
type udsWriter struct {
// Address to send metrics to, needed to allow reconnection on error
@@ -19,41 +16,34 @@ type udsWriter struct {
conn net.Conn
// write timeout
writeTimeout time.Duration
+ sync.RWMutex // used to lock conn / writer can replace it
}
-// New returns a pointer to a new udsWriter given a socket file path as addr.
-func newUdsWriter(addr string) (*udsWriter, error) {
+// newUDSWriter returns a pointer to a new udsWriter given a socket file path as addr.
+func newUDSWriter(addr string, writeTimeout time.Duration) (*udsWriter, error) {
udsAddr, err := net.ResolveUnixAddr("unixgram", addr)
if err != nil {
return nil, err
}
// Defer connection to first Write
- writer := &udsWriter{addr: udsAddr, conn: nil, writeTimeout: defaultUDSTimeout}
+ writer := &udsWriter{addr: udsAddr, conn: nil, writeTimeout: writeTimeout}
return writer, nil
}
-// SetWriteTimeout allows the user to set a custom write timeout
-func (w *udsWriter) SetWriteTimeout(d time.Duration) error {
- w.writeTimeout = d
- return nil
-}
-
// Write data to the UDS connection with write timeout and minimal error handling:
// create the connection if nil, and destroy it if the statsd server has disconnected
func (w *udsWriter) Write(data []byte) (int, error) {
- // Try connecting (first packet or connection lost)
- if w.conn == nil {
- conn, err := net.Dial(w.addr.Network(), w.addr.String())
- if err != nil {
- return 0, err
- }
- w.conn = conn
+ conn, err := w.ensureConnection()
+ if err != nil {
+ return 0, err
}
- w.conn.SetWriteDeadline(time.Now().Add(w.writeTimeout))
- n, e := w.conn.Write(data)
- if e != nil {
+
+ conn.SetWriteDeadline(time.Now().Add(w.writeTimeout))
+ n, e := conn.Write(data)
+
+ if err, isNetworkErr := e.(net.Error); err != nil && (!isNetworkErr || !err.Temporary()) {
// Statsd server disconnected, retry connecting at next packet
- w.conn = nil
+ w.unsetConnection()
return 0, e
}
return n, e
@@ -65,3 +55,34 @@ func (w *udsWriter) Close() error {
}
return nil
}
+
+func (w *udsWriter) ensureConnection() (net.Conn, error) {
+ // Check if we've already got a socket we can use
+ w.RLock()
+ currentConn := w.conn
+ w.RUnlock()
+
+ if currentConn != nil {
+ return currentConn, nil
+ }
+
+ // Looks like we might need to connect - try again with write locking.
+ w.Lock()
+ defer w.Unlock()
+ if w.conn != nil {
+ return w.conn, nil
+ }
+
+ newConn, err := net.Dial(w.addr.Network(), w.addr.String())
+ if err != nil {
+ return nil, err
+ }
+ w.conn = newConn
+ return newConn, nil
+}
+
+func (w *udsWriter) unsetConnection() {
+ w.Lock()
+ defer w.Unlock()
+ w.conn = nil
+}
diff --git a/statsd/uds_test.go b/statsd/uds_test.go
new file mode 100644
index 0000000..fb1c0c8
--- /dev/null
+++ b/statsd/uds_test.go
@@ -0,0 +1,84 @@
+// +build !windows
+
+package statsd
+
+import (
+ "fmt"
+ "math/rand"
+ "net"
+ "os"
+ "testing"
+ "time"
+
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
+)
+
+func init() {
+ rand.Seed(time.Now().UnixNano())
+}
+
+func TestNewUDSWriter(t *testing.T) {
+ w, err := newUDSWriter("/tmp/test.socket", 100*time.Millisecond)
+ assert.NotNil(t, w)
+ assert.NoError(t, err)
+}
+
+func TestUDSWrite(t *testing.T) {
+ socketPath := fmt.Sprintf("/tmp/dsd_%d.socket", rand.Int())
+ defer os.Remove(socketPath)
+
+ address, err := net.ResolveUnixAddr("unixgram", socketPath)
+ require.NoError(t, err)
+ conn, err := net.ListenUnixgram("unixgram", address)
+ require.NoError(t, err)
+ err = os.Chmod(socketPath, 0722)
+ require.NoError(t, err)
+
+ w, err := newUDSWriter(socketPath, 100*time.Millisecond)
+ require.Nil(t, err)
+ require.NotNil(t, w)
+
+	// test 2 Write: the first one should set up the connection
+ for i := 0; i < 2; i++ {
+ n, err := w.Write([]byte("some data"))
+ require.NoError(t, err)
+ assert.Equal(t, 9, n)
+
+ buffer := make([]byte, 100)
+ n, err = conn.Read(buffer)
+ require.NoError(t, err)
+ assert.Equal(t, "some data", string(buffer[:n]))
+ }
+}
+
+func TestUDSWriteUnsetConnection(t *testing.T) {
+ socketPath := fmt.Sprintf("/tmp/dsd_%d.socket", rand.Int())
+ defer os.Remove(socketPath)
+
+ address, err := net.ResolveUnixAddr("unixgram", socketPath)
+ require.NoError(t, err)
+ conn, err := net.ListenUnixgram("unixgram", address)
+ require.NoError(t, err)
+ err = os.Chmod(socketPath, 0722)
+ require.NoError(t, err)
+
+ w, err := newUDSWriter(socketPath, 100*time.Millisecond)
+ require.Nil(t, err)
+ require.NotNil(t, w)
+
+	// test 2 Write: the first one should set up the connection
+ for i := 0; i < 2; i++ {
+ n, err := w.Write([]byte("some data"))
+ require.NoError(t, err)
+ assert.Equal(t, 9, n)
+
+ buffer := make([]byte, 100)
+ n, err = conn.Read(buffer)
+ require.NoError(t, err)
+ assert.Equal(t, "some data", string(buffer[:n]))
+
+ // Unset connection for the next Read
+ w.unsetConnection()
+ }
+}
diff --git a/statsd/uds_windows.go b/statsd/uds_windows.go
new file mode 100644
index 0000000..077894a
--- /dev/null
+++ b/statsd/uds_windows.go
@@ -0,0 +1,14 @@
+// +build windows
+
+package statsd
+
+import (
+ "fmt"
+ "io"
+ "time"
+)
+
+// newUDSWriter is disabled on Windows as Unix sockets are not available.
+func newUDSWriter(_ string, _ time.Duration) (io.WriteCloser, error) {
+ return nil, fmt.Errorf("Unix socket is not available on Windows")
+}
diff --git a/statsd/utils.go b/statsd/utils.go
new file mode 100644
index 0000000..8c3ac84
--- /dev/null
+++ b/statsd/utils.go
@@ -0,0 +1,32 @@
+package statsd
+
+import (
+ "math/rand"
+ "sync"
+)
+
+func shouldSample(rate float64, r *rand.Rand, lock *sync.Mutex) bool {
+ if rate >= 1 {
+ return true
+ }
+ // sources created by rand.NewSource() (ie. w.random) are not thread safe.
+ // TODO: use defer once the lowest Go version we support is 1.14 (defer
+ // has an overhead before that).
+ lock.Lock()
+ if r.Float64() > rate {
+ lock.Unlock()
+ return false
+ }
+ lock.Unlock()
+ return true
+}
+
+func copySlice(src []string) []string {
+ if src == nil {
+ return nil
+ }
+
+ c := make([]string, len(src))
+ copy(c, src)
+ return c
+}
diff --git a/statsd/worker.go b/statsd/worker.go
new file mode 100644
index 0000000..5446d50
--- /dev/null
+++ b/statsd/worker.go
@@ -0,0 +1,150 @@
+package statsd
+
+import (
+ "math/rand"
+ "sync"
+ "time"
+)
+
+type worker struct {
+ pool *bufferPool
+ buffer *statsdBuffer
+ sender *sender
+ random *rand.Rand
+ randomLock sync.Mutex
+ sync.Mutex
+
+ inputMetrics chan metric
+ stop chan struct{}
+}
+
+func newWorker(pool *bufferPool, sender *sender) *worker {
+ // Each worker uses its own random source and random lock to prevent
+ // workers in separate goroutines from contending for the lock on the
+ // "math/rand" package-global random source (e.g. calls like
+ // "rand.Float64()" must acquire a shared lock to get the next
+ // pseudorandom number).
+ // Note that calling "time.Now().UnixNano()" repeatedly quickly may return
+ // very similar values. That's fine for seeding the worker-specific random
+ // source because we just need an evenly distributed stream of float values.
+ // Do not use this random source for cryptographic randomness.
+ random := rand.New(rand.NewSource(time.Now().UnixNano()))
+ return &worker{
+ pool: pool,
+ sender: sender,
+ buffer: pool.borrowBuffer(),
+ random: random,
+ stop: make(chan struct{}),
+ }
+}
+
+func (w *worker) startReceivingMetric(bufferSize int) {
+ w.inputMetrics = make(chan metric, bufferSize)
+ go w.pullMetric()
+}
+
+func (w *worker) stopReceivingMetric() {
+ w.stop <- struct{}{}
+}
+
+func (w *worker) pullMetric() {
+ for {
+ select {
+ case m := <-w.inputMetrics:
+ w.processMetric(m)
+ case <-w.stop:
+ return
+ }
+ }
+}
+
+func (w *worker) processMetric(m metric) error {
+ if !shouldSample(m.rate, w.random, &w.randomLock) {
+ return nil
+ }
+ w.Lock()
+ var err error
+ if err = w.writeMetricUnsafe(m); err == errBufferFull {
+ w.flushUnsafe()
+ err = w.writeMetricUnsafe(m)
+ }
+ w.Unlock()
+ return err
+}
+
+func (w *worker) writeAggregatedMetricUnsafe(m metric, metricSymbol []byte, precision int) error {
+ globalPos := 0
+
+ // first check how much data we can write to the buffer:
+ // +3 + len(metricSymbol) because the message will include '|<metricSymbol>|#' before the tags
+ // +1 for the potential line break at the start of the metric
+ tagsSize := len(m.stags) + 4 + len(metricSymbol)
+ for _, t := range m.globalTags {
+ tagsSize += len(t) + 1
+ }
+
+ for {
+ pos, err := w.buffer.writeAggregated(metricSymbol, m.namespace, m.globalTags, m.name, m.fvalues[globalPos:], m.stags, tagsSize, precision)
+ if err == errPartialWrite {
+ // We successfully wrote part of the histogram metrics.
+ // We flush the current buffer and finish the histogram
+ // in a new one.
+ w.flushUnsafe()
+ globalPos += pos
+ } else {
+ return err
+ }
+ }
+}
+
+func (w *worker) writeMetricUnsafe(m metric) error {
+ switch m.metricType {
+ case gauge:
+ return w.buffer.writeGauge(m.namespace, m.globalTags, m.name, m.fvalue, m.tags, m.rate)
+ case count:
+ return w.buffer.writeCount(m.namespace, m.globalTags, m.name, m.ivalue, m.tags, m.rate)
+ case histogram:
+ return w.buffer.writeHistogram(m.namespace, m.globalTags, m.name, m.fvalue, m.tags, m.rate)
+ case distribution:
+ return w.buffer.writeDistribution(m.namespace, m.globalTags, m.name, m.fvalue, m.tags, m.rate)
+ case set:
+ return w.buffer.writeSet(m.namespace, m.globalTags, m.name, m.svalue, m.tags, m.rate)
+ case timing:
+ return w.buffer.writeTiming(m.namespace, m.globalTags, m.name, m.fvalue, m.tags, m.rate)
+ case event:
+ return w.buffer.writeEvent(m.evalue, m.globalTags)
+ case serviceCheck:
+ return w.buffer.writeServiceCheck(m.scvalue, m.globalTags)
+ case histogramAggregated:
+ return w.writeAggregatedMetricUnsafe(m, histogramSymbol, -1)
+ case distributionAggregated:
+ return w.writeAggregatedMetricUnsafe(m, distributionSymbol, -1)
+ case timingAggregated:
+ return w.writeAggregatedMetricUnsafe(m, timingSymbol, 6)
+ default:
+ return nil
+ }
+}
+
+func (w *worker) flush() {
+ w.Lock()
+ w.flushUnsafe()
+ w.Unlock()
+}
+
+func (w *worker) pause() {
+ w.Lock()
+}
+
+func (w *worker) unpause() {
+ w.Unlock()
+}
+
+// flush the current buffer. Lock must be held by caller.
+// flushed buffer written to the network asynchronously.
+func (w *worker) flushUnsafe() {
+ if len(w.buffer.bytes()) > 0 {
+ w.sender.send(w.buffer)
+ w.buffer = w.pool.borrowBuffer()
+ }
+}
diff --git a/statsd/worker_test.go b/statsd/worker_test.go
new file mode 100644
index 0000000..67e7de5
--- /dev/null
+++ b/statsd/worker_test.go
@@ -0,0 +1,289 @@
+package statsd
+
+import (
+ "fmt"
+ "testing"
+
+ "github.com/stretchr/testify/assert"
+)
+
+// TestShouldSample checks that shouldSample approximates each requested
+// sample rate to within 1% over 50k draws, one parallel subtest per rate.
+func TestShouldSample(t *testing.T) {
+ rates := []float64{0.01, 0.05, 0.1, 0.25, 0.5, 0.75, 0.9, 0.99, 1.0}
+ iterations := 50_000
+
+ for _, rate := range rates {
+ rate := rate // Capture range variable.
+ t.Run(fmt.Sprintf("Rate %0.2f", rate), func(t *testing.T) {
+ t.Parallel()
+
+ // Each subtest gets its own worker so the parallel runs do not
+ // share the RNG or its lock.
+ worker := newWorker(newBufferPool(1, 1, 1), nil)
+ count := 0
+ for i := 0; i < iterations; i++ {
+ if shouldSample(rate, worker.random, &worker.randomLock) {
+ count++
+ }
+ }
+ assert.InDelta(t, rate, float64(count)/float64(iterations), 0.01)
+ })
+ }
+}
+
+// BenchmarkShouldSample measures shouldSample under parallel load; each
+// goroutine uses its own worker's RNG and lock.
+func BenchmarkShouldSample(b *testing.B) {
+ b.RunParallel(func(pb *testing.PB) {
+ worker := newWorker(newBufferPool(1, 1, 1), nil)
+ for pb.Next() {
+ shouldSample(0.1, worker.random, &worker.randomLock)
+ }
+ })
+}
+
+// initWorker builds a buffer pool, a sender whose send loop is NOT
+// started (tests only consume its queue), and a worker wired to both.
+func initWorker(bufferSize int) (*bufferPool, *sender, *worker) {
+ bp := newBufferPool(10, bufferSize, 5)
+ snd := &sender{
+ queue: make(chan *statsdBuffer, 10),
+ pool: bp,
+ }
+ return bp, snd, newWorker(bp, snd)
+}
+
+// testWorker pushes a single metric through a fresh worker and asserts
+// the exact serialized payload that reaches the sender queue.
+func testWorker(t *testing.T, m metric, expectedBuffer string) {
+ _, snd, wrk := initWorker(100)
+
+ assert.Nil(t, wrk.processMetric(m))
+
+ wrk.flush()
+ payload := <-snd.queue
+ assert.Equal(t, expectedBuffer, string(payload.buffer))
+}
+
+// TestWorkerGauge verifies the wire format produced for a gauge metric
+// (namespace prefix, |g type marker, merged global + metric tags).
+func TestWorkerGauge(t *testing.T) {
+ testWorker(
+ t,
+ metric{
+ metricType: gauge,
+ namespace: "namespace.",
+ globalTags: []string{"globalTags", "globalTags2"},
+ name: "test_gauge",
+ fvalue: 21,
+ tags: []string{"tag1", "tag2"},
+ rate: 1,
+ },
+ "namespace.test_gauge:21|g|#globalTags,globalTags2,tag1,tag2\n",
+ )
+}
+
+// TestWorkerCount verifies the wire format produced for a count metric
+// (integer value, |c type marker).
+func TestWorkerCount(t *testing.T) {
+ testWorker(
+ t,
+ metric{
+ metricType: count,
+ namespace: "namespace.",
+ globalTags: []string{"globalTags", "globalTags2"},
+ name: "test_count",
+ ivalue: 21,
+ tags: []string{"tag1", "tag2"},
+ rate: 1,
+ },
+ "namespace.test_count:21|c|#globalTags,globalTags2,tag1,tag2\n",
+ )
+}
+
+// TestWorkerHistogram verifies the wire format produced for a histogram
+// metric (|h type marker).
+func TestWorkerHistogram(t *testing.T) {
+ testWorker(
+ t,
+ metric{
+ metricType: histogram,
+ namespace: "namespace.",
+ globalTags: []string{"globalTags", "globalTags2"},
+ name: "test_histogram",
+ fvalue: 21,
+ tags: []string{"tag1", "tag2"},
+ rate: 1,
+ },
+ "namespace.test_histogram:21|h|#globalTags,globalTags2,tag1,tag2\n",
+ )
+}
+
+// TestWorkerDistribution verifies the wire format produced for a
+// distribution metric (|d type marker).
+func TestWorkerDistribution(t *testing.T) {
+ testWorker(
+ t,
+ metric{
+ metricType: distribution,
+ namespace: "namespace.",
+ globalTags: []string{"globalTags", "globalTags2"},
+ name: "test_distribution",
+ fvalue: 21,
+ tags: []string{"tag1", "tag2"},
+ rate: 1,
+ },
+ "namespace.test_distribution:21|d|#globalTags,globalTags2,tag1,tag2\n",
+ )
+}
+
+// TestWorkerSet verifies the wire format for a set metric; the string
+// value may itself contain a colon ("value:1") and must pass through
+// unescaped.
+func TestWorkerSet(t *testing.T) {
+ testWorker(
+ t,
+ metric{
+ metricType: set,
+ namespace: "namespace.",
+ globalTags: []string{"globalTags", "globalTags2"},
+ name: "test_set",
+ svalue: "value:1",
+ tags: []string{"tag1", "tag2"},
+ rate: 1,
+ },
+ "namespace.test_set:value:1|s|#globalTags,globalTags2,tag1,tag2\n",
+ )
+}
+
+// TestWorkerTiming verifies the wire format for a timing metric: |ms type
+// marker, and the float rendered with six decimal places (1.200000).
+func TestWorkerTiming(t *testing.T) {
+ testWorker(
+ t,
+ metric{
+ metricType: timing,
+ namespace: "namespace.",
+ globalTags: []string{"globalTags", "globalTags2"},
+ name: "test_timing",
+ fvalue: 1.2,
+ tags: []string{"tag1", "tag2"},
+ rate: 1,
+ },
+ "namespace.test_timing:1.200000|ms|#globalTags,globalTags2,tag1,tag2\n",
+ )
+}
+
+// TestWorkerHistogramAggregated verifies the aggregated-histogram path:
+// values come from fvalues and tags from the pre-joined stags string.
+func TestWorkerHistogramAggregated(t *testing.T) {
+ testWorker(
+ t,
+ metric{
+ metricType: histogramAggregated,
+ namespace: "namespace.",
+ globalTags: []string{"globalTags", "globalTags2"},
+ name: "test_histogram",
+ fvalues: []float64{1.2},
+ stags: "tag1,tag2",
+ rate: 1,
+ },
+ "namespace.test_histogram:1.2|h|#globalTags,globalTags2,tag1,tag2\n",
+ )
+}
+
+// TestWorkerHistogramAggregatedMultiple checks that multiple aggregated
+// histogram values pack into one colon-joined datagram when they fit,
+// and split across two buffers (partial-write path) when the buffer is
+// too small.
+func TestWorkerHistogramAggregatedMultiple(t *testing.T) {
+ _, s, w := initWorker(100)
+
+ m := metric{
+ metricType: histogramAggregated,
+ namespace: "namespace.",
+ globalTags: []string{"globalTags", "globalTags2"},
+ name: "test_histogram",
+ fvalues: []float64{1.1, 2.2, 3.3, 4.4},
+ stags: "tag1,tag2",
+ rate: 1,
+ }
+ err := w.processMetric(m)
+ assert.Nil(t, err)
+
+ w.flush()
+ data := <-s.queue
+ assert.Equal(t, "namespace.test_histogram:1.1:2.2:3.3:4.4|h|#globalTags,globalTags2,tag1,tag2\n", string(data.buffer))
+
+ // reducing buffer size so not all values fit in one packet
+ _, s, w = initWorker(70)
+
+ err = w.processMetric(m)
+ assert.Nil(t, err)
+
+ w.flush()
+ // First buffer holds the values that fit; the remainder is finished
+ // in a second buffer with the metric header repeated.
+ data = <-s.queue
+ assert.Equal(t, "namespace.test_histogram:1.1:2.2|h|#globalTags,globalTags2,tag1,tag2\n", string(data.buffer))
+ data = <-s.queue
+ assert.Equal(t, "namespace.test_histogram:3.3:4.4|h|#globalTags,globalTags2,tag1,tag2\n", string(data.buffer))
+}
+
+// TestWorkerDistributionAggregated verifies the aggregated-distribution
+// path with a single value (|d type marker, stags tag string).
+func TestWorkerDistributionAggregated(t *testing.T) {
+ testWorker(
+ t,
+ metric{
+ metricType: distributionAggregated,
+ namespace: "namespace.",
+ globalTags: []string{"globalTags", "globalTags2"},
+ name: "test_distribution",
+ fvalues: []float64{1.2},
+ stags: "tag1,tag2",
+ rate: 1,
+ },
+ "namespace.test_distribution:1.2|d|#globalTags,globalTags2,tag1,tag2\n",
+ )
+}
+
+// TestWorkerDistributionAggregatedMultiple checks that aggregated
+// distribution values pack into one datagram when the buffer allows,
+// and are split over two buffers (partial-write path) otherwise.
+func TestWorkerDistributionAggregatedMultiple(t *testing.T) {
+ _, s, w := initWorker(100)
+
+ m := metric{
+ metricType: distributionAggregated,
+ namespace: "namespace.",
+ globalTags: []string{"globalTags", "globalTags2"},
+ name: "test_distribution",
+ fvalues: []float64{1.1, 2.2, 3.3, 4.4},
+ stags: "tag1,tag2",
+ rate: 1,
+ }
+ err := w.processMetric(m)
+ assert.Nil(t, err)
+
+ w.flush()
+ data := <-s.queue
+ assert.Equal(t, "namespace.test_distribution:1.1:2.2:3.3:4.4|d|#globalTags,globalTags2,tag1,tag2\n", string(data.buffer))
+
+ // reducing buffer size so not all values fit in one packet
+ _, s, w = initWorker(72)
+
+ err = w.processMetric(m)
+ assert.Nil(t, err)
+
+ w.flush()
+ // Split point: two values per buffer, header repeated in the second.
+ data = <-s.queue
+ assert.Equal(t, "namespace.test_distribution:1.1:2.2|d|#globalTags,globalTags2,tag1,tag2\n", string(data.buffer))
+ data = <-s.queue
+ assert.Equal(t, "namespace.test_distribution:3.3:4.4|d|#globalTags,globalTags2,tag1,tag2\n", string(data.buffer))
+}
+
+// TestWorkerMultipleDifferentDistributionAggregated checks the partial-
+// write path across two different aggregated metrics sharing one buffer:
+// the first metric fits entirely, the second is split mid-values into a
+// second buffer.
+func TestWorkerMultipleDifferentDistributionAggregated(t *testing.T) {
+ // first metric will fit but not the second one
+ _, s, w := initWorker(160)
+
+ m := metric{
+ metricType: distributionAggregated,
+ namespace: "namespace.",
+ globalTags: []string{"globalTags", "globalTags2"},
+ name: "test_distribution",
+ fvalues: []float64{1.1, 2.2, 3.3, 4.4},
+ stags: "tag1,tag2",
+ rate: 1,
+ }
+ err := w.processMetric(m)
+ assert.Nil(t, err)
+ m = metric{
+ metricType: distributionAggregated,
+ namespace: "namespace.",
+ globalTags: []string{"globalTags", "globalTags2"},
+ name: "test_distribution_2",
+ fvalues: []float64{1.1, 2.2, 3.3, 4.4},
+ stags: "tag1,tag2",
+ rate: 1,
+ }
+ err = w.processMetric(m)
+ assert.Nil(t, err)
+
+ w.flush()
+ // Buffer one: all of metric one plus the first three values of metric
+ // two; buffer two: metric two's last value with its header repeated.
+ data := <-s.queue
+ assert.Equal(t, "namespace.test_distribution:1.1:2.2:3.3:4.4|d|#globalTags,globalTags2,tag1,tag2\nnamespace.test_distribution_2:1.1:2.2:3.3|d|#globalTags,globalTags2,tag1,tag2\n", string(data.buffer))
+ data = <-s.queue
+ assert.Equal(t, "namespace.test_distribution_2:4.4|d|#globalTags,globalTags2,tag1,tag2\n", string(data.buffer))
+}
Debdiff
[The following lists of changes regard files as different if they have different names, permissions or owners.]
Files in second set of .debs but not in first
-rw-r--r-- root/root /usr/share/gocode/src/github.com/DataDog/datadog-go/example/simple_example.go -rw-r--r-- root/root /usr/share/gocode/src/github.com/DataDog/datadog-go/go.mod -rw-r--r-- root/root /usr/share/gocode/src/github.com/DataDog/datadog-go/go.sum -rw-r--r-- root/root /usr/share/gocode/src/github.com/DataDog/datadog-go/statsd/aggregator.go -rw-r--r-- root/root /usr/share/gocode/src/github.com/DataDog/datadog-go/statsd/aggregator_test.go -rw-r--r-- root/root /usr/share/gocode/src/github.com/DataDog/datadog-go/statsd/benchmark_report_metric_noop_test.go -rw-r--r-- root/root /usr/share/gocode/src/github.com/DataDog/datadog-go/statsd/benchmark_report_metric_test.go -rw-r--r-- root/root /usr/share/gocode/src/github.com/DataDog/datadog-go/statsd/buffer.go -rw-r--r-- root/root /usr/share/gocode/src/github.com/DataDog/datadog-go/statsd/buffer_pool.go -rw-r--r-- root/root /usr/share/gocode/src/github.com/DataDog/datadog-go/statsd/buffer_pool_test.go -rw-r--r-- root/root /usr/share/gocode/src/github.com/DataDog/datadog-go/statsd/buffer_test.go -rw-r--r-- root/root /usr/share/gocode/src/github.com/DataDog/datadog-go/statsd/buffered_metric_context.go -rw-r--r-- root/root /usr/share/gocode/src/github.com/DataDog/datadog-go/statsd/container.go -rw-r--r-- root/root /usr/share/gocode/src/github.com/DataDog/datadog-go/statsd/container_test.go -rw-r--r-- root/root /usr/share/gocode/src/github.com/DataDog/datadog-go/statsd/end_to_end_udp_test.go -rw-r--r-- root/root /usr/share/gocode/src/github.com/DataDog/datadog-go/statsd/end_to_end_uds_test.go -rw-r--r-- root/root /usr/share/gocode/src/github.com/DataDog/datadog-go/statsd/event.go -rw-r--r-- root/root /usr/share/gocode/src/github.com/DataDog/datadog-go/statsd/event_test.go -rw-r--r-- root/root /usr/share/gocode/src/github.com/DataDog/datadog-go/statsd/fnv1a.go -rw-r--r-- root/root /usr/share/gocode/src/github.com/DataDog/datadog-go/statsd/format.go -rw-r--r-- root/root 
/usr/share/gocode/src/github.com/DataDog/datadog-go/statsd/format_benchmark_test.go -rw-r--r-- root/root /usr/share/gocode/src/github.com/DataDog/datadog-go/statsd/format_test.go -rw-r--r-- root/root /usr/share/gocode/src/github.com/DataDog/datadog-go/statsd/metrics.go -rw-r--r-- root/root /usr/share/gocode/src/github.com/DataDog/datadog-go/statsd/metrics_test.go -rw-r--r-- root/root /usr/share/gocode/src/github.com/DataDog/datadog-go/statsd/mocks/statsd.go -rw-r--r-- root/root /usr/share/gocode/src/github.com/DataDog/datadog-go/statsd/noop.go -rw-r--r-- root/root /usr/share/gocode/src/github.com/DataDog/datadog-go/statsd/noop_test.go -rw-r--r-- root/root /usr/share/gocode/src/github.com/DataDog/datadog-go/statsd/options.go -rw-r--r-- root/root /usr/share/gocode/src/github.com/DataDog/datadog-go/statsd/options_test.go -rw-r--r-- root/root /usr/share/gocode/src/github.com/DataDog/datadog-go/statsd/pipe.go -rw-r--r-- root/root /usr/share/gocode/src/github.com/DataDog/datadog-go/statsd/pipe_windows.go -rw-r--r-- root/root /usr/share/gocode/src/github.com/DataDog/datadog-go/statsd/pipe_windows_test.go -rw-r--r-- root/root /usr/share/gocode/src/github.com/DataDog/datadog-go/statsd/sender.go -rw-r--r-- root/root /usr/share/gocode/src/github.com/DataDog/datadog-go/statsd/sender_test.go -rw-r--r-- root/root /usr/share/gocode/src/github.com/DataDog/datadog-go/statsd/service_check.go -rw-r--r-- root/root /usr/share/gocode/src/github.com/DataDog/datadog-go/statsd/service_check_test.go -rw-r--r-- root/root /usr/share/gocode/src/github.com/DataDog/datadog-go/statsd/telemetry.go -rw-r--r-- root/root /usr/share/gocode/src/github.com/DataDog/datadog-go/statsd/telemetry_test.go -rw-r--r-- root/root /usr/share/gocode/src/github.com/DataDog/datadog-go/statsd/test_helpers_test.go -rw-r--r-- root/root /usr/share/gocode/src/github.com/DataDog/datadog-go/statsd/uds_test.go -rw-r--r-- root/root /usr/share/gocode/src/github.com/DataDog/datadog-go/statsd/uds_windows.go -rw-r--r-- root/root 
/usr/share/gocode/src/github.com/DataDog/datadog-go/statsd/utils.go -rw-r--r-- root/root /usr/share/gocode/src/github.com/DataDog/datadog-go/statsd/worker.go -rw-r--r-- root/root /usr/share/gocode/src/github.com/DataDog/datadog-go/statsd/worker_test.go -rwxr-xr-x root/root /usr/bin/example
No differences were encountered in the control files