diff --git a/metrics/README.md b/metrics/README.md index 9aa64aa..b9e6115 100644 --- a/metrics/README.md +++ b/metrics/README.md @@ -1,23 +1,22 @@ # package metrics `package metrics` provides a set of uniform interfaces for service instrumentation. -It has **[counters][]**, **[gauges][]**, and **[histograms][]**, - and provides adapters to popular metrics packages, like **[expvar][]**, **[statsd][]**, and **[Prometheus][]**. - -[counters]: http://prometheus.io/docs/concepts/metric_types/#counter -[gauges]: http://prometheus.io/docs/concepts/metric_types/#gauge -[histograms]: http://prometheus.io/docs/concepts/metric_types/#histogram -[expvar]: https://golang.org/pkg/expvar -[statsd]: https://github.com/etsy/statsd -[Prometheus]: http://prometheus.io +It has + [counters](http://prometheus.io/docs/concepts/metric_types/#counter), + [gauges](http://prometheus.io/docs/concepts/metric_types/#gauge), and + [histograms](http://prometheus.io/docs/concepts/metric_types/#histogram), +and provides adapters to popular metrics packages, like + [expvar](https://golang.org/pkg/expvar), + [StatsD](https://github.com/etsy/statsd), and + [Prometheus](https://prometheus.io). ## Rationale -Code instrumentation is absolutely essential to achieve [observability][] into a distributed system. +Code instrumentation is absolutely essential to achieve + [observability](https://speakerdeck.com/mattheath/observability-in-micro-service-architectures) + into a distributed system. Metrics and instrumentation tools have coalesced around a few well-defined idioms. -`package metrics` provides a common, minimal interface those idioms for service authors. - -[observability]: https://speakerdeck.com/mattheath/observability-in-micro-service-architectures +`package metrics` provides a common, minimal interface those idioms for service authors. ## Usage @@ -32,8 +31,8 @@ } ``` -A histogram for request duration, exported via a Prometheus summary with -dynamically-computed quantiles. +A histogram for request duration, + exported via a Prometheus summary with dynamically-computed quantiles. ```go import ( @@ -43,20 +42,20 @@ "github.com/go-kit/kit/metrics/prometheus" ) -var requestDuration = prometheus.NewSummary(stdprometheus.SummaryOpts{ +var dur = prometheus.NewSummary(stdprometheus.SummaryOpts{ Namespace: "myservice", Subsystem: "api", - Name: "request_duration_nanoseconds_count", - Help: "Total time spent serving requests.", + Name: "request_duration_seconds", + Help: "Total time spent serving requests.", }, []string{}) func handleRequest() { - defer func(begin time.Time) { requestDuration.Observe(time.Since(begin)) }(time.Now()) + defer func(begin time.Time) { dur.Observe(time.Since(begin).Seconds()) }(time.Now()) // handle request } ``` -A gauge for the number of goroutines currently running, exported via statsd. +A gauge for the number of goroutines currently running, exported via StatsD. 
```go import ( @@ -66,17 +65,18 @@ "time" "github.com/go-kit/kit/metrics/statsd" + "github.com/go-kit/kit/log" ) func main() { - statsdWriter, err := net.Dial("udp", "127.0.0.1:8126") - if err != nil { - panic(err) - } + statsd := statsd.New("foo_svc.", log.NewNopLogger()) - reportInterval := 5 * time.Second - goroutines := statsd.NewGauge(statsdWriter, "total_goroutines", reportInterval) - for range time.Tick(reportInterval) { + report := time.NewTicker(5*time.Second) + defer report.Stop() + go statsd.SendLoop(report.C, "tcp", "statsd.internal:8125") + + goroutines := statsd.NewGauge("goroutine_count") + for range time.Tick(time.Second) { goroutines.Set(float64(runtime.NumGoroutine())) } } diff --git a/metrics/circonus/circonus.go b/metrics/circonus/circonus.go new file mode 100644 index 0000000..01cdb79 --- /dev/null +++ b/metrics/circonus/circonus.go @@ -0,0 +1,85 @@ +// Package circonus provides a Circonus backend for metrics. +package circonus + +import ( + "github.com/circonus-labs/circonus-gometrics" + + "github.com/go-kit/kit/metrics3" +) + +// Circonus wraps a CirconusMetrics object and provides constructors for each of +// the Go kit metrics. The CirconusMetrics object manages aggregation of +// observations and emission to the Circonus server. +type Circonus struct { + m *circonusgometrics.CirconusMetrics +} + +// New creates a new Circonus object wrapping the passed CirconusMetrics, which +// the caller should create and set in motion. The Circonus object can be used +// to construct individual Go kit metrics. +func New(m *circonusgometrics.CirconusMetrics) *Circonus { + return &Circonus{ + m: m, + } +} + +// NewCounter returns a counter metric with the given name. +func (c *Circonus) NewCounter(name string) *Counter { + return &Counter{ + name: name, + m: c.m, + } +} + +// NewGauge returns a gauge metric with the given name. +func (c *Circonus) NewGauge(name string) *Gauge { + return &Gauge{ + name: name, + m: c.m, + } +} + +// NewHistogram returns a histogram metric with the given name. +func (c *Circonus) NewHistogram(name string) *Histogram { + return &Histogram{ + h: c.m.NewHistogram(name), + } +} + +// Counter is a Circonus implementation of a counter metric. +type Counter struct { + name string + m *circonusgometrics.CirconusMetrics +} + +// With implements Counter, but is a no-op, because Circonus metrics have no +// concept of per-observation label values. +func (c *Counter) With(labelValues ...string) metrics.Counter { return c } + +// Add implements Counter. Delta is converted to uint64; precision will be lost. +func (c *Counter) Add(delta float64) { c.m.Add(c.name, uint64(delta)) } + +// Gauge is a Circonus implementation of a gauge metric. +type Gauge struct { + name string + m *circonusgometrics.CirconusMetrics +} + +// With implements Gauge, but is a no-op, because Circonus metrics have no +// concept of per-observation label values. +func (g *Gauge) With(labelValues ...string) metrics.Gauge { return g } + +// Set implements Gauge. +func (g *Gauge) Set(value float64) { g.m.SetGauge(g.name, value) } + +// Histogram is a Circonus implementation of a histogram metric. +type Histogram struct { + h *circonusgometrics.Histogram +} + +// With implements Histogram, but is a no-op, because Circonus metrics have no +// concept of per-observation label values. +func (h *Histogram) With(labelValues ...string) metrics.Histogram { return h } + +// Observe implements Histogram. No precision is lost. 
+func (h *Histogram) Observe(value float64) { h.h.RecordValue(value) } diff --git a/metrics/circonus/circonus_test.go b/metrics/circonus/circonus_test.go new file mode 100644 index 0000000..a563783 --- /dev/null +++ b/metrics/circonus/circonus_test.go @@ -0,0 +1,120 @@ +package circonus + +import ( + "encoding/json" + "net/http" + "net/http/httptest" + "regexp" + "strconv" + "testing" + + "github.com/circonus-labs/circonus-gometrics" + "github.com/circonus-labs/circonus-gometrics/checkmgr" + + "github.com/go-kit/kit/metrics3/generic" + "github.com/go-kit/kit/metrics3/teststat" +) + +func TestCounter(t *testing.T) { + // The only way to extract values from Circonus is to pose as a Circonus + // server and receive real HTTP writes. + const name = "abc" + var val int64 + s := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + var res map[string]struct { + Value int64 `json:"_value"` // reverse-engineered :\ + } + json.NewDecoder(r.Body).Decode(&res) + val = res[name].Value + })) + defer s.Close() + + // Set up a Circonus object, submitting to our HTTP server. + m := newCirconusMetrics(s.URL) + counter := New(m).NewCounter(name).With("label values", "not supported") + value := func() float64 { m.Flush(); return float64(val) } + + // Engage. + if err := teststat.TestCounter(counter, value); err != nil { + t.Fatal(err) + } +} + +func TestGauge(t *testing.T) { + const name = "def" + var val float64 + s := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + var res map[string]struct { + Value string `json:"_value"` + } + json.NewDecoder(r.Body).Decode(&res) + val, _ = strconv.ParseFloat(res[name].Value, 64) + })) + defer s.Close() + + m := newCirconusMetrics(s.URL) + gauge := New(m).NewGauge(name).With("label values", "not supported") + value := func() float64 { m.Flush(); return val } + + if err := teststat.TestGauge(gauge, value); err != nil { + t.Fatal(err) + } +} + +func TestHistogram(t *testing.T) { + const name = "ghi" + + // Circonus just emits bucketed counts. We'll dump them into a generic + // histogram (losing some precision) and take statistics from there. Note + // this does assume that the generic histogram computes statistics properly, + // but we have another test for that :) + re := regexp.MustCompile(`^H\[([0-9\.e\+]+)\]=([0-9]+)$`) // H[1.2e+03]=456 + + var p50, p90, p95, p99 float64 + s := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + var res map[string]struct { + Values []string `json:"_value"` // reverse-engineered :\ + } + json.NewDecoder(r.Body).Decode(&res) + + h := generic.NewHistogram("dummy", len(res[name].Values)) // match tbe bucket counts + for _, v := range res[name].Values { + match := re.FindStringSubmatch(v) + f, _ := strconv.ParseFloat(match[1], 64) + n, _ := strconv.ParseInt(match[2], 10, 64) + for i := int64(0); i < n; i++ { + h.Observe(f) + } + } + + p50 = h.Quantile(0.50) + p90 = h.Quantile(0.90) + p95 = h.Quantile(0.95) + p99 = h.Quantile(0.99) + })) + defer s.Close() + + m := newCirconusMetrics(s.URL) + histogram := New(m).NewHistogram(name).With("label values", "not supported") + quantiles := func() (float64, float64, float64, float64) { m.Flush(); return p50, p90, p95, p99 } + + // Circonus metrics, because they do their own bucketing, are less precise + // than other systems. So, we bump the tolerance to 5 percent. 
+ if err := teststat.TestHistogram(histogram, quantiles, 0.05); err != nil { + t.Fatal(err) + } +} + +func newCirconusMetrics(url string) *circonusgometrics.CirconusMetrics { + m, err := circonusgometrics.NewCirconusMetrics(&circonusgometrics.Config{ + CheckManager: checkmgr.Config{ + Check: checkmgr.CheckConfig{ + SubmissionURL: url, + }, + }, + }) + if err != nil { + panic(err) + } + return m +} diff --git a/metrics/discard/discard.go b/metrics/discard/discard.go index e99f766..ed6ff1c 100644 --- a/metrics/discard/discard.go +++ b/metrics/discard/discard.go @@ -1,43 +1,37 @@ -// Package discard implements a backend for package metrics that succeeds -// without doing anything. +// Package discard provides a no-op metrics backend. package discard -import "github.com/go-kit/kit/metrics" +import "github.com/go-kit/kit/metrics3" -type counter struct { - name string -} +type counter struct{} -// NewCounter returns a Counter that does nothing. -func NewCounter(name string) metrics.Counter { return &counter{name} } +// NewCounter returns a new no-op counter. +func NewCounter() metrics.Counter { return counter{} } -func (c *counter) Name() string { return c.name } -func (c *counter) With(metrics.Field) metrics.Counter { return c } -func (c *counter) Add(delta uint64) {} +// With implements Counter. +func (c counter) With(labelValues ...string) metrics.Counter { return c } -type gauge struct { - name string -} +// Add implements Counter. +func (c counter) Add(delta float64) {} -// NewGauge returns a Gauge that does nothing. -func NewGauge(name string) metrics.Gauge { return &gauge{name} } +type gauge struct{} -func (g *gauge) Name() string { return g.name } -func (g *gauge) With(metrics.Field) metrics.Gauge { return g } -func (g *gauge) Set(value float64) {} -func (g *gauge) Add(delta float64) {} -func (g *gauge) Get() float64 { return 0 } +// NewGauge returns a new no-op gauge. +func NewGauge() metrics.Gauge { return gauge{} } -type histogram struct { - name string -} +// With implements Gauge. +func (g gauge) With(labelValues ...string) metrics.Gauge { return g } -// NewHistogram returns a Histogram that does nothing. -func NewHistogram(name string) metrics.Histogram { return &histogram{name} } +// Set implements Gauge. +func (g gauge) Set(value float64) {} -func (h *histogram) Name() string { return h.name } -func (h *histogram) With(metrics.Field) metrics.Histogram { return h } -func (h *histogram) Observe(value int64) {} -func (h *histogram) Distribution() ([]metrics.Bucket, []metrics.Quantile) { - return []metrics.Bucket{}, []metrics.Quantile{} -} +type histogram struct{} + +// NewHistogram returns a new no-op histogram. +func NewHistogram() metrics.Histogram { return histogram{} } + +// With implements Histogram. +func (h histogram) With(labelValues ...string) metrics.Histogram { return h } + +// Observe implements histogram. +func (h histogram) Observe(value float64) {} diff --git a/metrics/doc.go b/metrics/doc.go index 760dfba..fa30337 100644 --- a/metrics/doc.go +++ b/metrics/doc.go @@ -1,4 +1,59 @@ // Package metrics provides a framework for application instrumentation. All // metrics are safe for concurrent use. Considerable design influence has been // taken from https://github.com/codahale/metrics and https://prometheus.io. +// +// This package contains the common interfaces. Your code should take these +// interfaces as parameters. Implementations are provided for different +// instrumentation systems in the various subdirectories. 
+// +// Usage +// +// Metrics are dependencies and should be passed to the components that need +// them in the same way you'd construct and pass a database handle, or reference +// to another component. So, create metrics in your func main, using whichever +// concrete implementation is appropriate for your organization. +// +// latency := prometheus.NewSummaryFrom(stdprometheus.SummaryOpts{ +// Namespace: "myteam", +// Subsystem: "foosvc", +// Name: "request_latency_seconds", +// Help: "Incoming request latency in seconds." +// }, []string{"method", "status_code"}) +// +// Write your components to take the metrics they will use as parameters to +// their constructors. Use the interface types, not the concrete types. That is, +// +// // NewAPI takes metrics.Histogram, not *prometheus.Summary +// func NewAPI(s Store, logger log.Logger, latency metrics.Histogram) *API { +// // ... +// } +// +// func (a *API) ServeFoo(w http.ResponseWriter, r *http.Request) { +// begin := time.Now() +// // ... +// a.latency.Observe(time.Since(begin).Seconds()) +// } +// +// Finally, pass the metrics as dependencies when building your object graph. +// This should happen in func main, not in the global scope. +// +// api := NewAPI(store, logger, latency) +// http.ListenAndServe("/", api) +// +// Implementation details +// +// Each telemetry system has different semantics for label values, push vs. +// pull, support for histograms, etc. These properties influence the design of +// their respective packages. This table attempts to summarize the key points of +// distinction. +// +// SYSTEM DIM COUNTERS GAUGES HISTOGRAMS +// dogstatsd n batch, push-aggregate batch, push-aggregate native, batch, push-each +// statsd 1 batch, push-aggregate batch, push-aggregate native, batch, push-each +// graphite 1 batch, push-aggregate batch, push-aggregate synthetic, batch, push-aggregate +// expvar 1 atomic atomic synthetic, batch, in-place expose +// influx n custom custom custom +// prometheus n native native native +// circonus 1 native native native +// package metrics diff --git a/metrics/dogstatsd/dogstatsd.go b/metrics/dogstatsd/dogstatsd.go index b5c7d6d..3258062 100644 --- a/metrics/dogstatsd/dogstatsd.go +++ b/metrics/dogstatsd/dogstatsd.go @@ -1,248 +1,306 @@ -// Package dogstatsd implements a DogStatsD backend for package metrics. +// Package dogstatsd provides a DogStatsD backend for package metrics. It's very +// similar to StatsD, but supports arbitrary tags per-metric, which map to Go +// kit's label values. So, while label values are no-ops in StatsD, they are +// supported here. For more details, see the documentation at +// http://docs.datadoghq.com/guides/dogstatsd/. // -// This implementation supports Datadog tags that provide additional metric -// filtering capabilities. See the DogStatsD documentation for protocol -// specifics: -// http://docs.datadoghq.com/guides/dogstatsd/ -// +// This package batches observations and emits them on some schedule to the +// remote server. This is useful even if you connect to your DogStatsD server +// over UDP. Emitting one network packet per observation can quickly overwhelm +// even the fastest internal network. 
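The batching behavior described in the new package comment above pairs with the `WriteLoop`/`SendLoop` helpers added further down in this file. A minimal usage sketch, assuming a reachable DogStatsD server and borrowing the `metrics3` import layout used elsewhere in this diff; the prefix, flush interval, and address are illustrative assumptions, not part of the patch:

```go
package main

import (
	"runtime"
	"time"

	"github.com/go-kit/kit/log"
	"github.com/go-kit/kit/metrics3/dogstatsd"
)

func main() {
	// The prefix is applied to every metric created from this object.
	d := dogstatsd.New("foo_svc.", log.NewNopLogger())

	// Flush the buffered observations to the server on a fixed schedule.
	report := time.NewTicker(5 * time.Second)
	defer report.Stop()
	go d.SendLoop(report.C, "udp", "dogstatsd.internal:8125") // address is an assumption

	goroutines := d.NewGauge("goroutine_count")
	for range time.Tick(time.Second) {
		goroutines.Set(float64(runtime.NumGoroutine()))
	}
}
```

As in the updated StatsD example in the README, `SendLoop` wraps `WriteLoop` with a managed connection, so the caller only schedules flushes.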
package dogstatsd import ( - "bytes" "fmt" "io" - "log" - "math" + "strings" "time" - "sync/atomic" - - "github.com/go-kit/kit/metrics" + "github.com/go-kit/kit/log" + "github.com/go-kit/kit/metrics3" + "github.com/go-kit/kit/metrics3/internal/lv" + "github.com/go-kit/kit/metrics3/internal/ratemap" + "github.com/go-kit/kit/util/conn" ) -// dogstatsd metrics were based on the statsd package in go-kit - -const maxBufferSize = 1400 // bytes - -type counter struct { - key string - c chan string - tags []metrics.Field -} - -// NewCounter returns a Counter that emits observations in the DogStatsD protocol -// to the passed writer. Observations are buffered for the report interval or -// until the buffer exceeds a max packet size, whichever comes first. +// Dogstatsd receives metrics observations and forwards them to a DogStatsD +// server. Create a Dogstatsd object, use it to create metrics, and pass those +// metrics as dependencies to the components that will use them. // -// TODO: support for sampling. -func NewCounter(w io.Writer, key string, reportInterval time.Duration, globalTags []metrics.Field) metrics.Counter { - return NewCounterTick(w, key, time.Tick(reportInterval), globalTags) -} - -// NewCounterTick is the same as NewCounter, but allows the user to pass in a -// ticker channel instead of invoking time.Tick. -func NewCounterTick(w io.Writer, key string, reportTicker <-chan time.Time, tags []metrics.Field) metrics.Counter { - c := &counter{ - key: key, - c: make(chan string), - tags: tags, - } - go fwd(w, key, reportTicker, c.c) - return c -} - -func (c *counter) Name() string { return c.key } - -func (c *counter) With(f metrics.Field) metrics.Counter { - return &counter{ - key: c.key, - c: c.c, - tags: append(c.tags, f), - } -} - -func (c *counter) Add(delta uint64) { c.c <- applyTags(fmt.Sprintf("%d|c", delta), c.tags) } - -type gauge struct { - key string - lastValue uint64 // math.Float64frombits - g chan string - tags []metrics.Field -} - -// NewGauge returns a Gauge that emits values in the DogStatsD protocol to the -// passed writer. Values are buffered for the report interval or until the -// buffer exceeds a max packet size, whichever comes first. +// All metrics are buffered until WriteTo is called. Counters and gauges are +// aggregated into a single observation per timeseries per write. Timings and +// histograms are buffered but not aggregated. // -// TODO: support for sampling. -func NewGauge(w io.Writer, key string, reportInterval time.Duration, tags []metrics.Field) metrics.Gauge { - return NewGaugeTick(w, key, time.Tick(reportInterval), tags) -} - -// NewGaugeTick is the same as NewGauge, but allows the user to pass in a ticker -// channel instead of invoking time.Tick. 
-func NewGaugeTick(w io.Writer, key string, reportTicker <-chan time.Time, tags []metrics.Field) metrics.Gauge { - g := &gauge{ - key: key, - g: make(chan string), - tags: tags, - } - go fwd(w, key, reportTicker, g.g) - return g -} - -func (g *gauge) Name() string { return g.key } - -func (g *gauge) With(f metrics.Field) metrics.Gauge { - return &gauge{ - key: g.key, - lastValue: g.lastValue, - g: g.g, - tags: append(g.tags, f), - } -} - -func (g *gauge) Add(delta float64) { - // https://github.com/etsy/statsd/blob/master/docs/metric_types.md#gauges - sign := "+" - if delta < 0 { - sign, delta = "-", -delta - } - g.g <- applyTags(fmt.Sprintf("%s%f|g", sign, delta), g.tags) -} - -func (g *gauge) Set(value float64) { - atomic.StoreUint64(&g.lastValue, math.Float64bits(value)) - g.g <- applyTags(fmt.Sprintf("%f|g", value), g.tags) -} - -func (g *gauge) Get() float64 { - return math.Float64frombits(atomic.LoadUint64(&g.lastValue)) -} - -// NewCallbackGauge emits values in the DogStatsD protocol to the passed writer. -// It collects values every scrape interval from the callback. Values are -// buffered for the report interval or until the buffer exceeds a max packet -// size, whichever comes first. The report and scrape intervals may be the -// same. The callback determines the value, and fields are ignored, so -// NewCallbackGauge returns nothing. -func NewCallbackGauge(w io.Writer, key string, reportInterval, scrapeInterval time.Duration, callback func() float64) { - NewCallbackGaugeTick(w, key, time.Tick(reportInterval), time.Tick(scrapeInterval), callback) -} - -// NewCallbackGaugeTick is the same as NewCallbackGauge, but allows the user to -// pass in ticker channels instead of durations to control report and scrape -// intervals. -func NewCallbackGaugeTick(w io.Writer, key string, reportTicker, scrapeTicker <-chan time.Time, callback func() float64) { - go fwd(w, key, reportTicker, emitEvery(scrapeTicker, callback)) -} - -func emitEvery(emitTicker <-chan time.Time, callback func() float64) <-chan string { - c := make(chan string) - go func() { - for range emitTicker { - c <- fmt.Sprintf("%f|g", callback()) - } - }() - return c -} - -type histogram struct { - key string - h chan string - tags []metrics.Field -} - -// NewHistogram returns a Histogram that emits observations in the DogStatsD -// protocol to the passed writer. Observations are buffered for the reporting -// interval or until the buffer exceeds a max packet size, whichever comes -// first. -// -// NewHistogram is mapped to a statsd Timing, so observations should represent -// milliseconds. If you observe in units of nanoseconds, you can make the -// translation with a ScaledHistogram: -// -// NewScaledHistogram(dogstatsdHistogram, time.Millisecond) -// -// You can also enforce the constraint in a typesafe way with a millisecond -// TimeHistogram: -// -// NewTimeHistogram(dogstatsdHistogram, time.Millisecond) -// -// TODO: support for sampling. -func NewHistogram(w io.Writer, key string, reportInterval time.Duration, tags []metrics.Field) metrics.Histogram { - return NewHistogramTick(w, key, time.Tick(reportInterval), tags) -} - -// NewHistogramTick is the same as NewHistogram, but allows the user to pass a -// ticker channel instead of invoking time.Tick. 
-func NewHistogramTick(w io.Writer, key string, reportTicker <-chan time.Time, tags []metrics.Field) metrics.Histogram { - h := &histogram{ - key: key, - h: make(chan string), - tags: tags, - } - go fwd(w, key, reportTicker, h.h) - return h -} - -func (h *histogram) Name() string { return h.key } - -func (h *histogram) With(f metrics.Field) metrics.Histogram { - return &histogram{ - key: h.key, - h: h.h, - tags: append(h.tags, f), - } -} - -func (h *histogram) Observe(value int64) { - h.h <- applyTags(fmt.Sprintf("%d|ms", value), h.tags) -} - -func (h *histogram) Distribution() ([]metrics.Bucket, []metrics.Quantile) { - // TODO(pb): no way to do this without introducing e.g. codahale/hdrhistogram - return []metrics.Bucket{}, []metrics.Quantile{} -} - -func fwd(w io.Writer, key string, reportTicker <-chan time.Time, c <-chan string) { - buf := &bytes.Buffer{} - for { - select { - case s := <-c: - fmt.Fprintf(buf, "%s:%s\n", key, s) - if buf.Len() > maxBufferSize { - flush(w, buf) +// To regularly report metrics to an io.Writer, use the WriteLoop helper method. +// To send to a DogStatsD server, use the SendLoop helper method. +type Dogstatsd struct { + prefix string + rates *ratemap.RateMap + counters *lv.Space + gauges *lv.Space + timings *lv.Space + histograms *lv.Space + logger log.Logger +} + +// New returns a Dogstatsd object that may be used to create metrics. Prefix is +// applied to all created metrics. Callers must ensure that regular calls to +// WriteTo are performed, either manually or with one of the helper methods. +func New(prefix string, logger log.Logger) *Dogstatsd { + return &Dogstatsd{ + prefix: prefix, + rates: ratemap.New(), + counters: lv.NewSpace(), + gauges: lv.NewSpace(), + timings: lv.NewSpace(), + histograms: lv.NewSpace(), + logger: logger, + } +} + +// NewCounter returns a counter, sending observations to this Dogstatsd object. +func (d *Dogstatsd) NewCounter(name string, sampleRate float64) *Counter { + d.rates.Set(d.prefix+name, sampleRate) + return &Counter{ + name: d.prefix + name, + obs: d.counters.Observe, + } +} + +// NewGauge returns a gauge, sending observations to this Dogstatsd object. +func (d *Dogstatsd) NewGauge(name string) *Gauge { + return &Gauge{ + name: d.prefix + name, + obs: d.gauges.Observe, + } +} + +// NewTiming returns a histogram whose observations are interpreted as +// millisecond durations, and are forwarded to this Dogstatsd object. +func (d *Dogstatsd) NewTiming(name string, sampleRate float64) *Timing { + d.rates.Set(d.prefix+name, sampleRate) + return &Timing{ + name: d.prefix + name, + obs: d.timings.Observe, + } +} + +// NewHistogram returns a histogram whose observations are of an unspecified +// unit, and are forwarded to this Dogstatsd object. +func (d *Dogstatsd) NewHistogram(name string, sampleRate float64) *Histogram { + d.rates.Set(d.prefix+name, sampleRate) + return &Histogram{ + name: d.prefix + name, + obs: d.histograms.Observe, + } +} + +// WriteLoop is a helper method that invokes WriteTo to the passed writer every +// time the passed channel fires. This method blocks until the channel is +// closed, so clients probably want to run it in its own goroutine. For typical +// usage, create a time.Ticker and pass its C channel to this method. 
+func (d *Dogstatsd) WriteLoop(c <-chan time.Time, w io.Writer) { + for range c { + if _, err := d.WriteTo(w); err != nil { + d.logger.Log("during", "WriteTo", "err", err) + } + } +} + +// SendLoop is a helper method that wraps WriteLoop, passing a managed +// connection to the network and address. Like WriteLoop, this method blocks +// until the channel is closed, so clients probably want to start it in its own +// goroutine. For typical usage, create a time.Ticker and pass its C channel to +// this method. +func (d *Dogstatsd) SendLoop(c <-chan time.Time, network, address string) { + d.WriteLoop(c, conn.NewDefaultManager(network, address, d.logger)) +} + +// WriteTo flushes the buffered content of the metrics to the writer, in +// DogStatsD format. WriteTo abides best-effort semantics, so observations are +// lost if there is a problem with the write. Clients should be sure to call +// WriteTo regularly, ideally through the WriteLoop or SendLoop helper methods. +func (d *Dogstatsd) WriteTo(w io.Writer) (count int64, err error) { + var n int + + d.counters.Reset().Walk(func(name string, lvs lv.LabelValues, values []float64) bool { + n, err = fmt.Fprintf(w, "%s:%f|c%s%s\n", name, sum(values), sampling(d.rates.Get(name)), tagValues(lvs)) + if err != nil { + return false + } + count += int64(n) + return true + }) + if err != nil { + return count, err + } + + d.gauges.Reset().Walk(func(name string, lvs lv.LabelValues, values []float64) bool { + n, err = fmt.Fprintf(w, "%s:%f|g%s\n", name, last(values), tagValues(lvs)) + if err != nil { + return false + } + count += int64(n) + return true + }) + if err != nil { + return count, err + } + + d.timings.Reset().Walk(func(name string, lvs lv.LabelValues, values []float64) bool { + sampleRate := d.rates.Get(name) + for _, value := range values { + n, err = fmt.Fprintf(w, "%s:%f|ms%s%s\n", name, value, sampling(sampleRate), tagValues(lvs)) + if err != nil { + return false } - - case <-reportTicker: - flush(w, buf) - } - } -} - -func flush(w io.Writer, buf *bytes.Buffer) { - if buf.Len() <= 0 { - return - } - if _, err := w.Write(buf.Bytes()); err != nil { - log.Printf("error: could not write to dogstatsd: %v", err) - } - buf.Reset() -} - -func applyTags(value string, tags []metrics.Field) string { - if len(tags) > 0 { - var tagsString string - for _, t := range tags { - switch tagsString { - case "": - tagsString = t.Key + ":" + t.Value - default: - tagsString = tagsString + "," + t.Key + ":" + t.Value + count += int64(n) + } + return true + }) + if err != nil { + return count, err + } + + d.histograms.Reset().Walk(func(name string, lvs lv.LabelValues, values []float64) bool { + sampleRate := d.rates.Get(name) + for _, value := range values { + n, err = fmt.Fprintf(w, "%s:%f|h%s%s\n", name, value, sampling(sampleRate), tagValues(lvs)) + if err != nil { + return false } - } - value = value + "|#" + tagsString - } - return value -} + count += int64(n) + } + return true + }) + if err != nil { + return count, err + } + + return count, err +} + +func sum(a []float64) float64 { + var v float64 + for _, f := range a { + v += f + } + return v +} + +func last(a []float64) float64 { + return a[len(a)-1] +} + +func sampling(r float64) string { + var sv string + if r < 1.0 { + sv = fmt.Sprintf("|@%f", r) + } + return sv +} + +func tagValues(labelValues []string) string { + if len(labelValues) == 0 { + return "" + } + if len(labelValues)%2 != 0 { + panic("tagValues received a labelValues with an odd number of strings") + } + pairs := make([]string, 0, 
len(labelValues)/2) + for i := 0; i < len(labelValues); i += 2 { + pairs = append(pairs, labelValues[i]+":"+labelValues[i+1]) + } + return "|#" + strings.Join(pairs, ",") +} + +type observeFunc func(name string, lvs lv.LabelValues, value float64) + +// Counter is a DogStatsD counter. Observations are forwarded to a Dogstatsd +// object, and aggregated (summed) per timeseries. +type Counter struct { + name string + lvs lv.LabelValues + obs observeFunc +} + +// With implements metrics.Counter. +func (c *Counter) With(labelValues ...string) metrics.Counter { + return &Counter{ + name: c.name, + lvs: c.lvs.With(labelValues...), + obs: c.obs, + } +} + +// Add implements metrics.Counter. +func (c *Counter) Add(delta float64) { + c.obs(c.name, c.lvs, delta) +} + +// Gauge is a DogStatsD gauge. Observations are forwarded to a Dogstatsd +// object, and aggregated (the last observation selected) per timeseries. +type Gauge struct { + name string + lvs lv.LabelValues + obs observeFunc +} + +// With implements metrics.Gauge. +func (g *Gauge) With(labelValues ...string) metrics.Gauge { + return &Gauge{ + name: g.name, + lvs: g.lvs.With(labelValues...), + obs: g.obs, + } +} + +// Set implements metrics.Gauge. +func (g *Gauge) Set(value float64) { + g.obs(g.name, g.lvs, value) +} + +// Timing is a DogStatsD timing, or metrics.Histogram. Observations are +// forwarded to a Dogstatsd object, and collected (but not aggregated) per +// timeseries. +type Timing struct { + name string + lvs lv.LabelValues + obs observeFunc +} + +// With implements metrics.Timing. +func (t *Timing) With(labelValues ...string) metrics.Histogram { + return &Timing{ + name: t.name, + lvs: t.lvs.With(labelValues...), + obs: t.obs, + } +} + +// Observe implements metrics.Histogram. Value is interpreted as milliseconds. +func (t *Timing) Observe(value float64) { + t.obs(t.name, t.lvs, value) +} + +// Histogram is a DogStatsD histrogram. Observations are forwarded to a +// Dogstatsd object, and collected (but not aggregated) per timeseries. +type Histogram struct { + name string + lvs lv.LabelValues + obs observeFunc +} + +// With implements metrics.Histogram. +func (h *Histogram) With(labelValues ...string) metrics.Histogram { + return &Histogram{ + name: h.name, + lvs: h.lvs.With(labelValues...), + obs: h.obs, + } +} + +// Observe implements metrics.Histogram. 
+func (h *Histogram) Observe(value float64) { + h.obs(h.name, h.lvs, value) +} diff --git a/metrics/dogstatsd/dogstatsd_test.go b/metrics/dogstatsd/dogstatsd_test.go index 383dbd7..cd7e5af 100644 --- a/metrics/dogstatsd/dogstatsd_test.go +++ b/metrics/dogstatsd/dogstatsd_test.go @@ -1,266 +1,90 @@ package dogstatsd import ( - "bytes" - "fmt" - "net" - "strings" - "sync" "testing" - "time" "github.com/go-kit/kit/log" - "github.com/go-kit/kit/metrics" - "github.com/go-kit/kit/util/conn" + "github.com/go-kit/kit/metrics3/teststat" ) -func TestEmitterCounter(t *testing.T) { - e, buf := testEmitter() - - c := e.NewCounter("test_statsd_counter") - c.Add(1) - c.Add(2) - - // give time for things to emit - time.Sleep(time.Millisecond * 250) - // force a flush and stop - e.Stop() - - want := "prefix.test_statsd_counter:1|c\nprefix.test_statsd_counter:2|c\n" - have := buf.String() - if want != have { - t.Errorf("want %q, have %q", want, have) +func TestCounter(t *testing.T) { + prefix, name := "abc.", "def" + label, value := "label", "value" + regex := `^` + prefix + name + `:([0-9\.]+)\|c\|#` + label + `:` + value + `$` + d := New(prefix, log.NewNopLogger()) + counter := d.NewCounter(name, 1.0).With(label, value) + valuef := teststat.SumLines(d, regex) + if err := teststat.TestCounter(counter, valuef); err != nil { + t.Fatal(err) } } -func TestEmitterGauge(t *testing.T) { - e, buf := testEmitter() +func TestCounterSampled(t *testing.T) { + // This will involve multiplying the observed sum by the inverse of the + // sample rate and checking against the expected value within some + // tolerance. + t.Skip("TODO") +} - g := e.NewGauge("test_statsd_gauge") - - delta := 1.0 - g.Add(delta) - - // give time for things to emit - time.Sleep(time.Millisecond * 250) - // force a flush and stop - e.Stop() - - want := fmt.Sprintf("prefix.test_statsd_gauge:+%f|g\n", delta) - have := buf.String() - if want != have { - t.Errorf("want %q, have %q", want, have) +func TestGauge(t *testing.T) { + prefix, name := "ghi.", "jkl" + label, value := "xyz", "abc" + regex := `^` + prefix + name + `:([0-9\.]+)\|g\|#` + label + `:` + value + `$` + d := New(prefix, log.NewNopLogger()) + gauge := d.NewGauge(name).With(label, value) + valuef := teststat.LastLine(d, regex) + if err := teststat.TestGauge(gauge, valuef); err != nil { + t.Fatal(err) } } -func TestEmitterHistogram(t *testing.T) { - e, buf := testEmitter() - h := e.NewHistogram("test_statsd_histogram") +// DogStatsD histograms just emit all observations. So, we collect them into +// a generic histogram, and run the statistics test on that. 
- h.Observe(123) - - // give time for things to emit - time.Sleep(time.Millisecond * 250) - // force a flush and stop - e.Stop() - - want := "prefix.test_statsd_histogram:123|ms\n" - have := buf.String() - if want != have { - t.Errorf("want %q, have %q", want, have) +func TestHistogram(t *testing.T) { + prefix, name := "dogstatsd.", "histogram_test" + label, value := "abc", "def" + regex := `^` + prefix + name + `:([0-9\.]+)\|h\|#` + label + `:` + value + `$` + d := New(prefix, log.NewNopLogger()) + histogram := d.NewHistogram(name, 1.0).With(label, value) + quantiles := teststat.Quantiles(d, regex, 50) // no |@0.X + if err := teststat.TestHistogram(histogram, quantiles, 0.01); err != nil { + t.Fatal(err) } } -func TestCounter(t *testing.T) { - buf := &syncbuf{buf: &bytes.Buffer{}} - reportc := make(chan time.Time) - tags := []metrics.Field{} - c := NewCounterTick(buf, "test_statsd_counter", reportc, tags) - - c.Add(1) - c.With(metrics.Field{"foo", "bar"}).Add(2) - c.With(metrics.Field{"foo", "bar"}).With(metrics.Field{"abc", "123"}).Add(2) - c.Add(3) - - want, have := "test_statsd_counter:1|c\ntest_statsd_counter:2|c|#foo:bar\ntest_statsd_counter:2|c|#foo:bar,abc:123\ntest_statsd_counter:3|c\n", "" - by(t, 100*time.Millisecond, func() bool { - have = buf.String() - return want == have - }, func() { - reportc <- time.Now() - }, fmt.Sprintf("want %q, have %q", want, have)) -} - -func TestGauge(t *testing.T) { - buf := &syncbuf{buf: &bytes.Buffer{}} - reportc := make(chan time.Time) - tags := []metrics.Field{} - g := NewGaugeTick(buf, "test_statsd_gauge", reportc, tags) - - delta := 1.0 - g.Add(delta) - - want, have := fmt.Sprintf("test_statsd_gauge:+%f|g\n", delta), "" - by(t, 100*time.Millisecond, func() bool { - have = buf.String() - return want == have - }, func() { - reportc <- time.Now() - }, fmt.Sprintf("want %q, have %q", want, have)) - - buf.Reset() - delta = -2.0 - g.With(metrics.Field{"foo", "bar"}).Add(delta) - - want, have = fmt.Sprintf("test_statsd_gauge:%f|g|#foo:bar\n", delta), "" - by(t, 100*time.Millisecond, func() bool { - have = buf.String() - return want == have - }, func() { - reportc <- time.Now() - }, fmt.Sprintf("want %q, have %q", want, have)) - - buf.Reset() - value := 3.0 - g.With(metrics.Field{"foo", "bar"}).With(metrics.Field{"abc", "123"}).Set(value) - - want, have = fmt.Sprintf("test_statsd_gauge:%f|g|#foo:bar,abc:123\n", value), "" - by(t, 100*time.Millisecond, func() bool { - have = buf.String() - return want == have - }, func() { - reportc <- time.Now() - }, fmt.Sprintf("want %q, have %q", want, have)) -} - -func TestCallbackGauge(t *testing.T) { - buf := &syncbuf{buf: &bytes.Buffer{}} - reportc, scrapec := make(chan time.Time), make(chan time.Time) - value := 55.55 - cb := func() float64 { return value } - NewCallbackGaugeTick(buf, "test_statsd_callback_gauge", reportc, scrapec, cb) - - scrapec <- time.Now() - reportc <- time.Now() - - // Travis is annoying - by(t, time.Second, func() bool { - return buf.String() != "" - }, func() { - reportc <- time.Now() - }, "buffer never got write+flush") - - want, have := fmt.Sprintf("test_statsd_callback_gauge:%f|g\n", value), "" - by(t, 100*time.Millisecond, func() bool { - have = buf.String() - return strings.HasPrefix(have, want) // HasPrefix because we might get multiple writes - }, func() { - reportc <- time.Now() - }, fmt.Sprintf("want %q, have %q", want, have)) -} - -func TestHistogram(t *testing.T) { - buf := &syncbuf{buf: &bytes.Buffer{}} - reportc := make(chan time.Time) - tags := []metrics.Field{} - h := 
NewHistogramTick(buf, "test_statsd_histogram", reportc, tags) - - h.Observe(123) - h.With(metrics.Field{"foo", "bar"}).Observe(456) - - want, have := "test_statsd_histogram:123|ms\ntest_statsd_histogram:456|ms|#foo:bar\n", "" - by(t, 100*time.Millisecond, func() bool { - have = buf.String() - return want == have - }, func() { - reportc <- time.Now() - }, fmt.Sprintf("want %q, have %q", want, have)) -} - -func by(t *testing.T, d time.Duration, check func() bool, execute func(), msg string) { - deadline := time.Now().Add(d) - for !check() { - if time.Now().After(deadline) { - t.Fatal(msg) - } - execute() +func TestHistogramSampled(t *testing.T) { + prefix, name := "dogstatsd.", "sampled_histogram_test" + label, value := "foo", "bar" + regex := `^` + prefix + name + `:([0-9\.]+)\|h\|@0\.01[0]*\|#` + label + `:` + value + `$` + d := New(prefix, log.NewNopLogger()) + histogram := d.NewHistogram(name, 0.01).With(label, value) + quantiles := teststat.Quantiles(d, regex, 50) + if err := teststat.TestHistogram(histogram, quantiles, 0.02); err != nil { + t.Fatal(err) } } -type syncbuf struct { - mtx sync.Mutex - buf *bytes.Buffer -} - -func (s *syncbuf) Write(p []byte) (int, error) { - s.mtx.Lock() - defer s.mtx.Unlock() - return s.buf.Write(p) -} - -func (s *syncbuf) String() string { - s.mtx.Lock() - defer s.mtx.Unlock() - return s.buf.String() -} - -func (s *syncbuf) Reset() { - s.mtx.Lock() - defer s.mtx.Unlock() - s.buf.Reset() -} - -func testEmitter() (*Emitter, *syncbuf) { - buf := &syncbuf{buf: &bytes.Buffer{}} - e := &Emitter{ - prefix: "prefix.", - mgr: conn.NewManager(mockDialer(buf), "", "", time.After, log.NewNopLogger()), - logger: log.NewNopLogger(), - keyVals: make(chan keyVal), - quitc: make(chan chan struct{}), - } - go e.loop(time.Millisecond * 20) - return e, buf -} - -func mockDialer(buf *syncbuf) conn.Dialer { - return func(net, addr string) (net.Conn, error) { - return &mockConn{buf}, nil +func TestTiming(t *testing.T) { + prefix, name := "dogstatsd.", "timing_test" + label, value := "wiggle", "bottom" + regex := `^` + prefix + name + `:([0-9\.]+)\|ms\|#` + label + `:` + value + `$` + d := New(prefix, log.NewNopLogger()) + histogram := d.NewTiming(name, 1.0).With(label, value) + quantiles := teststat.Quantiles(d, regex, 50) // no |@0.X + if err := teststat.TestHistogram(histogram, quantiles, 0.01); err != nil { + t.Fatal(err) } } -type mockConn struct { - buf *syncbuf +func TestTimingSampled(t *testing.T) { + prefix, name := "dogstatsd.", "sampled_timing_test" + label, value := "internal", "external" + regex := `^` + prefix + name + `:([0-9\.]+)\|ms\|@0.03[0]*\|#` + label + `:` + value + `$` + d := New(prefix, log.NewNopLogger()) + histogram := d.NewTiming(name, 0.03).With(label, value) + quantiles := teststat.Quantiles(d, regex, 50) + if err := teststat.TestHistogram(histogram, quantiles, 0.02); err != nil { + t.Fatal(err) + } } - -func (c *mockConn) Read(b []byte) (n int, err error) { - panic("not implemented") -} - -func (c *mockConn) Write(b []byte) (n int, err error) { - return c.buf.Write(b) -} - -func (c *mockConn) Close() error { - panic("not implemented") -} - -func (c *mockConn) LocalAddr() net.Addr { - panic("not implemented") -} - -func (c *mockConn) RemoteAddr() net.Addr { - panic("not implemented") -} - -func (c *mockConn) SetDeadline(t time.Time) error { - panic("not implemented") -} - -func (c *mockConn) SetReadDeadline(t time.Time) error { - panic("not implemented") -} - -func (c *mockConn) SetWriteDeadline(t time.Time) error { - panic("not implemented") -} 
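The tests above match flushed lines against regular expressions; here is a small sketch of what `WriteTo` emits for a counter and a timing, following the `Fprintf` verbs in dogstatsd.go. The metric names, label values, and expected output comments are illustrative assumptions:

```go
package main

import (
	"bytes"
	"fmt"

	"github.com/go-kit/kit/log"
	"github.com/go-kit/kit/metrics3/dogstatsd"
)

func main() {
	d := dogstatsd.New("myservice.", log.NewNopLogger())
	d.NewCounter("requests_total", 1.0).With("method", "GET").Add(3)
	d.NewTiming("request_duration_ms", 1.0).Observe(250)

	// WriteTo drains the buffered observations in DogStatsD line format:
	// counters are summed per timeseries, timings emit one line per observation.
	var buf bytes.Buffer
	if _, err := d.WriteTo(&buf); err != nil {
		panic(err)
	}
	fmt.Print(buf.String())
	// Expected output (modulo float formatting), e.g.:
	//   myservice.requests_total:3.000000|c|#method:GET
	//   myservice.request_duration_ms:250.000000|ms
}
```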
diff --git a/metrics/dogstatsd/emitter.go b/metrics/dogstatsd/emitter.go deleted file mode 100644 index d0add2d..0000000 --- a/metrics/dogstatsd/emitter.go +++ /dev/null @@ -1,159 +0,0 @@ -package dogstatsd - -import ( - "bytes" - "fmt" - "net" - "time" - - "github.com/go-kit/kit/log" - "github.com/go-kit/kit/metrics" - "github.com/go-kit/kit/util/conn" -) - -// Emitter is a struct to manage connections and orchestrate the emission of -// metrics to a DogStatsd process. -type Emitter struct { - prefix string - keyVals chan keyVal - mgr *conn.Manager - logger log.Logger - quitc chan chan struct{} -} - -type keyVal struct { - key string - val string -} - -func stringToKeyVal(key string, keyVals chan keyVal) chan string { - vals := make(chan string) - go func() { - for val := range vals { - keyVals <- keyVal{key: key, val: val} - } - }() - return vals -} - -// NewEmitter will return an Emitter that will prefix all metrics names with the -// given prefix. Once started, it will attempt to create a connection with the -// given network and address via `net.Dial` and periodically post metrics to the -// connection in the DogStatsD protocol. -func NewEmitter(network, address string, metricsPrefix string, flushInterval time.Duration, logger log.Logger) *Emitter { - return NewEmitterDial(net.Dial, network, address, metricsPrefix, flushInterval, logger) -} - -// NewEmitterDial is the same as NewEmitter, but allows you to specify your own -// Dialer function. This is primarily useful for tests. -func NewEmitterDial(dialer conn.Dialer, network, address string, metricsPrefix string, flushInterval time.Duration, logger log.Logger) *Emitter { - e := &Emitter{ - prefix: metricsPrefix, - mgr: conn.NewManager(dialer, network, address, time.After, logger), - logger: logger, - keyVals: make(chan keyVal), - quitc: make(chan chan struct{}), - } - go e.loop(flushInterval) - return e -} - -// NewCounter returns a Counter that emits observations in the DogStatsD protocol -// via the Emitter's connection manager. Observations are buffered for the -// report interval or until the buffer exceeds a max packet size, whichever -// comes first. Fields are ignored. -func (e *Emitter) NewCounter(key string) metrics.Counter { - key = e.prefix + key - return &counter{ - key: key, - c: stringToKeyVal(key, e.keyVals), - } -} - -// NewHistogram returns a Histogram that emits observations in the DogStatsD -// protocol via the Emitter's connection manager. Observations are buffered for -// the reporting interval or until the buffer exceeds a max packet size, -// whichever comes first. Fields are ignored. -// -// NewHistogram is mapped to a statsd Timing, so observations should represent -// milliseconds. If you observe in units of nanoseconds, you can make the -// translation with a ScaledHistogram: -// -// NewScaledHistogram(histogram, time.Millisecond) -// -// You can also enforce the constraint in a typesafe way with a millisecond -// TimeHistogram: -// -// NewTimeHistogram(histogram, time.Millisecond) -// -// TODO: support for sampling. -func (e *Emitter) NewHistogram(key string) metrics.Histogram { - key = e.prefix + key - return &histogram{ - key: key, - h: stringToKeyVal(key, e.keyVals), - } -} - -// NewGauge returns a Gauge that emits values in the DogStatsD protocol via the -// the Emitter's connection manager. Values are buffered for the report -// interval or until the buffer exceeds a max packet size, whichever comes -// first. Fields are ignored. 
-// -// TODO: support for sampling -func (e *Emitter) NewGauge(key string) metrics.Gauge { - key = e.prefix + key - return &gauge{ - key: key, - g: stringToKeyVal(key, e.keyVals), - } -} - -func (e *Emitter) loop(d time.Duration) { - ticker := time.NewTicker(d) - defer ticker.Stop() - buf := &bytes.Buffer{} - for { - select { - case kv := <-e.keyVals: - fmt.Fprintf(buf, "%s:%s\n", kv.key, kv.val) - if buf.Len() > maxBufferSize { - e.Flush(buf) - } - - case <-ticker.C: - e.Flush(buf) - - case q := <-e.quitc: - e.Flush(buf) - close(q) - return - } - } -} - -// Stop will flush the current metrics and close the active connection. Calling -// stop more than once is a programmer error. -func (e *Emitter) Stop() { - q := make(chan struct{}) - e.quitc <- q - <-q -} - -// Flush will write the given buffer to a connection provided by the Emitter's -// connection manager. -func (e *Emitter) Flush(buf *bytes.Buffer) { - conn := e.mgr.Take() - if conn == nil { - e.logger.Log("during", "flush", "err", "connection unavailable") - return - } - - _, err := conn.Write(buf.Bytes()) - if err != nil { - e.logger.Log("during", "flush", "err", err) - } - buf.Reset() - - e.mgr.Put(err) -} diff --git a/metrics/expvar/expvar.go b/metrics/expvar/expvar.go index 371d103..d27964f 100644 --- a/metrics/expvar/expvar.go +++ b/metrics/expvar/expvar.go @@ -1,172 +1,91 @@ -// Package expvar implements an expvar backend for package metrics. -// -// The current implementation ignores fields. In the future, it would be good -// to have an implementation that accepted a set of predeclared field names at -// construction time, and used field values to produce delimiter-separated -// bucket (key) names. That is, -// -// c := NewFieldedCounter(..., "path", "status") -// c.Add(1) // "myprefix_unknown_unknown" += 1 -// c2 := c.With("path", "foo").With("status": "200") -// c2.Add(1) // "myprefix_foo_200" += 1 -// -// It would also be possible to have an implementation that generated more -// sophisticated expvar.Values. For example, a Counter could be implemented as -// a map, representing a tree of key/value pairs whose leaves were the actual -// expvar.Ints. +// Package expvar provides expvar backends for metrics. +// Label values are not supported. package expvar import ( "expvar" - "fmt" - "sort" - "strconv" "sync" - "time" - "github.com/codahale/hdrhistogram" - - "github.com/go-kit/kit/metrics" + "github.com/go-kit/kit/metrics3" + "github.com/go-kit/kit/metrics3/generic" ) -type counter struct { - name string - v *expvar.Int +// Counter implements the counter metric with an expvar float. +// Label values are not supported. +type Counter struct { + f *expvar.Float } -// NewCounter returns a new Counter backed by an expvar with the given name. -// Fields are ignored. -func NewCounter(name string) metrics.Counter { - return &counter{ - name: name, - v: expvar.NewInt(name), +// NewCounter creates an expvar Float with the given name, and returns an object +// that implements the Counter interface. +func NewCounter(name string) *Counter { + return &Counter{ + f: expvar.NewFloat(name), } } -func (c *counter) Name() string { return c.name } -func (c *counter) With(metrics.Field) metrics.Counter { return c } -func (c *counter) Add(delta uint64) { c.v.Add(int64(delta)) } +// With is a no-op. +func (c *Counter) With(labelValues ...string) metrics.Counter { return c } -type gauge struct { - name string - v *expvar.Float +// Add implements Counter. 
+func (c *Counter) Add(delta float64) { c.f.Add(delta) } + +// Gauge implements the gauge metric wtih an expvar float. +// Label values are not supported. +type Gauge struct { + f *expvar.Float } -// NewGauge returns a new Gauge backed by an expvar with the given name. It -// should be updated manually; for a callback-based approach, see -// PublishCallbackGauge. Fields are ignored. -func NewGauge(name string) metrics.Gauge { - return &gauge{ - name: name, - v: expvar.NewFloat(name), +// NewGauge creates an expvar Float with the given name, and returns an object +// that implements the Gauge interface. +func NewGauge(name string) *Gauge { + return &Gauge{ + f: expvar.NewFloat(name), } } -func (g *gauge) Name() string { return g.name } -func (g *gauge) With(metrics.Field) metrics.Gauge { return g } -func (g *gauge) Add(delta float64) { g.v.Add(delta) } -func (g *gauge) Set(value float64) { g.v.Set(value) } -func (g *gauge) Get() float64 { return mustParseFloat64(g.v.String()) } +// With is a no-op. +func (g *Gauge) With(labelValues ...string) metrics.Gauge { return g } -// PublishCallbackGauge publishes a Gauge as an expvar with the given name, -// whose value is determined at collect time by the passed callback function. -// The callback determines the value, and fields are ignored, so -// PublishCallbackGauge returns nothing. -func PublishCallbackGauge(name string, callback func() float64) { - expvar.Publish(name, callbackGauge(callback)) +// Set implements Gauge. +func (g *Gauge) Set(value float64) { g.f.Set(value) } + +// Histogram implements the histogram metric with a combination of the generic +// Histogram object and several expvar Floats, one for each of the 50th, 90th, +// 95th, and 99th quantiles of observed values, with the quantile attached to +// the name as a suffix. Label values are not supported. +type Histogram struct { + mtx sync.Mutex + h *generic.Histogram + p50 *expvar.Float + p90 *expvar.Float + p95 *expvar.Float + p99 *expvar.Float } -type callbackGauge func() float64 - -func (g callbackGauge) String() string { return strconv.FormatFloat(g(), 'g', -1, 64) } - -type histogram struct { - mu sync.Mutex - hist *hdrhistogram.WindowedHistogram - - name string - gauges map[int]metrics.Gauge -} - -// NewHistogram is taken from http://github.com/codahale/metrics. It returns a -// windowed HDR histogram which drops data older than five minutes. -// -// The histogram exposes metrics for each passed quantile as gauges. Quantiles -// should be integers in the range 1..99. The gauge names are assigned by -// using the passed name as a prefix and appending "_pNN" e.g. "_p50". 
-func NewHistogram(name string, minValue, maxValue int64, sigfigs int, quantiles ...int) metrics.Histogram { - gauges := map[int]metrics.Gauge{} - for _, quantile := range quantiles { - if quantile <= 0 || quantile >= 100 { - panic(fmt.Sprintf("invalid quantile %d", quantile)) - } - gauges[quantile] = NewGauge(fmt.Sprintf("%s_p%02d", name, quantile)) - } - h := &histogram{ - hist: hdrhistogram.NewWindowed(5, minValue, maxValue, sigfigs), - name: name, - gauges: gauges, - } - go h.rotateLoop(1 * time.Minute) - return h -} - -func (h *histogram) Name() string { return h.name } -func (h *histogram) With(metrics.Field) metrics.Histogram { return h } - -func (h *histogram) Observe(value int64) { - h.mu.Lock() - err := h.hist.Current.RecordValue(value) - h.mu.Unlock() - - if err != nil { - panic(err.Error()) - } - - for q, gauge := range h.gauges { - gauge.Set(float64(h.hist.Current.ValueAtQuantile(float64(q)))) +// NewHistogram returns a Histogram object with the given name and number of +// buckets in the underlying histogram object. 50 is a good default number of +// buckets. +func NewHistogram(name string, buckets int) *Histogram { + return &Histogram{ + h: generic.NewHistogram(name, buckets), + p50: expvar.NewFloat(name + ".p50"), + p90: expvar.NewFloat(name + ".p90"), + p95: expvar.NewFloat(name + ".p95"), + p99: expvar.NewFloat(name + ".p99"), } } -func (h *histogram) Distribution() ([]metrics.Bucket, []metrics.Quantile) { - bars := h.hist.Merge().Distribution() - buckets := make([]metrics.Bucket, len(bars)) - for i, bar := range bars { - buckets[i] = metrics.Bucket{ - From: bar.From, - To: bar.To, - Count: bar.Count, - } - } - quantiles := make([]metrics.Quantile, 0, len(h.gauges)) - for quantile, gauge := range h.gauges { - quantiles = append(quantiles, metrics.Quantile{ - Quantile: quantile, - Value: int64(gauge.Get()), - }) - } - sort.Sort(quantileSlice(quantiles)) - return buckets, quantiles +// With is a no-op. +func (h *Histogram) With(labelValues ...string) metrics.Histogram { return h } + +// Observe impleemts Histogram. 
+func (h *Histogram) Observe(value float64) { + h.mtx.Lock() + defer h.mtx.Unlock() + h.h.Observe(value) + h.p50.Set(h.h.Quantile(0.50)) + h.p90.Set(h.h.Quantile(0.90)) + h.p95.Set(h.h.Quantile(0.95)) + h.p99.Set(h.h.Quantile(0.99)) } - -func (h *histogram) rotateLoop(d time.Duration) { - for range time.Tick(d) { - h.mu.Lock() - h.hist.Rotate() - h.mu.Unlock() - } -} - -func mustParseFloat64(s string) float64 { - f, err := strconv.ParseFloat(s, 64) - if err != nil { - panic(err) - } - return f -} - -type quantileSlice []metrics.Quantile - -func (a quantileSlice) Len() int { return len(a) } -func (a quantileSlice) Less(i, j int) bool { return a[i].Quantile < a[j].Quantile } -func (a quantileSlice) Swap(i, j int) { a[i], a[j] = a[j], a[i] } diff --git a/metrics/expvar/expvar_test.go b/metrics/expvar/expvar_test.go index 644bb40..5307473 100644 --- a/metrics/expvar/expvar_test.go +++ b/metrics/expvar/expvar_test.go @@ -1,69 +1,38 @@ -package expvar_test +package expvar import ( - stdexpvar "expvar" - "fmt" + "strconv" "testing" - "github.com/go-kit/kit/metrics" - "github.com/go-kit/kit/metrics/expvar" - "github.com/go-kit/kit/metrics/teststat" + "github.com/go-kit/kit/metrics3/teststat" ) -func TestHistogramQuantiles(t *testing.T) { - var ( - name = "test_histogram_quantiles" - quantiles = []int{50, 90, 95, 99} - h = expvar.NewHistogram(name, 0, 100, 3, quantiles...).With(metrics.Field{Key: "ignored", Value: "field"}) - ) - const seed, mean, stdev int64 = 424242, 50, 10 - teststat.PopulateNormalHistogram(t, h, seed, mean, stdev) - teststat.AssertExpvarNormalHistogram(t, name, mean, stdev, quantiles) -} - -func TestCallbackGauge(t *testing.T) { - var ( - name = "foo" - value = 42.43 - ) - expvar.PublishCallbackGauge(name, func() float64 { return value }) - if want, have := fmt.Sprint(value), stdexpvar.Get(name).String(); want != have { - t.Errorf("want %q, have %q", want, have) - } -} - func TestCounter(t *testing.T) { - var ( - name = "m" - value = 123 - ) - expvar.NewCounter(name).With(metrics.Field{Key: "ignored", Value: "field"}).Add(uint64(value)) - if want, have := fmt.Sprint(value), stdexpvar.Get(name).String(); want != have { - t.Errorf("want %q, have %q", want, have) + counter := NewCounter("expvar_counter").With("label values", "not supported").(*Counter) + value := func() float64 { f, _ := strconv.ParseFloat(counter.f.String(), 64); return f } + if err := teststat.TestCounter(counter, value); err != nil { + t.Fatal(err) } } func TestGauge(t *testing.T) { - var ( - name = "xyz" - value = 54321 - delta = 12345 - g = expvar.NewGauge(name).With(metrics.Field{Key: "ignored", Value: "field"}) - ) - g.Set(float64(value)) - g.Add(float64(delta)) - if want, have := fmt.Sprint(value+delta), stdexpvar.Get(name).String(); want != have { - t.Errorf("want %q, have %q", want, have) + gauge := NewGauge("expvar_gauge").With("label values", "not supported").(*Gauge) + value := func() float64 { f, _ := strconv.ParseFloat(gauge.f.String(), 64); return f } + if err := teststat.TestGauge(gauge, value); err != nil { + t.Fatal(err) } } -func TestInvalidQuantile(t *testing.T) { - defer func() { - if err := recover(); err == nil { - t.Errorf("expected panic, got none") - } else { - t.Logf("got expected panic: %v", err) - } - }() - expvar.NewHistogram("foo", 0.0, 100.0, 3, 50, 90, 95, 99, 101) +func TestHistogram(t *testing.T) { + histogram := NewHistogram("expvar_histogram", 50).With("label values", "not supported").(*Histogram) + quantiles := func() (float64, float64, float64, float64) { + p50, _ := 
strconv.ParseFloat(histogram.p50.String(), 64) + p90, _ := strconv.ParseFloat(histogram.p90.String(), 64) + p95, _ := strconv.ParseFloat(histogram.p95.String(), 64) + p99, _ := strconv.ParseFloat(histogram.p99.String(), 64) + return p50, p90, p95, p99 + } + if err := teststat.TestHistogram(histogram, quantiles, 0.01); err != nil { + t.Fatal(err) + } } diff --git a/metrics/generic/generic.go b/metrics/generic/generic.go new file mode 100644 index 0000000..0a251dc --- /dev/null +++ b/metrics/generic/generic.go @@ -0,0 +1,218 @@ +// Package generic implements generic versions of each of the metric types. They +// can be embedded by other implementations, and converted to specific formats +// as necessary. +package generic + +import ( + "fmt" + "io" + "math" + "sync" + "sync/atomic" + + "github.com/VividCortex/gohistogram" + + "github.com/go-kit/kit/metrics3" + "github.com/go-kit/kit/metrics3/internal/lv" +) + +// Counter is an in-memory implementation of a Counter. +type Counter struct { + Name string + lvs lv.LabelValues + bits uint64 +} + +// NewCounter returns a new, usable Counter. +func NewCounter(name string) *Counter { + return &Counter{ + Name: name, + } +} + +// With implements Counter. +func (c *Counter) With(labelValues ...string) metrics.Counter { + return &Counter{ + bits: atomic.LoadUint64(&c.bits), + lvs: c.lvs.With(labelValues...), + } +} + +// Add implements Counter. +func (c *Counter) Add(delta float64) { + for { + var ( + old = atomic.LoadUint64(&c.bits) + newf = math.Float64frombits(old) + delta + new = math.Float64bits(newf) + ) + if atomic.CompareAndSwapUint64(&c.bits, old, new) { + break + } + } +} + +// Value returns the current value of the counter. +func (c *Counter) Value() float64 { + return math.Float64frombits(atomic.LoadUint64(&c.bits)) +} + +// ValueReset returns the current value of the counter, and resets it to zero. +// This is useful for metrics backends whose counter aggregations expect deltas, +// like Graphite. +func (c *Counter) ValueReset() float64 { + for { + var ( + old = atomic.LoadUint64(&c.bits) + newf = 0.0 + new = math.Float64bits(newf) + ) + if atomic.CompareAndSwapUint64(&c.bits, old, new) { + return math.Float64frombits(old) + } + } +} + +// LabelValues returns the set of label values attached to the counter. +func (c *Counter) LabelValues() []string { + return c.lvs +} + +// Gauge is an in-memory implementation of a Gauge. +type Gauge struct { + Name string + lvs lv.LabelValues + bits uint64 +} + +// NewGauge returns a new, usable Gauge. +func NewGauge(name string) *Gauge { + return &Gauge{ + Name: name, + } +} + +// With implements Gauge. +func (g *Gauge) With(labelValues ...string) metrics.Gauge { + return &Gauge{ + bits: atomic.LoadUint64(&g.bits), + lvs: g.lvs.With(labelValues...), + } +} + +// Set implements Gauge. +func (g *Gauge) Set(value float64) { + atomic.StoreUint64(&g.bits, math.Float64bits(value)) +} + +// Value returns the current value of the gauge. +func (g *Gauge) Value() float64 { + return math.Float64frombits(atomic.LoadUint64(&g.bits)) +} + +// LabelValues returns the set of label values attached to the gauge. +func (g *Gauge) LabelValues() []string { + return g.lvs +} + +// Histogram is an in-memory implementation of a streaming histogram, based on +// VividCortex/gohistogram. It dynamically computes quantiles, so it's not +// suitable for aggregation. +type Histogram struct { + Name string + lvs lv.LabelValues + h gohistogram.Histogram +} + +// NewHistogram returns a numeric histogram based on VividCortex/gohistogram. 
A +// good default value for buckets is 50.
+func NewHistogram(name string, buckets int) *Histogram {
+	return &Histogram{
+		Name: name,
+		h:    gohistogram.NewHistogram(buckets),
+	}
+}
+
+// With implements Histogram.
+func (h *Histogram) With(labelValues ...string) metrics.Histogram {
+	return &Histogram{
+		lvs: h.lvs.With(labelValues...),
+		h:   h.h,
+	}
+}
+
+// Observe implements Histogram.
+func (h *Histogram) Observe(value float64) {
+	h.h.Add(value)
+}
+
+// Quantile returns the value of the quantile q, 0.0 < q < 1.0.
+func (h *Histogram) Quantile(q float64) float64 {
+	return h.h.Quantile(q)
+}
+
+// LabelValues returns the set of label values attached to the histogram.
+func (h *Histogram) LabelValues() []string {
+	return h.lvs
+}
+
+// Print writes a string representation of the histogram to the passed writer.
+// Useful for printing to a terminal.
+func (h *Histogram) Print(w io.Writer) {
+	fmt.Fprint(w, h.h.String())
+}
+
+// Bucket is a range in a histogram which aggregates observations.
+type Bucket struct {
+	From, To, Count int64
+}
+
+// Quantile is a pair of a quantile (0..100) and its observed maximum value.
+type Quantile struct {
+	Quantile int // 0..100
+	Value    int64
+}
+
+// SimpleHistogram is an in-memory implementation of a Histogram. It only tracks
+// an approximate moving average, so is likely too naïve for many use cases.
+type SimpleHistogram struct {
+	mtx sync.RWMutex
+	lvs lv.LabelValues
+	avg float64
+	n   uint64
+}
+
+// NewSimpleHistogram returns a SimpleHistogram, ready for observations.
+func NewSimpleHistogram() *SimpleHistogram {
+	return &SimpleHistogram{}
+}
+
+// With implements Histogram.
+func (h *SimpleHistogram) With(labelValues ...string) metrics.Histogram {
+	return &SimpleHistogram{
+		lvs: h.lvs.With(labelValues...),
+		avg: h.avg,
+		n:   h.n,
+	}
+}
+
+// Observe implements Histogram.
+func (h *SimpleHistogram) Observe(value float64) {
+	h.mtx.Lock()
+	defer h.mtx.Unlock()
+	h.n++
+	h.avg -= h.avg / float64(h.n)
+	h.avg += value / float64(h.n)
+}
+
+// ApproximateMovingAverage returns the approximate moving average of observations.
+func (h *SimpleHistogram) ApproximateMovingAverage() float64 {
+	h.mtx.RLock()
+	defer h.mtx.RUnlock()
+	return h.avg
+}
+
+// LabelValues returns the set of label values attached to the histogram.
+func (h *SimpleHistogram) LabelValues() []string {
+	return h.lvs
+}
diff --git a/metrics/generic/generic_test.go b/metrics/generic/generic_test.go
new file mode 100644
index 0000000..7f8f2a9
--- /dev/null
+++ b/metrics/generic/generic_test.go
@@ -0,0 +1,75 @@
+package generic_test
+
+// This is package generic_test in order to get around an import cycle: this
+// package imports teststat to do its testing, but package teststat imports
+// generic to use its Histogram in the Quantiles helper function.
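Package generic is the building block that the backend adapters in this patch lean on. Here is a minimal sketch of using it directly, based only on the constructors and accessors shown above (the metric names are illustrative); note that With returns a copy, so keep the returned value, as the tests below do:

```go
package main

import (
	"fmt"
	"os"

	"github.com/go-kit/kit/metrics3/generic"
)

func main() {
	// With returns a new Counter carrying the label values; keep the result.
	jobs := generic.NewCounter("jobs_processed").With("worker", "a").(*generic.Counter)
	jobs.Add(3)
	fmt.Println(jobs.Value()) // 3

	// 50 buckets is the suggested default for the streaming histogram.
	latency := generic.NewHistogram("job_latency_seconds", 50)
	for _, v := range []float64{0.01, 0.02, 0.05, 0.1} {
		latency.Observe(v)
	}
	fmt.Println(latency.Quantile(0.95)) // dynamically computed quantile
	latency.Print(os.Stdout)            // dump the histogram state for debugging
}
```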
+ +import ( + "math" + "math/rand" + "testing" + + "github.com/go-kit/kit/metrics3/generic" + "github.com/go-kit/kit/metrics3/teststat" +) + +func TestCounter(t *testing.T) { + counter := generic.NewCounter("my_counter").With("label", "counter").(*generic.Counter) + value := func() float64 { return counter.Value() } + if err := teststat.TestCounter(counter, value); err != nil { + t.Fatal(err) + } +} + +func TestValueReset(t *testing.T) { + counter := generic.NewCounter("test_value_reset") + counter.Add(123) + counter.Add(456) + counter.Add(789) + if want, have := float64(123+456+789), counter.ValueReset(); want != have { + t.Errorf("want %f, have %f", want, have) + } + if want, have := float64(0), counter.Value(); want != have { + t.Errorf("want %f, have %f", want, have) + } +} + +func TestGauge(t *testing.T) { + gauge := generic.NewGauge("my_gauge").With("label", "gauge").(*generic.Gauge) + value := func() float64 { return gauge.Value() } + if err := teststat.TestGauge(gauge, value); err != nil { + t.Fatal(err) + } +} + +func TestHistogram(t *testing.T) { + histogram := generic.NewHistogram("my_histogram", 50).With("label", "histogram").(*generic.Histogram) + quantiles := func() (float64, float64, float64, float64) { + return histogram.Quantile(0.50), histogram.Quantile(0.90), histogram.Quantile(0.95), histogram.Quantile(0.99) + } + if err := teststat.TestHistogram(histogram, quantiles, 0.01); err != nil { + t.Fatal(err) + } +} + +func TestSimpleHistogram(t *testing.T) { + histogram := generic.NewSimpleHistogram().With("label", "simple_histogram").(*generic.SimpleHistogram) + var ( + sum int + count = 1234 // not too big + ) + for i := 0; i < count; i++ { + value := rand.Intn(1000) + sum += value + histogram.Observe(float64(value)) + } + + var ( + want = float64(sum) / float64(count) + have = histogram.ApproximateMovingAverage() + tolerance = 0.001 // real real slim + ) + if math.Abs(want-have)/want > tolerance { + t.Errorf("want %f, have %f", want, have) + } +} diff --git a/metrics/graphite/emitter.go b/metrics/graphite/emitter.go deleted file mode 100644 index 87e633c..0000000 --- a/metrics/graphite/emitter.go +++ /dev/null @@ -1,159 +0,0 @@ -package graphite - -import ( - "bufio" - "fmt" - "io" - "net" - "sync" - "time" - - "github.com/go-kit/kit/log" - "github.com/go-kit/kit/metrics" - "github.com/go-kit/kit/util/conn" -) - -// Emitter is a struct to manage connections and orchestrate the emission of -// metrics to a Graphite system. -type Emitter struct { - mtx sync.Mutex - prefix string - mgr *conn.Manager - counters []*counter - histograms []*windowedHistogram - gauges []*gauge - logger log.Logger - quitc chan chan struct{} -} - -// NewEmitter will return an Emitter that will prefix all metrics names with the -// given prefix. Once started, it will attempt to create a connection with the -// given network and address via `net.Dial` and periodically post metrics to the -// connection in the Graphite plaintext protocol. -func NewEmitter(network, address string, metricsPrefix string, flushInterval time.Duration, logger log.Logger) *Emitter { - return NewEmitterDial(net.Dial, network, address, metricsPrefix, flushInterval, logger) -} - -// NewEmitterDial is the same as NewEmitter, but allows you to specify your own -// Dialer function. This is primarily useful for tests. 
-func NewEmitterDial(dialer conn.Dialer, network, address string, metricsPrefix string, flushInterval time.Duration, logger log.Logger) *Emitter { - e := &Emitter{ - prefix: metricsPrefix, - mgr: conn.NewManager(dialer, network, address, time.After, logger), - logger: logger, - quitc: make(chan chan struct{}), - } - go e.loop(flushInterval) - return e -} - -// NewCounter returns a Counter whose value will be periodically emitted in -// a Graphite-compatible format once the Emitter is started. Fields are ignored. -func (e *Emitter) NewCounter(name string) metrics.Counter { - e.mtx.Lock() - defer e.mtx.Unlock() - c := newCounter(name) - e.counters = append(e.counters, c) - return c -} - -// NewHistogram is taken from http://github.com/codahale/metrics. It returns a -// windowed HDR histogram which drops data older than five minutes. -// -// The histogram exposes metrics for each passed quantile as gauges. Quantiles -// should be integers in the range 1..99. The gauge names are assigned by using -// the passed name as a prefix and appending "_pNN" e.g. "_p50". -// -// The values of this histogram will be periodically emitted in a -// Graphite-compatible format once the Emitter is started. Fields are ignored. -func (e *Emitter) NewHistogram(name string, minValue, maxValue int64, sigfigs int, quantiles ...int) (metrics.Histogram, error) { - gauges := map[int]metrics.Gauge{} - for _, quantile := range quantiles { - if quantile <= 0 || quantile >= 100 { - return nil, fmt.Errorf("invalid quantile %d", quantile) - } - gauges[quantile] = e.gauge(fmt.Sprintf("%s_p%02d", name, quantile)) - } - h := newWindowedHistogram(name, minValue, maxValue, sigfigs, gauges, e.logger) - - e.mtx.Lock() - defer e.mtx.Unlock() - e.histograms = append(e.histograms, h) - return h, nil -} - -// NewGauge returns a Gauge whose value will be periodically emitted in a -// Graphite-compatible format once the Emitter is started. Fields are ignored. -func (e *Emitter) NewGauge(name string) metrics.Gauge { - e.mtx.Lock() - defer e.mtx.Unlock() - return e.gauge(name) -} - -func (e *Emitter) gauge(name string) metrics.Gauge { - g := &gauge{name, 0} - e.gauges = append(e.gauges, g) - return g -} - -func (e *Emitter) loop(d time.Duration) { - ticker := time.NewTicker(d) - defer ticker.Stop() - - for { - select { - case <-ticker.C: - e.Flush() - - case q := <-e.quitc: - e.Flush() - close(q) - return - } - } -} - -// Stop will flush the current metrics and close the active connection. Calling -// stop more than once is a programmer error. -func (e *Emitter) Stop() { - q := make(chan struct{}) - e.quitc <- q - <-q -} - -// Flush will write the current metrics to the Emitter's connection in the -// Graphite plaintext protocol. 
-func (e *Emitter) Flush() { - e.mtx.Lock() // one flush at a time - defer e.mtx.Unlock() - - conn := e.mgr.Take() - if conn == nil { - e.logger.Log("during", "flush", "err", "connection unavailable") - return - } - - err := e.flush(conn) - if err != nil { - e.logger.Log("during", "flush", "err", err) - } - e.mgr.Put(err) -} - -func (e *Emitter) flush(w io.Writer) error { - bw := bufio.NewWriter(w) - - for _, c := range e.counters { - c.flush(bw, e.prefix) - } - - for _, h := range e.histograms { - h.flush(bw, e.prefix) - } - - for _, g := range e.gauges { - g.flush(bw, e.prefix) - } - - return bw.Flush() -} diff --git a/metrics/graphite/graphite.go b/metrics/graphite/graphite.go index 6736cc2..fcae7d2 100644 --- a/metrics/graphite/graphite.go +++ b/metrics/graphite/graphite.go @@ -1,186 +1,200 @@ -// Package graphite implements a Graphite backend for package metrics. Metrics -// will be emitted to a Graphite server in the plaintext protocol which looks -// like: +// Package graphite provides a Graphite backend for metrics. Metrics are batched +// and emitted in the plaintext protocol. For more information, see +// http://graphite.readthedocs.io/en/latest/feeding-carbon.html#the-plaintext-protocol // -// " " -// -// See http://graphite.readthedocs.io/en/latest/feeding-carbon.html#the-plaintext-protocol. -// The current implementation ignores fields. +// Graphite does not have a native understanding of metric parameterization, so +// label values not supported. Use distinct metrics for each unique combination +// of label values. package graphite import ( "fmt" "io" - "math" - "sort" "sync" - "sync/atomic" "time" - "github.com/codahale/hdrhistogram" - "github.com/go-kit/kit/log" - "github.com/go-kit/kit/metrics" + "github.com/go-kit/kit/metrics3" + "github.com/go-kit/kit/metrics3/generic" + "github.com/go-kit/kit/util/conn" ) -func newCounter(name string) *counter { - return &counter{name, 0} -} - -func newGauge(name string) *gauge { - return &gauge{name, 0} -} - -// counter implements the metrics.counter interface but also provides a -// Flush method to emit the current counter values in the Graphite plaintext -// protocol. -type counter struct { - key string - count uint64 -} - -func (c *counter) Name() string { return c.key } - -// With currently ignores fields. -func (c *counter) With(metrics.Field) metrics.Counter { return c } - -func (c *counter) Add(delta uint64) { atomic.AddUint64(&c.count, delta) } - -func (c *counter) get() uint64 { return atomic.LoadUint64(&c.count) } - -// flush will emit the current counter value in the Graphite plaintext -// protocol to the given io.Writer. -func (c *counter) flush(w io.Writer, prefix string) { - fmt.Fprintf(w, "%s.count %d %d\n", prefix+c.Name(), c.get(), time.Now().Unix()) -} - -// gauge implements the metrics.gauge interface but also provides a -// Flush method to emit the current counter values in the Graphite plaintext -// protocol. -type gauge struct { - key string - value uint64 // math.Float64bits -} - -func (g *gauge) Name() string { return g.key } - -// With currently ignores fields. 
-func (g *gauge) With(metrics.Field) metrics.Gauge { return g }
-
-func (g *gauge) Add(delta float64) {
-	for {
-		old := atomic.LoadUint64(&g.value)
-		new := math.Float64bits(math.Float64frombits(old) + delta)
-		if atomic.CompareAndSwapUint64(&g.value, old, new) {
-			return
-		}
-	}
-}
-
-func (g *gauge) Set(value float64) {
-	atomic.StoreUint64(&g.value, math.Float64bits(value))
-}
-
-func (g *gauge) Get() float64 {
-	return math.Float64frombits(atomic.LoadUint64(&g.value))
-}
-
-// Flush will emit the current gauge value in the Graphite plaintext
-// protocol to the given io.Writer.
-func (g *gauge) flush(w io.Writer, prefix string) {
-	fmt.Fprintf(w, "%s %.2f %d\n", prefix+g.Name(), g.Get(), time.Now().Unix())
-}
-
-// windowedHistogram is taken from http://github.com/codahale/metrics. It
-// is a windowed HDR histogram which drops data older than five minutes.
+// Graphite receives metrics observations and forwards them to a Graphite server.
+// Create a Graphite object, use it to create metrics, and pass those metrics as
+// dependencies to the components that will use them.
 //
-// The histogram exposes metrics for each passed quantile as gauges. Quantiles
-// should be integers in the range 1..99. The gauge names are assigned by using
-// the passed name as a prefix and appending "_pNN" e.g. "_p50".
+// All metrics are buffered until WriteTo is called. Counters and gauges are
+// aggregated into a single observation per timeseries per write. Histograms are
+// exploded into per-quantile gauges and reported once per write.
 //
-// The values of this histogram will be periodically emitted in a
-// Graphite-compatible format once the GraphiteProvider is started. Fields are ignored.
-type windowedHistogram struct {
-	mtx  sync.Mutex
-	hist *hdrhistogram.WindowedHistogram
-
-	name   string
-	gauges map[int]metrics.Gauge
-	logger log.Logger
-}
-
-func newWindowedHistogram(name string, minValue, maxValue int64, sigfigs int, quantiles map[int]metrics.Gauge, logger log.Logger) *windowedHistogram {
-	h := &windowedHistogram{
-		hist:   hdrhistogram.NewWindowed(5, minValue, maxValue, sigfigs),
-		name:   name,
-		gauges: quantiles,
-		logger: logger,
-	}
-	go h.rotateLoop(1 * time.Minute)
+// To regularly report metrics to an io.Writer, use the WriteLoop helper method.
+// To send to a Graphite server, use the SendLoop helper method.
+type Graphite struct {
+	mtx        sync.RWMutex
+	prefix     string
+	counters   map[string]*Counter
+	gauges     map[string]*Gauge
+	histograms map[string]*Histogram
+	logger     log.Logger
+}
+
+// New returns a Graphite object that may be used to create metrics. Prefix is
+// applied to all created metrics. Callers must ensure that regular calls to
+// WriteTo are performed, either manually or with one of the helper methods.
+func New(prefix string, logger log.Logger) *Graphite {
+	return &Graphite{
+		prefix:     prefix,
+		counters:   map[string]*Counter{},
+		gauges:     map[string]*Gauge{},
+		histograms: map[string]*Histogram{},
+		logger:     logger,
+	}
+}
+
+// NewCounter returns a counter. Observations are aggregated and emitted once
+// per write invocation.
+func (g *Graphite) NewCounter(name string) *Counter {
+	c := NewCounter(g.prefix + name)
+	g.mtx.Lock()
+	g.counters[g.prefix+name] = c
+	g.mtx.Unlock()
+	return c
+}
+
+// NewGauge returns a gauge. Observations are aggregated and emitted once per
+// write invocation.
+func (g *Graphite) NewGauge(name string) *Gauge { + ga := NewGauge(g.prefix + name) + g.mtx.Lock() + g.gauges[g.prefix+name] = ga + g.mtx.Unlock() + return ga +} + +// NewHistogram returns a histogram. Observations are aggregated and emitted as +// per-quantile gauges, once per write invocation. 50 is a good default value +// for buckets. +func (g *Graphite) NewHistogram(name string, buckets int) *Histogram { + h := NewHistogram(g.prefix+name, buckets) + g.mtx.Lock() + g.histograms[g.prefix+name] = h + g.mtx.Unlock() return h } -func (h *windowedHistogram) Name() string { return h.name } - -func (h *windowedHistogram) With(metrics.Field) metrics.Histogram { return h } - -func (h *windowedHistogram) Observe(value int64) { - h.mtx.Lock() - err := h.hist.Current.RecordValue(value) - h.mtx.Unlock() - - if err != nil { - h.logger.Log("err", err, "msg", "unable to record histogram value") - return - } - - for q, gauge := range h.gauges { - gauge.Set(float64(h.hist.Current.ValueAtQuantile(float64(q)))) - } -} - -func (h *windowedHistogram) Distribution() ([]metrics.Bucket, []metrics.Quantile) { - bars := h.hist.Merge().Distribution() - buckets := make([]metrics.Bucket, len(bars)) - for i, bar := range bars { - buckets[i] = metrics.Bucket{ - From: bar.From, - To: bar.To, - Count: bar.Count, - } - } - quantiles := make([]metrics.Quantile, 0, len(h.gauges)) - for quantile, gauge := range h.gauges { - quantiles = append(quantiles, metrics.Quantile{ - Quantile: quantile, - Value: int64(gauge.Get()), - }) - } - sort.Sort(quantileSlice(quantiles)) - return buckets, quantiles -} - -func (h *windowedHistogram) flush(w io.Writer, prefix string) { - name := prefix + h.Name() - hist := h.hist.Merge() +// WriteLoop is a helper method that invokes WriteTo to the passed writer every +// time the passed channel fires. This method blocks until the channel is +// closed, so clients probably want to run it in its own goroutine. For typical +// usage, create a time.Ticker and pass its C channel to this method. +func (g *Graphite) WriteLoop(c <-chan time.Time, w io.Writer) { + for range c { + if _, err := g.WriteTo(w); err != nil { + g.logger.Log("during", "WriteTo", "err", err) + } + } +} + +// SendLoop is a helper method that wraps WriteLoop, passing a managed +// connection to the network and address. Like WriteLoop, this method blocks +// until the channel is closed, so clients probably want to start it in its own +// goroutine. For typical usage, create a time.Ticker and pass its C channel to +// this method. +func (g *Graphite) SendLoop(c <-chan time.Time, network, address string) { + g.WriteLoop(c, conn.NewDefaultManager(network, address, g.logger)) +} + +// WriteTo flushes the buffered content of the metrics to the writer, in +// Graphite plaintext format. WriteTo abides best-effort semantics, so +// observations are lost if there is a problem with the write. Clients should be +// sure to call WriteTo regularly, ideally through the WriteLoop or SendLoop +// helper methods. 
+func (g *Graphite) WriteTo(w io.Writer) (count int64, err error) { + g.mtx.RLock() + defer g.mtx.RUnlock() now := time.Now().Unix() - fmt.Fprintf(w, "%s.count %d %d\n", name, hist.TotalCount(), now) - fmt.Fprintf(w, "%s.min %d %d\n", name, hist.Min(), now) - fmt.Fprintf(w, "%s.max %d %d\n", name, hist.Max(), now) - fmt.Fprintf(w, "%s.mean %.2f %d\n", name, hist.Mean(), now) - fmt.Fprintf(w, "%s.std-dev %.2f %d\n", name, hist.StdDev(), now) -} - -func (h *windowedHistogram) rotateLoop(d time.Duration) { - for range time.Tick(d) { - h.mtx.Lock() - h.hist.Rotate() - h.mtx.Unlock() - } -} - -type quantileSlice []metrics.Quantile - -func (a quantileSlice) Len() int { return len(a) } -func (a quantileSlice) Less(i, j int) bool { return a[i].Quantile < a[j].Quantile } -func (a quantileSlice) Swap(i, j int) { a[i], a[j] = a[j], a[i] } + + for name, c := range g.counters { + n, err := fmt.Fprintf(w, "%s %f %d\n", name, c.c.ValueReset(), now) + if err != nil { + return count, err + } + count += int64(n) + } + + for name, ga := range g.gauges { + n, err := fmt.Fprintf(w, "%s %f %d\n", name, ga.g.Value(), now) + if err != nil { + return count, err + } + count += int64(n) + } + + for name, h := range g.histograms { + for _, p := range []struct { + s string + f float64 + }{ + {"50", 0.50}, + {"90", 0.90}, + {"95", 0.95}, + {"99", 0.99}, + } { + n, err := fmt.Fprintf(w, "%s.p%s %f %d\n", name, p.s, h.h.Quantile(p.f), now) + if err != nil { + return count, err + } + count += int64(n) + } + } + + return count, err +} + +// Counter is a Graphite counter metric. +type Counter struct { + c *generic.Counter +} + +// NewCounter returns a new usable counter metric. +func NewCounter(name string) *Counter { + return &Counter{generic.NewCounter(name)} +} + +// With is a no-op. +func (c *Counter) With(...string) metrics.Counter { return c } + +// Add implements counter. +func (c *Counter) Add(delta float64) { c.c.Add(delta) } + +// Gauge is a Graphite gauge metric. +type Gauge struct { + g *generic.Gauge +} + +// NewGauge returns a new usable Gauge metric. +func NewGauge(name string) *Gauge { + return &Gauge{generic.NewGauge(name)} +} + +// With is a no-op. +func (g *Gauge) With(...string) metrics.Gauge { return g } + +// Set implements gauge. +func (g *Gauge) Set(value float64) { g.g.Set(value) } + +// Histogram is a Graphite histogram metric. Observations are bucketed into +// per-quantile gauges. +type Histogram struct { + h *generic.Histogram +} + +// NewHistogram returns a new usable Histogram metric. +func NewHistogram(name string, buckets int) *Histogram { + return &Histogram{generic.NewHistogram(name, buckets)} +} + +// With is a no-op. +func (h *Histogram) With(...string) metrics.Histogram { return h } + +// Observe implements histogram. +func (h *Histogram) Observe(value float64) { h.h.Observe(value) } diff --git a/metrics/graphite/graphite_test.go b/metrics/graphite/graphite_test.go index 627efea..468c523 100644 --- a/metrics/graphite/graphite_test.go +++ b/metrics/graphite/graphite_test.go @@ -2,79 +2,62 @@ import ( "bytes" - "fmt" - "strings" + "regexp" + "strconv" "testing" - "time" "github.com/go-kit/kit/log" - "github.com/go-kit/kit/metrics" - "github.com/go-kit/kit/metrics/teststat" + "github.com/go-kit/kit/metrics3/teststat" ) -func TestHistogramQuantiles(t *testing.T) { - prefix := "prefix." - e := NewEmitter("", "", prefix, time.Second, log.NewNopLogger()) - var ( - name = "test_histogram_quantiles" - quantiles = []int{50, 90, 95, 99} - ) - h, err := e.NewHistogram(name, 0, 100, 3, quantiles...) 
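The Graphite object above is wired in the usual construct-then-flush style: create it once, make metrics from it, and run SendLoop or WriteLoop in the background. A sketch under assumed values; the prefix, ticker interval, and Carbon address are placeholders, and the import path simply mirrors the file's location in this patch:

```go
package main

import (
	"time"

	"github.com/go-kit/kit/log"
	"github.com/go-kit/kit/metrics/graphite"
)

func main() {
	// Prefix, interval, and Carbon address are illustrative placeholders.
	g := graphite.New("myservice.", log.NewNopLogger())

	requests := g.NewCounter("requests")                       // emitted as myservice.requests
	queueDepth := g.NewGauge("queue_depth")                    // last set value per write
	duration := g.NewHistogram("request_duration_seconds", 50) // emitted as .p50/.p90/.p95/.p99 gauges

	report := time.NewTicker(5 * time.Second)
	defer report.Stop()
	go g.SendLoop(report.C, "tcp", "carbon.internal:2003")

	requests.Add(1)
	queueDepth.Set(3)
	duration.Observe(0.023)
	select {} // a real service would serve requests here instead of blocking
}
```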
- if err != nil { - t.Fatalf("unable to create test histogram: %v", err) - } - h = h.With(metrics.Field{Key: "ignored", Value: "field"}) - const seed, mean, stdev int64 = 424242, 50, 10 - teststat.PopulateNormalHistogram(t, h, seed, mean, stdev) - - // flush the current metrics into a buffer to examine - var b bytes.Buffer - e.flush(&b) - teststat.AssertGraphiteNormalHistogram(t, prefix, name, mean, stdev, quantiles, b.String()) -} - func TestCounter(t *testing.T) { - var ( - prefix = "prefix." - name = "m" - value = 123 - e = NewEmitter("", "", prefix, time.Second, log.NewNopLogger()) - b bytes.Buffer - ) - e.NewCounter(name).With(metrics.Field{Key: "ignored", Value: "field"}).Add(uint64(value)) - e.flush(&b) - want := fmt.Sprintf("%s%s.count %d", prefix, name, value) - payload := b.String() - if !strings.HasPrefix(payload, want) { - t.Errorf("counter %s want\n%s, have\n%s", name, want, payload) + prefix, name := "abc.", "def" + label, value := "label", "value" // ignored for Graphite + regex := `^` + prefix + name + ` ([0-9\.]+) [0-9]+$` + g := New(prefix, log.NewNopLogger()) + counter := g.NewCounter(name).With(label, value) + valuef := teststat.SumLines(g, regex) + if err := teststat.TestCounter(counter, valuef); err != nil { + t.Fatal(err) } } func TestGauge(t *testing.T) { - var ( - prefix = "prefix." - name = "xyz" - value = 54321 - delta = 12345 - e = NewEmitter("", "", prefix, time.Second, log.NewNopLogger()) - b bytes.Buffer - g = e.NewGauge(name).With(metrics.Field{Key: "ignored", Value: "field"}) - ) - - g.Set(float64(value)) - g.Add(float64(delta)) - - e.flush(&b) - payload := b.String() - - want := fmt.Sprintf("%s%s %d", prefix, name, value+delta) - if !strings.HasPrefix(payload, want) { - t.Errorf("gauge %s want\n%s, have\n%s", name, want, payload) + prefix, name := "ghi.", "jkl" + label, value := "xyz", "abc" // ignored for Graphite + regex := `^` + prefix + name + ` ([0-9\.]+) [0-9]+$` + g := New(prefix, log.NewNopLogger()) + gauge := g.NewGauge(name).With(label, value) + valuef := teststat.LastLine(g, regex) + if err := teststat.TestGauge(gauge, valuef); err != nil { + t.Fatal(err) } } -func TestEmitterStops(t *testing.T) { - e := NewEmitter("foo", "bar", "baz", time.Second, log.NewNopLogger()) - time.Sleep(100 * time.Millisecond) - e.Stop() +func TestHistogram(t *testing.T) { + // The histogram test is actually like 4 gauge tests. 
+ prefix, name := "statsd.", "histogram_test" + label, value := "abc", "def" // ignored for Graphite + re50 := regexp.MustCompile(prefix + name + `.p50 ([0-9\.]+) [0-9]+`) + re90 := regexp.MustCompile(prefix + name + `.p90 ([0-9\.]+) [0-9]+`) + re95 := regexp.MustCompile(prefix + name + `.p95 ([0-9\.]+) [0-9]+`) + re99 := regexp.MustCompile(prefix + name + `.p99 ([0-9\.]+) [0-9]+`) + g := New(prefix, log.NewNopLogger()) + histogram := g.NewHistogram(name, 50).With(label, value) + quantiles := func() (float64, float64, float64, float64) { + var buf bytes.Buffer + g.WriteTo(&buf) + match50 := re50.FindStringSubmatch(buf.String()) + p50, _ := strconv.ParseFloat(match50[1], 64) + match90 := re90.FindStringSubmatch(buf.String()) + p90, _ := strconv.ParseFloat(match90[1], 64) + match95 := re95.FindStringSubmatch(buf.String()) + p95, _ := strconv.ParseFloat(match95[1], 64) + match99 := re99.FindStringSubmatch(buf.String()) + p99, _ := strconv.ParseFloat(match99[1], 64) + return p50, p90, p95, p99 + } + if err := teststat.TestHistogram(histogram, quantiles, 0.01); err != nil { + t.Fatal(err) + } } diff --git a/metrics/influx/influx.go b/metrics/influx/influx.go new file mode 100644 index 0000000..bdc1b51 --- /dev/null +++ b/metrics/influx/influx.go @@ -0,0 +1,249 @@ +// Package influx provides an InfluxDB implementation for metrics. The model is +// similar to other push-based instrumentation systems. Observations are +// aggregated locally and emitted to the Influx server on regular intervals. +package influx + +import ( + "time" + + influxdb "github.com/influxdata/influxdb/client/v2" + + "github.com/go-kit/kit/log" + "github.com/go-kit/kit/metrics3" + "github.com/go-kit/kit/metrics3/internal/lv" +) + +// Influx is a store for metrics that will be emitted to an Influx database. +// +// Influx is a general purpose time-series database, and has no native concepts +// of counters, gauges, or histograms. Counters are modeled as a timeseries with +// one data point per flush, with a "count" field that reflects all adds since +// the last flush. Gauges are modeled as a timeseries with one data point per +// flush, with a "value" field that reflects the current state of the gauge. +// Histograms are modeled as a timeseries with one data point per observation, +// with a "value" field that reflects each observation; use e.g. the HISTOGRAM +// aggregate function to compute histograms. +// +// Influx tags are immutable, attached to the Influx object, and given to each +// metric at construction. Influx fields are mapped to Go kit label values, and +// may be mutated via With functions. Actual metric values are provided as +// fields with specific names depending on the metric. +// +// All observations are collected in memory locally, and flushed on demand. +type Influx struct { + counters *lv.Space + gauges *lv.Space + histograms *lv.Space + tags map[string]string + conf influxdb.BatchPointsConfig + logger log.Logger +} + +// New returns an Influx, ready to create metrics and collect observations. Tags +// are applied to all metrics created from this object. The BatchPointsConfig is +// used during flushing. +func New(tags map[string]string, conf influxdb.BatchPointsConfig, logger log.Logger) *Influx { + return &Influx{ + counters: lv.NewSpace(), + gauges: lv.NewSpace(), + histograms: lv.NewSpace(), + tags: tags, + conf: conf, + logger: logger, + } +} + +// NewCounter returns an Influx counter. 
+func (in *Influx) NewCounter(name string) *Counter { + return &Counter{ + name: name, + obs: in.counters.Observe, + } +} + +// NewGauge returns an Influx gauge. +func (in *Influx) NewGauge(name string) *Gauge { + return &Gauge{ + name: name, + obs: in.gauges.Observe, + } +} + +// NewHistogram returns an Influx histogram. +func (in *Influx) NewHistogram(name string) *Histogram { + return &Histogram{ + name: name, + obs: in.histograms.Observe, + } +} + +// BatchPointsWriter captures a subset of the influxdb.Client methods necessary +// for emitting metrics observations. +type BatchPointsWriter interface { + Write(influxdb.BatchPoints) error +} + +// WriteLoop is a helper method that invokes WriteTo to the passed writer every +// time the passed channel fires. This method blocks until the channel is +// closed, so clients probably want to run it in its own goroutine. For typical +// usage, create a time.Ticker and pass its C channel to this method. +func (in *Influx) WriteLoop(c <-chan time.Time, w BatchPointsWriter) { + for range c { + if err := in.WriteTo(w); err != nil { + in.logger.Log("during", "WriteTo", "err", err) + } + } +} + +// WriteTo flushes the buffered content of the metrics to the writer, in an +// Influx BatchPoints format. WriteTo abides best-effort semantics, so +// observations are lost if there is a problem with the write. Clients should be +// sure to call WriteTo regularly, ideally through the WriteLoop helper method. +func (in *Influx) WriteTo(w BatchPointsWriter) (err error) { + bp, err := influxdb.NewBatchPoints(in.conf) + if err != nil { + return err + } + + now := time.Now() + + in.counters.Reset().Walk(func(name string, lvs lv.LabelValues, values []float64) bool { + fields := fieldsFrom(lvs) + fields["count"] = sum(values) + var p *influxdb.Point + p, err = influxdb.NewPoint(name, in.tags, fields, now) + if err != nil { + return false + } + bp.AddPoint(p) + return true + }) + if err != nil { + return err + } + + in.gauges.Reset().Walk(func(name string, lvs lv.LabelValues, values []float64) bool { + fields := fieldsFrom(lvs) + fields["value"] = last(values) + var p *influxdb.Point + p, err = influxdb.NewPoint(name, in.tags, fields, now) + if err != nil { + return false + } + bp.AddPoint(p) + return true + }) + if err != nil { + return err + } + + in.histograms.Reset().Walk(func(name string, lvs lv.LabelValues, values []float64) bool { + fields := fieldsFrom(lvs) + ps := make([]*influxdb.Point, len(values)) + for i, v := range values { + fields["value"] = v // overwrite each time + ps[i], err = influxdb.NewPoint(name, in.tags, fields, now) + if err != nil { + return false + } + } + bp.AddPoints(ps) + return true + }) + if err != nil { + return err + } + + return w.Write(bp) +} + +func fieldsFrom(labelValues []string) map[string]interface{} { + if len(labelValues)%2 != 0 { + panic("fieldsFrom received a labelValues with an odd number of strings") + } + fields := make(map[string]interface{}, len(labelValues)/2) + for i := 0; i < len(labelValues); i += 2 { + fields[labelValues[i]] = labelValues[i+1] + } + return fields +} + +func sum(a []float64) float64 { + var v float64 + for _, f := range a { + v += f + } + return v +} + +func last(a []float64) float64 { + return a[len(a)-1] +} + +type observeFunc func(name string, lvs lv.LabelValues, value float64) + +// Counter is an Influx counter. Observations are forwarded to an Influx +// object, and aggregated (summed) per timeseries. 
+type Counter struct {
+	name string
+	lvs  lv.LabelValues
+	obs  observeFunc
+}
+
+// With implements metrics.Counter.
+func (c *Counter) With(labelValues ...string) metrics.Counter {
+	return &Counter{
+		name: c.name,
+		lvs:  c.lvs.With(labelValues...),
+		obs:  c.obs,
+	}
+}
+
+// Add implements metrics.Counter.
+func (c *Counter) Add(delta float64) {
+	c.obs(c.name, c.lvs, delta)
+}
+
+// Gauge is an Influx gauge. Observations are forwarded to an Influx
+// object, and aggregated (the last observation selected) per timeseries.
+type Gauge struct {
+	name string
+	lvs  lv.LabelValues
+	obs  observeFunc
+}
+
+// With implements metrics.Gauge.
+func (g *Gauge) With(labelValues ...string) metrics.Gauge {
+	return &Gauge{
+		name: g.name,
+		lvs:  g.lvs.With(labelValues...),
+		obs:  g.obs,
+	}
+}
+
+// Set implements metrics.Gauge.
+func (g *Gauge) Set(value float64) {
+	g.obs(g.name, g.lvs, value)
+}
+
+// Histogram is an Influx histogram. Observations are aggregated into a
+// generic.Histogram and emitted as per-quantile gauges to the Influx server.
+type Histogram struct {
+	name string
+	lvs  lv.LabelValues
+	obs  observeFunc
+}
+
+// With implements metrics.Histogram.
+func (h *Histogram) With(labelValues ...string) metrics.Histogram {
+	return &Histogram{
+		name: h.name,
+		lvs:  h.lvs.With(labelValues...),
+		obs:  h.obs,
+	}
+}
+
+// Observe implements metrics.Histogram.
+func (h *Histogram) Observe(value float64) {
+	h.obs(h.name, h.lvs, value)
+}
diff --git a/metrics/influx/influx_test.go b/metrics/influx/influx_test.go
new file mode 100644
index 0000000..1c44aed
--- /dev/null
+++ b/metrics/influx/influx_test.go
@@ -0,0 +1,92 @@
+package influx
+
+import (
+	"bytes"
+	"fmt"
+	"regexp"
+	"strconv"
+	"strings"
+	"testing"
+
+	"github.com/go-kit/kit/log"
+	"github.com/go-kit/kit/metrics3/generic"
+	"github.com/go-kit/kit/metrics3/teststat"
+	influxdb "github.com/influxdata/influxdb/client/v2"
+)
+
+func TestCounter(t *testing.T) {
+	in := New(map[string]string{"a": "b"}, influxdb.BatchPointsConfig{}, log.NewNopLogger())
+	re := regexp.MustCompile(`influx_counter,a=b count=([0-9\.]+) [0-9]+`) // reverse-engineered :\
+	counter := in.NewCounter("influx_counter")
+	value := func() float64 {
+		client := &bufWriter{}
+		in.WriteTo(client)
+		match := re.FindStringSubmatch(client.buf.String())
+		f, _ := strconv.ParseFloat(match[1], 64)
+		return f
+	}
+	if err := teststat.TestCounter(counter, value); err != nil {
+		t.Fatal(err)
+	}
+}
+
+func TestGauge(t *testing.T) {
+	in := New(map[string]string{"foo": "alpha"}, influxdb.BatchPointsConfig{}, log.NewNopLogger())
+	re := regexp.MustCompile(`influx_gauge,foo=alpha value=([0-9\.]+) [0-9]+`)
+	gauge := in.NewGauge("influx_gauge")
+	value := func() float64 {
+		client := &bufWriter{}
+		in.WriteTo(client)
+		match := re.FindStringSubmatch(client.buf.String())
+		f, _ := strconv.ParseFloat(match[1], 64)
+		return f
+	}
+	if err := teststat.TestGauge(gauge, value); err != nil {
+		t.Fatal(err)
+	}
+}
+
+func TestHistogram(t *testing.T) {
+	in := New(map[string]string{"foo": "alpha"}, influxdb.BatchPointsConfig{}, log.NewNopLogger())
+	re := regexp.MustCompile(`influx_histogram,foo=alpha bar="beta",value=([0-9\.]+) [0-9]+`)
+	histogram := in.NewHistogram("influx_histogram").With("bar", "beta")
+	quantiles := func() (float64, float64, float64, float64) {
+		w := &bufWriter{}
+		in.WriteTo(w)
+		h := generic.NewHistogram("h", 50)
+		matches := re.FindAllStringSubmatch(w.buf.String(), -1)
+		for _, match := range matches {
+			f, _ := strconv.ParseFloat(match[1], 64) +
h.Observe(f) + } + return h.Quantile(0.50), h.Quantile(0.90), h.Quantile(0.95), h.Quantile(0.99) + } + if err := teststat.TestHistogram(histogram, quantiles, 0.01); err != nil { + t.Fatal(err) + } +} + +func TestHistogramLabels(t *testing.T) { + in := New(map[string]string{}, influxdb.BatchPointsConfig{}, log.NewNopLogger()) + h := in.NewHistogram("foo") + h.Observe(123) + h.With("abc", "xyz").Observe(456) + w := &bufWriter{} + if err := in.WriteTo(w); err != nil { + t.Fatal(err) + } + if want, have := 2, len(strings.Split(strings.TrimSpace(w.buf.String()), "\n")); want != have { + t.Errorf("want %d, have %d", want, have) + } +} + +type bufWriter struct { + buf bytes.Buffer +} + +func (w *bufWriter) Write(bp influxdb.BatchPoints) error { + for _, p := range bp.Points() { + fmt.Fprintf(&w.buf, p.String()+"\n") + } + return nil +} diff --git a/metrics/influxdb/influxdb.go b/metrics/influxdb/influxdb.go deleted file mode 100644 index 26bccc4..0000000 --- a/metrics/influxdb/influxdb.go +++ /dev/null @@ -1,254 +0,0 @@ -// Package influxdb implements a InfluxDB backend for package metrics. -package influxdb - -import ( - "fmt" - "sort" - "sync" - "time" - - "github.com/codahale/hdrhistogram" - stdinflux "github.com/influxdata/influxdb/client/v2" - - "github.com/go-kit/kit/metrics" -) - -type counter struct { - key string - tags []metrics.Field - fields []metrics.Field - value uint64 - bp stdinflux.BatchPoints -} - -// NewCounter returns a Counter that writes values in the reportInterval -// to the given InfluxDB client, utilizing batching. -func NewCounter(client stdinflux.Client, bp stdinflux.BatchPoints, key string, tags []metrics.Field, reportInterval time.Duration) metrics.Counter { - return NewCounterTick(client, bp, key, tags, time.Tick(reportInterval)) -} - -// NewCounterTick is the same as NewCounter, but allows the user to pass a own -// channel to trigger the write process to the client. -func NewCounterTick(client stdinflux.Client, bp stdinflux.BatchPoints, key string, tags []metrics.Field, reportTicker <-chan time.Time) metrics.Counter { - c := &counter{ - key: key, - tags: tags, - value: 0, - bp: bp, - } - go watch(client, bp, reportTicker) - return c -} - -func (c *counter) Name() string { - return c.key -} - -func (c *counter) With(field metrics.Field) metrics.Counter { - return &counter{ - key: c.key, - tags: c.tags, - value: c.value, - bp: c.bp, - fields: append(c.fields, field), - } -} - -func (c *counter) Add(delta uint64) { - c.value = c.value + delta - - tags := map[string]string{} - - for _, tag := range c.tags { - tags[tag.Key] = tag.Value - } - - fields := map[string]interface{}{} - - for _, field := range c.fields { - fields[field.Key] = field.Value - } - fields["value"] = c.value - pt, _ := stdinflux.NewPoint(c.key, tags, fields, time.Now()) - c.bp.AddPoint(pt) -} - -type gauge struct { - key string - tags []metrics.Field - fields []metrics.Field - value float64 - bp stdinflux.BatchPoints -} - -// NewGauge creates a new gauge instance, reporting points in the defined reportInterval. -func NewGauge(client stdinflux.Client, bp stdinflux.BatchPoints, key string, tags []metrics.Field, reportInterval time.Duration) metrics.Gauge { - return NewGaugeTick(client, bp, key, tags, time.Tick(reportInterval)) -} - -// NewGaugeTick is the same as NewGauge with a ticker channel instead of a interval. 
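The new influx adapter is used in the same construct-then-flush pattern as the other push backends. A sketch under assumed values: the tags, database name, and server address are placeholders, the import path mirrors the file's location in this patch, and it leans on the stock influxdb client/v2 HTTP client, whose Write method should satisfy BatchPointsWriter:

```go
package main

import (
	"time"

	influxdb "github.com/influxdata/influxdb/client/v2"

	"github.com/go-kit/kit/log"
	"github.com/go-kit/kit/metrics/influx"
)

func main() {
	// Tags ride along on every point; the database name is a placeholder.
	in := influx.New(
		map[string]string{"host": "web01"},
		influxdb.BatchPointsConfig{Database: "myservice"},
		log.NewNopLogger(),
	)

	requests := in.NewCounter("requests")                    // summed into a "count" field per flush
	queueDepth := in.NewGauge("queue_depth")                 // last value wins, as a "value" field
	duration := in.NewHistogram("request_duration_seconds") // one point per observation

	client, err := influxdb.NewHTTPClient(influxdb.HTTPConfig{Addr: "http://influx.internal:8086"})
	if err != nil {
		panic(err)
	}

	report := time.NewTicker(10 * time.Second)
	defer report.Stop()
	go in.WriteLoop(report.C, client)

	requests.With("method", "GET").Add(1) // label values become Influx fields
	queueDepth.Set(3)
	duration.Observe(0.023)
	select {}
}
```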
-func NewGaugeTick(client stdinflux.Client, bp stdinflux.BatchPoints, key string, tags []metrics.Field, reportTicker <-chan time.Time) metrics.Gauge { - g := &gauge{ - key: key, - tags: tags, - value: 0, - bp: bp, - } - go watch(client, bp, reportTicker) - return g -} - -func (g *gauge) Name() string { - return g.key -} - -func (g *gauge) With(field metrics.Field) metrics.Gauge { - return &gauge{ - key: g.key, - tags: g.tags, - value: g.value, - bp: g.bp, - fields: append(g.fields, field), - } -} - -func (g *gauge) Add(delta float64) { - g.value = g.value + delta - g.createPoint() -} - -func (g *gauge) Set(value float64) { - g.value = value - g.createPoint() -} - -func (g *gauge) Get() float64 { - return g.value -} - -func (g *gauge) createPoint() { - tags := map[string]string{} - - for _, tag := range g.tags { - tags[tag.Key] = tag.Value - } - - fields := map[string]interface{}{} - - for _, field := range g.fields { - fields[field.Key] = field.Value - } - fields["value"] = g.value - pt, _ := stdinflux.NewPoint(g.key, tags, fields, time.Now()) - g.bp.AddPoint(pt) -} - -// The implementation from histogram is taken from metrics/expvar - -type histogram struct { - mu sync.Mutex - hist *hdrhistogram.WindowedHistogram - - key string - gauges map[int]metrics.Gauge -} - -// NewHistogram is taken from http://github.com/codahale/metrics. It returns a -// windowed HDR histogram which drops data older than five minutes. -// -// The histogram exposes metrics for each passed quantile as gauges. Quantiles -// should be integers in the range 1..99. The gauge names are assigned by -// using the passed name as a prefix and appending "_pNN" e.g. "_p50". -func NewHistogram(client stdinflux.Client, bp stdinflux.BatchPoints, key string, tags []metrics.Field, - reportInterval time.Duration, minValue, maxValue int64, sigfigs int, quantiles ...int) metrics.Histogram { - return NewHistogramTick(client, bp, key, tags, time.Tick(reportInterval), minValue, maxValue, sigfigs, quantiles...) -} - -// NewHistogramTick is the same as NewHistoGram, but allows to pass a custom reportTicker. 
-func NewHistogramTick(client stdinflux.Client, bp stdinflux.BatchPoints, key string, tags []metrics.Field, - reportTicker <-chan time.Time, minValue, maxValue int64, sigfigs int, quantiles ...int) metrics.Histogram { - gauges := map[int]metrics.Gauge{} - - for _, quantile := range quantiles { - if quantile <= 0 || quantile >= 100 { - panic(fmt.Sprintf("invalid quantile %d", quantile)) - } - gauges[quantile] = NewGaugeTick(client, bp, fmt.Sprintf("%s_p%02d", key, quantile), tags, reportTicker) - } - - h := &histogram{ - hist: hdrhistogram.NewWindowed(5, minValue, maxValue, sigfigs), - key: key, - gauges: gauges, - } - - go h.rotateLoop(1 * time.Minute) - return h -} - -func (h *histogram) Name() string { - return h.key -} - -func (h *histogram) With(field metrics.Field) metrics.Histogram { - for q, gauge := range h.gauges { - h.gauges[q] = gauge.With(field) - } - - return h -} - -func (h *histogram) Observe(value int64) { - h.mu.Lock() - err := h.hist.Current.RecordValue(value) - h.mu.Unlock() - - if err != nil { - panic(err.Error()) - } - - for q, gauge := range h.gauges { - gauge.Set(float64(h.hist.Current.ValueAtQuantile(float64(q)))) - } -} - -func (h *histogram) Distribution() ([]metrics.Bucket, []metrics.Quantile) { - bars := h.hist.Merge().Distribution() - buckets := make([]metrics.Bucket, len(bars)) - for i, bar := range bars { - buckets[i] = metrics.Bucket{ - From: bar.From, - To: bar.To, - Count: bar.Count, - } - } - quantiles := make([]metrics.Quantile, 0, len(h.gauges)) - for quantile, gauge := range h.gauges { - quantiles = append(quantiles, metrics.Quantile{ - Quantile: quantile, - Value: int64(gauge.Get()), - }) - } - sort.Sort(quantileSlice(quantiles)) - return buckets, quantiles -} - -func (h *histogram) rotateLoop(d time.Duration) { - for range time.Tick(d) { - h.mu.Lock() - h.hist.Rotate() - h.mu.Unlock() - } -} - -type quantileSlice []metrics.Quantile - -func (a quantileSlice) Len() int { return len(a) } -func (a quantileSlice) Less(i, j int) bool { return a[i].Quantile < a[j].Quantile } -func (a quantileSlice) Swap(i, j int) { a[i], a[j] = a[j], a[i] } - -func watch(client stdinflux.Client, bp stdinflux.BatchPoints, reportTicker <-chan time.Time) { - for range reportTicker { - client.Write(bp) - } -} diff --git a/metrics/influxdb/influxdb_test.go b/metrics/influxdb/influxdb_test.go deleted file mode 100644 index 5bb4b3f..0000000 --- a/metrics/influxdb/influxdb_test.go +++ /dev/null @@ -1,348 +0,0 @@ -package influxdb_test - -import ( - "reflect" - "sync" - "testing" - "time" - - stdinflux "github.com/influxdata/influxdb/client/v2" - - "github.com/go-kit/kit/metrics" - "github.com/go-kit/kit/metrics/influxdb" -) - -func TestCounter(t *testing.T) { - expectedName := "test_counter" - expectedTags := map[string]string{} - expectedFields := []map[string]interface{}{ - {"value": "2"}, - {"value": "7"}, - {"value": "10"}, - } - - cl := &mockClient{} - cl.Add(3) - bp, _ := stdinflux.NewBatchPoints(stdinflux.BatchPointsConfig{ - Database: "testing", - Precision: "s", - }) - - tags := []metrics.Field{} - for key, value := range expectedTags { - tags = append(tags, metrics.Field{Key: key, Value: value}) - } - - triggerChan := make(chan time.Time) - counter := influxdb.NewCounterTick(cl, bp, expectedName, tags, triggerChan) - counter.Add(2) - counter.Add(5) - counter.Add(3) - - triggerChan <- time.Now() - cl.Wait() - - for i := 0; i <= 2; i++ { - givenPoint := mockPoint{ - Name: expectedName, - Tags: expectedTags, - Fields: expectedFields[i], - } - comparePoint(t, i, givenPoint, 
cl.Points[i]) - } -} - -func TestCounterWithTags(t *testing.T) { - expectedName := "test_counter" - expectedTags := map[string]string{ - "key1": "value1", - "key2": "value2", - } - expectedFields := []map[string]interface{}{ - {"value": "2"}, - {"Test": "Test", "value": "7"}, - {"Test": "Test", "value": "10"}, - } - - cl := &mockClient{} - cl.Add(3) - bp, _ := stdinflux.NewBatchPoints(stdinflux.BatchPointsConfig{ - Database: "testing", - Precision: "s", - }) - - tags := []metrics.Field{} - for key, value := range expectedTags { - tags = append(tags, metrics.Field{Key: key, Value: value}) - } - - triggerChan := make(chan time.Time) - counter := influxdb.NewCounterTick(cl, bp, expectedName, tags, triggerChan) - counter.Add(2) - counter = counter.With(metrics.Field{Key: "Test", Value: "Test"}) - counter.Add(5) - counter.Add(3) - - triggerChan <- time.Now() - cl.Wait() - - for i := 0; i <= 2; i++ { - givenPoint := mockPoint{ - Name: expectedName, - Tags: expectedTags, - Fields: expectedFields[i], - } - comparePoint(t, i, givenPoint, cl.Points[i]) - } -} - -func TestGauge(t *testing.T) { - expectedName := "test_gauge" - expectedTags := map[string]string{} - expectedFields := []map[string]interface{}{ - {"value": 2.1}, - {"value": 1.0}, - {"value": 10.5}, - } - - cl := &mockClient{} - cl.Add(3) - bp, _ := stdinflux.NewBatchPoints(stdinflux.BatchPointsConfig{ - Database: "testing", - Precision: "s", - }) - - tags := []metrics.Field{} - for key, value := range expectedTags { - tags = append(tags, metrics.Field{Key: key, Value: value}) - } - - triggerChan := make(chan time.Time) - counter := influxdb.NewGaugeTick(cl, bp, expectedName, tags, triggerChan) - counter.Add(2.1) - counter.Set(1) - counter.Add(9.5) - - triggerChan <- time.Now() - cl.Wait() - - for i := 0; i <= 2; i++ { - givenPoint := mockPoint{ - Name: expectedName, - Tags: expectedTags, - Fields: expectedFields[i], - } - comparePoint(t, i, givenPoint, cl.Points[i]) - } -} - -func TestGaugeWithTags(t *testing.T) { - expectedName := "test_counter" - expectedTags := map[string]string{ - "key1": "value1", - "key2": "value2", - } - expectedFields := []map[string]interface{}{ - {"value": 2.3}, - {"Test": "Test", "value": 1.0}, - {"Test": "Test", "value": 13.6}, - } - - cl := &mockClient{} - cl.Add(3) - bp, _ := stdinflux.NewBatchPoints(stdinflux.BatchPointsConfig{ - Database: "testing", - Precision: "s", - }) - - tags := []metrics.Field{} - for key, value := range expectedTags { - tags = append(tags, metrics.Field{Key: key, Value: value}) - } - - triggerChan := make(chan time.Time) - gauge := influxdb.NewGaugeTick(cl, bp, expectedName, tags, triggerChan) - gauge.Add(2.3) - gauge = gauge.With(metrics.Field{Key: "Test", Value: "Test"}) - gauge.Set(1) - gauge.Add(12.6) - - triggerChan <- time.Now() - cl.Wait() - - for i := 0; i <= 2; i++ { - givenPoint := mockPoint{ - Name: expectedName, - Tags: expectedTags, - Fields: expectedFields[i], - } - comparePoint(t, i, givenPoint, cl.Points[i]) - } -} - -func TestHistogram(t *testing.T) { - expectedName := "test_histogram" - expectedTags := map[string]string{} - expectedFields := []map[string]map[string]interface{}{ - { - "test_histogram_p50": {"value": 5.0}, - "test_histogram_p90": {"value": 5.0}, - "test_histogram_p95": {"value": 5.0}, - "test_histogram_p99": {"value": 5.0}, - }, - { - "test_histogram_p50": {"Test": "Test", "value": 5.0}, - "test_histogram_p90": {"Test": "Test", "value": 10.0}, - "test_histogram_p95": {"Test": "Test", "value": 10.0}, - "test_histogram_p99": {"Test": "Test", "value": 
10.0}, - }, - { - "test_histogram_p50": {"Test": "Test", "value": 5.0}, - "test_histogram_p90": {"Test": "Test", "value": 10.0}, - "test_histogram_p95": {"Test": "Test", "value": 10.0}, - "test_histogram_p99": {"Test": "Test", "value": 10.0}, - }, - } - quantiles := []int{50, 90, 95, 99} - - cl := &mockClient{} - cl.Add(12) - bp, _ := stdinflux.NewBatchPoints(stdinflux.BatchPointsConfig{ - Database: "testing", - Precision: "s", - }) - - tags := []metrics.Field{} - for key, value := range expectedTags { - tags = append(tags, metrics.Field{Key: key, Value: value}) - } - - triggerChan := make(chan time.Time) - histogram := influxdb.NewHistogramTick(cl, bp, expectedName, tags, triggerChan, 0, 100, 3, quantiles...) - histogram.Observe(5) - histogram = histogram.With(metrics.Field{Key: "Test", Value: "Test"}) - histogram.Observe(10) - histogram.Observe(4) - triggerChan <- time.Now() - cl.Wait() - - for i := 0; i <= 11; i++ { - actualName := cl.Points[i].Name() - givenName := expectedName + actualName[len(actualName)-4:] - givenPoint := mockPoint{ - Name: givenName, - Tags: expectedTags, - Fields: expectedFields[i/4][actualName], - } - comparePoint(t, i, givenPoint, cl.Points[i]) - } -} - -func TestHistogramWithTags(t *testing.T) { - expectedName := "test_histogram" - expectedTags := map[string]string{ - "key1": "value1", - "key2": "value2", - } - expectedFields := []map[string]map[string]interface{}{ - { - "test_histogram_p50": {"value": 5.0}, - "test_histogram_p90": {"value": 5.0}, - "test_histogram_p95": {"value": 5.0}, - "test_histogram_p99": {"value": 5.0}, - }, - { - "test_histogram_p50": {"Test": "Test", "value": 5.0}, - "test_histogram_p90": {"Test": "Test", "value": 10.0}, - "test_histogram_p95": {"Test": "Test", "value": 10.0}, - "test_histogram_p99": {"Test": "Test", "value": 10.0}, - }, - { - "test_histogram_p50": {"Test": "Test", "value": 5.0}, - "test_histogram_p90": {"Test": "Test", "value": 10.0}, - "test_histogram_p95": {"Test": "Test", "value": 10.0}, - "test_histogram_p99": {"Test": "Test", "value": 10.0}, - }, - } - quantiles := []int{50, 90, 95, 99} - - cl := &mockClient{} - cl.Add(12) - bp, _ := stdinflux.NewBatchPoints(stdinflux.BatchPointsConfig{ - Database: "testing", - Precision: "s", - }) - - tags := []metrics.Field{} - for key, value := range expectedTags { - tags = append(tags, metrics.Field{Key: key, Value: value}) - } - - triggerChan := make(chan time.Time) - histogram := influxdb.NewHistogramTick(cl, bp, expectedName, tags, triggerChan, 0, 100, 3, quantiles...) 
- histogram.Observe(5) - histogram = histogram.With(metrics.Field{Key: "Test", Value: "Test"}) - histogram.Observe(10) - histogram.Observe(4) - triggerChan <- time.Now() - cl.Wait() - - for i := 0; i <= 11; i++ { - actualName := cl.Points[i].Name() - givenName := expectedName + actualName[len(actualName)-4:] - givenPoint := mockPoint{ - Name: givenName, - Tags: expectedTags, - Fields: expectedFields[i/4][actualName], - } - comparePoint(t, i, givenPoint, cl.Points[i]) - } -} - -func comparePoint(t *testing.T, i int, expected mockPoint, given stdinflux.Point) { - - if want, have := expected.Name, given.Name(); want != have { - t.Errorf("point %d: want %q, have %q", i, want, have) - } - - if want, have := expected.Tags, given.Tags(); !reflect.DeepEqual(want, have) { - t.Errorf("point %d: want %v, have %v", i, want, have) - } - - if want, have := expected.Fields, given.Fields(); !reflect.DeepEqual(want, have) { - t.Errorf("point %d: want %v, have %v", i, want, have) - } -} - -type mockClient struct { - Points []stdinflux.Point - sync.WaitGroup -} - -func (m *mockClient) Ping(timeout time.Duration) (time.Duration, string, error) { - t := 0 * time.Millisecond - return t, "", nil -} - -func (m *mockClient) Write(bp stdinflux.BatchPoints) error { - for _, p := range bp.Points() { - m.Points = append(m.Points, *p) - m.Done() - } - - return nil -} - -func (m *mockClient) Query(q stdinflux.Query) (*stdinflux.Response, error) { - return nil, nil -} - -func (m *mockClient) Close() error { - return nil -} - -type mockPoint struct { - Name string - Tags map[string]string - Fields map[string]interface{} -} diff --git a/metrics/internal/emitting/buffer.go b/metrics/internal/emitting/buffer.go new file mode 100644 index 0000000..bca8930 --- /dev/null +++ b/metrics/internal/emitting/buffer.go @@ -0,0 +1,94 @@ +package emitting + +import ( + "fmt" + "strings" + "sync" + + "sort" + + "github.com/go-kit/kit/metrics3/generic" +) + +type Buffer struct { + buckets int + + mtx sync.Mutex + counters map[point]*generic.Counter + gauges map[point]*generic.Gauge + histograms map[point]*generic.Histogram +} + +func (b *Buffer) Add(a Add) { + pt := makePoint(a.Name, a.LabelValues) + b.mtx.Lock() + defer b.mtx.Unlock() + c, ok := b.counters[pt] + if !ok { + c = generic.NewCounter(a.Name).With(a.LabelValues...).(*generic.Counter) + } + c.Add(a.Delta) + b.counters[pt] = c +} + +func (b *Buffer) Set(s Set) { + pt := makePoint(s.Name, s.LabelValues) + b.mtx.Lock() + defer b.mtx.Unlock() + g, ok := b.gauges[pt] + if !ok { + g = generic.NewGauge(s.Name).With(s.LabelValues...).(*generic.Gauge) + } + g.Set(s.Value) + b.gauges[pt] = g +} + +func (b *Buffer) Obv(o Obv) { + pt := makePoint(o.Name, o.LabelValues) + b.mtx.Lock() + defer b.mtx.Unlock() + h, ok := b.histograms[pt] + if !ok { + h = generic.NewHistogram(o.Name, b.buckets).With(o.LabelValues...).(*generic.Histogram) + } + h.Observe(o.Value) + b.histograms[pt] = h +} + +// point as in point in N-dimensional vector space; +// a string encoding of name + sorted k/v pairs. 
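The point encoding introduced here is easiest to see as a round trip. A hypothetical in-package sketch follows (makePoint, nameLabelValues, and the point type are unexported, and demoPoint is a made-up name); it assumes label names and values never contain the delimiter runes:

```go
package emitting

import "fmt"

// demoPoint is a hypothetical helper showing the encode/decode round trip.
func demoPoint() {
	// Pairs are sorted by label name before joining.
	pt := makePoint("http_requests", []string{"method", "GET", "code", "200"})
	fmt.Println(pt) // http_requests•code·200•method·GET

	name, lvs := pt.nameLabelValues()
	fmt.Println(name, lvs) // http_requests [code 200 method GET]
}
```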
+type point string + +const ( + recordDelimiter = "•" + fieldDelimiter = "·" +) + +// (foo, [a b c d]) => "foo•a·b•c·d" +func makePoint(name string, labelValues []string) point { + if len(labelValues)%2 != 0 { + panic("odd number of label values; programmer error!") + } + pairs := make([]string, 0, len(labelValues)/2) + for i := 0; i < len(labelValues); i += 2 { + pairs = append(pairs, fmt.Sprintf("%s%s%s", labelValues[i], fieldDelimiter, labelValues[i+1])) + } + sort.Strings(sort.StringSlice(pairs)) + pairs = append([]string{name}, pairs...) + return point(strings.Join(pairs, recordDelimiter)) +} + +// "foo•a·b•c·d" => (foo, [a b c d]) +func (p point) nameLabelValues() (name string, labelValues []string) { + records := strings.Split(string(p), recordDelimiter) + if len(records)%2 != 1 { // always name + even number of label/values + panic("even number of point records; programmer error!") + } + name, records = records[0], records[1:] + labelValues = make([]string, 0, len(records)*2) + for _, record := range records { + fields := strings.SplitN(record, fieldDelimiter, 2) + labelValues = append(labelValues, fields[0], fields[1]) + } + return name, labelValues +} diff --git a/metrics/internal/emitting/metrics.go b/metrics/internal/emitting/metrics.go new file mode 100644 index 0000000..5168c06 --- /dev/null +++ b/metrics/internal/emitting/metrics.go @@ -0,0 +1,107 @@ +package emitting + +import ( + "github.com/go-kit/kit/metrics3" + "github.com/go-kit/kit/metrics3/internal/lv" +) + +type Counter struct { + name string + lvs lv.LabelValues + sampleRate float64 + c chan Add +} + +type Add struct { + Name string + LabelValues []string + SampleRate float64 + Delta float64 +} + +func NewCounter(name string, sampleRate float64, c chan Add) *Counter { + return &Counter{ + name: name, + sampleRate: sampleRate, + c: c, + } +} + +func (c *Counter) With(labelValues ...string) metrics.Counter { + return &Counter{ + name: c.name, + lvs: c.lvs.With(labelValues...), + sampleRate: c.sampleRate, + c: c.c, + } +} + +func (c *Counter) Add(delta float64) { + c.c <- Add{c.name, c.lvs, c.sampleRate, delta} +} + +type Gauge struct { + name string + lvs lv.LabelValues + c chan Set +} + +type Set struct { + Name string + LabelValues []string + Value float64 +} + +func NewGauge(name string, c chan Set) *Gauge { + return &Gauge{ + name: name, + c: c, + } +} + +func (g *Gauge) With(labelValues ...string) metrics.Gauge { + return &Gauge{ + name: g.name, + lvs: g.lvs.With(labelValues...), + c: g.c, + } +} + +func (g *Gauge) Set(value float64) { + g.c <- Set{g.name, g.lvs, value} +} + +type Histogram struct { + name string + lvs lv.LabelValues + sampleRate float64 + c chan Obv +} + +type Obv struct { + Name string + LabelValues []string + SampleRate float64 + Value float64 +} + +func NewHistogram(name string, sampleRate float64, c chan Obv) *Histogram { + return &Histogram{ + name: name, + sampleRate: sampleRate, + c: c, + } +} + +func (h *Histogram) With(labelValues ...string) metrics.Histogram { + return &Histogram{ + name: h.name, + lvs: h.lvs.With(labelValues...), + sampleRate: h.sampleRate, + c: h.c, + } +} + +func (h *Histogram) Observe(value float64) { + h.c <- Obv{h.name, h.lvs, h.sampleRate, value} +} diff --git a/metrics/internal/lv/labelvalues.go b/metrics/internal/lv/labelvalues.go new file mode 100644 index 0000000..8bb1ba0 --- /dev/null +++ b/metrics/internal/lv/labelvalues.go @@ -0,0 +1,14 @@ +package lv + +// LabelValues is a type alias that provides validation on its With method. 
+// Metrics may include it as a member to help them satisfy With semantics and +// save some code duplication. +type LabelValues []string + +// With validates the input, and returns a new aggregate labelValues. +func (lvs LabelValues) With(labelValues ...string) LabelValues { + if len(labelValues)%2 != 0 { + labelValues = append(labelValues, "unknown") + } + return append(lvs, labelValues...) +} diff --git a/metrics/internal/lv/labelvalues_test.go b/metrics/internal/lv/labelvalues_test.go new file mode 100644 index 0000000..5e72609 --- /dev/null +++ b/metrics/internal/lv/labelvalues_test.go @@ -0,0 +1,22 @@ +package lv + +import ( + "strings" + "testing" +) + +func TestWith(t *testing.T) { + var a LabelValues + b := a.With("a", "1") + c := a.With("b", "2", "c", "3") + + if want, have := "", strings.Join(a, ""); want != have { + t.Errorf("With appears to mutate the original LabelValues: want %q, have %q", want, have) + } + if want, have := "a1", strings.Join(b, ""); want != have { + t.Errorf("With does not appear to return the right thing: want %q, have %q", want, have) + } + if want, have := "b2c3", strings.Join(c, ""); want != have { + t.Errorf("With does not appear to return the right thing: want %q, have %q", want, have) + } +} diff --git a/metrics/internal/lv/space.go b/metrics/internal/lv/space.go new file mode 100644 index 0000000..6807347 --- /dev/null +++ b/metrics/internal/lv/space.go @@ -0,0 +1,106 @@ +package lv + +import "sync" + +// NewSpace returns an N-dimensional vector space. +func NewSpace() *Space { + return &Space{} +} + +// Space represents an N-dimensional vector space. Each name and unique label +// value pair establishes a new dimension and point within that dimension. Order +// matters, i.e. [a=1 b=2] identifies a different timeseries than [b=2 a=1]. +type Space struct { + mtx sync.RWMutex + nodes map[string]*node +} + +// Observe locates the time series identified by the name and label values in +// the vector space, and appends the value to the list of observations. +func (s *Space) Observe(name string, lvs LabelValues, value float64) { + s.nodeFor(name).observe(lvs, value) +} + +// Walk traverses the vector space and invokes fn for each non-empty time series +// which is encountered. Return false to abort the traversal. +func (s *Space) Walk(fn func(name string, lvs LabelValues, observations []float64) bool) { + s.mtx.RLock() + defer s.mtx.RUnlock() + for name, node := range s.nodes { + f := func(lvs LabelValues, observations []float64) bool { return fn(name, lvs, observations) } + if !node.walk(LabelValues{}, f) { + return + } + } +} + +// Reset empties the current space and returns a new Space with the old +// contents. Reset a Space to get an immutable copy suitable for walking. +func (s *Space) Reset() *Space { + s.mtx.Lock() + defer s.mtx.Unlock() + n := NewSpace() + n.nodes, s.nodes = s.nodes, n.nodes + return n +} + +func (s *Space) nodeFor(name string) *node { + s.mtx.Lock() + defer s.mtx.Unlock() + if s.nodes == nil { + s.nodes = map[string]*node{} + } + n, ok := s.nodes[name] + if !ok { + n = &node{} + s.nodes[name] = n + } + return n +} + +// node exists at a specific point in the N-dimensional vector space of all +// possible label values. The node collects observations and has child nodes +// with greater specificity. 
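Space is the accumulator that the influx adapter above builds on: observations are recorded against label values, then the space is swapped out and walked on each flush. A hypothetical in-package sketch with made-up metric names:

```go
package lv

import "fmt"

// demoSpace is a hypothetical helper showing the observe/reset/walk cycle.
func demoSpace() {
	s := NewSpace()
	s.Observe("request_duration", LabelValues{"method", "GET"}, 0.021)
	s.Observe("request_duration", LabelValues{"method", "GET"}, 0.034)
	s.Observe("request_duration", LabelValues{"method", "POST"}, 0.105)

	// Reset swaps out the accumulated contents, leaving s empty for new writes,
	// and returns a snapshot that is safe to walk.
	s.Reset().Walk(func(name string, lvs LabelValues, obs []float64) bool {
		fmt.Println(name, lvs, obs)
		return true // continue traversal
	})
}
```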
+type node struct { + mtx sync.RWMutex + observations []float64 + children map[pair]*node +} + +type pair struct{ label, value string } + +func (n *node) observe(lvs LabelValues, value float64) { + n.mtx.Lock() + defer n.mtx.Unlock() + if len(lvs) == 0 { + n.observations = append(n.observations, value) + return + } + if len(lvs) < 2 { + panic("too few LabelValues; programmer error!") + } + head, tail := pair{lvs[0], lvs[1]}, lvs[2:] + if n.children == nil { + n.children = map[pair]*node{} + } + child, ok := n.children[head] + if !ok { + child = &node{} + n.children[head] = child + } + child.observe(tail, value) +} + +func (n *node) walk(lvs LabelValues, fn func(LabelValues, []float64) bool) bool { + n.mtx.RLock() + defer n.mtx.RUnlock() + if len(n.observations) > 0 && !fn(lvs, n.observations) { + return false + } + for p, child := range n.children { + if !child.walk(append(lvs, p.label, p.value), fn) { + return false + } + } + return true +} diff --git a/metrics/internal/lv/space_test.go b/metrics/internal/lv/space_test.go new file mode 100644 index 0000000..0ef5517 --- /dev/null +++ b/metrics/internal/lv/space_test.go @@ -0,0 +1,86 @@ +package lv + +import ( + "strings" + "testing" +) + +func TestSpaceWalkAbort(t *testing.T) { + s := NewSpace() + s.Observe("a", LabelValues{"a", "b"}, 1) + s.Observe("a", LabelValues{"c", "d"}, 2) + s.Observe("a", LabelValues{"e", "f"}, 4) + s.Observe("a", LabelValues{"g", "h"}, 8) + s.Observe("b", LabelValues{"a", "b"}, 16) + s.Observe("b", LabelValues{"c", "d"}, 32) + s.Observe("b", LabelValues{"e", "f"}, 64) + s.Observe("b", LabelValues{"g", "h"}, 128) + + var count int + s.Walk(func(name string, lvs LabelValues, obs []float64) bool { + count++ + return false + }) + if want, have := 1, count; want != have { + t.Errorf("want %d, have %d", want, have) + } +} + +func TestSpaceWalkSums(t *testing.T) { + s := NewSpace() + s.Observe("metric_one", LabelValues{}, 1) + s.Observe("metric_one", LabelValues{}, 2) + s.Observe("metric_one", LabelValues{"a", "1", "b", "2"}, 4) + s.Observe("metric_one", LabelValues{"a", "1", "b", "2"}, 8) + s.Observe("metric_one", LabelValues{}, 16) + s.Observe("metric_one", LabelValues{"a", "1", "b", "3"}, 32) + s.Observe("metric_two", LabelValues{}, 64) + s.Observe("metric_two", LabelValues{}, 128) + s.Observe("metric_two", LabelValues{"a", "1", "b", "2"}, 256) + + have := map[string]float64{} + s.Walk(func(name string, lvs LabelValues, obs []float64) bool { + //t.Logf("%s %v => %v", name, lvs, obs) + have[name+" ["+strings.Join(lvs, "")+"]"] += sum(obs) + return true + }) + + want := map[string]float64{ + "metric_one []": 1 + 2 + 16, + "metric_one [a1b2]": 4 + 8, + "metric_one [a1b3]": 32, + "metric_two []": 64 + 128, + "metric_two [a1b2]": 256, + } + for keystr, wantsum := range want { + if havesum := have[keystr]; wantsum != havesum { + t.Errorf("%q: want %.1f, have %.1f", keystr, wantsum, havesum) + } + delete(want, keystr) + delete(have, keystr) + } + for keystr, havesum := range have { + t.Errorf("%q: unexpected observations recorded: %.1f", keystr, havesum) + } +} + +func TestSpaceWalkSkipsEmptyDimensions(t *testing.T) { + s := NewSpace() + s.Observe("foo", LabelValues{"bar", "1", "baz", "2"}, 123) + + var count int + s.Walk(func(name string, lvs LabelValues, obs []float64) bool { + count++ + return true + }) + if want, have := 1, count; want != have { + t.Errorf("want %d, have %d", want, have) + } +} + +func sum(a []float64) (v float64) { + for _, f := range a { + v += f + } + return +} diff --git 
a/metrics/internal/ratemap/ratemap.go b/metrics/internal/ratemap/ratemap.go new file mode 100644 index 0000000..a955c12 --- /dev/null +++ b/metrics/internal/ratemap/ratemap.go @@ -0,0 +1,40 @@ +// Package ratemap implements a goroutine-safe map of string to float64. It can +// be embedded in implementations whose metrics support fixed sample rates, so +// that an additional parameter doesn't have to be tracked through the e.g. +// lv.Space object. +package ratemap + +import "sync" + +// RateMap is a simple goroutine-safe map of string to float64. +type RateMap struct { + mtx sync.RWMutex + m map[string]float64 +} + +// New returns a new RateMap. +func New() *RateMap { + return &RateMap{ + m: map[string]float64{}, + } +} + +// Set writes the given name/rate pair to the map. +// Set is safe for concurrent access by multiple goroutines. +func (m *RateMap) Set(name string, rate float64) { + m.mtx.Lock() + defer m.mtx.Unlock() + m.m[name] = rate +} + +// Get retrieves the rate for the given name, or 1.0 if none is set. +// Get is safe for concurrent access by multiple goroutines. +func (m *RateMap) Get(name string) float64 { + m.mtx.RLock() + defer m.mtx.RUnlock() + f, ok := m.m[name] + if !ok { + f = 1.0 + } + return f +} diff --git a/metrics/metrics.go b/metrics/metrics.go index f12e56f..f2eb6ea 100644 --- a/metrics/metrics.go +++ b/metrics/metrics.go @@ -1,51 +1,24 @@ package metrics -// Counter is a monotonically-increasing, unsigned, 64-bit integer used to -// capture the number of times an event has occurred. By tracking the deltas -// between measurements of a counter over intervals of time, an aggregation -// layer can derive rates, acceleration, etc. +// Counter describes a metric that accumulates values monotonically. +// An example of a counter is the number of received HTTP requests. type Counter interface { - Name() string - With(Field) Counter - Add(delta uint64) + With(labelValues ...string) Counter + Add(delta float64) } -// Gauge captures instantaneous measurements of something using signed, 64-bit -// floats. The value does not need to be monotonic. +// Gauge describes a metric that takes a specific value over time. +// An example of a gauge is the current depth of a job queue. type Gauge interface { - Name() string - With(Field) Gauge + With(labelValues ...string) Gauge Set(value float64) - Add(delta float64) - Get() float64 } -// Histogram tracks the distribution of a stream of values (e.g. the number of -// milliseconds it takes to handle requests). Implementations may choose to -// add gauges for values at meaningful quantiles. +// Histogram describes a metric that takes repeated observations of the same +// kind of thing, and produces a statistical summary of those observations, +// typically expressed as quantile buckets. An example of a histogram is HTTP +// request latencies. type Histogram interface { - Name() string - With(Field) Histogram - Observe(value int64) - Distribution() ([]Bucket, []Quantile) + With(labelValues ...string) Histogram + Observe(value float64) } - -// Field is a key/value pair associated with an observation for a specific -// metric. Fields may be ignored by implementations. -type Field struct { - Key string - Value string -} - -// Bucket is a range in a histogram which aggregates observations. -type Bucket struct { - From int64 - To int64 - Count int64 -} - -// Quantile is a pair of quantile (0..100) and its observed maximum value. 
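The new core interfaces above (Counter, Gauge, Histogram) replace the old Field-based API with variadic label values and float64 observations. Here is a minimal, hedged sketch of how instrumenting code consumes them; it assumes the `metrics3` import path used elsewhere in this diff and the no-argument constructors of the `discard` backend shown later in the change.

```go
package main

import (
	"time"

	"github.com/go-kit/kit/metrics3"
	"github.com/go-kit/kit/metrics3/discard"
)

// instrument records one request using the new varargs-label API: labels are
// supplied as key/value pairs, and every observed value is a float64.
func instrument(requests metrics.Counter, latency metrics.Histogram, begin time.Time, code string) {
	requests.With("method", "GET", "code", code).Add(1)
	latency.With("method", "GET").Observe(time.Since(begin).Seconds())
}

func main() {
	// The discard backend satisfies the interfaces, which keeps the sketch
	// self-contained; any other backend in this change set would work too.
	instrument(discard.NewCounter(), discard.NewHistogram(), time.Now(), "200")
}
```

The same instrumenting code works unchanged against any backend, which is the point of the interface change.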
-type Quantile struct { - Quantile int // 0..100 - Value int64 -} diff --git a/metrics/multi.go b/metrics/multi.go deleted file mode 100644 index 114d0c1..0000000 --- a/metrics/multi.go +++ /dev/null @@ -1,112 +0,0 @@ -package metrics - -type multiCounter struct { - name string - a []Counter -} - -// NewMultiCounter returns a wrapper around multiple Counters. -func NewMultiCounter(name string, counters ...Counter) Counter { - return &multiCounter{ - name: name, - a: counters, - } -} - -func (c multiCounter) Name() string { return c.name } - -func (c multiCounter) With(f Field) Counter { - next := &multiCounter{ - name: c.name, - a: make([]Counter, len(c.a)), - } - for i, counter := range c.a { - next.a[i] = counter.With(f) - } - return next -} - -func (c multiCounter) Add(delta uint64) { - for _, counter := range c.a { - counter.Add(delta) - } -} - -type multiGauge struct { - name string - a []Gauge -} - -func (g multiGauge) Name() string { return g.name } - -// NewMultiGauge returns a wrapper around multiple Gauges. -func NewMultiGauge(name string, gauges ...Gauge) Gauge { - return &multiGauge{ - name: name, - a: gauges, - } -} - -func (g multiGauge) With(f Field) Gauge { - next := &multiGauge{ - name: g.name, - a: make([]Gauge, len(g.a)), - } - for i, gauge := range g.a { - next.a[i] = gauge.With(f) - } - return next -} - -func (g multiGauge) Set(value float64) { - for _, gauge := range g.a { - gauge.Set(value) - } -} - -func (g multiGauge) Add(delta float64) { - for _, gauge := range g.a { - gauge.Add(delta) - } -} - -func (g multiGauge) Get() float64 { - panic("cannot call Get on a MultiGauge") -} - -type multiHistogram struct { - name string - a []Histogram -} - -// NewMultiHistogram returns a wrapper around multiple Histograms. -func NewMultiHistogram(name string, histograms ...Histogram) Histogram { - return &multiHistogram{ - name: name, - a: histograms, - } -} - -func (h multiHistogram) Name() string { return h.name } - -func (h multiHistogram) With(f Field) Histogram { - next := &multiHistogram{ - name: h.name, - a: make([]Histogram, len(h.a)), - } - for i, histogram := range h.a { - next.a[i] = histogram.With(f) - } - return next -} - -func (h multiHistogram) Observe(value int64) { - for _, histogram := range h.a { - histogram.Observe(value) - } -} - -func (h multiHistogram) Distribution() ([]Bucket, []Quantile) { - // TODO(pb): there may be a way to do this - panic("cannot call Distribution on a MultiHistogram") -} diff --git a/metrics/multi_test.go b/metrics/multi_test.go deleted file mode 100644 index 8102dac..0000000 --- a/metrics/multi_test.go +++ /dev/null @@ -1,233 +0,0 @@ -package metrics_test - -import ( - stdexpvar "expvar" - "fmt" - "io/ioutil" - "math" - "net/http" - "net/http/httptest" - "regexp" - "strconv" - "strings" - "testing" - - stdprometheus "github.com/prometheus/client_golang/prometheus" - - "github.com/go-kit/kit/metrics" - "github.com/go-kit/kit/metrics/expvar" - "github.com/go-kit/kit/metrics/prometheus" - "github.com/go-kit/kit/metrics/teststat" -) - -func TestMultiWith(t *testing.T) { - c := metrics.NewMultiCounter( - "multifoo", - expvar.NewCounter("foo"), - prometheus.NewCounter(stdprometheus.CounterOpts{ - Namespace: "test", - Subsystem: "multi_with", - Name: "bar", - Help: "Bar counter.", - }, []string{"a"}), - ) - - c.Add(1) - c.With(metrics.Field{Key: "a", Value: "1"}).Add(2) - c.Add(3) - - if want, have := strings.Join([]string{ - `# HELP test_multi_with_bar Bar counter.`, - `# TYPE test_multi_with_bar counter`, - `test_multi_with_bar{a="1"} 
2`, - `test_multi_with_bar{a="unknown"} 4`, - }, "\n"), scrapePrometheus(t); !strings.Contains(have, want) { - t.Errorf("Prometheus metric stanza not found or incorrect\n%s", have) - } -} - -func TestMultiCounter(t *testing.T) { - metrics.NewMultiCounter( - "multialpha", - expvar.NewCounter("alpha"), - prometheus.NewCounter(stdprometheus.CounterOpts{ - Namespace: "test", - Subsystem: "multi_counter", - Name: "beta", - Help: "Beta counter.", - }, []string{"a"}), - ).With(metrics.Field{Key: "a", Value: "b"}).Add(123) - - if want, have := "123", stdexpvar.Get("alpha").String(); want != have { - t.Errorf("expvar: want %q, have %q", want, have) - } - - if want, have := strings.Join([]string{ - `# HELP test_multi_counter_beta Beta counter.`, - `# TYPE test_multi_counter_beta counter`, - `test_multi_counter_beta{a="b"} 123`, - }, "\n"), scrapePrometheus(t); !strings.Contains(have, want) { - t.Errorf("Prometheus metric stanza not found or incorrect\n%s", have) - } -} - -func TestMultiGauge(t *testing.T) { - g := metrics.NewMultiGauge( - "multidelta", - expvar.NewGauge("delta"), - prometheus.NewGauge(stdprometheus.GaugeOpts{ - Namespace: "test", - Subsystem: "multi_gauge", - Name: "kappa", - Help: "Kappa gauge.", - }, []string{"a"}), - ) - - f := metrics.Field{Key: "a", Value: "aaa"} - g.With(f).Set(34) - - if want, have := "34", stdexpvar.Get("delta").String(); want != have { - t.Errorf("expvar: want %q, have %q", want, have) - } - if want, have := strings.Join([]string{ - `# HELP test_multi_gauge_kappa Kappa gauge.`, - `# TYPE test_multi_gauge_kappa gauge`, - `test_multi_gauge_kappa{a="aaa"} 34`, - }, "\n"), scrapePrometheus(t); !strings.Contains(have, want) { - t.Errorf("Prometheus metric stanza not found or incorrect\n%s", have) - } - - g.With(f).Add(-40) - - if want, have := "-6", stdexpvar.Get("delta").String(); want != have { - t.Errorf("expvar: want %q, have %q", want, have) - } - if want, have := strings.Join([]string{ - `# HELP test_multi_gauge_kappa Kappa gauge.`, - `# TYPE test_multi_gauge_kappa gauge`, - `test_multi_gauge_kappa{a="aaa"} -6`, - }, "\n"), scrapePrometheus(t); !strings.Contains(have, want) { - t.Errorf("Prometheus metric stanza not found or incorrect\n%s", have) - } -} - -func TestMultiHistogram(t *testing.T) { - quantiles := []int{50, 90, 99} - h := metrics.NewMultiHistogram( - "multiomicron", - expvar.NewHistogram("omicron", 0, 100, 3, quantiles...), - prometheus.NewSummary(stdprometheus.SummaryOpts{ - Namespace: "test", - Subsystem: "multi_histogram", - Name: "nu", - Help: "Nu histogram.", - }, []string{}), - ) - - const seed, mean, stdev int64 = 123, 50, 10 - teststat.PopulateNormalHistogram(t, h, seed, mean, stdev) - assertExpvarNormalHistogram(t, "omicron", mean, stdev, quantiles) - assertPrometheusNormalHistogram(t, `test_multi_histogram_nu`, mean, stdev) -} - -func assertExpvarNormalHistogram(t *testing.T, metricName string, mean, stdev int64, quantiles []int) { - const tolerance int = 2 - for _, quantile := range quantiles { - want := normalValueAtQuantile(mean, stdev, quantile) - s := stdexpvar.Get(fmt.Sprintf("%s_p%02d", metricName, quantile)).String() - have, err := strconv.Atoi(s) - if err != nil { - t.Fatal(err) - } - if int(math.Abs(float64(want)-float64(have))) > tolerance { - t.Errorf("quantile %d: want %d, have %d", quantile, want, have) - } - } -} - -func assertPrometheusNormalHistogram(t *testing.T, metricName string, mean, stdev int64) { - scrape := scrapePrometheus(t) - const tolerance int = 5 // Prometheus approximates higher quantiles badly -_-; - 
for quantileInt, quantileStr := range map[int]string{50: "0.5", 90: "0.9", 99: "0.99"} { - want := normalValueAtQuantile(mean, stdev, quantileInt) - have := getPrometheusQuantile(t, scrape, metricName, quantileStr) - if int(math.Abs(float64(want)-float64(have))) > tolerance { - t.Errorf("%q: want %d, have %d", quantileStr, want, have) - } - } -} - -// https://en.wikipedia.org/wiki/Normal_distribution#Quantile_function -func normalValueAtQuantile(mean, stdev int64, quantile int) int64 { - return int64(float64(mean) + float64(stdev)*math.Sqrt2*erfinv(2*(float64(quantile)/100)-1)) -} - -// https://stackoverflow.com/questions/5971830/need-code-for-inverse-error-function -func erfinv(y float64) float64 { - if y < -1.0 || y > 1.0 { - panic("invalid input") - } - - var ( - a = [4]float64{0.886226899, -1.645349621, 0.914624893, -0.140543331} - b = [4]float64{-2.118377725, 1.442710462, -0.329097515, 0.012229801} - c = [4]float64{-1.970840454, -1.624906493, 3.429567803, 1.641345311} - d = [2]float64{3.543889200, 1.637067800} - ) - - const y0 = 0.7 - var x, z float64 - - if math.Abs(y) == 1.0 { - x = -y * math.Log(0.0) - } else if y < -y0 { - z = math.Sqrt(-math.Log((1.0 + y) / 2.0)) - x = -(((c[3]*z+c[2])*z+c[1])*z + c[0]) / ((d[1]*z+d[0])*z + 1.0) - } else { - if y < y0 { - z = y * y - x = y * (((a[3]*z+a[2])*z+a[1])*z + a[0]) / ((((b[3]*z+b[3])*z+b[1])*z+b[0])*z + 1.0) - } else { - z = math.Sqrt(-math.Log((1.0 - y) / 2.0)) - x = (((c[3]*z+c[2])*z+c[1])*z + c[0]) / ((d[1]*z+d[0])*z + 1.0) - } - x = x - (math.Erf(x)-y)/(2.0/math.SqrtPi*math.Exp(-x*x)) - x = x - (math.Erf(x)-y)/(2.0/math.SqrtPi*math.Exp(-x*x)) - } - - return x -} - -func scrapePrometheus(t *testing.T) string { - server := httptest.NewServer(stdprometheus.UninstrumentedHandler()) - defer server.Close() - - resp, err := http.Get(server.URL) - if err != nil { - t.Fatal(err) - } - defer resp.Body.Close() - - buf, err := ioutil.ReadAll(resp.Body) - if err != nil { - t.Fatal(err) - } - - return strings.TrimSpace(string(buf)) -} - -func getPrometheusQuantile(t *testing.T, scrape, name, quantileStr string) int { - re := name + `{quantile="` + quantileStr + `"} ([0-9]+)` - matches := regexp.MustCompile(re).FindAllStringSubmatch(scrape, -1) - if len(matches) < 1 { - t.Fatalf("%q: quantile %q not found in scrape (%s)", name, quantileStr, re) - } - if len(matches[0]) < 2 { - t.Fatalf("%q: quantile %q not found in scrape (%s)", name, quantileStr, re) - } - i, err := strconv.Atoi(matches[0][1]) - if err != nil { - t.Fatal(err) - } - return i -} diff --git a/metrics/print.go b/metrics/print.go deleted file mode 100644 index d3feae7..0000000 --- a/metrics/print.go +++ /dev/null @@ -1,42 +0,0 @@ -package metrics - -import ( - "fmt" - "io" - "text/tabwriter" -) - -const ( - bs = "####################################################################################################" - bsz = float64(len(bs)) -) - -// PrintDistribution writes a human-readable graph of the distribution to the -// passed writer. 
-func PrintDistribution(w io.Writer, h Histogram) { - buckets, quantiles := h.Distribution() - - fmt.Fprintf(w, "name: %v\n", h.Name()) - fmt.Fprintf(w, "quantiles: %v\n", quantiles) - - var total float64 - for _, bucket := range buckets { - total += float64(bucket.Count) - } - - tw := tabwriter.NewWriter(w, 0, 2, 2, ' ', 0) - fmt.Fprintf(tw, "From\tTo\tCount\tProb\tBar\n") - - axis := "|" - for _, bucket := range buckets { - if bucket.Count > 0 { - p := float64(bucket.Count) / total - fmt.Fprintf(tw, "%d\t%d\t%d\t%.4f\t%s%s\n", bucket.From, bucket.To, bucket.Count, p, axis, bs[:int(p*bsz)]) - axis = "|" - } else { - axis = ":" // show that some bars were skipped - } - } - - tw.Flush() -} diff --git a/metrics/print_test.go b/metrics/print_test.go deleted file mode 100644 index 5291784..0000000 --- a/metrics/print_test.go +++ /dev/null @@ -1,40 +0,0 @@ -package metrics_test - -import ( - "bytes" - "testing" - - "math" - - "github.com/go-kit/kit/metrics" - "github.com/go-kit/kit/metrics/expvar" - "github.com/go-kit/kit/metrics/teststat" -) - -func TestPrintDistribution(t *testing.T) { - var ( - quantiles = []int{50, 90, 95, 99} - h = expvar.NewHistogram("test_print_distribution", 0, 100, 3, quantiles...) - seed = int64(555) - mean = int64(5) - stdev = int64(1) - ) - teststat.PopulateNormalHistogram(t, h, seed, mean, stdev) - - var buf bytes.Buffer - metrics.PrintDistribution(&buf, h) - t.Logf("\n%s\n", buf.String()) - - // Count the number of bar chart characters. - // We should have ca. 100 in any distribution with a small-enough stdev. - - var n int - for _, r := range buf.String() { - if r == '#' { - n++ - } - } - if want, have, tol := 100, n, 5; int(math.Abs(float64(want-have))) > tol { - t.Errorf("want %d, have %d (tolerance %d)", want, have, tol) - } -} diff --git a/metrics/prometheus/prometheus.go b/metrics/prometheus/prometheus.go index 3fc774d..ab161eb 100644 --- a/metrics/prometheus/prometheus.go +++ b/metrics/prometheus/prometheus.go @@ -1,202 +1,157 @@ -// Package prometheus implements a Prometheus backend for package metrics. +// Package prometheus provides Prometheus implementations for metrics. +// Individual metrics are mapped to their Prometheus counterparts, and +// (depending on the constructor used) may be automatically registered in the +// global Prometheus metrics registry. package prometheus import ( "github.com/prometheus/client_golang/prometheus" - "github.com/go-kit/kit/metrics" + "github.com/go-kit/kit/metrics3" + "github.com/go-kit/kit/metrics3/internal/lv" ) -// Prometheus has strong opinions about the dimensionality of fields. Users -// must predeclare every field key they intend to use. On every observation, -// fields with keys that haven't been predeclared will be silently dropped, -// and predeclared field keys without values will receive the value -// PrometheusLabelValueUnknown. -var PrometheusLabelValueUnknown = "unknown" - -type counter struct { - *prometheus.CounterVec - name string - Pairs map[string]string +// Counter implements Counter, via a Prometheus CounterVec. +type Counter struct { + cv *prometheus.CounterVec + lvs lv.LabelValues } -// NewCounter returns a new Counter backed by a Prometheus metric. The counter -// is automatically registered via prometheus.Register. 
-func NewCounter(opts prometheus.CounterOpts, fieldKeys []string) metrics.Counter { - m := prometheus.NewCounterVec(opts, fieldKeys) - prometheus.MustRegister(m) - p := map[string]string{} - for _, fieldName := range fieldKeys { - p[fieldName] = PrometheusLabelValueUnknown - } - return counter{ - CounterVec: m, - name: opts.Name, - Pairs: p, +// NewCounterFrom constructs and registers a Prometheus CounterVec, +// and returns a usable Counter object. +func NewCounterFrom(opts prometheus.CounterOpts, labelNames []string) *Counter { + cv := prometheus.NewCounterVec(opts, labelNames) + prometheus.MustRegister(cv) + return NewCounter(cv) +} + +// NewCounter wraps the CounterVec and returns a usable Counter object. +func NewCounter(cv *prometheus.CounterVec) *Counter { + return &Counter{ + cv: cv, } } -func (c counter) Name() string { return c.name } - -func (c counter) With(f metrics.Field) metrics.Counter { - return counter{ - CounterVec: c.CounterVec, - name: c.name, - Pairs: merge(c.Pairs, f), +// With implements Counter. +func (c *Counter) With(labelValues ...string) metrics.Counter { + return &Counter{ + cv: c.cv, + lvs: c.lvs.With(labelValues...), } } -func (c counter) Add(delta uint64) { - c.CounterVec.With(prometheus.Labels(c.Pairs)).Add(float64(delta)) +// Add implements Counter. +func (c *Counter) Add(delta float64) { + c.cv.WithLabelValues(c.lvs...).Add(delta) } -type gauge struct { - *prometheus.GaugeVec - name string - Pairs map[string]string +// Gauge implements Gauge, via a Prometheus GaugeVec. +type Gauge struct { + gv *prometheus.GaugeVec + lvs lv.LabelValues } -// NewGauge returns a new Gauge backed by a Prometheus metric. The gauge is -// automatically registered via prometheus.Register. -func NewGauge(opts prometheus.GaugeOpts, fieldKeys []string) metrics.Gauge { - m := prometheus.NewGaugeVec(opts, fieldKeys) - prometheus.MustRegister(m) - return gauge{ - GaugeVec: m, - name: opts.Name, - Pairs: pairsFrom(fieldKeys), +// NewGaugeFrom constructs and registers a Prometheus GaugeVec, +// and returns a usable Gauge object. +func NewGaugeFrom(opts prometheus.GaugeOpts, labelNames []string) *Gauge { + gv := prometheus.NewGaugeVec(opts, labelNames) + prometheus.MustRegister(gv) + return NewGauge(gv) +} + +// NewGauge wraps the GaugeVec and returns a usable Gauge object. +func NewGauge(gv *prometheus.GaugeVec) *Gauge { + return &Gauge{ + gv: gv, } } -func (g gauge) Name() string { return g.name } - -func (g gauge) With(f metrics.Field) metrics.Gauge { - return gauge{ - GaugeVec: g.GaugeVec, - name: g.name, - Pairs: merge(g.Pairs, f), +// With implements Gauge. +func (g *Gauge) With(labelValues ...string) metrics.Gauge { + return &Gauge{ + gv: g.gv, + lvs: g.lvs.With(labelValues...), } } -func (g gauge) Set(value float64) { - g.GaugeVec.With(prometheus.Labels(g.Pairs)).Set(value) +// Set implements Gauge. +func (g *Gauge) Set(value float64) { + g.gv.WithLabelValues(g.lvs...).Set(value) } -func (g gauge) Add(delta float64) { - g.GaugeVec.With(prometheus.Labels(g.Pairs)).Add(delta) +// Add is supported by Prometheus GaugeVecs. +func (g *Gauge) Add(delta float64) { + g.gv.WithLabelValues(g.lvs...).Add(delta) } -func (g gauge) Get() float64 { - // TODO(pb): see https://github.com/prometheus/client_golang/issues/58 - return 0.0 +// Summary implements Histogram, via a Prometheus SummaryVec. The difference +// between a Summary and a Histogram is that Summaries don't require predefined +// quantile buckets, but cannot be statistically aggregated.
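As a usage sketch for the new Prometheus counter constructors above: the namespace, subsystem, and metric name are illustrative only, and the import path follows the `metrics3` paths used in this diff.

```go
package main

import (
	stdprometheus "github.com/prometheus/client_golang/prometheus"

	kitprometheus "github.com/go-kit/kit/metrics3/prometheus"
)

func main() {
	// NewCounterFrom builds the CounterVec, registers it in the default
	// Prometheus registry, and returns the Go kit Counter adapter.
	requests := kitprometheus.NewCounterFrom(stdprometheus.CounterOpts{
		Namespace: "myservice",
		Subsystem: "api",
		Name:      "requests_total",
		Help:      "Total number of requests received.",
	}, []string{}) // no label names, mirroring the tests in this change

	requests.Add(1)
}
```

The Summary and Histogram adapters below follow the same constructor pattern.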
+type Summary struct { + sv *prometheus.SummaryVec + lvs lv.LabelValues } -// RegisterCallbackGauge registers a Gauge with Prometheus whose value is -// determined at collect time by the passed callback function. The callback -// determines the value, and fields are ignored, so RegisterCallbackGauge -// returns nothing. -func RegisterCallbackGauge(opts prometheus.GaugeOpts, callback func() float64) { - prometheus.MustRegister(prometheus.NewGaugeFunc(opts, callback)) +// NewSummaryFrom constructs and registers a Prometheus SummaryVec, +// and returns a usable Summary object. +func NewSummaryFrom(opts prometheus.SummaryOpts, labelNames []string) *Summary { + sv := prometheus.NewSummaryVec(opts, labelNames) + prometheus.MustRegister(sv) + return NewSummary(sv) } -type summary struct { - *prometheus.SummaryVec - name string - Pairs map[string]string -} - -// NewSummary returns a new Histogram backed by a Prometheus summary. The -// histogram is automatically registered via prometheus.Register. -// -// For more information on Prometheus histograms and summaries, refer to -// http://prometheus.io/docs/practices/histograms. -func NewSummary(opts prometheus.SummaryOpts, fieldKeys []string) metrics.Histogram { - m := prometheus.NewSummaryVec(opts, fieldKeys) - prometheus.MustRegister(m) - return summary{ - SummaryVec: m, - name: opts.Name, - Pairs: pairsFrom(fieldKeys), +// NewSummary wraps the SummaryVec and returns a usable Summary object. +func NewSummary(sv *prometheus.SummaryVec) *Summary { + return &Summary{ + sv: sv, } } -func (s summary) Name() string { return s.name } - -func (s summary) With(f metrics.Field) metrics.Histogram { - return summary{ - SummaryVec: s.SummaryVec, - name: s.name, - Pairs: merge(s.Pairs, f), +// With implements Histogram. +func (s *Summary) With(labelValues ...string) metrics.Histogram { + return &Summary{ + sv: s.sv, + lvs: s.lvs.With(labelValues...), } } -func (s summary) Observe(value int64) { - s.SummaryVec.With(prometheus.Labels(s.Pairs)).Observe(float64(value)) +// Observe implements Histogram. +func (s *Summary) Observe(value float64) { + s.sv.WithLabelValues(s.lvs...).Observe(value) } -func (s summary) Distribution() ([]metrics.Bucket, []metrics.Quantile) { - // TODO(pb): see https://github.com/prometheus/client_golang/issues/58 - return []metrics.Bucket{}, []metrics.Quantile{} +// Histogram implements Histogram via a Prometheus HistogramVec. The difference +// between a Histogram and a Summary is that Histograms require predefined +// quantile buckets, and can be statistically aggregated. +type Histogram struct { + hv *prometheus.HistogramVec + lvs lv.LabelValues } -type histogram struct { - *prometheus.HistogramVec - name string - Pairs map[string]string +// NewHistogramFrom constructs and registers a Prometheus HistogramVec, +// and returns a usable Histogram object. +func NewHistogramFrom(opts prometheus.HistogramOpts, labelNames []string) *Histogram { + hv := prometheus.NewHistogramVec(opts, labelNames) + prometheus.MustRegister(hv) + return NewHistogram(hv) } -// NewHistogram returns a new Histogram backed by a Prometheus Histogram. The -// histogram is automatically registered via prometheus.Register. -// -// For more information on Prometheus histograms and summaries, refer to -// http://prometheus.io/docs/practices/histograms. 
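For the bucketed Histogram adapter introduced above, a corresponding hedged sketch; bucket boundaries and names are illustrative only.

```go
package main

import (
	"time"

	stdprometheus "github.com/prometheus/client_golang/prometheus"

	kitprometheus "github.com/go-kit/kit/metrics3/prometheus"
)

func main() {
	// Unlike a Summary, a Histogram declares its buckets up front and can be
	// aggregated across instances at query time.
	duration := kitprometheus.NewHistogramFrom(stdprometheus.HistogramOpts{
		Namespace: "myservice",
		Subsystem: "api",
		Name:      "request_duration_seconds",
		Help:      "Request duration in seconds.",
		Buckets:   []float64{0.01, 0.05, 0.1, 0.5, 1, 5},
	}, []string{})

	begin := time.Now()
	// ... handle a request ...
	duration.Observe(time.Since(begin).Seconds())
}
```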
-func NewHistogram(opts prometheus.HistogramOpts, fieldKeys []string) metrics.Histogram { - m := prometheus.NewHistogramVec(opts, fieldKeys) - prometheus.MustRegister(m) - return histogram{ - HistogramVec: m, - name: opts.Name, - Pairs: pairsFrom(fieldKeys), +// NewHistogram wraps the HistogramVec and returns a usable Histogram object. +func NewHistogram(hv *prometheus.HistogramVec) *Histogram { + return &Histogram{ + hv: hv, } } -func (h histogram) Name() string { return h.name } - -func (h histogram) With(f metrics.Field) metrics.Histogram { - return histogram{ - HistogramVec: h.HistogramVec, - name: h.name, - Pairs: merge(h.Pairs, f), +// With implements Histogram. +func (h *Histogram) With(labelValues ...string) metrics.Histogram { + return &Histogram{ + hv: h.hv, + lvs: h.lvs.With(labelValues...), } } -func (h histogram) Observe(value int64) { - h.HistogramVec.With(prometheus.Labels(h.Pairs)).Observe(float64(value)) +// Observe implements Histogram. +func (h *Histogram) Observe(value float64) { + h.hv.WithLabelValues(h.lvs...).Observe(value) } - -func (h histogram) Distribution() ([]metrics.Bucket, []metrics.Quantile) { - // TODO(pb): see https://github.com/prometheus/client_golang/issues/58 - return []metrics.Bucket{}, []metrics.Quantile{} -} - -func pairsFrom(fieldKeys []string) map[string]string { - p := map[string]string{} - for _, fieldName := range fieldKeys { - p[fieldName] = PrometheusLabelValueUnknown - } - return p -} - -func merge(orig map[string]string, f metrics.Field) map[string]string { - if _, ok := orig[f.Key]; !ok { - return orig - } - - newPairs := make(map[string]string, len(orig)) - for k, v := range orig { - newPairs[k] = v - } - - newPairs[f.Key] = f.Value - return newPairs -} diff --git a/metrics/prometheus/prometheus_test.go b/metrics/prometheus/prometheus_test.go index 5115f17..d8d24d3 100644 --- a/metrics/prometheus/prometheus_test.go +++ b/metrics/prometheus/prometheus_test.go @@ -1,130 +1,191 @@ -package prometheus_test +package prometheus import ( + "io/ioutil" + "math" + "math/rand" + "net/http" + "net/http/httptest" + "regexp" + "strconv" "strings" "testing" + "github.com/go-kit/kit/metrics3/teststat" stdprometheus "github.com/prometheus/client_golang/prometheus" - - "github.com/go-kit/kit/metrics" - "github.com/go-kit/kit/metrics/prometheus" - "github.com/go-kit/kit/metrics/teststat" ) -func TestPrometheusLabelBehavior(t *testing.T) { - c := prometheus.NewCounter(stdprometheus.CounterOpts{ - Namespace: "test", - Subsystem: "prometheus_label_behavior", - Name: "foobar", - Help: "Abc def.", - }, []string{"used_key", "unused_key"}) - c.With(metrics.Field{Key: "used_key", Value: "declared"}).Add(1) - c.Add(1) +func TestCounter(t *testing.T) { + s := httptest.NewServer(stdprometheus.UninstrumentedHandler()) + defer s.Close() - if want, have := strings.Join([]string{ - `# HELP test_prometheus_label_behavior_foobar Abc def.`, - `# TYPE test_prometheus_label_behavior_foobar counter`, - `test_prometheus_label_behavior_foobar{unused_key="unknown",used_key="declared"} 1`, - `test_prometheus_label_behavior_foobar{unused_key="unknown",used_key="unknown"} 1`, - }, "\n"), teststat.ScrapePrometheus(t); !strings.Contains(have, want) { - t.Errorf("metric stanza not found or incorrect\n%s", have) + scrape := func() string { + resp, _ := http.Get(s.URL) + buf, _ := ioutil.ReadAll(resp.Body) + return string(buf) + } + + namespace, subsystem, name := "ns", "ss", "foo" + re := regexp.MustCompile(namespace + `_` + subsystem + `_` + name + ` ([0-9\.]+)`) + + counter := 
NewCounterFrom(stdprometheus.CounterOpts{ + Namespace: namespace, + Subsystem: subsystem, + Name: name, + Help: "This is the help string.", + }, []string{}) + + value := func() float64 { + matches := re.FindStringSubmatch(scrape()) + f, _ := strconv.ParseFloat(matches[1], 64) + return f + } + + if err := teststat.TestCounter(counter, value); err != nil { + t.Fatal(err) } } -func TestPrometheusCounter(t *testing.T) { - c := prometheus.NewCounter(stdprometheus.CounterOpts{ - Namespace: "test", - Subsystem: "prometheus_counter", - Name: "foobar", - Help: "Lorem ipsum.", +func TestGauge(t *testing.T) { + s := httptest.NewServer(stdprometheus.UninstrumentedHandler()) + defer s.Close() + + scrape := func() string { + resp, _ := http.Get(s.URL) + buf, _ := ioutil.ReadAll(resp.Body) + return string(buf) + } + + namespace, subsystem, name := "aaa", "bbb", "ccc" + re := regexp.MustCompile(namespace + `_` + subsystem + `_` + name + ` ([0-9\.]+)`) + + gauge := NewGaugeFrom(stdprometheus.GaugeOpts{ + Namespace: namespace, + Subsystem: subsystem, + Name: name, + Help: "This is a different help string.", }, []string{}) - c.Add(1) - c.Add(2) - if want, have := strings.Join([]string{ - `# HELP test_prometheus_counter_foobar Lorem ipsum.`, - `# TYPE test_prometheus_counter_foobar counter`, - `test_prometheus_counter_foobar 3`, - }, "\n"), teststat.ScrapePrometheus(t); !strings.Contains(have, want) { - t.Errorf("metric stanza not found or incorrect\n%s", have) + + value := func() float64 { + matches := re.FindStringSubmatch(scrape()) + f, _ := strconv.ParseFloat(matches[1], 64) + return f } - c.Add(3) - c.Add(4) - if want, have := strings.Join([]string{ - `# HELP test_prometheus_counter_foobar Lorem ipsum.`, - `# TYPE test_prometheus_counter_foobar counter`, - `test_prometheus_counter_foobar 10`, - }, "\n"), teststat.ScrapePrometheus(t); !strings.Contains(have, want) { - t.Errorf("metric stanza not found or incorrect\n%s", have) + + if err := teststat.TestGauge(gauge, value); err != nil { + t.Fatal(err) } } -func TestPrometheusGauge(t *testing.T) { - c := prometheus.NewGauge(stdprometheus.GaugeOpts{ - Namespace: "test", - Subsystem: "prometheus_gauge", - Name: "foobar", - Help: "Dolor sit.", +func TestSummary(t *testing.T) { + s := httptest.NewServer(stdprometheus.UninstrumentedHandler()) + defer s.Close() + + scrape := func() string { + resp, _ := http.Get(s.URL) + buf, _ := ioutil.ReadAll(resp.Body) + return string(buf) + } + + namespace, subsystem, name := "test", "prometheus", "summary" + re50 := regexp.MustCompile(namespace + `_` + subsystem + `_` + name + `{quantile="0.5"} ([0-9\.]+)`) + re90 := regexp.MustCompile(namespace + `_` + subsystem + `_` + name + `{quantile="0.9"} ([0-9\.]+)`) + re99 := regexp.MustCompile(namespace + `_` + subsystem + `_` + name + `{quantile="0.99"} ([0-9\.]+)`) + + summary := NewSummaryFrom(stdprometheus.SummaryOpts{ + Namespace: namespace, + Subsystem: subsystem, + Name: name, + Help: "This is the help string for the summary.", }, []string{}) - c.Set(42) - if want, have := strings.Join([]string{ - `# HELP test_prometheus_gauge_foobar Dolor sit.`, - `# TYPE test_prometheus_gauge_foobar gauge`, - `test_prometheus_gauge_foobar 42`, - }, "\n"), teststat.ScrapePrometheus(t); !strings.Contains(have, want) { - t.Errorf("metric stanza not found or incorrect\n%s", have) + + quantiles := func() (float64, float64, float64, float64) { + buf := scrape() + match50 := re50.FindStringSubmatch(buf) + p50, _ := strconv.ParseFloat(match50[1], 64) + match90 := re90.FindStringSubmatch(buf) + 
p90, _ := strconv.ParseFloat(match90[1], 64) + match99 := re99.FindStringSubmatch(buf) + p99, _ := strconv.ParseFloat(match99[1], 64) + p95 := p90 + ((p99 - p90) / 2) // Prometheus, y u no p95??? :< #yolo + return p50, p90, p95, p99 } - c.Add(-43) - if want, have := strings.Join([]string{ - `# HELP test_prometheus_gauge_foobar Dolor sit.`, - `# TYPE test_prometheus_gauge_foobar gauge`, - `test_prometheus_gauge_foobar -1`, - }, "\n"), teststat.ScrapePrometheus(t); !strings.Contains(have, want) { - t.Errorf("metric stanza not found or incorrect\n%s", have) + + if err := teststat.TestHistogram(summary, quantiles, 0.01); err != nil { + t.Fatal(err) } } -func TestPrometheusCallbackGauge(t *testing.T) { - value := 123.456 - cb := func() float64 { return value } - prometheus.RegisterCallbackGauge(stdprometheus.GaugeOpts{ - Namespace: "test", - Subsystem: "prometheus_gauge", - Name: "bazbaz", - Help: "Help string.", - }, cb) - if want, have := strings.Join([]string{ - `# HELP test_prometheus_gauge_bazbaz Help string.`, - `# TYPE test_prometheus_gauge_bazbaz gauge`, - `test_prometheus_gauge_bazbaz 123.456`, - }, "\n"), teststat.ScrapePrometheus(t); !strings.Contains(have, want) { - t.Errorf("metric stanza not found or incorrect\n%s", have) +func TestHistogram(t *testing.T) { + // Prometheus reports histograms as a count of observations that fell into + // each predefined bucket, with the bucket value representing a global upper + // limit. That is, the count monotonically increases over the buckets. This + // requires a different strategy to test. + + s := httptest.NewServer(stdprometheus.UninstrumentedHandler()) + defer s.Close() + + scrape := func() string { + resp, _ := http.Get(s.URL) + buf, _ := ioutil.ReadAll(resp.Body) + return string(buf) + } + + namespace, subsystem, name := "test", "prometheus", "histogram" + re := regexp.MustCompile(namespace + `_` + subsystem + `_` + name + `_bucket{le="([0-9]+|\+Inf)"} ([0-9\.]+)`) + + numStdev := 3 + bucketMin := (teststat.Mean - (numStdev * teststat.Stdev)) + bucketMax := (teststat.Mean + (numStdev * teststat.Stdev)) + if bucketMin < 0 { + bucketMin = 0 + } + bucketCount := 10 + bucketDelta := (bucketMax - bucketMin) / bucketCount + buckets := []float64{} + for i := bucketMin; i <= bucketMax; i += bucketDelta { + buckets = append(buckets, float64(i)) + } + + histogram := NewHistogramFrom(stdprometheus.HistogramOpts{ + Namespace: namespace, + Subsystem: subsystem, + Name: name, + Help: "This is the help string for the histogram.", + Buckets: buckets, + }, []string{}) + + // Can't TestHistogram, because Prometheus Histograms don't dynamically + // compute quantiles. Instead, they fill up buckets. So, let's populate the + // histogram kind of manually. + teststat.PopulateNormalHistogram(histogram, rand.Int()) + + // Then, we use ExpectedObservationsLessThan to validate. + for _, line := range strings.Split(scrape(), "\n") { + match := re.FindStringSubmatch(line) + if match == nil { + continue + } + + bucket, _ := strconv.ParseInt(match[1], 10, 64) + have, _ := strconv.ParseInt(match[2], 10, 64) + + want := teststat.ExpectedObservationsLessThan(bucket) + if match[1] == "+Inf" { + want = int64(teststat.Count) // special case + } + + // Unfortunately, we observe experimentally that Prometheus is quite + // imprecise at the extremes. I'm setting a very high tolerance for now. + // It would be great to dig in and figure out whether that's a problem + // with my Expected calculation, or in Prometheus. 
+ tolerance := 0.25 + if delta := math.Abs(float64(want) - float64(have)); (delta / float64(want)) > tolerance { + t.Errorf("Bucket %d: want %d, have %d (%.1f%%)", bucket, want, have, (100.0 * delta / float64(want))) + } } } -func TestPrometheusSummary(t *testing.T) { - h := prometheus.NewSummary(stdprometheus.SummaryOpts{ - Namespace: "test", - Subsystem: "prometheus_summary_histogram", - Name: "foobar", - Help: "Qwerty asdf.", - }, []string{}) - - const mean, stdev int64 = 50, 10 - teststat.PopulateNormalHistogram(t, h, 34, mean, stdev) - teststat.AssertPrometheusNormalSummary(t, "test_prometheus_summary_histogram_foobar", mean, stdev) +func TestWith(t *testing.T) { + t.Skip("TODO") } - -func TestPrometheusHistogram(t *testing.T) { - buckets := []float64{20, 40, 60, 80, 100} - h := prometheus.NewHistogram(stdprometheus.HistogramOpts{ - Namespace: "test", - Subsystem: "prometheus_histogram_histogram", - Name: "quux", - Help: "Qwerty asdf.", - Buckets: buckets, - }, []string{}) - - const mean, stdev int64 = 50, 10 - teststat.PopulateNormalHistogram(t, h, 34, mean, stdev) - teststat.AssertPrometheusBucketedHistogram(t, "test_prometheus_histogram_histogram_quux_bucket", mean, stdev, buckets) -} diff --git a/metrics/provider/circonus.go b/metrics/provider/circonus.go new file mode 100644 index 0000000..54dd3fc --- /dev/null +++ b/metrics/provider/circonus.go @@ -0,0 +1,36 @@ +package provider + +import ( + "github.com/go-kit/kit/metrics3" + "github.com/go-kit/kit/metrics3/circonus" +) + +type circonusProvider struct { + c *circonus.Circonus +} + +// NewCirconusProvider takes the given Circonnus object and returns a Provider +// that produces Circonus metrics. +func NewCirconusProvider(c *circonus.Circonus) Provider { + return &circonusProvider{ + c: c, + } +} + +// NewCounter implements Provider. +func (p *circonusProvider) NewCounter(name string) metrics.Counter { + return p.c.NewCounter(name) +} + +// NewGauge implements Provider. +func (p *circonusProvider) NewGauge(name string) metrics.Gauge { + return p.c.NewGauge(name) +} + +// NewHistogram implements Provider. The buckets parameter is ignored. +func (p *circonusProvider) NewHistogram(name string, _ int) metrics.Histogram { + return p.c.NewHistogram(name) +} + +// Stop implements Provider, but is a no-op. +func (p *circonusProvider) Stop() {} diff --git a/metrics/provider/discard.go b/metrics/provider/discard.go new file mode 100644 index 0000000..82de282 --- /dev/null +++ b/metrics/provider/discard.go @@ -0,0 +1,24 @@ +package provider + +import ( + "github.com/go-kit/kit/metrics3" + "github.com/go-kit/kit/metrics3/discard" +) + +type discardProvider struct{} + +// NewDiscardProvider returns a provider that produces no-op metrics via the +// discarding backend. +func NewDiscardProvider() Provider { return discardProvider{} } + +// NewCounter implements Provider. +func (discardProvider) NewCounter(string) metrics.Counter { return discard.NewCounter() } + +// NewGauge implements Provider. +func (discardProvider) NewGauge(string) metrics.Gauge { return discard.NewGauge() } + +// NewHistogram implements Provider. +func (discardProvider) NewHistogram(string, int) metrics.Histogram { return discard.NewHistogram() } + +// Stop implements Provider. 
+func (discardProvider) Stop() {} diff --git a/metrics/provider/dogstatsd.go b/metrics/provider/dogstatsd.go new file mode 100644 index 0000000..076abf9 --- /dev/null +++ b/metrics/provider/dogstatsd.go @@ -0,0 +1,43 @@ +package provider + +import ( + "github.com/go-kit/kit/metrics3" + "github.com/go-kit/kit/metrics3/dogstatsd" +) + +type dogstatsdProvider struct { + d *dogstatsd.Dogstatsd + stop func() +} + +// NewDogstatsdProvider wraps the given Dogstatsd object and stop func and +// returns a Provider that produces Dogstatsd metrics. A typical stop function +// would be ticker.Stop from the ticker passed to the SendLoop helper method. +func NewDogstatsdProvider(d *dogstatsd.Dogstatsd, stop func()) Provider { + return &dogstatsdProvider{ + d: d, + stop: stop, + } +} + +// NewCounter implements Provider, returning a new Dogstatsd Counter with a +// sample rate of 1.0. +func (p *dogstatsdProvider) NewCounter(name string) metrics.Counter { + return p.d.NewCounter(name, 1.0) +} + +// NewGauge implements Provider. +func (p *dogstatsdProvider) NewGauge(name string) metrics.Gauge { + return p.d.NewGauge(name) +} + +// NewHistogram implements Provider, returning a new Dogstatsd Histogram (note: +// not a Timing) with a sample rate of 1.0. The buckets argument is ignored. +func (p *dogstatsdProvider) NewHistogram(name string, _ int) metrics.Histogram { + return p.d.NewHistogram(name, 1.0) +} + +// Stop implements Provider, invoking the stop function passed at construction. +func (p *dogstatsdProvider) Stop() { + p.stop() +} diff --git a/metrics/provider/expvar.go b/metrics/provider/expvar.go new file mode 100644 index 0000000..57a90cf --- /dev/null +++ b/metrics/provider/expvar.go @@ -0,0 +1,31 @@ +package provider + +import ( + "github.com/go-kit/kit/metrics3" + "github.com/go-kit/kit/metrics3/expvar" +) + +type expvarProvider struct{} + +// NewExpvarProvider returns a Provider that produces expvar metrics. +func NewExpvarProvider() Provider { + return expvarProvider{} +} + +// NewCounter implements Provider. +func (p expvarProvider) NewCounter(name string) metrics.Counter { + return expvar.NewCounter(name) +} + +// NewGauge implements Provider. +func (p expvarProvider) NewGauge(name string) metrics.Gauge { + return expvar.NewGauge(name) +} + +// NewHistogram implements Provider. +func (p expvarProvider) NewHistogram(name string, buckets int) metrics.Histogram { + return expvar.NewHistogram(name, buckets) +} + +// Stop implements Provider, but is a no-op. +func (p expvarProvider) Stop() {} diff --git a/metrics/provider/graphite.go b/metrics/provider/graphite.go new file mode 100644 index 0000000..68696f2 --- /dev/null +++ b/metrics/provider/graphite.go @@ -0,0 +1,41 @@ +package provider + +import ( + "github.com/go-kit/kit/metrics3" + "github.com/go-kit/kit/metrics3/graphite" +) + +type graphiteProvider struct { + g *graphite.Graphite + stop func() +} + +// NewGraphiteProvider wraps the given Graphite object and stop func and returns +// a Provider that produces Graphite metrics. A typical stop function would be +// ticker.Stop from the ticker passed to the SendLoop helper method. +func NewGraphiteProvider(g *graphite.Graphite, stop func()) Provider { + return &graphiteProvider{ + g: g, + stop: stop, + } +} + +// NewCounter implements Provider. +func (p *graphiteProvider) NewCounter(name string) metrics.Counter { + return p.g.NewCounter(name) +} + +// NewGauge implements Provider. 
+func (p *graphiteProvider) NewGauge(name string) metrics.Gauge { + return p.g.NewGauge(name) +} + +// NewHistogram implements Provider. +func (p *graphiteProvider) NewHistogram(name string, buckets int) metrics.Histogram { + return p.g.NewHistogram(name, buckets) +} + +// Stop implements Provider, invoking the stop function passed at construction. +func (p *graphiteProvider) Stop() { + p.stop() +} diff --git a/metrics/provider/influx.go b/metrics/provider/influx.go new file mode 100644 index 0000000..d57f4ee --- /dev/null +++ b/metrics/provider/influx.go @@ -0,0 +1,40 @@ +package provider + +import ( + "github.com/go-kit/kit/metrics3" + "github.com/go-kit/kit/metrics3/influx" +) + +type influxProvider struct { + in *influx.Influx + stop func() +} + +// NewInfluxProvider takes the given Influx object and stop func, and returns +// a Provider that produces Influx metrics. +func NewInfluxProvider(in *influx.Influx, stop func()) Provider { + return &influxProvider{ + in: in, + stop: stop, + } +} + +// NewCounter implements Provider. Per-metric tags are not supported. +func (p *influxProvider) NewCounter(name string) metrics.Counter { + return p.in.NewCounter(name) +} + +// NewGauge implements Provider. Per-metric tags are not supported. +func (p *influxProvider) NewGauge(name string) metrics.Gauge { + return p.in.NewGauge(name) +} + +// NewHistogram implements Provider. Per-metric tags are not supported, and the +// buckets parameter is ignored. +func (p *influxProvider) NewHistogram(name string, buckets int) metrics.Histogram { + return p.in.NewHistogram(name) +} + +// Stop implements Provider, invoking the stop function passed at construction. +func (p *influxProvider) Stop() { + p.stop() +} diff --git a/metrics/provider/prometheus.go b/metrics/provider/prometheus.go new file mode 100644 index 0000000..e3c1189 --- /dev/null +++ b/metrics/provider/prometheus.go @@ -0,0 +1,63 @@ +package provider + +import ( + stdprometheus "github.com/prometheus/client_golang/prometheus" + + "github.com/go-kit/kit/metrics3" + "github.com/go-kit/kit/metrics3/prometheus" +) + +type prometheusProvider struct { + namespace string + subsystem string +} + +// NewPrometheusProvider returns a Provider that produces Prometheus metrics. +// Namespace and subsystem are applied to all produced metrics. +func NewPrometheusProvider(namespace, subsystem string) Provider { + return &prometheusProvider{ + namespace: namespace, + subsystem: subsystem, + } +} + +// NewCounter implements Provider via prometheus.NewCounterFrom, i.e. the +// counter is registered. The metric's namespace and subsystem are taken from +// the Provider. Help is set to the name of the metric, and no const label names +// are set. +func (p *prometheusProvider) NewCounter(name string) metrics.Counter { + return prometheus.NewCounterFrom(stdprometheus.CounterOpts{ + Namespace: p.namespace, + Subsystem: p.subsystem, + Name: name, + Help: name, + }, []string{}) +} + +// NewGauge implements Provider via prometheus.NewGaugeFrom, i.e. the gauge is +// registered. The metric's namespace and subsystem are taken from the Provider. +// Help is set to the name of the metric, and no const label names are set. +func (p *prometheusProvider) NewGauge(name string) metrics.Gauge { + return prometheus.NewGaugeFrom(stdprometheus.GaugeOpts{ + Namespace: p.namespace, + Subsystem: p.subsystem, + Name: name, + Help: name, + }, []string{}) +} + +// NewHistogram implements Provider via prometheus.NewSummaryFrom, i.e. the summary +// is registered. The metric's namespace and subsystem are taken from the +// Provider.
Help is set to the name of the metric, and no const label names are +// set. Buckets are ignored. +func (p *prometheusProvider) NewHistogram(name string, _ int) metrics.Histogram { + return prometheus.NewSummaryFrom(stdprometheus.SummaryOpts{ + Namespace: p.namespace, + Subsystem: p.subsystem, + Name: name, + Help: name, + }, []string{}) +} + +// Stop implements Provider, but is a no-op. +func (p *prometheusProvider) Stop() {} diff --git a/metrics/provider/provider.go b/metrics/provider/provider.go new file mode 100644 index 0000000..174f237 --- /dev/null +++ b/metrics/provider/provider.go @@ -0,0 +1,42 @@ +// Package provider provides a factory-like abstraction for metrics backends. +// This package is provided specifically for the needs of the NY Times framework +// Gizmo. Most normal Go kit users shouldn't need to use it. +// +// Normally, if your microservice needs to support different metrics backends, +// you can simply do different construction based on a flag. For example, +// +// var latency metrics.Histogram +// var requests metrics.Counter +// switch *metricsBackend { +// case "prometheus": +// latency = prometheus.NewSummaryVec(...) +// requests = prometheus.NewCounterVec(...) +// case "statsd": +// s := statsd.New(...) +// t := time.NewTicker(5*time.Second) +// go s.SendLoop(t.C, "tcp", "statsd.local:8125") +// latency = s.NewHistogram(...) +// requests = s.NewCounter(...) +// default: +// log.Fatal("unsupported metrics backend %q", *metricsBackend) +// } +// +package provider + +import ( + "github.com/go-kit/kit/metrics3" +) + +// Provider abstracts over constructors and lifecycle management functions for +// each supported metrics backend. It should only be used by those who need to +// swap out implementations dynamically. +// +// This is primarily useful for intermediating frameworks, and is likely +// unnecessary for most Go kit services. See the package-level doc comment for +// more typical usage instructions. +type Provider interface { + NewCounter(name string) metrics.Counter + NewGauge(name string) metrics.Gauge + NewHistogram(name string, buckets int) metrics.Histogram + Stop() +} diff --git a/metrics/provider/providers.go b/metrics/provider/providers.go deleted file mode 100644 index 34cdf33..0000000 --- a/metrics/provider/providers.go +++ /dev/null @@ -1,259 +0,0 @@ -package provider - -import ( - "errors" - "time" - - "github.com/prometheus/client_golang/prometheus" - - "github.com/go-kit/kit/log" - "github.com/go-kit/kit/metrics" - "github.com/go-kit/kit/metrics/discard" - "github.com/go-kit/kit/metrics/dogstatsd" - kitexp "github.com/go-kit/kit/metrics/expvar" - "github.com/go-kit/kit/metrics/graphite" - kitprom "github.com/go-kit/kit/metrics/prometheus" - "github.com/go-kit/kit/metrics/statsd" -) - -// Provider represents a union set of constructors and lifecycle management -// functions for each supported metrics backend. It should be used by those who -// need to easily swap out implementations, e.g. dynamically, or at a single -// point in an intermediating framework. -type Provider interface { - NewCounter(name, help string) metrics.Counter - NewHistogram(name, help string, min, max int64, sigfigs int, quantiles ...int) (metrics.Histogram, error) - NewGauge(name, help string) metrics.Gauge - Stop() -} - -// NewGraphiteProvider will return a Provider implementation that is a simple -// wrapper around a graphite.Emitter. All metric names will be prefixed with the -// given value and data will be emitted once every interval. 
If no network value -// is given, it will default to "udp". -func NewGraphiteProvider(network, address, prefix string, interval time.Duration, logger log.Logger) (Provider, error) { - if network == "" { - network = "udp" - } - if address == "" { - return nil, errors.New("address is required") - } - return graphiteProvider{ - e: graphite.NewEmitter(network, address, prefix, interval, logger), - }, nil -} - -type graphiteProvider struct { - e *graphite.Emitter -} - -var _ Provider = graphiteProvider{} - -// NewCounter implements Provider. Help is ignored. -func (p graphiteProvider) NewCounter(name, _ string) metrics.Counter { - return p.e.NewCounter(name) -} - -// NewHistogram implements Provider. Help is ignored. -func (p graphiteProvider) NewHistogram(name, _ string, min, max int64, sigfigs int, quantiles ...int) (metrics.Histogram, error) { - return p.e.NewHistogram(name, min, max, sigfigs, quantiles...) -} - -// NewGauge implements Provider. Help is ignored. -func (p graphiteProvider) NewGauge(name, _ string) metrics.Gauge { - return p.e.NewGauge(name) -} - -// Stop implements Provider. -func (p graphiteProvider) Stop() { - p.e.Stop() -} - -// NewStatsdProvider will return a Provider implementation that is a simple -// wrapper around a statsd.Emitter. All metric names will be prefixed with the -// given value and data will be emitted once every interval or when the buffer -// has reached its max size. If no network value is given, it will default to -// "udp". -func NewStatsdProvider(network, address, prefix string, interval time.Duration, logger log.Logger) (Provider, error) { - if network == "" { - network = "udp" - } - if address == "" { - return nil, errors.New("address is required") - } - return statsdProvider{ - e: statsd.NewEmitter(network, address, prefix, interval, logger), - }, nil -} - -type statsdProvider struct { - e *statsd.Emitter -} - -var _ Provider = statsdProvider{} - -// NewCounter implements Provider. Help is ignored. -func (p statsdProvider) NewCounter(name, _ string) metrics.Counter { - return p.e.NewCounter(name) -} - -// NewHistogram implements Provider. Help is ignored. -func (p statsdProvider) NewHistogram(name, _ string, min, max int64, sigfigs int, quantiles ...int) (metrics.Histogram, error) { - return p.e.NewHistogram(name), nil -} - -// NewGauge implements Provider. Help is ignored. -func (p statsdProvider) NewGauge(name, _ string) metrics.Gauge { - return p.e.NewGauge(name) -} - -// Stop will call the underlying statsd.Emitter's Stop method. -func (p statsdProvider) Stop() { - p.e.Stop() -} - -// NewDogStatsdProvider will return a Provider implementation that is a simple -// wrapper around a dogstatsd.Emitter. All metric names will be prefixed with -// the given value and data will be emitted once every interval or when the -// buffer has reached its max size. If no network value is given, it will -// default to "udp". -func NewDogStatsdProvider(network, address, prefix string, interval time.Duration, logger log.Logger) (Provider, error) { - if network == "" { - network = "udp" - } - if address == "" { - return nil, errors.New("address is required") - } - return dogstatsdProvider{ - e: dogstatsd.NewEmitter(network, address, prefix, interval, logger), - }, nil -} - -type dogstatsdProvider struct { - e *dogstatsd.Emitter -} - -var _ Provider = dogstatsdProvider{} - -// NewCounter implements Provider. Help is ignored. 
-func (p dogstatsdProvider) NewCounter(name, _ string) metrics.Counter { - return p.e.NewCounter(name) -} - -// NewHistogram implements Provider. Help is ignored. -func (p dogstatsdProvider) NewHistogram(name, _ string, min, max int64, sigfigs int, quantiles ...int) (metrics.Histogram, error) { - return p.e.NewHistogram(name), nil -} - -// NewGauge implements Provider. Help is ignored. -func (p dogstatsdProvider) NewGauge(name, _ string) metrics.Gauge { - return p.e.NewGauge(name) -} - -// Stop will call the underlying statsd.Emitter's Stop method. -func (p dogstatsdProvider) Stop() { - p.e.Stop() -} - -// NewExpvarProvider is a very thin wrapper over the expvar package. -// If a prefix is provided, it will prefix all metric names. -func NewExpvarProvider(prefix string) Provider { - return expvarProvider{prefix: prefix} -} - -type expvarProvider struct { - prefix string -} - -var _ Provider = expvarProvider{} - -// NewCounter implements Provider. Help is ignored. -func (p expvarProvider) NewCounter(name, _ string) metrics.Counter { - return kitexp.NewCounter(p.prefix + name) -} - -// NewHistogram implements Provider. Help is ignored. -func (p expvarProvider) NewHistogram(name, _ string, min, max int64, sigfigs int, quantiles ...int) (metrics.Histogram, error) { - return kitexp.NewHistogram(p.prefix+name, min, max, sigfigs, quantiles...), nil -} - -// NewGauge implements Provider. Help is ignored. -func (p expvarProvider) NewGauge(name, _ string) metrics.Gauge { - return kitexp.NewGauge(p.prefix + name) -} - -// Stop is a no-op. -func (expvarProvider) Stop() {} - -type prometheusProvider struct { - namespace string - subsystem string -} - -var _ Provider = prometheusProvider{} - -// NewPrometheusProvider returns a Prometheus provider that uses the provided -// namespace and subsystem for all metrics. -func NewPrometheusProvider(namespace, subsystem string) Provider { - return prometheusProvider{ - namespace: namespace, - subsystem: subsystem, - } -} - -// NewCounter implements Provider. -func (p prometheusProvider) NewCounter(name, help string) metrics.Counter { - return kitprom.NewCounter(prometheus.CounterOpts{ - Namespace: p.namespace, - Subsystem: p.subsystem, - Name: name, - Help: help, - }, nil) -} - -// NewHistogram ignores all parameters except name and help. -func (p prometheusProvider) NewHistogram(name, help string, _, _ int64, _ int, _ ...int) (metrics.Histogram, error) { - return kitprom.NewHistogram(prometheus.HistogramOpts{ - Namespace: p.namespace, - Subsystem: p.subsystem, - Name: name, - Help: help, - }, nil), nil -} - -// NewGauge implements Provider. -func (p prometheusProvider) NewGauge(name, help string) metrics.Gauge { - return kitprom.NewGauge(prometheus.GaugeOpts{ - Namespace: p.namespace, - Subsystem: p.subsystem, - Name: name, - Help: help, - }, nil) -} - -// Stop is a no-op. -func (prometheusProvider) Stop() {} - -var _ Provider = discardProvider{} - -// NewDiscardProvider returns a provider that will discard all metrics. -func NewDiscardProvider() Provider { - return discardProvider{} -} - -type discardProvider struct{} - -func (p discardProvider) NewCounter(name string, _ string) metrics.Counter { - return discard.NewCounter(name) -} - -func (p discardProvider) NewHistogram(name string, _ string, _ int64, _ int64, _ int, _ ...int) (metrics.Histogram, error) { - return discard.NewHistogram(name), nil -} - -func (p discardProvider) NewGauge(name string, _ string) metrics.Gauge { - return discard.NewGauge(name) -} - -// Stop is a no-op. 
-func (p discardProvider) Stop() {} diff --git a/metrics/provider/providers_test.go b/metrics/provider/providers_test.go deleted file mode 100644 index 15d3e4d..0000000 --- a/metrics/provider/providers_test.go +++ /dev/null @@ -1,56 +0,0 @@ -package provider - -import ( - "testing" - "time" - - "github.com/go-kit/kit/log" -) - -func TestGraphite(t *testing.T) { - p, err := NewGraphiteProvider("network", "address", "prefix", time.Second, log.NewNopLogger()) - if err != nil { - t.Fatal(err) - } - testProvider(t, "Graphite", p) -} - -func TestStatsd(t *testing.T) { - p, err := NewStatsdProvider("network", "address", "prefix", time.Second, log.NewNopLogger()) - if err != nil { - t.Fatal(err) - } - testProvider(t, "Statsd", p) -} - -func TestDogStatsd(t *testing.T) { - p, err := NewDogStatsdProvider("network", "address", "prefix", time.Second, log.NewNopLogger()) - if err != nil { - t.Fatal(err) - } - testProvider(t, "DogStatsd", p) -} - -func TestExpvar(t *testing.T) { - testProvider(t, "Expvar", NewExpvarProvider("prefix")) -} - -func TestPrometheus(t *testing.T) { - testProvider(t, "Prometheus", NewPrometheusProvider("namespace", "subsystem")) -} - -func testProvider(t *testing.T, what string, p Provider) { - c := p.NewCounter("counter", "Counter help.") - c.Add(1) - - h, err := p.NewHistogram("histogram", "Histogram help.", 1, 100, 3, 50, 95, 99) - if err != nil { - t.Errorf("%s: NewHistogram: %v", what, err) - } - h.Observe(99) - - g := p.NewGauge("gauge", "Gauge help.") - g.Set(123) - - p.Stop() -} diff --git a/metrics/provider/statsd.go b/metrics/provider/statsd.go new file mode 100644 index 0000000..2dc1b8c --- /dev/null +++ b/metrics/provider/statsd.go @@ -0,0 +1,43 @@ +package provider + +import ( + "github.com/go-kit/kit/metrics3" + "github.com/go-kit/kit/metrics3/statsd" +) + +type statsdProvider struct { + s *statsd.Statsd + stop func() +} + +// NewStatsdProvider wraps the given Statsd object and stop func and returns a +// Provider that produces Statsd metrics. A typical stop function would be +// ticker.Stop from the ticker passed to the SendLoop helper method. +func NewStatsdProvider(s *statsd.Statsd, stop func()) Provider { + return &statsdProvider{ + s: s, + stop: stop, + } +} + +// NewCounter implements Provider. +func (p *statsdProvider) NewCounter(name string) metrics.Counter { + return p.s.NewCounter(name, 1.0) +} + +// NewGauge implements Provider. +func (p *statsdProvider) NewGauge(name string) metrics.Gauge { + return p.s.NewGauge(name) +} + +// NewHistogram implements Provider, returning a StatsD Timing that accepts +// observations in milliseconds. The sample rate is fixed at 1.0. The bucket +// parameter is ignored. +func (p *statsdProvider) NewHistogram(name string, _ int) metrics.Histogram { + return p.s.NewTiming(name, 1.0) +} + +// Stop implements Provider, invoking the stop function passed at construction. +func (p *statsdProvider) Stop() { + p.stop() +} diff --git a/metrics/scaled_histogram.go b/metrics/scaled_histogram.go deleted file mode 100644 index d63bb97..0000000 --- a/metrics/scaled_histogram.go +++ /dev/null @@ -1,23 +0,0 @@ -package metrics - -type scaledHistogram struct { - Histogram - scale int64 -} - -// NewScaledHistogram returns a Histogram whose observed values are downscaled -// (divided) by scale. 
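The statsd-backed Provider added above is constructed from an already-running Statsd object plus a stop function. A minimal wiring sketch, assuming the post-rename `metrics/...` import paths and a purely illustrative server address; treat it as a shape, not the canonical setup.

```go
package main

import (
	"time"

	"github.com/go-kit/kit/log"
	"github.com/go-kit/kit/metrics/provider" // paths assume the rename in this diff has settled
	"github.com/go-kit/kit/metrics/statsd"
)

func main() {
	// Build the Statsd object and pump it to the server on a fixed schedule.
	s := statsd.New("foo_svc.", log.NewNopLogger())
	ticker := time.NewTicker(5 * time.Second)
	go s.SendLoop(ticker.C, "udp", "statsd.internal:8125") // address is illustrative

	// The ticker's Stop func becomes the Provider's Stop.
	p := provider.NewStatsdProvider(s, ticker.Stop)
	defer p.Stop()

	requests := p.NewCounter("requests_total")
	requests.Add(1)
}
```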
-func NewScaledHistogram(h Histogram, scale int64) Histogram { - return scaledHistogram{h, scale} -} - -func (h scaledHistogram) With(f Field) Histogram { - return scaledHistogram{ - Histogram: h.Histogram.With(f), - scale: h.scale, - } -} - -func (h scaledHistogram) Observe(value int64) { - h.Histogram.Observe(value / h.scale) -} diff --git a/metrics/scaled_histogram_test.go b/metrics/scaled_histogram_test.go deleted file mode 100644 index 138f572..0000000 --- a/metrics/scaled_histogram_test.go +++ /dev/null @@ -1,26 +0,0 @@ -package metrics_test - -import ( - "testing" - - "github.com/go-kit/kit/metrics" - "github.com/go-kit/kit/metrics/expvar" - "github.com/go-kit/kit/metrics/teststat" -) - -func TestScaledHistogram(t *testing.T) { - var ( - quantiles = []int{50, 90, 99} - scale = int64(10) - metricName = "test_scaled_histogram" - ) - - var h metrics.Histogram - h = expvar.NewHistogram(metricName, 0, 1000, 3, quantiles...) - h = metrics.NewScaledHistogram(h, scale) - h = h.With(metrics.Field{Key: "a", Value: "b"}) - - const seed, mean, stdev = 333, 500, 100 // input values - teststat.PopulateNormalHistogram(t, h, seed, mean, stdev) // will be scaled down - assertExpvarNormalHistogram(t, metricName, mean/scale, stdev/scale, quantiles) -} diff --git a/metrics/statsd/emitter.go b/metrics/statsd/emitter.go deleted file mode 100644 index be3bb66..0000000 --- a/metrics/statsd/emitter.go +++ /dev/null @@ -1,159 +0,0 @@ -package statsd - -import ( - "bytes" - "fmt" - "net" - "time" - - "github.com/go-kit/kit/log" - "github.com/go-kit/kit/metrics" - "github.com/go-kit/kit/util/conn" -) - -// Emitter is a struct to manage connections and orchestrate the emission of -// metrics to a Statsd process. -type Emitter struct { - prefix string - keyVals chan keyVal - mgr *conn.Manager - logger log.Logger - quitc chan chan struct{} -} - -type keyVal struct { - key string - val string -} - -func stringToKeyVal(key string, keyVals chan keyVal) chan string { - vals := make(chan string) - go func() { - for val := range vals { - keyVals <- keyVal{key: key, val: val} - } - }() - return vals -} - -// NewEmitter will return an Emitter that will prefix all metrics names with the -// given prefix. Once started, it will attempt to create a connection with the -// given network and address via `net.Dial` and periodically post metrics to the -// connection in the statsd protocol. -func NewEmitter(network, address string, metricsPrefix string, flushInterval time.Duration, logger log.Logger) *Emitter { - return NewEmitterDial(net.Dial, network, address, metricsPrefix, flushInterval, logger) -} - -// NewEmitterDial is the same as NewEmitter, but allows you to specify your own -// Dialer function. This is primarily useful for tests. -func NewEmitterDial(dialer conn.Dialer, network, address string, metricsPrefix string, flushInterval time.Duration, logger log.Logger) *Emitter { - e := &Emitter{ - prefix: metricsPrefix, - mgr: conn.NewManager(dialer, network, address, time.After, logger), - logger: logger, - keyVals: make(chan keyVal), - quitc: make(chan chan struct{}), - } - go e.loop(flushInterval) - return e -} - -// NewCounter returns a Counter that emits observations in the statsd protocol -// via the Emitter's connection manager. Observations are buffered for the -// report interval or until the buffer exceeds a max packet size, whichever -// comes first. Fields are ignored. 
-func (e *Emitter) NewCounter(key string) metrics.Counter { - key = e.prefix + key - return &counter{ - key: key, - c: stringToKeyVal(key, e.keyVals), - } -} - -// NewHistogram returns a Histogram that emits observations in the statsd -// protocol via the Emitter's connection manager. Observations are buffered for -// the reporting interval or until the buffer exceeds a max packet size, -// whichever comes first. Fields are ignored. -// -// NewHistogram is mapped to a statsd Timing, so observations should represent -// milliseconds. If you observe in units of nanoseconds, you can make the -// translation with a ScaledHistogram: -// -// NewScaledHistogram(histogram, time.Millisecond) -// -// You can also enforce the constraint in a typesafe way with a millisecond -// TimeHistogram: -// -// NewTimeHistogram(histogram, time.Millisecond) -// -// TODO: support for sampling. -func (e *Emitter) NewHistogram(key string) metrics.Histogram { - key = e.prefix + key - return &histogram{ - key: key, - h: stringToKeyVal(key, e.keyVals), - } -} - -// NewGauge returns a Gauge that emits values in the statsd protocol via the -// the Emitter's connection manager. Values are buffered for the report -// interval or until the buffer exceeds a max packet size, whichever comes -// first. Fields are ignored. -// -// TODO: support for sampling -func (e *Emitter) NewGauge(key string) metrics.Gauge { - key = e.prefix + key - return &gauge{ - key: key, - g: stringToKeyVal(key, e.keyVals), - } -} - -func (e *Emitter) loop(d time.Duration) { - ticker := time.NewTicker(d) - defer ticker.Stop() - buf := &bytes.Buffer{} - for { - select { - case kv := <-e.keyVals: - fmt.Fprintf(buf, "%s:%s\n", kv.key, kv.val) - if buf.Len() > maxBufferSize { - e.Flush(buf) - } - - case <-ticker.C: - e.Flush(buf) - - case q := <-e.quitc: - e.Flush(buf) - close(q) - return - } - } -} - -// Stop will flush the current metrics and close the active connection. Calling -// stop more than once is a programmer error. -func (e *Emitter) Stop() { - q := make(chan struct{}) - e.quitc <- q - <-q -} - -// Flush will write the given buffer to a connection provided by the Emitter's -// connection manager. -func (e *Emitter) Flush(buf *bytes.Buffer) { - conn := e.mgr.Take() - if conn == nil { - e.logger.Log("during", "flush", "err", "connection unavailable") - return - } - - _, err := conn.Write(buf.Bytes()) - if err != nil { - e.logger.Log("during", "flush", "err", err) - } - buf.Reset() - - e.mgr.Put(err) -} diff --git a/metrics/statsd/statsd.go b/metrics/statsd/statsd.go index 4a87b7a..8a35c6d 100644 --- a/metrics/statsd/statsd.go +++ b/metrics/statsd/statsd.go @@ -1,215 +1,232 @@ -// Package statsd implements a statsd backend for package metrics. +// Package statsd provides a StatsD backend for package metrics. StatsD has no +// concept of arbitrary key-value tagging, so label values are not supported, +// and With is a no-op on all metrics. // -// The current implementation ignores fields. In the future, it would be good -// to have an implementation that accepted a set of predeclared field names at -// construction time, and used field values to produce delimiter-separated -// bucket (key) names. That is, -// -// c := NewFieldedCounter(..., "path", "status") -// c.Add(1) // "myprefix.unknown.unknown:1|c\n" -// c2 := c.With("path", "foo").With("status": "200") -// c2.Add(1) // "myprefix.foo.200:1|c\n" -// +// This package batches observations and emits them on some schedule to the +// remote server. 
This is useful even if you connect to your StatsD server over +// UDP. Emitting one network packet per observation can quickly overwhelm even +// the fastest internal network. package statsd import ( - "bytes" "fmt" "io" - "log" - "math" "time" - "sync/atomic" - - "github.com/go-kit/kit/metrics" + "github.com/go-kit/kit/log" + "github.com/go-kit/kit/metrics3" + "github.com/go-kit/kit/metrics3/internal/lv" + "github.com/go-kit/kit/metrics3/internal/ratemap" + "github.com/go-kit/kit/util/conn" ) -// statsd metrics take considerable influence from -// https://github.com/streadway/handy package statsd. - -const maxBufferSize = 1400 // bytes - -type counter struct { - key string - c chan string -} - -// NewCounter returns a Counter that emits observations in the statsd protocol -// to the passed writer. Observations are buffered for the report interval or -// until the buffer exceeds a max packet size, whichever comes first. Fields -// are ignored. +// Statsd receives metrics observations and forwards them to a StatsD server. +// Create a Statsd object, use it to create metrics, and pass those metrics as +// dependencies to the components that will use them. // -// TODO: support for sampling. -func NewCounter(w io.Writer, key string, reportInterval time.Duration) metrics.Counter { - return NewCounterTick(w, key, time.Tick(reportInterval)) -} - -// NewCounterTick is the same as NewCounter, but allows the user to pass in a -// ticker channel instead of invoking time.Tick. -func NewCounterTick(w io.Writer, key string, reportTicker <-chan time.Time) metrics.Counter { - c := &counter{ - key: key, - c: make(chan string), - } - go fwd(w, key, reportTicker, c.c) +// All metrics are buffered until WriteTo is called. Counters and gauges are +// aggregated into a single observation per timeseries per write. Timings are +// buffered but not aggregated. +// +// To regularly report metrics to an io.Writer, use the WriteLoop helper method. +// To send to a StatsD server, use the SendLoop helper method. +type Statsd struct { + prefix string + rates *ratemap.RateMap + + // The observations are collected in an N-dimensional vector space, even + // though they only take advantage of a single dimension (name). This is an + // implementation detail born purely from convenience. It would be more + // accurate to collect them in a map[string][]float64, but we already have + // this nice data structure and helper methods. + counters *lv.Space + gauges *lv.Space + timings *lv.Space + + logger log.Logger +} + +// New returns a Statsd object that may be used to create metrics. Prefix is +// applied to all created metrics. Callers must ensure that regular calls to +// WriteTo are performed, either manually or with one of the helper methods. +func New(prefix string, logger log.Logger) *Statsd { + return &Statsd{ + prefix: prefix, + rates: ratemap.New(), + counters: lv.NewSpace(), + gauges: lv.NewSpace(), + timings: lv.NewSpace(), + logger: logger, + } +} + +// NewCounter returns a counter, sending observations to this Statsd object. +func (s *Statsd) NewCounter(name string, sampleRate float64) *Counter { + s.rates.Set(s.prefix+name, sampleRate) + return &Counter{ + name: s.prefix + name, + obs: s.counters.Observe, + } +} + +// NewGauge returns a gauge, sending observations to this Statsd object. 
+func (s *Statsd) NewGauge(name string) *Gauge { + return &Gauge{ + name: s.prefix + name, + obs: s.gauges.Observe, + } +} + +// NewTiming returns a histogram whose observations are interpreted as +// millisecond durations, and are forwarded to this Statsd object. +func (s *Statsd) NewTiming(name string, sampleRate float64) *Timing { + s.rates.Set(s.prefix+name, sampleRate) + return &Timing{ + name: s.prefix + name, + obs: s.timings.Observe, + } +} + +// WriteLoop is a helper method that invokes WriteTo to the passed writer every +// time the passed channel fires. This method blocks until the channel is +// closed, so clients probably want to run it in its own goroutine. For typical +// usage, create a time.Ticker and pass its C channel to this method. +func (s *Statsd) WriteLoop(c <-chan time.Time, w io.Writer) { + for range c { + if _, err := s.WriteTo(w); err != nil { + s.logger.Log("during", "WriteTo", "err", err) + } + } +} + +// SendLoop is a helper method that wraps WriteLoop, passing a managed +// connection to the network and address. Like WriteLoop, this method blocks +// until the channel is closed, so clients probably want to start it in its own +// goroutine. For typical usage, create a time.Ticker and pass its C channel to +// this method. +func (s *Statsd) SendLoop(c <-chan time.Time, network, address string) { + s.WriteLoop(c, conn.NewDefaultManager(network, address, s.logger)) +} + +// WriteTo flushes the buffered content of the metrics to the writer, in +// StatsD format. WriteTo abides best-effort semantics, so observations are +// lost if there is a problem with the write. Clients should be sure to call +// WriteTo regularly, ideally through the WriteLoop or SendLoop helper methods. +func (s *Statsd) WriteTo(w io.Writer) (count int64, err error) { + var n int + + s.counters.Reset().Walk(func(name string, _ lv.LabelValues, values []float64) bool { + n, err = fmt.Fprintf(w, "%s:%f|c%s\n", name, sum(values), sampling(s.rates.Get(name))) + if err != nil { + return false + } + count += int64(n) + return true + }) + if err != nil { + return count, err + } + + s.gauges.Reset().Walk(func(name string, _ lv.LabelValues, values []float64) bool { + n, err = fmt.Fprintf(w, "%s:%f|g\n", name, last(values)) + if err != nil { + return false + } + count += int64(n) + return true + }) + if err != nil { + return count, err + } + + s.timings.Reset().Walk(func(name string, _ lv.LabelValues, values []float64) bool { + sampleRate := s.rates.Get(name) + for _, value := range values { + n, err = fmt.Fprintf(w, "%s:%f|ms%s\n", name, value, sampling(sampleRate)) + if err != nil { + return false + } + count += int64(n) + } + return true + }) + if err != nil { + return count, err + } + + return count, err +} + +func sum(a []float64) float64 { + var v float64 + for _, f := range a { + v += f + } + return v +} + +func last(a []float64) float64 { + return a[len(a)-1] +} + +func sampling(r float64) string { + var sv string + if r < 1.0 { + sv = fmt.Sprintf("|@%f", r) + } + return sv +} + +type observeFunc func(name string, lvs lv.LabelValues, value float64) + +// Counter is a StatsD counter. Observations are forwarded to a Statsd object, +// and aggregated (summed) per timeseries. +type Counter struct { + name string + obs observeFunc +} + +// With is a no-op. 
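WriteTo above sums counters, keeps only the last gauge value, and emits every buffered timing, appending a `|@rate` suffix when the sample rate is below 1.0. A sketch of flushing into a buffer to inspect the wire format; names are illustrative and the import path assumes the post-rename `metrics/statsd` location. The commented lines show what the formatting verbs above produce for this particular sequence of observations.

```go
package statsd_test

import (
	"bytes"
	"fmt"

	"github.com/go-kit/kit/log"
	"github.com/go-kit/kit/metrics/statsd"
)

func ExampleStatsd_WriteTo() {
	s := statsd.New("myservice.", log.NewNopLogger())

	c := s.NewCounter("requests", 1.0)
	c.Add(1)
	c.Add(2) // counters are summed per flush

	g := s.NewGauge("queue_depth")
	g.Set(10)
	g.Set(7) // gauges keep only the last value

	tm := s.NewTiming("latency_ms", 0.25)
	tm.Observe(38)

	var buf bytes.Buffer
	s.WriteTo(&buf)
	fmt.Print(buf.String())

	// Typically prints, given the formatting above:
	// myservice.requests:3.000000|c
	// myservice.queue_depth:7.000000|g
	// myservice.latency_ms:38.000000|ms|@0.250000
}
```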
+func (c *Counter) With(...string) metrics.Counter { return c } -func (c *counter) Name() string { return c.key } - -func (c *counter) With(metrics.Field) metrics.Counter { return c } - -func (c *counter) Add(delta uint64) { c.c <- fmt.Sprintf("%d|c", delta) } - -type gauge struct { - key string - lastValue uint64 // math.Float64frombits - g chan string -} - -// NewGauge returns a Gauge that emits values in the statsd protocol to the -// passed writer. Values are buffered for the report interval or until the -// buffer exceeds a max packet size, whichever comes first. Fields are -// ignored. -// -// TODO: support for sampling. -func NewGauge(w io.Writer, key string, reportInterval time.Duration) metrics.Gauge { - return NewGaugeTick(w, key, time.Tick(reportInterval)) -} - -// NewGaugeTick is the same as NewGauge, but allows the user to pass in a ticker -// channel instead of invoking time.Tick. -func NewGaugeTick(w io.Writer, key string, reportTicker <-chan time.Time) metrics.Gauge { - g := &gauge{ - key: key, - g: make(chan string), - } - go fwd(w, key, reportTicker, g.g) +// Add implements metrics.Counter. +func (c *Counter) Add(delta float64) { + c.obs(c.name, lv.LabelValues{}, delta) +} + +// Gauge is a StatsD gauge. Observations are forwarded to a Statsd object, and +// aggregated (the last observation selected) per timeseries. +type Gauge struct { + name string + obs observeFunc +} + +// With is a no-op. +func (g *Gauge) With(...string) metrics.Gauge { return g } -func (g *gauge) Name() string { return g.key } - -func (g *gauge) With(metrics.Field) metrics.Gauge { return g } - -func (g *gauge) Add(delta float64) { - // https://github.com/etsy/statsd/blob/master/docs/metric_types.md#gauges - sign := "+" - if delta < 0 { - sign, delta = "-", -delta - } - g.g <- fmt.Sprintf("%s%f|g", sign, delta) -} - -func (g *gauge) Set(value float64) { - atomic.StoreUint64(&g.lastValue, math.Float64bits(value)) - g.g <- fmt.Sprintf("%f|g", value) -} - -func (g *gauge) Get() float64 { - return math.Float64frombits(atomic.LoadUint64(&g.lastValue)) -} - -// NewCallbackGauge emits values in the statsd protocol to the passed writer. -// It collects values every scrape interval from the callback. Values are -// buffered for the report interval or until the buffer exceeds a max packet -// size, whichever comes first. The report and scrape intervals may be the -// same. The callback determines the value, and fields are ignored, so -// NewCallbackGauge returns nothing. -func NewCallbackGauge(w io.Writer, key string, reportInterval, scrapeInterval time.Duration, callback func() float64) { - NewCallbackGaugeTick(w, key, time.Tick(reportInterval), time.Tick(scrapeInterval), callback) -} - -// NewCallbackGaugeTick is the same as NewCallbackGauge, but allows the user to -// pass in ticker channels instead of durations to control report and scrape -// intervals. -func NewCallbackGaugeTick(w io.Writer, key string, reportTicker, scrapeTicker <-chan time.Time, callback func() float64) { - go fwd(w, key, reportTicker, emitEvery(scrapeTicker, callback)) -} - -func emitEvery(emitTicker <-chan time.Time, callback func() float64) <-chan string { - c := make(chan string) - go func() { - for range emitTicker { - c <- fmt.Sprintf("%f|g", callback()) - } - }() - return c -} - -type histogram struct { - key string - h chan string -} - -// NewHistogram returns a Histogram that emits observations in the statsd -// protocol to the passed writer. 
Observations are buffered for the reporting -// interval or until the buffer exceeds a max packet size, whichever comes -// first. Fields are ignored. -// -// NewHistogram is mapped to a statsd Timing, so observations should represent -// milliseconds. If you observe in units of nanoseconds, you can make the -// translation with a ScaledHistogram: -// -// NewScaledHistogram(statsdHistogram, time.Millisecond) -// -// You can also enforce the constraint in a typesafe way with a millisecond -// TimeHistogram: -// -// NewTimeHistogram(statsdHistogram, time.Millisecond) -// -// TODO: support for sampling. -func NewHistogram(w io.Writer, key string, reportInterval time.Duration) metrics.Histogram { - return NewHistogramTick(w, key, time.Tick(reportInterval)) -} - -// NewHistogramTick is the same as NewHistogram, but allows the user to pass a -// ticker channel instead of invoking time.Tick. -func NewHistogramTick(w io.Writer, key string, reportTicker <-chan time.Time) metrics.Histogram { - h := &histogram{ - key: key, - h: make(chan string), - } - go fwd(w, key, reportTicker, h.h) - return h -} - -func (h *histogram) Name() string { return h.key } - -func (h *histogram) With(metrics.Field) metrics.Histogram { return h } - -func (h *histogram) Observe(value int64) { - h.h <- fmt.Sprintf("%d|ms", value) -} - -func (h *histogram) Distribution() ([]metrics.Bucket, []metrics.Quantile) { - // TODO(pb): no way to do this without introducing e.g. codahale/hdrhistogram - return []metrics.Bucket{}, []metrics.Quantile{} -} - -func fwd(w io.Writer, key string, reportTicker <-chan time.Time, c <-chan string) { - buf := &bytes.Buffer{} - for { - select { - case s := <-c: - fmt.Fprintf(buf, "%s:%s\n", key, s) - if buf.Len() > maxBufferSize { - flush(w, buf) - } - - case <-reportTicker: - flush(w, buf) - } - } -} - -func flush(w io.Writer, buf *bytes.Buffer) { - if buf.Len() <= 0 { - return - } - if _, err := w.Write(buf.Bytes()); err != nil { - log.Printf("error: could not write to statsd: %v", err) - } - buf.Reset() -} +// Set implements metrics.Gauge. +func (g *Gauge) Set(value float64) { + g.obs(g.name, lv.LabelValues{}, value) +} + +// Timing is a StatsD timing, or metrics.Histogram. Observations are +// forwarded to a Statsd object, and collected (but not aggregated) per +// timeseries. +type Timing struct { + name string + obs observeFunc +} + +// With is a no-op. +func (t *Timing) With(...string) metrics.Histogram { + return t +} + +// Observe implements metrics.Histogram. Value is interpreted as milliseconds. 
+func (t *Timing) Observe(value float64) { + t.obs(t.name, lv.LabelValues{}, value) +} diff --git a/metrics/statsd/statsd_test.go b/metrics/statsd/statsd_test.go index 516520d..a09eccd 100644 --- a/metrics/statsd/statsd_test.go +++ b/metrics/statsd/statsd_test.go @@ -1,259 +1,66 @@ package statsd import ( - "bytes" - "fmt" - "net" - "strings" - "sync" "testing" - "time" "github.com/go-kit/kit/log" - "github.com/go-kit/kit/util/conn" + "github.com/go-kit/kit/metrics3/teststat" ) -func TestEmitterCounter(t *testing.T) { - e, buf := testEmitter() - - c := e.NewCounter("test_statsd_counter") - c.Add(1) - c.Add(2) - - // give time for things to emit - time.Sleep(time.Millisecond * 250) - // force a flush and stop - e.Stop() - - want := "prefix.test_statsd_counter:1|c\nprefix.test_statsd_counter:2|c\n" - have := buf.String() - if want != have { - t.Errorf("want %q, have %q", want, have) +func TestCounter(t *testing.T) { + prefix, name := "abc.", "def" + label, value := "label", "value" // ignored + regex := `^` + prefix + name + `:([0-9\.]+)\|c$` + s := New(prefix, log.NewNopLogger()) + counter := s.NewCounter(name, 1.0).With(label, value) + valuef := teststat.SumLines(s, regex) + if err := teststat.TestCounter(counter, valuef); err != nil { + t.Fatal(err) } } -func TestEmitterGauge(t *testing.T) { - e, buf := testEmitter() +func TestCounterSampled(t *testing.T) { + // This will involve multiplying the observed sum by the inverse of the + // sample rate and checking against the expected value within some + // tolerance. + t.Skip("TODO") +} - g := e.NewGauge("test_statsd_gauge") - - delta := 1.0 - g.Add(delta) - - // give time for things to emit - time.Sleep(time.Millisecond * 250) - // force a flush and stop - e.Stop() - - want := fmt.Sprintf("prefix.test_statsd_gauge:+%f|g\n", delta) - have := buf.String() - if want != have { - t.Errorf("want %q, have %q", want, have) +func TestGauge(t *testing.T) { + prefix, name := "ghi.", "jkl" + label, value := "xyz", "abc" // ignored + regex := `^` + prefix + name + `:([0-9\.]+)\|g$` + s := New(prefix, log.NewNopLogger()) + gauge := s.NewGauge(name).With(label, value) + valuef := teststat.LastLine(s, regex) + if err := teststat.TestGauge(gauge, valuef); err != nil { + t.Fatal(err) } } -func TestEmitterHistogram(t *testing.T) { - e, buf := testEmitter() - h := e.NewHistogram("test_statsd_histogram") +// StatsD timings just emit all observations. So, we collect them into a generic +// histogram, and run the statistics test on that. 
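Because a Timing interprets observations as milliseconds, callers converting from a time.Duration should divide explicitly rather than observe raw nanoseconds. A hedged sketch of a handler instrumented this way; the prefix, metric name, and the flush loop (assumed to be started elsewhere, e.g. in func main) are illustrative.

```go
package app

import (
	"time"

	"github.com/go-kit/kit/log"
	"github.com/go-kit/kit/metrics/statsd"
)

// Wiring is illustrative: a package-level Statsd whose WriteLoop/SendLoop is
// assumed to run elsewhere.
var (
	s   = statsd.New("myservice.", log.NewNopLogger())
	dur = s.NewTiming("request_duration_ms", 1.0)
)

func handleRequest() {
	defer func(begin time.Time) {
		// Timings are interpreted as milliseconds, so convert explicitly.
		dur.Observe(float64(time.Since(begin)) / float64(time.Millisecond))
	}(time.Now())
	// ... handle the request ...
}
```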
- h.Observe(123) - - // give time for things to emit - time.Sleep(time.Millisecond * 250) - // force a flush and stop - e.Stop() - - want := "prefix.test_statsd_histogram:123|ms\n" - have := buf.String() - if want != have { - t.Errorf("want %q, have %q", want, have) +func TestTiming(t *testing.T) { + prefix, name := "statsd.", "timing_test" + label, value := "abc", "def" // ignored + regex := `^` + prefix + name + `:([0-9\.]+)\|ms$` + s := New(prefix, log.NewNopLogger()) + timing := s.NewTiming(name, 1.0).With(label, value) + quantiles := teststat.Quantiles(s, regex, 50) // no |@0.X + if err := teststat.TestHistogram(timing, quantiles, 0.01); err != nil { + t.Fatal(err) } } -func TestCounter(t *testing.T) { - buf := &syncbuf{buf: &bytes.Buffer{}} - reportc := make(chan time.Time) - c := NewCounterTick(buf, "test_statsd_counter", reportc) - - c.Add(1) - c.Add(2) - - want, have := "test_statsd_counter:1|c\ntest_statsd_counter:2|c\n", "" - by(t, 100*time.Millisecond, func() bool { - have = buf.String() - return want == have - }, func() { - reportc <- time.Now() - }, fmt.Sprintf("want %q, have %q", want, have)) -} - -func TestGauge(t *testing.T) { - buf := &syncbuf{buf: &bytes.Buffer{}} - reportc := make(chan time.Time) - g := NewGaugeTick(buf, "test_statsd_gauge", reportc) - - delta := 1.0 - g.Add(delta) - - want, have := fmt.Sprintf("test_statsd_gauge:+%f|g\n", delta), "" - by(t, 100*time.Millisecond, func() bool { - have = buf.String() - return want == have - }, func() { - reportc <- time.Now() - }, fmt.Sprintf("want %q, have %q", want, have)) - - buf.Reset() - delta = -2.0 - g.Add(delta) - - want, have = fmt.Sprintf("test_statsd_gauge:%f|g\n", delta), "" - by(t, 100*time.Millisecond, func() bool { - have = buf.String() - return want == have - }, func() { - reportc <- time.Now() - }, fmt.Sprintf("want %q, have %q", want, have)) - - buf.Reset() - value := 3.0 - g.Set(value) - - want, have = fmt.Sprintf("test_statsd_gauge:%f|g\n", value), "" - by(t, 100*time.Millisecond, func() bool { - have = buf.String() - return want == have - }, func() { - reportc <- time.Now() - }, fmt.Sprintf("want %q, have %q", want, have)) -} - -func TestCallbackGauge(t *testing.T) { - buf := &syncbuf{buf: &bytes.Buffer{}} - reportc, scrapec := make(chan time.Time), make(chan time.Time) - value := 55.55 - cb := func() float64 { return value } - NewCallbackGaugeTick(buf, "test_statsd_callback_gauge", reportc, scrapec, cb) - - scrapec <- time.Now() - reportc <- time.Now() - - // Travis is annoying - by(t, time.Second, func() bool { - return buf.String() != "" - }, func() { - reportc <- time.Now() - }, "buffer never got write+flush") - - want, have := fmt.Sprintf("test_statsd_callback_gauge:%f|g\n", value), "" - by(t, 100*time.Millisecond, func() bool { - have = buf.String() - return strings.HasPrefix(have, want) // HasPrefix because we might get multiple writes - }, func() { - reportc <- time.Now() - }, fmt.Sprintf("want %q, have %q", want, have)) -} - -func TestHistogram(t *testing.T) { - buf := &syncbuf{buf: &bytes.Buffer{}} - reportc := make(chan time.Time) - h := NewHistogramTick(buf, "test_statsd_histogram", reportc) - - h.Observe(123) - - want, have := "test_statsd_histogram:123|ms\n", "" - by(t, 100*time.Millisecond, func() bool { - have = buf.String() - return want == have - }, func() { - reportc <- time.Now() - }, fmt.Sprintf("want %q, have %q", want, have)) -} - -func by(t *testing.T, d time.Duration, check func() bool, execute func(), msg string) { - deadline := time.Now().Add(d) - for !check() { - if 
time.Now().After(deadline) { - t.Fatal(msg) - } - execute() +func TestTimingSampled(t *testing.T) { + prefix, name := "statsd.", "sampled_timing_test" + label, value := "foo", "bar" // ignored + regex := `^` + prefix + name + `:([0-9\.]+)\|ms\|@0\.01[0]*$` + s := New(prefix, log.NewNopLogger()) + timing := s.NewTiming(name, 0.01).With(label, value) + quantiles := teststat.Quantiles(s, regex, 50) + if err := teststat.TestHistogram(timing, quantiles, 0.02); err != nil { + t.Fatal(err) } } - -type syncbuf struct { - mtx sync.Mutex - buf *bytes.Buffer -} - -func (s *syncbuf) Write(p []byte) (int, error) { - s.mtx.Lock() - defer s.mtx.Unlock() - return s.buf.Write(p) -} - -func (s *syncbuf) String() string { - s.mtx.Lock() - defer s.mtx.Unlock() - return s.buf.String() -} - -func (s *syncbuf) Reset() { - s.mtx.Lock() - defer s.mtx.Unlock() - s.buf.Reset() -} - -func testEmitter() (*Emitter, *syncbuf) { - buf := &syncbuf{buf: &bytes.Buffer{}} - e := &Emitter{ - prefix: "prefix.", - mgr: conn.NewManager(mockDialer(buf), "", "", time.After, log.NewNopLogger()), - logger: log.NewNopLogger(), - keyVals: make(chan keyVal), - quitc: make(chan chan struct{}), - } - go e.loop(time.Millisecond * 20) - return e, buf -} - -func mockDialer(buf *syncbuf) conn.Dialer { - return func(net, addr string) (net.Conn, error) { - return &mockConn{buf}, nil - } -} - -type mockConn struct { - buf *syncbuf -} - -func (c *mockConn) Read(b []byte) (n int, err error) { - panic("not implemented") -} - -func (c *mockConn) Write(b []byte) (n int, err error) { - return c.buf.Write(b) -} - -func (c *mockConn) Close() error { - panic("not implemented") -} - -func (c *mockConn) LocalAddr() net.Addr { - panic("not implemented") -} - -func (c *mockConn) RemoteAddr() net.Addr { - panic("not implemented") -} - -func (c *mockConn) SetDeadline(t time.Time) error { - panic("not implemented") -} - -func (c *mockConn) SetReadDeadline(t time.Time) error { - panic("not implemented") -} - -func (c *mockConn) SetWriteDeadline(t time.Time) error { - panic("not implemented") -} diff --git a/metrics/teststat/buffers.go b/metrics/teststat/buffers.go new file mode 100644 index 0000000..6021780 --- /dev/null +++ b/metrics/teststat/buffers.go @@ -0,0 +1,65 @@ +package teststat + +import ( + "bufio" + "bytes" + "io" + "regexp" + "strconv" + + "github.com/go-kit/kit/metrics3/generic" +) + +// SumLines expects a regex whose first capture group can be parsed as a +// float64. It will dump the WriterTo and parse each line, expecting to find a +// match. It returns the sum of all captured floats. +func SumLines(w io.WriterTo, regex string) func() float64 { + return func() float64 { + sum, _ := stats(w, regex, nil) + return sum + } +} + +// LastLine expects a regex whose first capture group can be parsed as a +// float64. It will dump the WriterTo and parse each line, expecting to find a +// match. It returns the final captured float. +func LastLine(w io.WriterTo, regex string) func() float64 { + return func() float64 { + _, final := stats(w, regex, nil) + return final + } +} + +// Quantiles expects a regex whose first capture group can be parsed as a +// float64. It will dump the WriterTo and parse each line, expecting to find a +// match. It observes all captured floats into a generic.Histogram with the +// given number of buckets, and returns the 50th, 90th, 95th, and 99th quantiles +// from that histogram. 
+func Quantiles(w io.WriterTo, regex string, buckets int) func() (float64, float64, float64, float64) { + return func() (float64, float64, float64, float64) { + h := generic.NewHistogram("quantile-test", buckets) + stats(w, regex, h) + return h.Quantile(0.50), h.Quantile(0.90), h.Quantile(0.95), h.Quantile(0.99) + } +} + +func stats(w io.WriterTo, regex string, h *generic.Histogram) (sum, final float64) { + re := regexp.MustCompile(regex) + buf := &bytes.Buffer{} + w.WriteTo(buf) + //fmt.Fprintf(os.Stderr, "%s\n", buf.String()) + s := bufio.NewScanner(buf) + for s.Scan() { + match := re.FindStringSubmatch(s.Text()) + f, err := strconv.ParseFloat(match[1], 64) + if err != nil { + panic(err) + } + sum += f + final = f + if h != nil { + h.Observe(f) + } + } + return sum, final +} diff --git a/metrics/teststat/circonus.go b/metrics/teststat/circonus.go deleted file mode 100644 index c070d37..0000000 --- a/metrics/teststat/circonus.go +++ /dev/null @@ -1,55 +0,0 @@ -package teststat - -import ( - "math" - "strconv" - "strings" - "testing" - - "github.com/codahale/hdrhistogram" -) - -// AssertCirconusNormalHistogram ensures the Circonus Histogram data captured in -// the result slice abides a normal distribution. -func AssertCirconusNormalHistogram(t *testing.T, mean, stdev, min, max int64, result []string) { - if len(result) <= 0 { - t.Fatal("no results") - } - - // Circonus just dumps the raw counts. We need to do our own statistical analysis. - h := hdrhistogram.New(min, max, 3) - - for _, s := range result { - // "H[1.23e04]=123" - toks := strings.Split(s, "=") - if len(toks) != 2 { - t.Fatalf("bad H value: %q", s) - } - - var bucket string - bucket = toks[0] - bucket = bucket[2 : len(bucket)-1] // "H[1.23e04]" -> "1.23e04" - f, err := strconv.ParseFloat(bucket, 64) - if err != nil { - t.Fatalf("error parsing H value: %q: %v", s, err) - } - - count, err := strconv.ParseFloat(toks[1], 64) - if err != nil { - t.Fatalf("error parsing H count: %q: %v", s, err) - } - - h.RecordValues(int64(f), int64(count)) - } - - // Apparently Circonus buckets observations by dropping a sigfig, so we have - // very coarse tolerance. - var tolerance int64 = 30 - for _, quantile := range []int{50, 90, 99} { - want := normalValueAtQuantile(mean, stdev, quantile) - have := h.ValueAtQuantile(float64(quantile)) - if int64(math.Abs(float64(want)-float64(have))) > tolerance { - t.Errorf("quantile %d: want %d, have %d", quantile, want, have) - } - } -} diff --git a/metrics/teststat/common.go b/metrics/teststat/common.go deleted file mode 100644 index 9f2d1af..0000000 --- a/metrics/teststat/common.go +++ /dev/null @@ -1,73 +0,0 @@ -// Package teststat contains helper functions for statistical testing of -// metrics implementations. -package teststat - -import ( - "math" - "math/rand" - "testing" - - "github.com/go-kit/kit/metrics" -) - -const population = 1234 - -// PopulateNormalHistogram populates the Histogram with a normal distribution -// of observations. 
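SumLines, LastLine, and Quantiles accept any io.WriterTo plus a regex whose first capture group must parse as a float64, and every emitted line is expected to match. A tiny sketch with a hand-rolled WriterTo standing in for a real backend makes that contract concrete; the type, names, and regex are illustrative, and the import path assumes the post-rename `metrics/teststat` location.

```go
package teststat_test

import (
	"fmt"
	"io"
	"testing"

	"github.com/go-kit/kit/metrics/teststat"
)

// fakeReport emits a few StatsD-style lines; it stands in for e.g. *statsd.Statsd.
type fakeReport struct{}

func (fakeReport) WriteTo(w io.Writer) (int64, error) {
	n, err := fmt.Fprint(w, "svc.requests:1.00|c\nsvc.requests:2.50|c\n")
	return int64(n), err
}

func TestSumLinesContract(t *testing.T) {
	value := teststat.SumLines(fakeReport{}, `^svc\.requests:([0-9\.]+)\|c$`)
	if want, have := 3.5, value(); want != have {
		t.Fatalf("want %f, have %f", want, have)
	}
}
```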
-func PopulateNormalHistogram(t *testing.T, h metrics.Histogram, seed int64, mean, stdev int64) { - r := rand.New(rand.NewSource(seed)) - for i := 0; i < population; i++ { - sample := int64(r.NormFloat64()*float64(stdev) + float64(mean)) - if sample < 0 { - sample = 0 - } - h.Observe(sample) - } -} - -// https://en.wikipedia.org/wiki/Normal_distribution#Quantile_function -func normalValueAtQuantile(mean, stdev int64, quantile int) int64 { - return int64(float64(mean) + float64(stdev)*math.Sqrt2*erfinv(2*(float64(quantile)/100)-1)) -} - -// https://code.google.com/p/gostat/source/browse/stat/normal.go -func observationsLessThan(mean, stdev int64, x float64, total int) int { - cdf := ((1.0 / 2.0) * (1 + math.Erf((x-float64(mean))/(float64(stdev)*math.Sqrt2)))) - return int(cdf * float64(total)) -} - -// https://stackoverflow.com/questions/5971830/need-code-for-inverse-error-function -func erfinv(y float64) float64 { - if y < -1.0 || y > 1.0 { - panic("invalid input") - } - - var ( - a = [4]float64{0.886226899, -1.645349621, 0.914624893, -0.140543331} - b = [4]float64{-2.118377725, 1.442710462, -0.329097515, 0.012229801} - c = [4]float64{-1.970840454, -1.624906493, 3.429567803, 1.641345311} - d = [2]float64{3.543889200, 1.637067800} - ) - - const y0 = 0.7 - var x, z float64 - - if math.Abs(y) == 1.0 { - x = -y * math.Log(0.0) - } else if y < -y0 { - z = math.Sqrt(-math.Log((1.0 + y) / 2.0)) - x = -(((c[3]*z+c[2])*z+c[1])*z + c[0]) / ((d[1]*z+d[0])*z + 1.0) - } else { - if y < y0 { - z = y * y - x = y * (((a[3]*z+a[2])*z+a[1])*z + a[0]) / ((((b[3]*z+b[3])*z+b[1])*z+b[0])*z + 1.0) - } else { - z = math.Sqrt(-math.Log((1.0 - y) / 2.0)) - x = (((c[3]*z+c[2])*z+c[1])*z + c[0]) / ((d[1]*z+d[0])*z + 1.0) - } - x = x - (math.Erf(x)-y)/(2.0/math.SqrtPi*math.Exp(-x*x)) - x = x - (math.Erf(x)-y)/(2.0/math.SqrtPi*math.Exp(-x*x)) - } - - return x -} diff --git a/metrics/teststat/expvar.go b/metrics/teststat/expvar.go deleted file mode 100644 index 35215b0..0000000 --- a/metrics/teststat/expvar.go +++ /dev/null @@ -1,26 +0,0 @@ -package teststat - -import ( - "expvar" - "fmt" - "math" - "strconv" - "testing" -) - -// AssertExpvarNormalHistogram ensures the expvar Histogram referenced by -// metricName abides a normal distribution. -func AssertExpvarNormalHistogram(t *testing.T, metricName string, mean, stdev int64, quantiles []int) { - const tolerance int = 2 - for _, quantile := range quantiles { - want := normalValueAtQuantile(mean, stdev, quantile) - s := expvar.Get(fmt.Sprintf("%s_p%02d", metricName, quantile)).String() - have, err := strconv.Atoi(s) - if err != nil { - t.Fatal(err) - } - if int(math.Abs(float64(want)-float64(have))) > tolerance { - t.Errorf("quantile %d: want %d, have %d", quantile, want, have) - } - } -} diff --git a/metrics/teststat/graphite.go b/metrics/teststat/graphite.go deleted file mode 100644 index 6b52e71..0000000 --- a/metrics/teststat/graphite.go +++ /dev/null @@ -1,63 +0,0 @@ -package teststat - -import ( - "fmt" - "math" - "regexp" - "strconv" - "testing" -) - -// AssertGraphiteNormalHistogram ensures the expvar Histogram referenced by -// metricName abides a normal distribution. 
-func AssertGraphiteNormalHistogram(t *testing.T, prefix, metricName string, mean, stdev int64, quantiles []int, gPayload string) { - // check for hdr histo data - wants := map[string]int64{"count": 1234, "min": 15, "max": 83} - for key, want := range wants { - re := regexp.MustCompile(fmt.Sprintf("%s%s.%s (\\d*)", prefix, metricName, key)) - res := re.FindAllStringSubmatch(gPayload, 1) - if res == nil { - t.Error("did not find metrics log for", key, "in \n", gPayload) - continue - } - - if len(res[0]) == 1 { - t.Fatalf("%q: bad regex, please check the test scenario", key) - } - - have, err := strconv.ParseInt(res[0][1], 10, 64) - if err != nil { - t.Fatal(err) - } - - if want != have { - t.Errorf("key %s: want %d, have %d", key, want, have) - } - } - - const tolerance int = 2 - wants = map[string]int64{".std-dev": stdev, ".mean": mean} - for _, quantile := range quantiles { - wants[fmt.Sprintf("_p%02d", quantile)] = normalValueAtQuantile(mean, stdev, quantile) - } - // check for quantile gauges - for key, want := range wants { - re := regexp.MustCompile(fmt.Sprintf("%s%s%s (\\d*\\.\\d*)", prefix, metricName, key)) - res := re.FindAllStringSubmatch(gPayload, 1) - if res == nil { - t.Errorf("did not find metrics log for %s", key) - continue - } - - if len(res[0]) == 1 { - t.Fatalf("%q: bad regex found, please check the test scenario", key) - } - have, err := strconv.ParseFloat(res[0][1], 64) - if err != nil { - t.Fatal(err) - } - if int(math.Abs(float64(want)-have)) > tolerance { - t.Errorf("key %s: want %.2f, have %.2f", key, want, have) - } - } -} diff --git a/metrics/teststat/populate.go b/metrics/teststat/populate.go new file mode 100644 index 0000000..64be756 --- /dev/null +++ b/metrics/teststat/populate.go @@ -0,0 +1,72 @@ +package teststat + +import ( + "math" + "math/rand" + + "github.com/go-kit/kit/metrics3" +) + +// PopulateNormalHistogram makes a series of normal random observations into the +// histogram. The number of observations is determined by Count. The randomness +// is determined by Mean, Stdev, and the seed parameter. +// +// This is a low-level function, exported only for metrics that don't perform +// dynamic quantile computation, like a Prometheus Histogram (c.f. Summary). In +// most cases, you don't need to use this function, and can use TestHistogram +// instead. 
+func PopulateNormalHistogram(h metrics.Histogram, seed int) { + r := rand.New(rand.NewSource(int64(seed))) + for i := 0; i < Count; i++ { + sample := r.NormFloat64()*float64(Stdev) + float64(Mean) + if sample < 0 { + sample = 0 + } + h.Observe(sample) + } +} + +func normalQuantiles() (p50, p90, p95, p99 float64) { + return nvq(50), nvq(90), nvq(95), nvq(99) +} + +func nvq(quantile int) float64 { + // https://en.wikipedia.org/wiki/Normal_distribution#Quantile_function + return float64(Mean) + float64(Stdev)*math.Sqrt2*erfinv(2*(float64(quantile)/100)-1) +} + +func erfinv(y float64) float64 { + // https://stackoverflow.com/questions/5971830/need-code-for-inverse-error-function + if y < -1.0 || y > 1.0 { + panic("invalid input") + } + + var ( + a = [4]float64{0.886226899, -1.645349621, 0.914624893, -0.140543331} + b = [4]float64{-2.118377725, 1.442710462, -0.329097515, 0.012229801} + c = [4]float64{-1.970840454, -1.624906493, 3.429567803, 1.641345311} + d = [2]float64{3.543889200, 1.637067800} + ) + + const y0 = 0.7 + var x, z float64 + + if math.Abs(y) == 1.0 { + x = -y * math.Log(0.0) + } else if y < -y0 { + z = math.Sqrt(-math.Log((1.0 + y) / 2.0)) + x = -(((c[3]*z+c[2])*z+c[1])*z + c[0]) / ((d[1]*z+d[0])*z + 1.0) + } else { + if y < y0 { + z = y * y + x = y * (((a[3]*z+a[2])*z+a[1])*z + a[0]) / ((((b[3]*z+b[3])*z+b[1])*z+b[0])*z + 1.0) + } else { + z = math.Sqrt(-math.Log((1.0 - y) / 2.0)) + x = (((c[3]*z+c[2])*z+c[1])*z + c[0]) / ((d[1]*z+d[0])*z + 1.0) + } + x = x - (math.Erf(x)-y)/(2.0/math.SqrtPi*math.Exp(-x*x)) + x = x - (math.Erf(x)-y)/(2.0/math.SqrtPi*math.Exp(-x*x)) + } + + return x +} diff --git a/metrics/teststat/prometheus.go b/metrics/teststat/prometheus.go deleted file mode 100644 index d3cae89..0000000 --- a/metrics/teststat/prometheus.go +++ /dev/null @@ -1,93 +0,0 @@ -package teststat - -import ( - "io/ioutil" - "math" - "net/http" - "net/http/httptest" - "regexp" - "strconv" - "strings" - "testing" - - "github.com/prometheus/client_golang/prometheus" -) - -// ScrapePrometheus returns the text encoding of the current state of -// Prometheus. -func ScrapePrometheus(t *testing.T) string { - server := httptest.NewServer(prometheus.UninstrumentedHandler()) - defer server.Close() - - resp, err := http.Get(server.URL) - if err != nil { - t.Fatal(err) - } - defer resp.Body.Close() - - buf, err := ioutil.ReadAll(resp.Body) - if err != nil { - t.Fatal(err) - } - - return strings.TrimSpace(string(buf)) -} - -// AssertPrometheusNormalSummary ensures the Prometheus Summary referenced by -// name abides a normal distribution. -func AssertPrometheusNormalSummary(t *testing.T, metricName string, mean, stdev int64) { - scrape := ScrapePrometheus(t) - const tolerance int = 5 // Prometheus approximates higher quantiles badly -_-; - for quantileInt, quantileStr := range map[int]string{50: "0.5", 90: "0.9", 99: "0.99"} { - want := normalValueAtQuantile(mean, stdev, quantileInt) - have := getPrometheusQuantile(t, scrape, metricName, quantileStr) - if int(math.Abs(float64(want)-float64(have))) > tolerance { - t.Errorf("%q: want %d, have %d", quantileStr, want, have) - } - } -} - -// AssertPrometheusBucketedHistogram ensures the Prometheus Histogram -// referenced by name has observations in the expected quantity and bucket. 
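A quick way to exercise PopulateNormalHistogram outside TestHistogram is to fill the in-memory generic histogram (used elsewhere in this diff) and sanity-check the median against Mean. This is only a sketch, under the assumptions that the generic histogram satisfies metrics.Histogram and that the post-rename import paths apply.

```go
package teststat_test

import (
	"math"
	"testing"

	"github.com/go-kit/kit/metrics/generic"
	"github.com/go-kit/kit/metrics/teststat"
)

func TestPopulateSketch(t *testing.T) {
	// Fill an in-memory histogram with Count normal observations
	// (Mean 500, Stdev 25 by default) and sanity-check the median.
	h := generic.NewHistogram("sketch", 50)
	teststat.PopulateNormalHistogram(h, 1234)

	if p50 := h.Quantile(0.50); math.Abs(p50-float64(teststat.Mean)) > float64(teststat.Stdev) {
		t.Errorf("p50 %f implausibly far from mean %d", p50, teststat.Mean)
	}
}
```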
-func AssertPrometheusBucketedHistogram(t *testing.T, metricName string, mean, stdev int64, buckets []float64) { - scrape := ScrapePrometheus(t) - const tolerance int = population / 50 // pretty coarse-grained - for _, bucket := range buckets { - want := observationsLessThan(mean, stdev, bucket, population) - have := getPrometheusLessThan(t, scrape, metricName, strconv.FormatFloat(bucket, 'f', 0, 64)) - if int(math.Abs(float64(want)-float64(have))) > tolerance { - t.Errorf("%.0f: want %d, have %d", bucket, want, have) - } - } -} - -func getPrometheusQuantile(t *testing.T, scrape, name, quantileStr string) int { - matches := regexp.MustCompile(name+`{quantile="`+quantileStr+`"} ([0-9]+)`).FindAllStringSubmatch(scrape, -1) - if len(matches) < 1 { - t.Fatalf("%q: quantile %q not found in scrape", name, quantileStr) - } - if len(matches[0]) < 2 { - t.Fatalf("%q: quantile %q not found in scrape", name, quantileStr) - } - i, err := strconv.Atoi(matches[0][1]) - if err != nil { - t.Fatal(err) - } - return i -} - -func getPrometheusLessThan(t *testing.T, scrape, name, target string) int { - matches := regexp.MustCompile(name+`{le="`+target+`"} ([0-9]+)`).FindAllStringSubmatch(scrape, -1) - if len(matches) < 1 { - t.Logf(">>>\n%s\n", scrape) - t.Fatalf("%q: bucket %q not found in scrape", name, target) - } - if len(matches[0]) < 2 { - t.Fatalf("%q: bucket %q not found in scrape", name, target) - } - i, err := strconv.Atoi(matches[0][1]) - if err != nil { - t.Fatal(err) - } - return i -} diff --git a/metrics/teststat/teststat.go b/metrics/teststat/teststat.go new file mode 100644 index 0000000..b303c40 --- /dev/null +++ b/metrics/teststat/teststat.go @@ -0,0 +1,103 @@ +// Package teststat provides helpers for testing metrics backends. +package teststat + +import ( + "errors" + "fmt" + "math" + "math/rand" + "strings" + + "github.com/go-kit/kit/metrics3" +) + +// TestCounter puts some deltas through the counter, and then calls the value +// func to check that the counter has the correct final value. +func TestCounter(counter metrics.Counter, value func() float64) error { + a := rand.Perm(100) + n := rand.Intn(len(a)) + + var want float64 + for i := 0; i < n; i++ { + f := float64(a[i]) + counter.Add(f) + want += f + } + + if have := value(); want != have { + return fmt.Errorf("want %f, have %f", want, have) + } + + return nil +} + +// TestGauge puts some values through the gauge, and then calls the value func +// to check that the gauge has the correct final value. 
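TestCounter only needs a metrics.Counter and a closure that reports its current value, so any in-memory stand-in will do. A sketch with a throwaway counter type, again assuming the post-rename import paths:

```go
package teststat_test

import (
	"testing"

	"github.com/go-kit/kit/metrics"
	"github.com/go-kit/kit/metrics/teststat"
)

// memCounter is a throwaway in-memory counter, just to show the shape of the
// value func that TestCounter expects.
type memCounter struct{ sum float64 }

func (c *memCounter) With(...string) metrics.Counter { return c }
func (c *memCounter) Add(delta float64)              { c.sum += delta }

func TestCounterContract(t *testing.T) {
	c := &memCounter{}
	if err := teststat.TestCounter(c, func() float64 { return c.sum }); err != nil {
		t.Fatal(err)
	}
}
```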
+func TestGauge(gauge metrics.Gauge, value func() float64) error { + a := rand.Perm(100) + n := rand.Intn(len(a)) + + var want float64 + for i := 0; i < n; i++ { + f := float64(a[i]) + gauge.Set(f) + want = f + } + + if have := value(); want != have { + return fmt.Errorf("want %f, have %f", want, have) + } + + return nil +} + +// TestHistogram puts some observations through the histogram, and then calls +// the quantiles func to checks that the histogram has computed the correct +// quantiles within some tolerance +func TestHistogram(histogram metrics.Histogram, quantiles func() (p50, p90, p95, p99 float64), tolerance float64) error { + PopulateNormalHistogram(histogram, rand.Int()) + + want50, want90, want95, want99 := normalQuantiles() + have50, have90, have95, have99 := quantiles() + + var errs []string + if want, have := want50, have50; !cmp(want, have, tolerance) { + errs = append(errs, fmt.Sprintf("p50: want %f, have %f", want, have)) + } + if want, have := want90, have90; !cmp(want, have, tolerance) { + errs = append(errs, fmt.Sprintf("p90: want %f, have %f", want, have)) + } + if want, have := want95, have95; !cmp(want, have, tolerance) { + errs = append(errs, fmt.Sprintf("p95: want %f, have %f", want, have)) + } + if want, have := want99, have99; !cmp(want, have, tolerance) { + errs = append(errs, fmt.Sprintf("p99: want %f, have %f", want, have)) + } + if len(errs) > 0 { + return errors.New(strings.Join(errs, "; ")) + } + + return nil +} + +var ( + Count = 12345 + Mean = 500 + Stdev = 25 +) + +// ExpectedObservationsLessThan returns the number of observations that should +// have a value less than or equal to the given value, given a normal +// distribution of observations described by Count, Mean, and Stdev. +func ExpectedObservationsLessThan(bucket int64) int64 { + // https://code.google.com/p/gostat/source/browse/stat/normal.go + cdf := ((1.0 / 2.0) * (1 + math.Erf((float64(bucket)-float64(Mean))/(float64(Stdev)*math.Sqrt2)))) + return int64(cdf * float64(Count)) +} + +func cmp(want, have, tol float64) bool { + if (math.Abs(want-have) / want) > tol { + return false + } + return true +} diff --git a/metrics/time_histogram.go b/metrics/time_histogram.go deleted file mode 100644 index a8fc54c..0000000 --- a/metrics/time_histogram.go +++ /dev/null @@ -1,34 +0,0 @@ -package metrics - -import "time" - -// TimeHistogram is a convenience wrapper for a Histogram of time.Durations. -type TimeHistogram interface { - With(Field) TimeHistogram - Observe(time.Duration) -} - -type timeHistogram struct { - unit time.Duration - Histogram -} - -// NewTimeHistogram returns a TimeHistogram wrapper around the passed -// Histogram, in units of unit. 
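ExpectedObservationsLessThan is meant for bucketed backends, where a test compares scraped bucket counts against the normal CDF for the package's Count, Mean, and Stdev. As a self-contained sanity check of the helper itself, the expected count at the mean should be about half of Count:

```go
package teststat_test

import (
	"testing"

	"github.com/go-kit/kit/metrics/teststat"
)

// At the mean of the normal distribution the CDF is 0.5, so roughly half of
// the Count observations should fall at or below it.
func TestExpectedObservationsAtMean(t *testing.T) {
	want := int64(teststat.Count / 2)
	have := teststat.ExpectedObservationsLessThan(int64(teststat.Mean))
	if diff := want - have; diff < -1 || diff > 1 {
		t.Errorf("want ~%d, have %d", want, have)
	}
}
```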
-func NewTimeHistogram(unit time.Duration, h Histogram) TimeHistogram { - return &timeHistogram{ - unit: unit, - Histogram: h, - } -} - -func (h *timeHistogram) With(f Field) TimeHistogram { - return &timeHistogram{ - Histogram: h.Histogram.With(f), - unit: h.unit, - } -} - -func (h *timeHistogram) Observe(d time.Duration) { - h.Histogram.Observe(int64(d / h.unit)) -} diff --git a/metrics/time_histogram_test.go b/metrics/time_histogram_test.go deleted file mode 100644 index e7ea24b..0000000 --- a/metrics/time_histogram_test.go +++ /dev/null @@ -1,32 +0,0 @@ -package metrics_test - -import ( - "math/rand" - "testing" - "time" - - "github.com/go-kit/kit/metrics" - "github.com/go-kit/kit/metrics/expvar" -) - -func TestTimeHistogram(t *testing.T) { - var ( - metricName = "test_time_histogram" - minValue = int64(0) - maxValue = int64(200) - sigfigs = 3 - quantiles = []int{50, 90, 99} - h = expvar.NewHistogram(metricName, minValue, maxValue, sigfigs, quantiles...) - th = metrics.NewTimeHistogram(time.Millisecond, h).With(metrics.Field{Key: "a", Value: "b"}) - ) - - const seed, mean, stdev int64 = 321, 100, 20 - r := rand.New(rand.NewSource(seed)) - - for i := 0; i < 4321; i++ { - sample := time.Duration(r.NormFloat64()*float64(stdev)+float64(mean)) * time.Millisecond - th.Observe(sample) - } - - assertExpvarNormalHistogram(t, metricName, mean, stdev, quantiles) -} diff --git a/metrics3/README.md b/metrics3/README.md deleted file mode 100644 index b9e6115..0000000 --- a/metrics3/README.md +++ /dev/null @@ -1,83 +0,0 @@ -# package metrics - -`package metrics` provides a set of uniform interfaces for service instrumentation. -It has - [counters](http://prometheus.io/docs/concepts/metric_types/#counter), - [gauges](http://prometheus.io/docs/concepts/metric_types/#gauge), and - [histograms](http://prometheus.io/docs/concepts/metric_types/#histogram), -and provides adapters to popular metrics packages, like - [expvar](https://golang.org/pkg/expvar), - [StatsD](https://github.com/etsy/statsd), and - [Prometheus](https://prometheus.io). - -## Rationale - -Code instrumentation is absolutely essential to achieve - [observability](https://speakerdeck.com/mattheath/observability-in-micro-service-architectures) - into a distributed system. -Metrics and instrumentation tools have coalesced around a few well-defined idioms. -`package metrics` provides a common, minimal interface those idioms for service authors. - -## Usage - -A simple counter, exported via expvar. - -```go -import "github.com/go-kit/kit/metrics/expvar" - -func main() { - myCount := expvar.NewCounter("my_count") - myCount.Add(1) -} -``` - -A histogram for request duration, - exported via a Prometheus summary with dynamically-computed quantiles. - -```go -import ( - stdprometheus "github.com/prometheus/client_golang/prometheus" - - "github.com/go-kit/kit/metrics" - "github.com/go-kit/kit/metrics/prometheus" -) - -var dur = prometheus.NewSummary(stdprometheus.SummaryOpts{ - Namespace: "myservice", - Subsystem: "api", - Name: "request_duration_seconds", - Help: "Total time spent serving requests.", -}, []string{}) - -func handleRequest() { - defer func(begin time.Time) { dur.Observe(time.Since(begin).Seconds()) }(time.Now()) - // handle request -} -``` - -A gauge for the number of goroutines currently running, exported via StatsD. 
- -```go -import ( - "net" - "os" - "runtime" - "time" - - "github.com/go-kit/kit/metrics/statsd" - "github.com/go-kit/kit/log" -) - -func main() { - statsd := statsd.New("foo_svc.", log.NewNopLogger()) - - report := time.NewTicker(5*time.Second) - defer report.Stop() - go statsd.SendLoop(report.C, "tcp", "statsd.internal:8125") - - goroutines := statsd.NewGauge("goroutine_count") - for range time.Tick(time.Second) { - goroutines.Set(float64(runtime.NumGoroutine())) - } -} -``` diff --git a/metrics3/circonus/circonus.go b/metrics3/circonus/circonus.go deleted file mode 100644 index 01cdb79..0000000 --- a/metrics3/circonus/circonus.go +++ /dev/null @@ -1,85 +0,0 @@ -// Package circonus provides a Circonus backend for metrics. -package circonus - -import ( - "github.com/circonus-labs/circonus-gometrics" - - "github.com/go-kit/kit/metrics3" -) - -// Circonus wraps a CirconusMetrics object and provides constructors for each of -// the Go kit metrics. The CirconusMetrics object manages aggregation of -// observations and emission to the Circonus server. -type Circonus struct { - m *circonusgometrics.CirconusMetrics -} - -// New creates a new Circonus object wrapping the passed CirconusMetrics, which -// the caller should create and set in motion. The Circonus object can be used -// to construct individual Go kit metrics. -func New(m *circonusgometrics.CirconusMetrics) *Circonus { - return &Circonus{ - m: m, - } -} - -// NewCounter returns a counter metric with the given name. -func (c *Circonus) NewCounter(name string) *Counter { - return &Counter{ - name: name, - m: c.m, - } -} - -// NewGauge returns a gauge metric with the given name. -func (c *Circonus) NewGauge(name string) *Gauge { - return &Gauge{ - name: name, - m: c.m, - } -} - -// NewHistogram returns a histogram metric with the given name. -func (c *Circonus) NewHistogram(name string) *Histogram { - return &Histogram{ - h: c.m.NewHistogram(name), - } -} - -// Counter is a Circonus implementation of a counter metric. -type Counter struct { - name string - m *circonusgometrics.CirconusMetrics -} - -// With implements Counter, but is a no-op, because Circonus metrics have no -// concept of per-observation label values. -func (c *Counter) With(labelValues ...string) metrics.Counter { return c } - -// Add implements Counter. Delta is converted to uint64; precision will be lost. -func (c *Counter) Add(delta float64) { c.m.Add(c.name, uint64(delta)) } - -// Gauge is a Circonus implementation of a gauge metric. -type Gauge struct { - name string - m *circonusgometrics.CirconusMetrics -} - -// With implements Gauge, but is a no-op, because Circonus metrics have no -// concept of per-observation label values. -func (g *Gauge) With(labelValues ...string) metrics.Gauge { return g } - -// Set implements Gauge. -func (g *Gauge) Set(value float64) { g.m.SetGauge(g.name, value) } - -// Histogram is a Circonus implementation of a histogram metric. -type Histogram struct { - h *circonusgometrics.Histogram -} - -// With implements Histogram, but is a no-op, because Circonus metrics have no -// concept of per-observation label values. -func (h *Histogram) With(labelValues ...string) metrics.Histogram { return h } - -// Observe implements Histogram. No precision is lost. 
-func (h *Histogram) Observe(value float64) { h.h.RecordValue(value) } diff --git a/metrics3/circonus/circonus_test.go b/metrics3/circonus/circonus_test.go deleted file mode 100644 index a563783..0000000 --- a/metrics3/circonus/circonus_test.go +++ /dev/null @@ -1,120 +0,0 @@ -package circonus - -import ( - "encoding/json" - "net/http" - "net/http/httptest" - "regexp" - "strconv" - "testing" - - "github.com/circonus-labs/circonus-gometrics" - "github.com/circonus-labs/circonus-gometrics/checkmgr" - - "github.com/go-kit/kit/metrics3/generic" - "github.com/go-kit/kit/metrics3/teststat" -) - -func TestCounter(t *testing.T) { - // The only way to extract values from Circonus is to pose as a Circonus - // server and receive real HTTP writes. - const name = "abc" - var val int64 - s := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - var res map[string]struct { - Value int64 `json:"_value"` // reverse-engineered :\ - } - json.NewDecoder(r.Body).Decode(&res) - val = res[name].Value - })) - defer s.Close() - - // Set up a Circonus object, submitting to our HTTP server. - m := newCirconusMetrics(s.URL) - counter := New(m).NewCounter(name).With("label values", "not supported") - value := func() float64 { m.Flush(); return float64(val) } - - // Engage. - if err := teststat.TestCounter(counter, value); err != nil { - t.Fatal(err) - } -} - -func TestGauge(t *testing.T) { - const name = "def" - var val float64 - s := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - var res map[string]struct { - Value string `json:"_value"` - } - json.NewDecoder(r.Body).Decode(&res) - val, _ = strconv.ParseFloat(res[name].Value, 64) - })) - defer s.Close() - - m := newCirconusMetrics(s.URL) - gauge := New(m).NewGauge(name).With("label values", "not supported") - value := func() float64 { m.Flush(); return val } - - if err := teststat.TestGauge(gauge, value); err != nil { - t.Fatal(err) - } -} - -func TestHistogram(t *testing.T) { - const name = "ghi" - - // Circonus just emits bucketed counts. We'll dump them into a generic - // histogram (losing some precision) and take statistics from there. Note - // this does assume that the generic histogram computes statistics properly, - // but we have another test for that :) - re := regexp.MustCompile(`^H\[([0-9\.e\+]+)\]=([0-9]+)$`) // H[1.2e+03]=456 - - var p50, p90, p95, p99 float64 - s := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - var res map[string]struct { - Values []string `json:"_value"` // reverse-engineered :\ - } - json.NewDecoder(r.Body).Decode(&res) - - h := generic.NewHistogram("dummy", len(res[name].Values)) // match tbe bucket counts - for _, v := range res[name].Values { - match := re.FindStringSubmatch(v) - f, _ := strconv.ParseFloat(match[1], 64) - n, _ := strconv.ParseInt(match[2], 10, 64) - for i := int64(0); i < n; i++ { - h.Observe(f) - } - } - - p50 = h.Quantile(0.50) - p90 = h.Quantile(0.90) - p95 = h.Quantile(0.95) - p99 = h.Quantile(0.99) - })) - defer s.Close() - - m := newCirconusMetrics(s.URL) - histogram := New(m).NewHistogram(name).With("label values", "not supported") - quantiles := func() (float64, float64, float64, float64) { m.Flush(); return p50, p90, p95, p99 } - - // Circonus metrics, because they do their own bucketing, are less precise - // than other systems. So, we bump the tolerance to 5 percent. 
- if err := teststat.TestHistogram(histogram, quantiles, 0.05); err != nil { - t.Fatal(err) - } -} - -func newCirconusMetrics(url string) *circonusgometrics.CirconusMetrics { - m, err := circonusgometrics.NewCirconusMetrics(&circonusgometrics.Config{ - CheckManager: checkmgr.Config{ - Check: checkmgr.CheckConfig{ - SubmissionURL: url, - }, - }, - }) - if err != nil { - panic(err) - } - return m -} diff --git a/metrics3/discard/discard.go b/metrics3/discard/discard.go deleted file mode 100644 index ed6ff1c..0000000 --- a/metrics3/discard/discard.go +++ /dev/null @@ -1,37 +0,0 @@ -// Package discard provides a no-op metrics backend. -package discard - -import "github.com/go-kit/kit/metrics3" - -type counter struct{} - -// NewCounter returns a new no-op counter. -func NewCounter() metrics.Counter { return counter{} } - -// With implements Counter. -func (c counter) With(labelValues ...string) metrics.Counter { return c } - -// Add implements Counter. -func (c counter) Add(delta float64) {} - -type gauge struct{} - -// NewGauge returns a new no-op gauge. -func NewGauge() metrics.Gauge { return gauge{} } - -// With implements Gauge. -func (g gauge) With(labelValues ...string) metrics.Gauge { return g } - -// Set implements Gauge. -func (g gauge) Set(value float64) {} - -type histogram struct{} - -// NewHistogram returns a new no-op histogram. -func NewHistogram() metrics.Histogram { return histogram{} } - -// With implements Histogram. -func (h histogram) With(labelValues ...string) metrics.Histogram { return h } - -// Observe implements histogram. -func (h histogram) Observe(value float64) {} diff --git a/metrics3/doc.go b/metrics3/doc.go deleted file mode 100644 index fa30337..0000000 --- a/metrics3/doc.go +++ /dev/null @@ -1,59 +0,0 @@ -// Package metrics provides a framework for application instrumentation. All -// metrics are safe for concurrent use. Considerable design influence has been -// taken from https://github.com/codahale/metrics and https://prometheus.io. -// -// This package contains the common interfaces. Your code should take these -// interfaces as parameters. Implementations are provided for different -// instrumentation systems in the various subdirectories. -// -// Usage -// -// Metrics are dependencies and should be passed to the components that need -// them in the same way you'd construct and pass a database handle, or reference -// to another component. So, create metrics in your func main, using whichever -// concrete implementation is appropriate for your organization. -// -// latency := prometheus.NewSummaryFrom(stdprometheus.SummaryOpts{ -// Namespace: "myteam", -// Subsystem: "foosvc", -// Name: "request_latency_seconds", -// Help: "Incoming request latency in seconds." -// }, []string{"method", "status_code"}) -// -// Write your components to take the metrics they will use as parameters to -// their constructors. Use the interface types, not the concrete types. That is, -// -// // NewAPI takes metrics.Histogram, not *prometheus.Summary -// func NewAPI(s Store, logger log.Logger, latency metrics.Histogram) *API { -// // ... -// } -// -// func (a *API) ServeFoo(w http.ResponseWriter, r *http.Request) { -// begin := time.Now() -// // ... -// a.latency.Observe(time.Since(begin).Seconds()) -// } -// -// Finally, pass the metrics as dependencies when building your object graph. -// This should happen in func main, not in the global scope. 
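One common use of the discard package above is as a safe default, so components can always assume a non-nil metric while still taking their instrumentation as a dependency. A hedged sketch; the Worker type and its constructor are illustrative, not part of any package here.

```go
package main

import (
	"github.com/go-kit/kit/metrics3" // package name is "metrics"
	"github.com/go-kit/kit/metrics3/discard"
)

// Worker is a hypothetical component that counts the jobs it has handled.
type Worker struct {
	jobs metrics.Counter
}

// NewWorker takes its counter as a dependency. Passing nil selects the no-op
// discard implementation, so callers that don't care about instrumentation
// don't have to wire anything up.
func NewWorker(jobs metrics.Counter) *Worker {
	if jobs == nil {
		jobs = discard.NewCounter()
	}
	return &Worker{jobs: jobs}
}

func (w *Worker) Do() {
	w.jobs.Add(1)
	// ... do the work ...
}

func main() {
	w := NewWorker(nil) // instrumentation intentionally discarded
	w.Do()
}
```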
-// -// api := NewAPI(store, logger, latency) -// http.ListenAndServe("/", api) -// -// Implementation details -// -// Each telemetry system has different semantics for label values, push vs. -// pull, support for histograms, etc. These properties influence the design of -// their respective packages. This table attempts to summarize the key points of -// distinction. -// -// SYSTEM DIM COUNTERS GAUGES HISTOGRAMS -// dogstatsd n batch, push-aggregate batch, push-aggregate native, batch, push-each -// statsd 1 batch, push-aggregate batch, push-aggregate native, batch, push-each -// graphite 1 batch, push-aggregate batch, push-aggregate synthetic, batch, push-aggregate -// expvar 1 atomic atomic synthetic, batch, in-place expose -// influx n custom custom custom -// prometheus n native native native -// circonus 1 native native native -// -package metrics diff --git a/metrics3/dogstatsd/dogstatsd.go b/metrics3/dogstatsd/dogstatsd.go deleted file mode 100644 index 3258062..0000000 --- a/metrics3/dogstatsd/dogstatsd.go +++ /dev/null @@ -1,306 +0,0 @@ -// Package dogstatsd provides a DogStatsD backend for package metrics. It's very -// similar to StatsD, but supports arbitrary tags per-metric, which map to Go -// kit's label values. So, while label values are no-ops in StatsD, they are -// supported here. For more details, see the documentation at -// http://docs.datadoghq.com/guides/dogstatsd/. -// -// This package batches observations and emits them on some schedule to the -// remote server. This is useful even if you connect to your DogStatsD server -// over UDP. Emitting one network packet per observation can quickly overwhelm -// even the fastest internal network. -package dogstatsd - -import ( - "fmt" - "io" - "strings" - "time" - - "github.com/go-kit/kit/log" - "github.com/go-kit/kit/metrics3" - "github.com/go-kit/kit/metrics3/internal/lv" - "github.com/go-kit/kit/metrics3/internal/ratemap" - "github.com/go-kit/kit/util/conn" -) - -// Dogstatsd receives metrics observations and forwards them to a DogStatsD -// server. Create a Dogstatsd object, use it to create metrics, and pass those -// metrics as dependencies to the components that will use them. -// -// All metrics are buffered until WriteTo is called. Counters and gauges are -// aggregated into a single observation per timeseries per write. Timings and -// histograms are buffered but not aggregated. -// -// To regularly report metrics to an io.Writer, use the WriteLoop helper method. -// To send to a DogStatsD server, use the SendLoop helper method. -type Dogstatsd struct { - prefix string - rates *ratemap.RateMap - counters *lv.Space - gauges *lv.Space - timings *lv.Space - histograms *lv.Space - logger log.Logger -} - -// New returns a Dogstatsd object that may be used to create metrics. Prefix is -// applied to all created metrics. Callers must ensure that regular calls to -// WriteTo are performed, either manually or with one of the helper methods. -func New(prefix string, logger log.Logger) *Dogstatsd { - return &Dogstatsd{ - prefix: prefix, - rates: ratemap.New(), - counters: lv.NewSpace(), - gauges: lv.NewSpace(), - timings: lv.NewSpace(), - histograms: lv.NewSpace(), - logger: logger, - } -} - -// NewCounter returns a counter, sending observations to this Dogstatsd object. 
-func (d *Dogstatsd) NewCounter(name string, sampleRate float64) *Counter { - d.rates.Set(d.prefix+name, sampleRate) - return &Counter{ - name: d.prefix + name, - obs: d.counters.Observe, - } -} - -// NewGauge returns a gauge, sending observations to this Dogstatsd object. -func (d *Dogstatsd) NewGauge(name string) *Gauge { - return &Gauge{ - name: d.prefix + name, - obs: d.gauges.Observe, - } -} - -// NewTiming returns a histogram whose observations are interpreted as -// millisecond durations, and are forwarded to this Dogstatsd object. -func (d *Dogstatsd) NewTiming(name string, sampleRate float64) *Timing { - d.rates.Set(d.prefix+name, sampleRate) - return &Timing{ - name: d.prefix + name, - obs: d.timings.Observe, - } -} - -// NewHistogram returns a histogram whose observations are of an unspecified -// unit, and are forwarded to this Dogstatsd object. -func (d *Dogstatsd) NewHistogram(name string, sampleRate float64) *Histogram { - d.rates.Set(d.prefix+name, sampleRate) - return &Histogram{ - name: d.prefix + name, - obs: d.histograms.Observe, - } -} - -// WriteLoop is a helper method that invokes WriteTo to the passed writer every -// time the passed channel fires. This method blocks until the channel is -// closed, so clients probably want to run it in its own goroutine. For typical -// usage, create a time.Ticker and pass its C channel to this method. -func (d *Dogstatsd) WriteLoop(c <-chan time.Time, w io.Writer) { - for range c { - if _, err := d.WriteTo(w); err != nil { - d.logger.Log("during", "WriteTo", "err", err) - } - } -} - -// SendLoop is a helper method that wraps WriteLoop, passing a managed -// connection to the network and address. Like WriteLoop, this method blocks -// until the channel is closed, so clients probably want to start it in its own -// goroutine. For typical usage, create a time.Ticker and pass its C channel to -// this method. -func (d *Dogstatsd) SendLoop(c <-chan time.Time, network, address string) { - d.WriteLoop(c, conn.NewDefaultManager(network, address, d.logger)) -} - -// WriteTo flushes the buffered content of the metrics to the writer, in -// DogStatsD format. WriteTo abides best-effort semantics, so observations are -// lost if there is a problem with the write. Clients should be sure to call -// WriteTo regularly, ideally through the WriteLoop or SendLoop helper methods. 
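Putting the constructors and SendLoop above together, a minimal sketch of wiring a Dogstatsd object into a service. The prefix, tag names, sample rates, and the DogStatsD address are placeholders.

```go
package main

import (
	"time"

	"github.com/go-kit/kit/log"
	"github.com/go-kit/kit/metrics3/dogstatsd"
)

func main() {
	d := dogstatsd.New("myservice.", log.NewNopLogger())

	// Emit buffered observations every 5s over UDP; the address is a placeholder.
	tick := time.NewTicker(5 * time.Second)
	defer tick.Stop()
	go d.SendLoop(tick.C, "udp", "dogstatsd.internal:8125")

	// Counters and timings take a sample rate; label values become DogStatsD tags.
	requests := d.NewCounter("requests", 1.0).With("method", "GET")
	timing := d.NewTiming("request_duration_ms", 0.25) // sampled at 25%

	for {
		begin := time.Now()
		requests.Add(1)
		// ... handle one unit of work ...
		timing.Observe(float64(time.Since(begin).Nanoseconds()) / 1e6) // milliseconds
		time.Sleep(100 * time.Millisecond)
	}
}
```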
-func (d *Dogstatsd) WriteTo(w io.Writer) (count int64, err error) { - var n int - - d.counters.Reset().Walk(func(name string, lvs lv.LabelValues, values []float64) bool { - n, err = fmt.Fprintf(w, "%s:%f|c%s%s\n", name, sum(values), sampling(d.rates.Get(name)), tagValues(lvs)) - if err != nil { - return false - } - count += int64(n) - return true - }) - if err != nil { - return count, err - } - - d.gauges.Reset().Walk(func(name string, lvs lv.LabelValues, values []float64) bool { - n, err = fmt.Fprintf(w, "%s:%f|g%s\n", name, last(values), tagValues(lvs)) - if err != nil { - return false - } - count += int64(n) - return true - }) - if err != nil { - return count, err - } - - d.timings.Reset().Walk(func(name string, lvs lv.LabelValues, values []float64) bool { - sampleRate := d.rates.Get(name) - for _, value := range values { - n, err = fmt.Fprintf(w, "%s:%f|ms%s%s\n", name, value, sampling(sampleRate), tagValues(lvs)) - if err != nil { - return false - } - count += int64(n) - } - return true - }) - if err != nil { - return count, err - } - - d.histograms.Reset().Walk(func(name string, lvs lv.LabelValues, values []float64) bool { - sampleRate := d.rates.Get(name) - for _, value := range values { - n, err = fmt.Fprintf(w, "%s:%f|h%s%s\n", name, value, sampling(sampleRate), tagValues(lvs)) - if err != nil { - return false - } - count += int64(n) - } - return true - }) - if err != nil { - return count, err - } - - return count, err -} - -func sum(a []float64) float64 { - var v float64 - for _, f := range a { - v += f - } - return v -} - -func last(a []float64) float64 { - return a[len(a)-1] -} - -func sampling(r float64) string { - var sv string - if r < 1.0 { - sv = fmt.Sprintf("|@%f", r) - } - return sv -} - -func tagValues(labelValues []string) string { - if len(labelValues) == 0 { - return "" - } - if len(labelValues)%2 != 0 { - panic("tagValues received a labelValues with an odd number of strings") - } - pairs := make([]string, 0, len(labelValues)/2) - for i := 0; i < len(labelValues); i += 2 { - pairs = append(pairs, labelValues[i]+":"+labelValues[i+1]) - } - return "|#" + strings.Join(pairs, ",") -} - -type observeFunc func(name string, lvs lv.LabelValues, value float64) - -// Counter is a DogStatsD counter. Observations are forwarded to a Dogstatsd -// object, and aggregated (summed) per timeseries. -type Counter struct { - name string - lvs lv.LabelValues - obs observeFunc -} - -// With implements metrics.Counter. -func (c *Counter) With(labelValues ...string) metrics.Counter { - return &Counter{ - name: c.name, - lvs: c.lvs.With(labelValues...), - obs: c.obs, - } -} - -// Add implements metrics.Counter. -func (c *Counter) Add(delta float64) { - c.obs(c.name, c.lvs, delta) -} - -// Gauge is a DogStatsD gauge. Observations are forwarded to a Dogstatsd -// object, and aggregated (the last observation selected) per timeseries. -type Gauge struct { - name string - lvs lv.LabelValues - obs observeFunc -} - -// With implements metrics.Gauge. -func (g *Gauge) With(labelValues ...string) metrics.Gauge { - return &Gauge{ - name: g.name, - lvs: g.lvs.With(labelValues...), - obs: g.obs, - } -} - -// Set implements metrics.Gauge. -func (g *Gauge) Set(value float64) { - g.obs(g.name, g.lvs, value) -} - -// Timing is a DogStatsD timing, or metrics.Histogram. Observations are -// forwarded to a Dogstatsd object, and collected (but not aggregated) per -// timeseries. -type Timing struct { - name string - lvs lv.LabelValues - obs observeFunc -} - -// With implements metrics.Timing. 
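For reference, a small sketch of what the WriteTo format strings above produce when flushed to a plain buffer; the metric names, tag, and sample rate are arbitrary.

```go
package main

import (
	"bytes"
	"fmt"

	"github.com/go-kit/kit/log"
	"github.com/go-kit/kit/metrics3/dogstatsd"
)

func main() {
	d := dogstatsd.New("svc.", log.NewNopLogger())
	d.NewCounter("requests", 0.5).With("region", "east").Add(3)
	d.NewGauge("queue_depth").Set(42)

	var buf bytes.Buffer
	if _, err := d.WriteTo(&buf); err != nil {
		panic(err)
	}
	fmt.Print(buf.String())
	// Expected shape, one line per timeseries (order across timeseries may vary):
	//   svc.requests:3.000000|c|@0.500000|#region:east
	//   svc.queue_depth:42.000000|g
}
```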
-func (t *Timing) With(labelValues ...string) metrics.Histogram { - return &Timing{ - name: t.name, - lvs: t.lvs.With(labelValues...), - obs: t.obs, - } -} - -// Observe implements metrics.Histogram. Value is interpreted as milliseconds. -func (t *Timing) Observe(value float64) { - t.obs(t.name, t.lvs, value) -} - -// Histogram is a DogStatsD histrogram. Observations are forwarded to a -// Dogstatsd object, and collected (but not aggregated) per timeseries. -type Histogram struct { - name string - lvs lv.LabelValues - obs observeFunc -} - -// With implements metrics.Histogram. -func (h *Histogram) With(labelValues ...string) metrics.Histogram { - return &Histogram{ - name: h.name, - lvs: h.lvs.With(labelValues...), - obs: h.obs, - } -} - -// Observe implements metrics.Histogram. -func (h *Histogram) Observe(value float64) { - h.obs(h.name, h.lvs, value) -} diff --git a/metrics3/dogstatsd/dogstatsd_test.go b/metrics3/dogstatsd/dogstatsd_test.go deleted file mode 100644 index cd7e5af..0000000 --- a/metrics3/dogstatsd/dogstatsd_test.go +++ /dev/null @@ -1,90 +0,0 @@ -package dogstatsd - -import ( - "testing" - - "github.com/go-kit/kit/log" - "github.com/go-kit/kit/metrics3/teststat" -) - -func TestCounter(t *testing.T) { - prefix, name := "abc.", "def" - label, value := "label", "value" - regex := `^` + prefix + name + `:([0-9\.]+)\|c\|#` + label + `:` + value + `$` - d := New(prefix, log.NewNopLogger()) - counter := d.NewCounter(name, 1.0).With(label, value) - valuef := teststat.SumLines(d, regex) - if err := teststat.TestCounter(counter, valuef); err != nil { - t.Fatal(err) - } -} - -func TestCounterSampled(t *testing.T) { - // This will involve multiplying the observed sum by the inverse of the - // sample rate and checking against the expected value within some - // tolerance. - t.Skip("TODO") -} - -func TestGauge(t *testing.T) { - prefix, name := "ghi.", "jkl" - label, value := "xyz", "abc" - regex := `^` + prefix + name + `:([0-9\.]+)\|g\|#` + label + `:` + value + `$` - d := New(prefix, log.NewNopLogger()) - gauge := d.NewGauge(name).With(label, value) - valuef := teststat.LastLine(d, regex) - if err := teststat.TestGauge(gauge, valuef); err != nil { - t.Fatal(err) - } -} - -// DogStatsD histograms just emit all observations. So, we collect them into -// a generic histogram, and run the statistics test on that. 
- -func TestHistogram(t *testing.T) { - prefix, name := "dogstatsd.", "histogram_test" - label, value := "abc", "def" - regex := `^` + prefix + name + `:([0-9\.]+)\|h\|#` + label + `:` + value + `$` - d := New(prefix, log.NewNopLogger()) - histogram := d.NewHistogram(name, 1.0).With(label, value) - quantiles := teststat.Quantiles(d, regex, 50) // no |@0.X - if err := teststat.TestHistogram(histogram, quantiles, 0.01); err != nil { - t.Fatal(err) - } -} - -func TestHistogramSampled(t *testing.T) { - prefix, name := "dogstatsd.", "sampled_histogram_test" - label, value := "foo", "bar" - regex := `^` + prefix + name + `:([0-9\.]+)\|h\|@0\.01[0]*\|#` + label + `:` + value + `$` - d := New(prefix, log.NewNopLogger()) - histogram := d.NewHistogram(name, 0.01).With(label, value) - quantiles := teststat.Quantiles(d, regex, 50) - if err := teststat.TestHistogram(histogram, quantiles, 0.02); err != nil { - t.Fatal(err) - } -} - -func TestTiming(t *testing.T) { - prefix, name := "dogstatsd.", "timing_test" - label, value := "wiggle", "bottom" - regex := `^` + prefix + name + `:([0-9\.]+)\|ms\|#` + label + `:` + value + `$` - d := New(prefix, log.NewNopLogger()) - histogram := d.NewTiming(name, 1.0).With(label, value) - quantiles := teststat.Quantiles(d, regex, 50) // no |@0.X - if err := teststat.TestHistogram(histogram, quantiles, 0.01); err != nil { - t.Fatal(err) - } -} - -func TestTimingSampled(t *testing.T) { - prefix, name := "dogstatsd.", "sampled_timing_test" - label, value := "internal", "external" - regex := `^` + prefix + name + `:([0-9\.]+)\|ms\|@0.03[0]*\|#` + label + `:` + value + `$` - d := New(prefix, log.NewNopLogger()) - histogram := d.NewTiming(name, 0.03).With(label, value) - quantiles := teststat.Quantiles(d, regex, 50) - if err := teststat.TestHistogram(histogram, quantiles, 0.02); err != nil { - t.Fatal(err) - } -} diff --git a/metrics3/expvar/expvar.go b/metrics3/expvar/expvar.go deleted file mode 100644 index d27964f..0000000 --- a/metrics3/expvar/expvar.go +++ /dev/null @@ -1,91 +0,0 @@ -// Package expvar provides expvar backends for metrics. -// Label values are not supported. -package expvar - -import ( - "expvar" - "sync" - - "github.com/go-kit/kit/metrics3" - "github.com/go-kit/kit/metrics3/generic" -) - -// Counter implements the counter metric with an expvar float. -// Label values are not supported. -type Counter struct { - f *expvar.Float -} - -// NewCounter creates an expvar Float with the given name, and returns an object -// that implements the Counter interface. -func NewCounter(name string) *Counter { - return &Counter{ - f: expvar.NewFloat(name), - } -} - -// With is a no-op. -func (c *Counter) With(labelValues ...string) metrics.Counter { return c } - -// Add implements Counter. -func (c *Counter) Add(delta float64) { c.f.Add(delta) } - -// Gauge implements the gauge metric wtih an expvar float. -// Label values are not supported. -type Gauge struct { - f *expvar.Float -} - -// NewGauge creates an expvar Float with the given name, and returns an object -// that implements the Gauge interface. -func NewGauge(name string) *Gauge { - return &Gauge{ - f: expvar.NewFloat(name), - } -} - -// With is a no-op. -func (g *Gauge) With(labelValues ...string) metrics.Gauge { return g } - -// Set implements Gauge. 
-func (g *Gauge) Set(value float64) { g.f.Set(value) } - -// Histogram implements the histogram metric with a combination of the generic -// Histogram object and several expvar Floats, one for each of the 50th, 90th, -// 95th, and 99th quantiles of observed values, with the quantile attached to -// the name as a suffix. Label values are not supported. -type Histogram struct { - mtx sync.Mutex - h *generic.Histogram - p50 *expvar.Float - p90 *expvar.Float - p95 *expvar.Float - p99 *expvar.Float -} - -// NewHistogram returns a Histogram object with the given name and number of -// buckets in the underlying histogram object. 50 is a good default number of -// buckets. -func NewHistogram(name string, buckets int) *Histogram { - return &Histogram{ - h: generic.NewHistogram(name, buckets), - p50: expvar.NewFloat(name + ".p50"), - p90: expvar.NewFloat(name + ".p90"), - p95: expvar.NewFloat(name + ".p95"), - p99: expvar.NewFloat(name + ".p99"), - } -} - -// With is a no-op. -func (h *Histogram) With(labelValues ...string) metrics.Histogram { return h } - -// Observe impleemts Histogram. -func (h *Histogram) Observe(value float64) { - h.mtx.Lock() - defer h.mtx.Unlock() - h.h.Observe(value) - h.p50.Set(h.h.Quantile(0.50)) - h.p90.Set(h.h.Quantile(0.90)) - h.p95.Set(h.h.Quantile(0.95)) - h.p99.Set(h.h.Quantile(0.99)) -} diff --git a/metrics3/expvar/expvar_test.go b/metrics3/expvar/expvar_test.go deleted file mode 100644 index 5307473..0000000 --- a/metrics3/expvar/expvar_test.go +++ /dev/null @@ -1,38 +0,0 @@ -package expvar - -import ( - "strconv" - "testing" - - "github.com/go-kit/kit/metrics3/teststat" -) - -func TestCounter(t *testing.T) { - counter := NewCounter("expvar_counter").With("label values", "not supported").(*Counter) - value := func() float64 { f, _ := strconv.ParseFloat(counter.f.String(), 64); return f } - if err := teststat.TestCounter(counter, value); err != nil { - t.Fatal(err) - } -} - -func TestGauge(t *testing.T) { - gauge := NewGauge("expvar_gauge").With("label values", "not supported").(*Gauge) - value := func() float64 { f, _ := strconv.ParseFloat(gauge.f.String(), 64); return f } - if err := teststat.TestGauge(gauge, value); err != nil { - t.Fatal(err) - } -} - -func TestHistogram(t *testing.T) { - histogram := NewHistogram("expvar_histogram", 50).With("label values", "not supported").(*Histogram) - quantiles := func() (float64, float64, float64, float64) { - p50, _ := strconv.ParseFloat(histogram.p50.String(), 64) - p90, _ := strconv.ParseFloat(histogram.p90.String(), 64) - p95, _ := strconv.ParseFloat(histogram.p95.String(), 64) - p99, _ := strconv.ParseFloat(histogram.p99.String(), 64) - return p50, p90, p95, p99 - } - if err := teststat.TestHistogram(histogram, quantiles, 0.01); err != nil { - t.Fatal(err) - } -} diff --git a/metrics3/generic/generic.go b/metrics3/generic/generic.go deleted file mode 100644 index 0a251dc..0000000 --- a/metrics3/generic/generic.go +++ /dev/null @@ -1,218 +0,0 @@ -// Package generic implements generic versions of each of the metric types. They -// can be embedded by other implementations, and converted to specific formats -// as necessary. -package generic - -import ( - "fmt" - "io" - "math" - "sync" - "sync/atomic" - - "github.com/VividCortex/gohistogram" - - "github.com/go-kit/kit/metrics3" - "github.com/go-kit/kit/metrics3/internal/lv" -) - -// Counter is an in-memory implementation of a Counter. -type Counter struct { - Name string - lvs lv.LabelValues - bits uint64 -} - -// NewCounter returns a new, usable Counter. 
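As a quick illustration of the expvar-backed histogram described above: each Observe refreshes four published expvar floats named `<name>.p50` through `<name>.p99`, which then appear under /debug/vars with everything else expvar exports. A sketch, with an arbitrary metric name and values.

```go
package main

import (
	"expvar"
	"fmt"

	kitexpvar "github.com/go-kit/kit/metrics3/expvar"
)

func main() {
	// 50 buckets is the suggested default for the underlying generic histogram.
	h := kitexpvar.NewHistogram("request_latency_seconds", 50)
	for _, v := range []float64{0.01, 0.02, 0.05, 0.10, 0.50} {
		h.Observe(v)
	}

	// The quantiles are ordinary expvar variables named <name>.pNN.
	fmt.Println(expvar.Get("request_latency_seconds.p50"))
	fmt.Println(expvar.Get("request_latency_seconds.p99"))
}
```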
-func NewCounter(name string) *Counter { - return &Counter{ - Name: name, - } -} - -// With implements Counter. -func (c *Counter) With(labelValues ...string) metrics.Counter { - return &Counter{ - bits: atomic.LoadUint64(&c.bits), - lvs: c.lvs.With(labelValues...), - } -} - -// Add implements Counter. -func (c *Counter) Add(delta float64) { - for { - var ( - old = atomic.LoadUint64(&c.bits) - newf = math.Float64frombits(old) + delta - new = math.Float64bits(newf) - ) - if atomic.CompareAndSwapUint64(&c.bits, old, new) { - break - } - } -} - -// Value returns the current value of the counter. -func (c *Counter) Value() float64 { - return math.Float64frombits(atomic.LoadUint64(&c.bits)) -} - -// ValueReset returns the current value of the counter, and resets it to zero. -// This is useful for metrics backends whose counter aggregations expect deltas, -// like Graphite. -func (c *Counter) ValueReset() float64 { - for { - var ( - old = atomic.LoadUint64(&c.bits) - newf = 0.0 - new = math.Float64bits(newf) - ) - if atomic.CompareAndSwapUint64(&c.bits, old, new) { - return math.Float64frombits(old) - } - } -} - -// LabelValues returns the set of label values attached to the counter. -func (c *Counter) LabelValues() []string { - return c.lvs -} - -// Gauge is an in-memory implementation of a Gauge. -type Gauge struct { - Name string - lvs lv.LabelValues - bits uint64 -} - -// NewGauge returns a new, usable Gauge. -func NewGauge(name string) *Gauge { - return &Gauge{ - Name: name, - } -} - -// With implements Gauge. -func (g *Gauge) With(labelValues ...string) metrics.Gauge { - return &Gauge{ - bits: atomic.LoadUint64(&g.bits), - lvs: g.lvs.With(labelValues...), - } -} - -// Set implements Gauge. -func (g *Gauge) Set(value float64) { - atomic.StoreUint64(&g.bits, math.Float64bits(value)) -} - -// Value returns the current value of the gauge. -func (g *Gauge) Value() float64 { - return math.Float64frombits(atomic.LoadUint64(&g.bits)) -} - -// LabelValues returns the set of label values attached to the gauge. -func (g *Gauge) LabelValues() []string { - return g.lvs -} - -// Histogram is an in-memory implementation of a streaming histogram, based on -// VividCortex/gohistogram. It dynamically computes quantiles, so it's not -// suitable for aggregation. -type Histogram struct { - Name string - lvs lv.LabelValues - h gohistogram.Histogram -} - -// NewHistogram returns a numeric histogram based on VividCortex/gohistogram. A -// good default value for buckets is 50. -func NewHistogram(name string, buckets int) *Histogram { - return &Histogram{ - Name: name, - h: gohistogram.NewHistogram(buckets), - } -} - -// With implements Histogram. -func (h *Histogram) With(labelValues ...string) metrics.Histogram { - return &Histogram{ - lvs: h.lvs.With(labelValues...), - h: h.h, - } -} - -// Observe implements Histogram. -func (h *Histogram) Observe(value float64) { - h.h.Add(value) -} - -// Quantile returns the value of the quantile q, 0.0 < q < 1.0. -func (h *Histogram) Quantile(q float64) float64 { - return h.h.Quantile(q) -} - -// LabelValues returns the set of label values attached to the histogram. -func (h *Histogram) LabelValues() []string { - return h.lvs -} - -// Print writes a string representation of the histogram to the passed writer. -// Useful for printing to a terminal. -func (h *Histogram) Print(w io.Writer) { - fmt.Fprintf(w, h.h.String()) -} - -// Bucket is a range in a histogram which aggregates observations. 
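A short sketch of the Value/ValueReset semantics above, which is what delta-oriented backends such as Graphite rely on at flush time; the counter name is arbitrary.

```go
package main

import (
	"fmt"

	"github.com/go-kit/kit/metrics3/generic"
)

func main() {
	c := generic.NewCounter("emails_sent")
	c.Add(2)
	c.Add(3)

	fmt.Println(c.Value())      // 5: running total so far
	fmt.Println(c.ValueReset()) // 5: returns the total and zeroes the counter
	fmt.Println(c.Value())      // 0: the next flush starts from scratch
}
```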
-type Bucket struct { - From, To, Count int64 -} - -// Quantile is a pair of a quantile (0..100) and its observed maximum value. -type Quantile struct { - Quantile int // 0..100 - Value int64 -} - -// SimpleHistogram is an in-memory implementation of a Histogram. It only tracks -// an approximate moving average, so is likely too naïve for many use cases. -type SimpleHistogram struct { - mtx sync.RWMutex - lvs lv.LabelValues - avg float64 - n uint64 -} - -// NewSimpleHistogram returns a SimpleHistogram, ready for observations. -func NewSimpleHistogram() *SimpleHistogram { - return &SimpleHistogram{} -} - -// With implements Histogram. -func (h *SimpleHistogram) With(labelValues ...string) metrics.Histogram { - return &SimpleHistogram{ - lvs: h.lvs.With(labelValues...), - avg: h.avg, - n: h.n, - } -} - -// Observe implements Histogram. -func (h *SimpleHistogram) Observe(value float64) { - h.mtx.Lock() - defer h.mtx.Unlock() - h.n++ - h.avg -= h.avg / float64(h.n) - h.avg += value / float64(h.n) -} - -// ApproximateMovingAverage returns the approximate moving average of observations. -func (h *SimpleHistogram) ApproximateMovingAverage() float64 { - h.mtx.RLock() - h.mtx.RUnlock() - return h.avg -} - -// LabelValues returns the set of label values attached to the histogram. -func (h *SimpleHistogram) LabelValues() []string { - return h.lvs -} diff --git a/metrics3/generic/generic_test.go b/metrics3/generic/generic_test.go deleted file mode 100644 index 7f8f2a9..0000000 --- a/metrics3/generic/generic_test.go +++ /dev/null @@ -1,75 +0,0 @@ -package generic_test - -// This is package generic_test in order to get around an import cycle: this -// package imports teststat to do its testing, but package teststat imports -// generic to use its Histogram in the Quantiles helper function. 
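The SimpleHistogram update above is the usual incremental mean, avg_n = avg_{n-1} + (v_n - avg_{n-1})/n. A tiny sketch with made-up observations:

```go
package main

import (
	"fmt"

	"github.com/go-kit/kit/metrics3/generic"
)

func main() {
	h := generic.NewSimpleHistogram()
	for _, v := range []float64{10, 20, 30} {
		h.Observe(v)
	}
	// avg_1 = 10, avg_2 = 10 + (20-10)/2 = 15, avg_3 = 15 + (30-15)/3 = 20
	fmt.Println(h.ApproximateMovingAverage()) // 20
}
```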
- -import ( - "math" - "math/rand" - "testing" - - "github.com/go-kit/kit/metrics3/generic" - "github.com/go-kit/kit/metrics3/teststat" -) - -func TestCounter(t *testing.T) { - counter := generic.NewCounter("my_counter").With("label", "counter").(*generic.Counter) - value := func() float64 { return counter.Value() } - if err := teststat.TestCounter(counter, value); err != nil { - t.Fatal(err) - } -} - -func TestValueReset(t *testing.T) { - counter := generic.NewCounter("test_value_reset") - counter.Add(123) - counter.Add(456) - counter.Add(789) - if want, have := float64(123+456+789), counter.ValueReset(); want != have { - t.Errorf("want %f, have %f", want, have) - } - if want, have := float64(0), counter.Value(); want != have { - t.Errorf("want %f, have %f", want, have) - } -} - -func TestGauge(t *testing.T) { - gauge := generic.NewGauge("my_gauge").With("label", "gauge").(*generic.Gauge) - value := func() float64 { return gauge.Value() } - if err := teststat.TestGauge(gauge, value); err != nil { - t.Fatal(err) - } -} - -func TestHistogram(t *testing.T) { - histogram := generic.NewHistogram("my_histogram", 50).With("label", "histogram").(*generic.Histogram) - quantiles := func() (float64, float64, float64, float64) { - return histogram.Quantile(0.50), histogram.Quantile(0.90), histogram.Quantile(0.95), histogram.Quantile(0.99) - } - if err := teststat.TestHistogram(histogram, quantiles, 0.01); err != nil { - t.Fatal(err) - } -} - -func TestSimpleHistogram(t *testing.T) { - histogram := generic.NewSimpleHistogram().With("label", "simple_histogram").(*generic.SimpleHistogram) - var ( - sum int - count = 1234 // not too big - ) - for i := 0; i < count; i++ { - value := rand.Intn(1000) - sum += value - histogram.Observe(float64(value)) - } - - var ( - want = float64(sum) / float64(count) - have = histogram.ApproximateMovingAverage() - tolerance = 0.001 // real real slim - ) - if math.Abs(want-have)/want > tolerance { - t.Errorf("want %f, have %f", want, have) - } -} diff --git a/metrics3/graphite/graphite.go b/metrics3/graphite/graphite.go deleted file mode 100644 index fcae7d2..0000000 --- a/metrics3/graphite/graphite.go +++ /dev/null @@ -1,200 +0,0 @@ -// Package graphite provides a Graphite backend for metrics. Metrics are batched -// and emitted in the plaintext protocol. For more information, see -// http://graphite.readthedocs.io/en/latest/feeding-carbon.html#the-plaintext-protocol -// -// Graphite does not have a native understanding of metric parameterization, so -// label values not supported. Use distinct metrics for each unique combination -// of label values. -package graphite - -import ( - "fmt" - "io" - "sync" - "time" - - "github.com/go-kit/kit/log" - "github.com/go-kit/kit/metrics3" - "github.com/go-kit/kit/metrics3/generic" - "github.com/go-kit/kit/util/conn" -) - -// Graphite receives metrics observations and forwards them to a Graphite server. -// Create a Graphite object, use it to create metrics, and pass those metrics as -// dependencies to the components that will use them. -// -// All metrics are buffered until WriteTo is called. Counters and gauges are -// aggregated into a single observation per timeseries per write. Histograms are -// exploded into per-quantile gauges and reported once per write. -// -// To regularly report metrics to an io.Writer, use the WriteLoop helper method. -// To send to a Graphite server, use the SendLoop helper method. 
-type Graphite struct { - mtx sync.RWMutex - prefix string - counters map[string]*Counter - gauges map[string]*Gauge - histograms map[string]*Histogram - logger log.Logger -} - -// New returns a Statsd object that may be used to create metrics. Prefix is -// applied to all created metrics. Callers must ensure that regular calls to -// WriteTo are performed, either manually or with one of the helper methods. -func New(prefix string, logger log.Logger) *Graphite { - return &Graphite{ - prefix: prefix, - counters: map[string]*Counter{}, - gauges: map[string]*Gauge{}, - histograms: map[string]*Histogram{}, - logger: logger, - } -} - -// NewCounter returns a counter. Observations are aggregated and emitted once -// per write invocation. -func (g *Graphite) NewCounter(name string) *Counter { - c := NewCounter(g.prefix + name) - g.mtx.Lock() - g.counters[g.prefix+name] = c - g.mtx.Unlock() - return c -} - -// NewGauge returns a gauge. Observations are aggregated and emitted once per -// write invocation. -func (g *Graphite) NewGauge(name string) *Gauge { - ga := NewGauge(g.prefix + name) - g.mtx.Lock() - g.gauges[g.prefix+name] = ga - g.mtx.Unlock() - return ga -} - -// NewHistogram returns a histogram. Observations are aggregated and emitted as -// per-quantile gauges, once per write invocation. 50 is a good default value -// for buckets. -func (g *Graphite) NewHistogram(name string, buckets int) *Histogram { - h := NewHistogram(g.prefix+name, buckets) - g.mtx.Lock() - g.histograms[g.prefix+name] = h - g.mtx.Unlock() - return h -} - -// WriteLoop is a helper method that invokes WriteTo to the passed writer every -// time the passed channel fires. This method blocks until the channel is -// closed, so clients probably want to run it in its own goroutine. For typical -// usage, create a time.Ticker and pass its C channel to this method. -func (g *Graphite) WriteLoop(c <-chan time.Time, w io.Writer) { - for range c { - if _, err := g.WriteTo(w); err != nil { - g.logger.Log("during", "WriteTo", "err", err) - } - } -} - -// SendLoop is a helper method that wraps WriteLoop, passing a managed -// connection to the network and address. Like WriteLoop, this method blocks -// until the channel is closed, so clients probably want to start it in its own -// goroutine. For typical usage, create a time.Ticker and pass its C channel to -// this method. -func (g *Graphite) SendLoop(c <-chan time.Time, network, address string) { - g.WriteLoop(c, conn.NewDefaultManager(network, address, g.logger)) -} - -// WriteTo flushes the buffered content of the metrics to the writer, in -// Graphite plaintext format. WriteTo abides best-effort semantics, so -// observations are lost if there is a problem with the write. Clients should be -// sure to call WriteTo regularly, ideally through the WriteLoop or SendLoop -// helper methods. 
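A minimal wiring sketch for the Graphite object above; the prefix and the Carbon address are placeholders. Histograms come out of WriteTo as four per-quantile gauges (name.p50, .p90, .p95, .p99).

```go
package main

import (
	"time"

	"github.com/go-kit/kit/log"
	"github.com/go-kit/kit/metrics3/graphite"
)

func main() {
	g := graphite.New("myservice.", log.NewNopLogger())

	// Flush aggregated observations every 10s; the address is a placeholder.
	tick := time.NewTicker(10 * time.Second)
	defer tick.Stop()
	go g.SendLoop(tick.C, "tcp", "carbon.internal:2003")

	requests := g.NewCounter("requests")             // label values are no-ops for Graphite
	latency := g.NewHistogram("latency_seconds", 50) // 50 buckets is a good default

	for {
		begin := time.Now()
		requests.Add(1)
		// ... do some work ...
		latency.Observe(time.Since(begin).Seconds())
		time.Sleep(100 * time.Millisecond)
	}
}
```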
-func (g *Graphite) WriteTo(w io.Writer) (count int64, err error) { - g.mtx.RLock() - defer g.mtx.RUnlock() - now := time.Now().Unix() - - for name, c := range g.counters { - n, err := fmt.Fprintf(w, "%s %f %d\n", name, c.c.ValueReset(), now) - if err != nil { - return count, err - } - count += int64(n) - } - - for name, ga := range g.gauges { - n, err := fmt.Fprintf(w, "%s %f %d\n", name, ga.g.Value(), now) - if err != nil { - return count, err - } - count += int64(n) - } - - for name, h := range g.histograms { - for _, p := range []struct { - s string - f float64 - }{ - {"50", 0.50}, - {"90", 0.90}, - {"95", 0.95}, - {"99", 0.99}, - } { - n, err := fmt.Fprintf(w, "%s.p%s %f %d\n", name, p.s, h.h.Quantile(p.f), now) - if err != nil { - return count, err - } - count += int64(n) - } - } - - return count, err -} - -// Counter is a Graphite counter metric. -type Counter struct { - c *generic.Counter -} - -// NewCounter returns a new usable counter metric. -func NewCounter(name string) *Counter { - return &Counter{generic.NewCounter(name)} -} - -// With is a no-op. -func (c *Counter) With(...string) metrics.Counter { return c } - -// Add implements counter. -func (c *Counter) Add(delta float64) { c.c.Add(delta) } - -// Gauge is a Graphite gauge metric. -type Gauge struct { - g *generic.Gauge -} - -// NewGauge returns a new usable Gauge metric. -func NewGauge(name string) *Gauge { - return &Gauge{generic.NewGauge(name)} -} - -// With is a no-op. -func (g *Gauge) With(...string) metrics.Gauge { return g } - -// Set implements gauge. -func (g *Gauge) Set(value float64) { g.g.Set(value) } - -// Histogram is a Graphite histogram metric. Observations are bucketed into -// per-quantile gauges. -type Histogram struct { - h *generic.Histogram -} - -// NewHistogram returns a new usable Histogram metric. -func NewHistogram(name string, buckets int) *Histogram { - return &Histogram{generic.NewHistogram(name, buckets)} -} - -// With is a no-op. -func (h *Histogram) With(...string) metrics.Histogram { return h } - -// Observe implements histogram. -func (h *Histogram) Observe(value float64) { h.h.Observe(value) } diff --git a/metrics3/graphite/graphite_test.go b/metrics3/graphite/graphite_test.go deleted file mode 100644 index 468c523..0000000 --- a/metrics3/graphite/graphite_test.go +++ /dev/null @@ -1,63 +0,0 @@ -package graphite - -import ( - "bytes" - "regexp" - "strconv" - "testing" - - "github.com/go-kit/kit/log" - "github.com/go-kit/kit/metrics3/teststat" -) - -func TestCounter(t *testing.T) { - prefix, name := "abc.", "def" - label, value := "label", "value" // ignored for Graphite - regex := `^` + prefix + name + ` ([0-9\.]+) [0-9]+$` - g := New(prefix, log.NewNopLogger()) - counter := g.NewCounter(name).With(label, value) - valuef := teststat.SumLines(g, regex) - if err := teststat.TestCounter(counter, valuef); err != nil { - t.Fatal(err) - } -} - -func TestGauge(t *testing.T) { - prefix, name := "ghi.", "jkl" - label, value := "xyz", "abc" // ignored for Graphite - regex := `^` + prefix + name + ` ([0-9\.]+) [0-9]+$` - g := New(prefix, log.NewNopLogger()) - gauge := g.NewGauge(name).With(label, value) - valuef := teststat.LastLine(g, regex) - if err := teststat.TestGauge(gauge, valuef); err != nil { - t.Fatal(err) - } -} - -func TestHistogram(t *testing.T) { - // The histogram test is actually like 4 gauge tests. 
- prefix, name := "statsd.", "histogram_test" - label, value := "abc", "def" // ignored for Graphite - re50 := regexp.MustCompile(prefix + name + `.p50 ([0-9\.]+) [0-9]+`) - re90 := regexp.MustCompile(prefix + name + `.p90 ([0-9\.]+) [0-9]+`) - re95 := regexp.MustCompile(prefix + name + `.p95 ([0-9\.]+) [0-9]+`) - re99 := regexp.MustCompile(prefix + name + `.p99 ([0-9\.]+) [0-9]+`) - g := New(prefix, log.NewNopLogger()) - histogram := g.NewHistogram(name, 50).With(label, value) - quantiles := func() (float64, float64, float64, float64) { - var buf bytes.Buffer - g.WriteTo(&buf) - match50 := re50.FindStringSubmatch(buf.String()) - p50, _ := strconv.ParseFloat(match50[1], 64) - match90 := re90.FindStringSubmatch(buf.String()) - p90, _ := strconv.ParseFloat(match90[1], 64) - match95 := re95.FindStringSubmatch(buf.String()) - p95, _ := strconv.ParseFloat(match95[1], 64) - match99 := re99.FindStringSubmatch(buf.String()) - p99, _ := strconv.ParseFloat(match99[1], 64) - return p50, p90, p95, p99 - } - if err := teststat.TestHistogram(histogram, quantiles, 0.01); err != nil { - t.Fatal(err) - } -} diff --git a/metrics3/influx/influx.go b/metrics3/influx/influx.go deleted file mode 100644 index bdc1b51..0000000 --- a/metrics3/influx/influx.go +++ /dev/null @@ -1,249 +0,0 @@ -// Package influx provides an InfluxDB implementation for metrics. The model is -// similar to other push-based instrumentation systems. Observations are -// aggregated locally and emitted to the Influx server on regular intervals. -package influx - -import ( - "time" - - influxdb "github.com/influxdata/influxdb/client/v2" - - "github.com/go-kit/kit/log" - "github.com/go-kit/kit/metrics3" - "github.com/go-kit/kit/metrics3/internal/lv" -) - -// Influx is a store for metrics that will be emitted to an Influx database. -// -// Influx is a general purpose time-series database, and has no native concepts -// of counters, gauges, or histograms. Counters are modeled as a timeseries with -// one data point per flush, with a "count" field that reflects all adds since -// the last flush. Gauges are modeled as a timeseries with one data point per -// flush, with a "value" field that reflects the current state of the gauge. -// Histograms are modeled as a timeseries with one data point per observation, -// with a "value" field that reflects each observation; use e.g. the HISTOGRAM -// aggregate function to compute histograms. -// -// Influx tags are immutable, attached to the Influx object, and given to each -// metric at construction. Influx fields are mapped to Go kit label values, and -// may be mutated via With functions. Actual metric values are provided as -// fields with specific names depending on the metric. -// -// All observations are collected in memory locally, and flushed on demand. -type Influx struct { - counters *lv.Space - gauges *lv.Space - histograms *lv.Space - tags map[string]string - conf influxdb.BatchPointsConfig - logger log.Logger -} - -// New returns an Influx, ready to create metrics and collect observations. Tags -// are applied to all metrics created from this object. The BatchPointsConfig is -// used during flushing. -func New(tags map[string]string, conf influxdb.BatchPointsConfig, logger log.Logger) *Influx { - return &Influx{ - counters: lv.NewSpace(), - gauges: lv.NewSpace(), - histograms: lv.NewSpace(), - tags: tags, - conf: conf, - logger: logger, - } -} - -// NewCounter returns an Influx counter. 
-func (in *Influx) NewCounter(name string) *Counter { - return &Counter{ - name: name, - obs: in.counters.Observe, - } -} - -// NewGauge returns an Influx gauge. -func (in *Influx) NewGauge(name string) *Gauge { - return &Gauge{ - name: name, - obs: in.gauges.Observe, - } -} - -// NewHistogram returns an Influx histogram. -func (in *Influx) NewHistogram(name string) *Histogram { - return &Histogram{ - name: name, - obs: in.histograms.Observe, - } -} - -// BatchPointsWriter captures a subset of the influxdb.Client methods necessary -// for emitting metrics observations. -type BatchPointsWriter interface { - Write(influxdb.BatchPoints) error -} - -// WriteLoop is a helper method that invokes WriteTo to the passed writer every -// time the passed channel fires. This method blocks until the channel is -// closed, so clients probably want to run it in its own goroutine. For typical -// usage, create a time.Ticker and pass its C channel to this method. -func (in *Influx) WriteLoop(c <-chan time.Time, w BatchPointsWriter) { - for range c { - if err := in.WriteTo(w); err != nil { - in.logger.Log("during", "WriteTo", "err", err) - } - } -} - -// WriteTo flushes the buffered content of the metrics to the writer, in an -// Influx BatchPoints format. WriteTo abides best-effort semantics, so -// observations are lost if there is a problem with the write. Clients should be -// sure to call WriteTo regularly, ideally through the WriteLoop helper method. -func (in *Influx) WriteTo(w BatchPointsWriter) (err error) { - bp, err := influxdb.NewBatchPoints(in.conf) - if err != nil { - return err - } - - now := time.Now() - - in.counters.Reset().Walk(func(name string, lvs lv.LabelValues, values []float64) bool { - fields := fieldsFrom(lvs) - fields["count"] = sum(values) - var p *influxdb.Point - p, err = influxdb.NewPoint(name, in.tags, fields, now) - if err != nil { - return false - } - bp.AddPoint(p) - return true - }) - if err != nil { - return err - } - - in.gauges.Reset().Walk(func(name string, lvs lv.LabelValues, values []float64) bool { - fields := fieldsFrom(lvs) - fields["value"] = last(values) - var p *influxdb.Point - p, err = influxdb.NewPoint(name, in.tags, fields, now) - if err != nil { - return false - } - bp.AddPoint(p) - return true - }) - if err != nil { - return err - } - - in.histograms.Reset().Walk(func(name string, lvs lv.LabelValues, values []float64) bool { - fields := fieldsFrom(lvs) - ps := make([]*influxdb.Point, len(values)) - for i, v := range values { - fields["value"] = v // overwrite each time - ps[i], err = influxdb.NewPoint(name, in.tags, fields, now) - if err != nil { - return false - } - } - bp.AddPoints(ps) - return true - }) - if err != nil { - return err - } - - return w.Write(bp) -} - -func fieldsFrom(labelValues []string) map[string]interface{} { - if len(labelValues)%2 != 0 { - panic("fieldsFrom received a labelValues with an odd number of strings") - } - fields := make(map[string]interface{}, len(labelValues)/2) - for i := 0; i < len(labelValues); i += 2 { - fields[labelValues[i]] = labelValues[i+1] - } - return fields -} - -func sum(a []float64) float64 { - var v float64 - for _, f := range a { - v += f - } - return v -} - -func last(a []float64) float64 { - return a[len(a)-1] -} - -type observeFunc func(name string, lvs lv.LabelValues, value float64) - -// Counter is an Influx counter. Observations are forwarded to an Influx -// object, and aggregated (summed) per timeseries. 
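A minimal wiring sketch for the Influx object above. Anything satisfying BatchPointsWriter works as the sink; the stdout writer below mirrors the test helper further down, and a real service would pass an influxdb client/v2 client instead. The tag set, database name, and metric names are placeholders.

```go
package main

import (
	"fmt"
	"time"

	influxdb "github.com/influxdata/influxdb/client/v2"

	"github.com/go-kit/kit/log"
	"github.com/go-kit/kit/metrics3/influx"
)

// stdoutWriter satisfies BatchPointsWriter by printing each point; a real
// InfluxDB client has a compatible Write method and would be used here.
type stdoutWriter struct{}

func (stdoutWriter) Write(bp influxdb.BatchPoints) error {
	for _, p := range bp.Points() {
		fmt.Println(p.String())
	}
	return nil
}

func main() {
	in := influx.New(
		map[string]string{"host": "web01"}, // Influx tags, attached to every point
		influxdb.BatchPointsConfig{Database: "myservice"},
		log.NewNopLogger(),
	)

	requests := in.NewCounter("requests").With("method", "GET") // label values become fields
	queue := in.NewGauge("queue_depth")

	tick := time.NewTicker(10 * time.Second)
	defer tick.Stop()
	go in.WriteLoop(tick.C, stdoutWriter{})

	for {
		requests.Add(1)
		queue.Set(3)
		time.Sleep(100 * time.Millisecond)
	}
}
```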
-type Counter struct { - name string - lvs lv.LabelValues - obs observeFunc -} - -// With implements metrics.Counter. -func (c *Counter) With(labelValues ...string) metrics.Counter { - return &Counter{ - name: c.name, - lvs: c.lvs.With(labelValues...), - obs: c.obs, - } -} - -// Add implements metrics.Counter. -func (c *Counter) Add(delta float64) { - c.obs(c.name, c.lvs, delta) -} - -// Gauge is an Influx gauge. Observations are forwarded to a Dogstatsd -// object, and aggregated (the last observation selected) per timeseries. -type Gauge struct { - name string - lvs lv.LabelValues - obs observeFunc -} - -// With implements metrics.Gauge. -func (g *Gauge) With(labelValues ...string) metrics.Gauge { - return &Gauge{ - name: g.name, - lvs: g.lvs.With(labelValues...), - obs: g.obs, - } -} - -// Set implements metrics.Gauge. -func (g *Gauge) Set(value float64) { - g.obs(g.name, g.lvs, value) -} - -// Histogram is an Influx histrogram. Observations are aggregated into a -// generic.Histogram and emitted as per-quantile gauges to the Influx server. -type Histogram struct { - name string - lvs lv.LabelValues - obs observeFunc -} - -// With implements metrics.Histogram. -func (h *Histogram) With(labelValues ...string) metrics.Histogram { - return &Histogram{ - name: h.name, - lvs: h.lvs.With(labelValues...), - obs: h.obs, - } -} - -// Observe implements metrics.Histogram. -func (h *Histogram) Observe(value float64) { - h.obs(h.name, h.lvs, value) -} diff --git a/metrics3/influx/influx_test.go b/metrics3/influx/influx_test.go deleted file mode 100644 index 1c44aed..0000000 --- a/metrics3/influx/influx_test.go +++ /dev/null @@ -1,92 +0,0 @@ -package influx - -import ( - "bytes" - "fmt" - "regexp" - "strconv" - "strings" - "testing" - - "github.com/go-kit/kit/log" - "github.com/go-kit/kit/metrics3/generic" - "github.com/go-kit/kit/metrics3/teststat" - influxdb "github.com/influxdata/influxdb/client/v2" -) - -func TestCounter(t *testing.T) { - in := New(map[string]string{"a": "b"}, influxdb.BatchPointsConfig{}, log.NewNopLogger()) - re := regexp.MustCompile(`influx_counter,a=b count=([0-9\.]+) [0-9]+`) // reverse-engineered :\ - counter := in.NewCounter("influx_counter") - value := func() float64 { - client := &bufWriter{} - in.WriteTo(client) - match := re.FindStringSubmatch(client.buf.String()) - f, _ := strconv.ParseFloat(match[1], 64) - return f - } - if err := teststat.TestCounter(counter, value); err != nil { - t.Fatal(err) - } -} - -func TestGauge(t *testing.T) { - in := New(map[string]string{"foo": "alpha"}, influxdb.BatchPointsConfig{}, log.NewNopLogger()) - re := regexp.MustCompile(`influx_gauge,foo=alpha value=([0-9\.]+) [0-9]+`) - gauge := in.NewGauge("influx_gauge") - value := func() float64 { - client := &bufWriter{} - in.WriteTo(client) - match := re.FindStringSubmatch(client.buf.String()) - f, _ := strconv.ParseFloat(match[1], 64) - return f - } - if err := teststat.TestGauge(gauge, value); err != nil { - t.Fatal(err) - } -} - -func TestHistogram(t *testing.T) { - in := New(map[string]string{"foo": "alpha"}, influxdb.BatchPointsConfig{}, log.NewNopLogger()) - re := regexp.MustCompile(`influx_histogram,foo=alpha bar="beta",value=([0-9\.]+) [0-9]+`) - histogram := in.NewHistogram("influx_histogram").With("bar", "beta") - quantiles := func() (float64, float64, float64, float64) { - w := &bufWriter{} - in.WriteTo(w) - h := generic.NewHistogram("h", 50) - matches := re.FindAllStringSubmatch(w.buf.String(), -1) - for _, match := range matches { - f, _ := strconv.ParseFloat(match[1], 64) - 
h.Observe(f) - } - return h.Quantile(0.50), h.Quantile(0.90), h.Quantile(0.95), h.Quantile(0.99) - } - if err := teststat.TestHistogram(histogram, quantiles, 0.01); err != nil { - t.Fatal(err) - } -} - -func TestHistogramLabels(t *testing.T) { - in := New(map[string]string{}, influxdb.BatchPointsConfig{}, log.NewNopLogger()) - h := in.NewHistogram("foo") - h.Observe(123) - h.With("abc", "xyz").Observe(456) - w := &bufWriter{} - if err := in.WriteTo(w); err != nil { - t.Fatal(err) - } - if want, have := 2, len(strings.Split(strings.TrimSpace(w.buf.String()), "\n")); want != have { - t.Errorf("want %d, have %d", want, have) - } -} - -type bufWriter struct { - buf bytes.Buffer -} - -func (w *bufWriter) Write(bp influxdb.BatchPoints) error { - for _, p := range bp.Points() { - fmt.Fprintf(&w.buf, p.String()+"\n") - } - return nil -} diff --git a/metrics3/internal/emitting/buffer.go b/metrics3/internal/emitting/buffer.go deleted file mode 100644 index bca8930..0000000 --- a/metrics3/internal/emitting/buffer.go +++ /dev/null @@ -1,94 +0,0 @@ -package emitting - -import ( - "fmt" - "strings" - "sync" - - "sort" - - "github.com/go-kit/kit/metrics3/generic" -) - -type Buffer struct { - buckets int - - mtx sync.Mutex - counters map[point]*generic.Counter - gauges map[point]*generic.Gauge - histograms map[point]*generic.Histogram -} - -func (b *Buffer) Add(a Add) { - pt := makePoint(a.Name, a.LabelValues) - b.mtx.Lock() - defer b.mtx.Unlock() - c, ok := b.counters[pt] - if !ok { - c = generic.NewCounter(a.Name).With(a.LabelValues...).(*generic.Counter) - } - c.Add(a.Delta) - b.counters[pt] = c -} - -func (b *Buffer) Set(s Set) { - pt := makePoint(s.Name, s.LabelValues) - b.mtx.Lock() - defer b.mtx.Unlock() - g, ok := b.gauges[pt] - if !ok { - g = generic.NewGauge(s.Name).With(s.LabelValues...).(*generic.Gauge) - } - g.Set(s.Value) - b.gauges[pt] = g -} - -func (b *Buffer) Obv(o Obv) { - pt := makePoint(o.Name, o.LabelValues) - b.mtx.Lock() - defer b.mtx.Unlock() - h, ok := b.histograms[pt] - if !ok { - h = generic.NewHistogram(o.Name, b.buckets).With(o.LabelValues...).(*generic.Histogram) - } - h.Observe(o.Value) - b.histograms[pt] = h -} - -// point as in point in N-dimensional vector space; -// a string encoding of name + sorted k/v pairs. -type point string - -const ( - recordDelimiter = "•" - fieldDelimiter = "·" -) - -// (foo, [a b c d]) => "foo•a·b•c·d" -func makePoint(name string, labelValues []string) point { - if len(labelValues)%2 != 0 { - panic("odd number of label values; programmer error!") - } - pairs := make([]string, 0, len(labelValues)/2) - for i := 0; i < len(labelValues); i += 2 { - pairs = append(pairs, fmt.Sprintf("%s%s%s", labelValues[i], fieldDelimiter, labelValues[i+1])) - } - sort.Strings(sort.StringSlice(pairs)) - pairs = append([]string{name}, pairs...) 
- return point(strings.Join(pairs, recordDelimiter)) -} - -// "foo•a·b•c·d" => (foo, [a b c d]) -func (p point) nameLabelValues() (name string, labelValues []string) { - records := strings.Split(string(p), recordDelimiter) - if len(records)%2 != 1 { // always name + even number of label/values - panic("even number of point records; programmer error!") - } - name, records = records[0], records[1:] - labelValues = make([]string, 0, len(records)*2) - for _, record := range records { - fields := strings.SplitN(record, fieldDelimiter, 2) - labelValues = append(labelValues, fields[0], fields[1]) - } - return name, labelValues -} diff --git a/metrics3/internal/emitting/metrics.go b/metrics3/internal/emitting/metrics.go deleted file mode 100644 index 5168c06..0000000 --- a/metrics3/internal/emitting/metrics.go +++ /dev/null @@ -1,107 +0,0 @@ -package emitting - -import ( - "github.com/go-kit/kit/metrics3" - "github.com/go-kit/kit/metrics3/internal/lv" -) - -type Counter struct { - name string - lvs lv.LabelValues - sampleRate float64 - c chan Add -} - -type Add struct { - Name string - LabelValues []string - SampleRate float64 - Delta float64 -} - -func NewCounter(name string, sampleRate float64, c chan Add) *Counter { - return &Counter{ - name: name, - sampleRate: sampleRate, - c: c, - } -} - -func (c *Counter) With(labelValues ...string) metrics.Counter { - return &Counter{ - name: c.name, - lvs: c.lvs.With(labelValues...), - sampleRate: c.sampleRate, - c: c.c, - } -} - -func (c *Counter) Add(delta float64) { - c.c <- Add{c.name, c.lvs, c.sampleRate, delta} -} - -type Gauge struct { - name string - lvs lv.LabelValues - c chan Set -} - -type Set struct { - Name string - LabelValues []string - Value float64 -} - -func NewGauge(name string, c chan Set) *Gauge { - return &Gauge{ - name: name, - c: c, - } -} - -func (g *Gauge) With(labelValues ...string) metrics.Gauge { - return &Gauge{ - name: g.name, - lvs: g.lvs.With(labelValues...), - c: g.c, - } -} - -func (g *Gauge) Set(value float64) { - g.c <- Set{g.name, g.lvs, value} -} - -type Histogram struct { - name string - lvs lv.LabelValues - sampleRate float64 - c chan Obv -} - -type Obv struct { - Name string - LabelValues []string - SampleRate float64 - Value float64 -} - -func NewHistogram(name string, sampleRate float64, c chan Obv) *Histogram { - return &Histogram{ - name: name, - sampleRate: sampleRate, - c: c, - } -} - -func (h *Histogram) With(labelValues ...string) metrics.Histogram { - return &Histogram{ - name: h.name, - lvs: h.lvs.With(labelValues...), - sampleRate: h.sampleRate, - c: h.c, - } -} - -func (h *Histogram) Observe(value float64) { - h.c <- Obv{h.name, h.lvs, h.sampleRate, value} -} diff --git a/metrics3/internal/lv/labelvalues.go b/metrics3/internal/lv/labelvalues.go deleted file mode 100644 index 8bb1ba0..0000000 --- a/metrics3/internal/lv/labelvalues.go +++ /dev/null @@ -1,14 +0,0 @@ -package lv - -// LabelValues is a type alias that provides validation on its With method. -// Metrics may include it as a member to help them satisfy With semantics and -// save some code duplication. -type LabelValues []string - -// With validates the input, and returns a new aggregate labelValues. -func (lvs LabelValues) With(labelValues ...string) LabelValues { - if len(labelValues)%2 != 0 { - labelValues = append(labelValues, "unknown") - } - return append(lvs, labelValues...) 
-} diff --git a/metrics3/internal/lv/labelvalues_test.go b/metrics3/internal/lv/labelvalues_test.go deleted file mode 100644 index 5e72609..0000000 --- a/metrics3/internal/lv/labelvalues_test.go +++ /dev/null @@ -1,22 +0,0 @@ -package lv - -import ( - "strings" - "testing" -) - -func TestWith(t *testing.T) { - var a LabelValues - b := a.With("a", "1") - c := a.With("b", "2", "c", "3") - - if want, have := "", strings.Join(a, ""); want != have { - t.Errorf("With appears to mutate the original LabelValues: want %q, have %q", want, have) - } - if want, have := "a1", strings.Join(b, ""); want != have { - t.Errorf("With does not appear to return the right thing: want %q, have %q", want, have) - } - if want, have := "b2c3", strings.Join(c, ""); want != have { - t.Errorf("With does not appear to return the right thing: want %q, have %q", want, have) - } -} diff --git a/metrics3/internal/lv/space.go b/metrics3/internal/lv/space.go deleted file mode 100644 index 6807347..0000000 --- a/metrics3/internal/lv/space.go +++ /dev/null @@ -1,106 +0,0 @@ -package lv - -import "sync" - -// NewSpace returns an N-dimensional vector space. -func NewSpace() *Space { - return &Space{} -} - -// Space represents an N-dimensional vector space. Each name and unique label -// value pair establishes a new dimension and point within that dimension. Order -// matters, i.e. [a=1 b=2] identifies a different timeseries than [b=2 a=1]. -type Space struct { - mtx sync.RWMutex - nodes map[string]*node -} - -// Observe locates the time series identified by the name and label values in -// the vector space, and appends the value to the list of observations. -func (s *Space) Observe(name string, lvs LabelValues, value float64) { - s.nodeFor(name).observe(lvs, value) -} - -// Walk traverses the vector space and invokes fn for each non-empty time series -// which is encountered. Return false to abort the traversal. -func (s *Space) Walk(fn func(name string, lvs LabelValues, observations []float64) bool) { - s.mtx.RLock() - defer s.mtx.RUnlock() - for name, node := range s.nodes { - f := func(lvs LabelValues, observations []float64) bool { return fn(name, lvs, observations) } - if !node.walk(LabelValues{}, f) { - return - } - } -} - -// Reset empties the current space and returns a new Space with the old -// contents. Reset a Space to get an immutable copy suitable for walking. -func (s *Space) Reset() *Space { - s.mtx.Lock() - defer s.mtx.Unlock() - n := NewSpace() - n.nodes, s.nodes = s.nodes, n.nodes - return n -} - -func (s *Space) nodeFor(name string) *node { - s.mtx.Lock() - defer s.mtx.Unlock() - if s.nodes == nil { - s.nodes = map[string]*node{} - } - n, ok := s.nodes[name] - if !ok { - n = &node{} - s.nodes[name] = n - } - return n -} - -// node exists at a specific point in the N-dimensional vector space of all -// possible label values. The node collects observations and has child nodes -// with greater specificity. 
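The Observe/Reset/Walk cycle above is the pattern the push backends in this tree (dogstatsd, graphite, influx) use at flush time. A small self-contained sketch; note the package lives under internal/, so only this repository's own packages can actually import it.

```go
package main

import (
	"fmt"

	"github.com/go-kit/kit/metrics3/internal/lv"
)

func main() {
	s := lv.NewSpace()
	s.Observe("requests", lv.LabelValues{"method", "GET"}, 1)
	s.Observe("requests", lv.LabelValues{"method", "GET"}, 1)
	s.Observe("requests", lv.LabelValues{"method", "POST"}, 1)

	// Reset swaps out the accumulated observations, so the walk sees a
	// consistent snapshot while new observations land in the fresh Space.
	s.Reset().Walk(func(name string, lvs lv.LabelValues, obs []float64) bool {
		fmt.Println(name, lvs, len(obs)) // e.g. requests [method GET] 2
		return true                      // returning false aborts the walk
	})
}
```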
-type node struct { - mtx sync.RWMutex - observations []float64 - children map[pair]*node -} - -type pair struct{ label, value string } - -func (n *node) observe(lvs LabelValues, value float64) { - n.mtx.Lock() - defer n.mtx.Unlock() - if len(lvs) == 0 { - n.observations = append(n.observations, value) - return - } - if len(lvs) < 2 { - panic("too few LabelValues; programmer error!") - } - head, tail := pair{lvs[0], lvs[1]}, lvs[2:] - if n.children == nil { - n.children = map[pair]*node{} - } - child, ok := n.children[head] - if !ok { - child = &node{} - n.children[head] = child - } - child.observe(tail, value) -} - -func (n *node) walk(lvs LabelValues, fn func(LabelValues, []float64) bool) bool { - n.mtx.RLock() - defer n.mtx.RUnlock() - if len(n.observations) > 0 && !fn(lvs, n.observations) { - return false - } - for p, child := range n.children { - if !child.walk(append(lvs, p.label, p.value), fn) { - return false - } - } - return true -} diff --git a/metrics3/internal/lv/space_test.go b/metrics3/internal/lv/space_test.go deleted file mode 100644 index 0ef5517..0000000 --- a/metrics3/internal/lv/space_test.go +++ /dev/null @@ -1,86 +0,0 @@ -package lv - -import ( - "strings" - "testing" -) - -func TestSpaceWalkAbort(t *testing.T) { - s := NewSpace() - s.Observe("a", LabelValues{"a", "b"}, 1) - s.Observe("a", LabelValues{"c", "d"}, 2) - s.Observe("a", LabelValues{"e", "f"}, 4) - s.Observe("a", LabelValues{"g", "h"}, 8) - s.Observe("b", LabelValues{"a", "b"}, 16) - s.Observe("b", LabelValues{"c", "d"}, 32) - s.Observe("b", LabelValues{"e", "f"}, 64) - s.Observe("b", LabelValues{"g", "h"}, 128) - - var count int - s.Walk(func(name string, lvs LabelValues, obs []float64) bool { - count++ - return false - }) - if want, have := 1, count; want != have { - t.Errorf("want %d, have %d", want, have) - } -} - -func TestSpaceWalkSums(t *testing.T) { - s := NewSpace() - s.Observe("metric_one", LabelValues{}, 1) - s.Observe("metric_one", LabelValues{}, 2) - s.Observe("metric_one", LabelValues{"a", "1", "b", "2"}, 4) - s.Observe("metric_one", LabelValues{"a", "1", "b", "2"}, 8) - s.Observe("metric_one", LabelValues{}, 16) - s.Observe("metric_one", LabelValues{"a", "1", "b", "3"}, 32) - s.Observe("metric_two", LabelValues{}, 64) - s.Observe("metric_two", LabelValues{}, 128) - s.Observe("metric_two", LabelValues{"a", "1", "b", "2"}, 256) - - have := map[string]float64{} - s.Walk(func(name string, lvs LabelValues, obs []float64) bool { - //t.Logf("%s %v => %v", name, lvs, obs) - have[name+" ["+strings.Join(lvs, "")+"]"] += sum(obs) - return true - }) - - want := map[string]float64{ - "metric_one []": 1 + 2 + 16, - "metric_one [a1b2]": 4 + 8, - "metric_one [a1b3]": 32, - "metric_two []": 64 + 128, - "metric_two [a1b2]": 256, - } - for keystr, wantsum := range want { - if havesum := have[keystr]; wantsum != havesum { - t.Errorf("%q: want %.1f, have %.1f", keystr, wantsum, havesum) - } - delete(want, keystr) - delete(have, keystr) - } - for keystr, havesum := range have { - t.Errorf("%q: unexpected observations recorded: %.1f", keystr, havesum) - } -} - -func TestSpaceWalkSkipsEmptyDimensions(t *testing.T) { - s := NewSpace() - s.Observe("foo", LabelValues{"bar", "1", "baz", "2"}, 123) - - var count int - s.Walk(func(name string, lvs LabelValues, obs []float64) bool { - count++ - return true - }) - if want, have := 1, count; want != have { - t.Errorf("want %d, have %d", want, have) - } -} - -func sum(a []float64) (v float64) { - for _, f := range a { - v += f - } - return -} diff --git 
a/metrics3/internal/ratemap/ratemap.go b/metrics3/internal/ratemap/ratemap.go deleted file mode 100644 index a955c12..0000000 --- a/metrics3/internal/ratemap/ratemap.go +++ /dev/null @@ -1,40 +0,0 @@ -// Package ratemap implements a goroutine-safe map of string to float64. It can -// be embedded in implementations whose metrics support fixed sample rates, so -// that an additional parameter doesn't have to be tracked through the e.g. -// lv.Space object. -package ratemap - -import "sync" - -// RateMap is a simple goroutine-safe map of string to float64. -type RateMap struct { - mtx sync.RWMutex - m map[string]float64 -} - -// New returns a new RateMap. -func New() *RateMap { - return &RateMap{ - m: map[string]float64{}, - } -} - -// Set writes the given name/rate pair to the map. -// Set is safe for concurrent access by multiple goroutines. -func (m *RateMap) Set(name string, rate float64) { - m.mtx.Lock() - defer m.mtx.Unlock() - m.m[name] = rate -} - -// Get retrieves the rate for the given name, or 1.0 if none is set. -// Get is safe for concurrent access by multiple goroutines. -func (m *RateMap) Get(name string) float64 { - m.mtx.RLock() - defer m.mtx.RUnlock() - f, ok := m.m[name] - if !ok { - f = 1.0 - } - return f -} diff --git a/metrics3/metrics.go b/metrics3/metrics.go deleted file mode 100644 index d0ae30f..0000000 --- a/metrics3/metrics.go +++ /dev/null @@ -1,24 +0,0 @@ -package metrics - -// Counter describes a metric that accumulates values monotonically. -// An example of a counter is the number of received HTTP requests. -type Counter interface { - With(labelValues ...string) Counter - Add(delta float64) -} - -// Gauge describes a metric that takes specific values over time. -// An example of a gauge is the current depth of a job queue. -type Gauge interface { - With(labelValues ...string) Gauge - Set(value float64) -} - -// Histogram describes a metric that takes repeated observations of the same -// kind of thing, and produces a statistical summary of those observations, -// typically expressed as quantiles or buckets. An example of a histogram is HTTP -// request latencies. -type Histogram interface { - With(labelValues ...string) Histogram - Observe(value float64) -} diff --git a/metrics3/prometheus/prometheus.go b/metrics3/prometheus/prometheus.go deleted file mode 100644 index ab161eb..0000000 --- a/metrics3/prometheus/prometheus.go +++ /dev/null @@ -1,157 +0,0 @@ -// Package prometheus provides Prometheus implementations for metrics. -// Individual metrics are mapped to their Prometheus counterparts, and -// (depending on the constructor used) may be automatically registered in the -// global Prometheus metrics registry. -package prometheus - -import ( - "github.com/prometheus/client_golang/prometheus" - - "github.com/go-kit/kit/metrics3" - "github.com/go-kit/kit/metrics3/internal/lv" -) - -// Counter implements Counter, via a Prometheus CounterVec. -type Counter struct { - cv *prometheus.CounterVec - lvs lv.LabelValues -} - -// NewCounterFrom constructs and registers a Prometheus CounterVec, -// and returns a usable Counter object. -func NewCounterFrom(opts prometheus.CounterOpts, labelNames []string) *Counter { - cv := prometheus.NewCounterVec(opts, labelNames) - prometheus.MustRegister(cv) - return NewCounter(cv) -} - -// NewCounter wraps the CounterVec and returns a usable Counter object. -func NewCounter(cv *prometheus.CounterVec) *Counter { - return &Counter{ - cv: cv, - } -} - -// With implements Counter. 
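The three interfaces above are the entire contract between application code and a metrics backend. Below is a sketch of instrumentation written purely against them, using the discard backend that appears later in this patch so the snippet stays self-contained; the import paths are the metrics3 ones being removed here and would presumably change once the package is promoted, and the service type and metric names are made up for illustration.

```go
package main

import (
	"time"

	metrics "github.com/go-kit/kit/metrics3"
	"github.com/go-kit/kit/metrics3/discard"
)

// service depends only on the interfaces, so Prometheus, StatsD, expvar,
// or a no-op backend can be injected without touching this code.
type service struct {
	requests metrics.Counter
	queueLen metrics.Gauge
	duration metrics.Histogram
}

func (s *service) handle() {
	defer func(begin time.Time) {
		s.duration.Observe(time.Since(begin).Seconds())
	}(time.Now())
	s.requests.Add(1)
	s.queueLen.Set(0) // e.g. current depth of a work queue
}

func main() {
	s := &service{
		requests: discard.NewCounter(),
		queueLen: discard.NewGauge(),
		duration: discard.NewHistogram(),
	}
	s.handle()
}
```

Because the struct holds only interfaces, swapping a real backend in for discard is a change to main alone.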
-func (c *Counter) With(labelValues ...string) metrics.Counter { - return &Counter{ - cv: c.cv, - lvs: c.lvs.With(labelValues...), - } -} - -// Add implements Counter. -func (c *Counter) Add(delta float64) { - c.cv.WithLabelValues(c.lvs...).Add(delta) -} - -// Gauge implements Gauge, via a Prometheus GaugeVec. -type Gauge struct { - gv *prometheus.GaugeVec - lvs lv.LabelValues -} - -// NewGaugeFrom construts and registers a Prometheus GaugeVec, -// and returns a usable Gauge object. -func NewGaugeFrom(opts prometheus.GaugeOpts, labelNames []string) *Gauge { - gv := prometheus.NewGaugeVec(opts, labelNames) - prometheus.MustRegister(gv) - return NewGauge(gv) -} - -// NewGauge wraps the GaugeVec and returns a usable Gauge object. -func NewGauge(gv *prometheus.GaugeVec) *Gauge { - return &Gauge{ - gv: gv, - } -} - -// With implements Gauge. -func (g *Gauge) With(labelValues ...string) metrics.Gauge { - return &Gauge{ - gv: g.gv, - lvs: g.lvs.With(labelValues...), - } -} - -// Set implements Gauge. -func (g *Gauge) Set(value float64) { - g.gv.WithLabelValues(g.lvs...).Set(value) -} - -// Add is supported by Prometheus GaugeVecs. -func (g *Gauge) Add(delta float64) { - g.gv.WithLabelValues(g.lvs...).Add(delta) -} - -// Summary implements Histogram, via a Prometheus SummaryVec. The difference -// between a Summary and a Histogram is that Summaries don't require predefined -// quantile buckets, but cannot be statistically aggregated. -type Summary struct { - sv *prometheus.SummaryVec - lvs lv.LabelValues -} - -// NewSummaryFrom constructs and registers a Prometheus SummaryVec, -// and returns a usable Summary object. -func NewSummaryFrom(opts prometheus.SummaryOpts, labelNames []string) *Summary { - sv := prometheus.NewSummaryVec(opts, labelNames) - prometheus.MustRegister(sv) - return NewSummary(sv) -} - -// NewSummary wraps the SummaryVec and returns a usable Summary object. -func NewSummary(sv *prometheus.SummaryVec) *Summary { - return &Summary{ - sv: sv, - } -} - -// With implements Histogram. -func (s *Summary) With(labelValues ...string) metrics.Histogram { - return &Summary{ - sv: s.sv, - lvs: s.lvs.With(labelValues...), - } -} - -// Observe implements Histogram. -func (s *Summary) Observe(value float64) { - s.sv.WithLabelValues(s.lvs...).Observe(value) -} - -// Histogram implements Histogram via a Prometheus HistogramVec. The difference -// between a Histogram and a Summary is that Histograms require predefined -// quantile buckets, and can be statistically aggregated. -type Histogram struct { - hv *prometheus.HistogramVec - lvs lv.LabelValues -} - -// NewHistogramFrom constructs and registers a Prometheus HistogramVec, -// and returns a usable Histogram object. -func NewHistogramFrom(opts prometheus.HistogramOpts, labelNames []string) *Histogram { - hv := prometheus.NewHistogramVec(opts, labelNames) - prometheus.MustRegister(hv) - return NewHistogram(hv) -} - -// NewHistogram wraps the HistogramVec and returns a usable Histogram object. -func NewHistogram(hv *prometheus.HistogramVec) *Histogram { - return &Histogram{ - hv: hv, - } -} - -// With implements Histogram. -func (h *Histogram) With(labelValues ...string) metrics.Histogram { - return &Histogram{ - hv: h.hv, - lvs: h.lvs.With(labelValues...), - } -} - -// Observe implements Histogram. 
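Note that With on these types never mutates: each call returns a copy carrying the accumulated label values, and the underlying *Vec is only consulted at observation time. A hedged sketch of how that reads at a call site; the go-kit import path is the metrics3 one being deleted here, and the namespace, metric name, and label values are illustrative.

```go
package main

import (
	stdprometheus "github.com/prometheus/client_golang/prometheus"

	kitprometheus "github.com/go-kit/kit/metrics3/prometheus"
)

func main() {
	// Label names are fixed at construction; values are supplied per
	// observation through With, as name/value pairs.
	requests := kitprometheus.NewCounterFrom(stdprometheus.CounterOpts{
		Namespace: "myservice",
		Subsystem: "api",
		Name:      "requests_total",
		Help:      "Total requests received, by method and status.",
	}, []string{"method", "status"})

	requests.With("method", "GET", "status", "200").Add(1)
	requests.With("method", "POST", "status", "500").Add(1)
}
```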
-func (h *Histogram) Observe(value float64) { - h.hv.WithLabelValues(h.lvs...).Observe(value) -} diff --git a/metrics3/prometheus/prometheus_test.go b/metrics3/prometheus/prometheus_test.go deleted file mode 100644 index d8d24d3..0000000 --- a/metrics3/prometheus/prometheus_test.go +++ /dev/null @@ -1,191 +0,0 @@ -package prometheus - -import ( - "io/ioutil" - "math" - "math/rand" - "net/http" - "net/http/httptest" - "regexp" - "strconv" - "strings" - "testing" - - "github.com/go-kit/kit/metrics3/teststat" - stdprometheus "github.com/prometheus/client_golang/prometheus" -) - -func TestCounter(t *testing.T) { - s := httptest.NewServer(stdprometheus.UninstrumentedHandler()) - defer s.Close() - - scrape := func() string { - resp, _ := http.Get(s.URL) - buf, _ := ioutil.ReadAll(resp.Body) - return string(buf) - } - - namespace, subsystem, name := "ns", "ss", "foo" - re := regexp.MustCompile(namespace + `_` + subsystem + `_` + name + ` ([0-9\.]+)`) - - counter := NewCounterFrom(stdprometheus.CounterOpts{ - Namespace: namespace, - Subsystem: subsystem, - Name: name, - Help: "This is the help string.", - }, []string{}) - - value := func() float64 { - matches := re.FindStringSubmatch(scrape()) - f, _ := strconv.ParseFloat(matches[1], 64) - return f - } - - if err := teststat.TestCounter(counter, value); err != nil { - t.Fatal(err) - } -} - -func TestGauge(t *testing.T) { - s := httptest.NewServer(stdprometheus.UninstrumentedHandler()) - defer s.Close() - - scrape := func() string { - resp, _ := http.Get(s.URL) - buf, _ := ioutil.ReadAll(resp.Body) - return string(buf) - } - - namespace, subsystem, name := "aaa", "bbb", "ccc" - re := regexp.MustCompile(namespace + `_` + subsystem + `_` + name + ` ([0-9\.]+)`) - - gauge := NewGaugeFrom(stdprometheus.GaugeOpts{ - Namespace: namespace, - Subsystem: subsystem, - Name: name, - Help: "This is a different help string.", - }, []string{}) - - value := func() float64 { - matches := re.FindStringSubmatch(scrape()) - f, _ := strconv.ParseFloat(matches[1], 64) - return f - } - - if err := teststat.TestGauge(gauge, value); err != nil { - t.Fatal(err) - } -} - -func TestSummary(t *testing.T) { - s := httptest.NewServer(stdprometheus.UninstrumentedHandler()) - defer s.Close() - - scrape := func() string { - resp, _ := http.Get(s.URL) - buf, _ := ioutil.ReadAll(resp.Body) - return string(buf) - } - - namespace, subsystem, name := "test", "prometheus", "summary" - re50 := regexp.MustCompile(namespace + `_` + subsystem + `_` + name + `{quantile="0.5"} ([0-9\.]+)`) - re90 := regexp.MustCompile(namespace + `_` + subsystem + `_` + name + `{quantile="0.9"} ([0-9\.]+)`) - re99 := regexp.MustCompile(namespace + `_` + subsystem + `_` + name + `{quantile="0.99"} ([0-9\.]+)`) - - summary := NewSummaryFrom(stdprometheus.SummaryOpts{ - Namespace: namespace, - Subsystem: subsystem, - Name: name, - Help: "This is the help string for the summary.", - }, []string{}) - - quantiles := func() (float64, float64, float64, float64) { - buf := scrape() - match50 := re50.FindStringSubmatch(buf) - p50, _ := strconv.ParseFloat(match50[1], 64) - match90 := re90.FindStringSubmatch(buf) - p90, _ := strconv.ParseFloat(match90[1], 64) - match99 := re99.FindStringSubmatch(buf) - p99, _ := strconv.ParseFloat(match99[1], 64) - p95 := p90 + ((p99 - p90) / 2) // Prometheus, y u no p95??? 
:< #yolo - return p50, p90, p95, p99 - } - - if err := teststat.TestHistogram(summary, quantiles, 0.01); err != nil { - t.Fatal(err) - } -} - -func TestHistogram(t *testing.T) { - // Prometheus reports histograms as a count of observations that fell into - // each predefined bucket, with the bucket value representing a global upper - // limit. That is, the count monotonically increases over the buckets. This - // requires a different strategy to test. - - s := httptest.NewServer(stdprometheus.UninstrumentedHandler()) - defer s.Close() - - scrape := func() string { - resp, _ := http.Get(s.URL) - buf, _ := ioutil.ReadAll(resp.Body) - return string(buf) - } - - namespace, subsystem, name := "test", "prometheus", "histogram" - re := regexp.MustCompile(namespace + `_` + subsystem + `_` + name + `_bucket{le="([0-9]+|\+Inf)"} ([0-9\.]+)`) - - numStdev := 3 - bucketMin := (teststat.Mean - (numStdev * teststat.Stdev)) - bucketMax := (teststat.Mean + (numStdev * teststat.Stdev)) - if bucketMin < 0 { - bucketMin = 0 - } - bucketCount := 10 - bucketDelta := (bucketMax - bucketMin) / bucketCount - buckets := []float64{} - for i := bucketMin; i <= bucketMax; i += bucketDelta { - buckets = append(buckets, float64(i)) - } - - histogram := NewHistogramFrom(stdprometheus.HistogramOpts{ - Namespace: namespace, - Subsystem: subsystem, - Name: name, - Help: "This is the help string for the histogram.", - Buckets: buckets, - }, []string{}) - - // Can't TestHistogram, because Prometheus Histograms don't dynamically - // compute quantiles. Instead, they fill up buckets. So, let's populate the - // histogram kind of manually. - teststat.PopulateNormalHistogram(histogram, rand.Int()) - - // Then, we use ExpectedObservationsLessThan to validate. - for _, line := range strings.Split(scrape(), "\n") { - match := re.FindStringSubmatch(line) - if match == nil { - continue - } - - bucket, _ := strconv.ParseInt(match[1], 10, 64) - have, _ := strconv.ParseInt(match[2], 10, 64) - - want := teststat.ExpectedObservationsLessThan(bucket) - if match[1] == "+Inf" { - want = int64(teststat.Count) // special case - } - - // Unfortunately, we observe experimentally that Prometheus is quite - // imprecise at the extremes. I'm setting a very high tolerance for now. - // It would be great to dig in and figure out whether that's a problem - // with my Expected calculation, or in Prometheus. - tolerance := 0.25 - if delta := math.Abs(float64(want) - float64(have)); (delta / float64(want)) > tolerance { - t.Errorf("Bucket %d: want %d, have %d (%.1f%%)", bucket, want, have, (100.0 * delta / float64(want))) - } - } -} - -func TestWith(t *testing.T) { - t.Skip("TODO") -} diff --git a/metrics3/provider/circonus.go b/metrics3/provider/circonus.go deleted file mode 100644 index 54dd3fc..0000000 --- a/metrics3/provider/circonus.go +++ /dev/null @@ -1,36 +0,0 @@ -package provider - -import ( - "github.com/go-kit/kit/metrics3" - "github.com/go-kit/kit/metrics3/circonus" -) - -type circonusProvider struct { - c *circonus.Circonus -} - -// NewCirconusProvider takes the given Circonnus object and returns a Provider -// that produces Circonus metrics. -func NewCirconusProvider(c *circonus.Circonus) Provider { - return &circonusProvider{ - c: c, - } -} - -// NewCounter implements Provider. -func (p *circonusProvider) NewCounter(name string) metrics.Counter { - return p.c.NewCounter(name) -} - -// NewGauge implements Provider. 
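The hand-rolled loop in TestHistogram builds eleven linearly spaced bucket boundaries spanning the mean plus or minus three standard deviations. Outside of a test, the Prometheus client's LinearBuckets helper yields the same shape; the sketch below inlines teststat.Mean and teststat.Stdev (defined later in this diff) as literals.

```go
package main

import (
	"fmt"

	stdprometheus "github.com/prometheus/client_golang/prometheus"
)

func main() {
	mean, stdev := 500.0, 25.0 // teststat.Mean and teststat.Stdev, inlined as literals
	lo := mean - 3*stdev
	hi := mean + 3*stdev
	count := 10
	width := (hi - lo) / float64(count)

	// Eleven boundaries from lo to hi inclusive, matching the removed
	// test's loop, which appends while i <= bucketMax.
	fmt.Println(stdprometheus.LinearBuckets(lo, width, count+1))
}
```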
-func (p *circonusProvider) NewGauge(name string) metrics.Gauge { - return p.c.NewGauge(name) -} - -// NewHistogram implements Provider. The buckets parameter is ignored. -func (p *circonusProvider) NewHistogram(name string, _ int) metrics.Histogram { - return p.c.NewHistogram(name) -} - -// Stop implements Provider, but is a no-op. -func (p *circonusProvider) Stop() {} diff --git a/metrics3/provider/discard.go b/metrics3/provider/discard.go deleted file mode 100644 index 82de282..0000000 --- a/metrics3/provider/discard.go +++ /dev/null @@ -1,24 +0,0 @@ -package provider - -import ( - "github.com/go-kit/kit/metrics3" - "github.com/go-kit/kit/metrics3/discard" -) - -type discardProvider struct{} - -// NewDiscardProvider returns a provider that produces no-op metrics via the -// discarding backend. -func NewDiscardProvider() Provider { return discardProvider{} } - -// NewCounter implements Provider. -func (discardProvider) NewCounter(string) metrics.Counter { return discard.NewCounter() } - -// NewGauge implements Provider. -func (discardProvider) NewGauge(string) metrics.Gauge { return discard.NewGauge() } - -// NewHistogram implements Provider. -func (discardProvider) NewHistogram(string, int) metrics.Histogram { return discard.NewHistogram() } - -// Stop implements Provider. -func (discardProvider) Stop() {} diff --git a/metrics3/provider/dogstatsd.go b/metrics3/provider/dogstatsd.go deleted file mode 100644 index 076abf9..0000000 --- a/metrics3/provider/dogstatsd.go +++ /dev/null @@ -1,43 +0,0 @@ -package provider - -import ( - "github.com/go-kit/kit/metrics3" - "github.com/go-kit/kit/metrics3/dogstatsd" -) - -type dogstatsdProvider struct { - d *dogstatsd.Dogstatsd - stop func() -} - -// NewDogstatsdProvider wraps the given Dogstatsd object and stop func and -// returns a Provider that produces Dogstatsd metrics. A typical stop function -// would be ticker.Stop from the ticker passed to the SendLoop helper method. -func NewDogstatsdProvider(d *dogstatsd.Dogstatsd, stop func()) Provider { - return &dogstatsdProvider{ - d: d, - stop: stop, - } -} - -// NewCounter implements Provider, returning a new Dogstatsd Counter with a -// sample rate of 1.0. -func (p *dogstatsdProvider) NewCounter(name string) metrics.Counter { - return p.d.NewCounter(name, 1.0) -} - -// NewGauge implements Provider. -func (p *dogstatsdProvider) NewGauge(name string) metrics.Gauge { - return p.d.NewGauge(name) -} - -// NewHistogram implements Provider, returning a new Dogstatsd Histogram (note: -// not a Timing) with a sample rate of 1.0. The buckets argument is ignored. -func (p *dogstatsdProvider) NewHistogram(name string, _ int) metrics.Histogram { - return p.d.NewHistogram(name, 1.0) -} - -// Stop implements Provider, invoking the stop function passed at construction. -func (p *dogstatsdProvider) Stop() { - p.stop() -} diff --git a/metrics3/provider/expvar.go b/metrics3/provider/expvar.go deleted file mode 100644 index 57a90cf..0000000 --- a/metrics3/provider/expvar.go +++ /dev/null @@ -1,31 +0,0 @@ -package provider - -import ( - "github.com/go-kit/kit/metrics3" - "github.com/go-kit/kit/metrics3/expvar" -) - -type expvarProvider struct{} - -// NewExpvarProvider returns a Provider that produces expvar metrics. -func NewExpvarProvider() Provider { - return expvarProvider{} -} - -// NewCounter implements Provider. -func (p expvarProvider) NewCounter(name string) metrics.Counter { - return expvar.NewCounter(name) -} - -// NewGauge implements Provider. 
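Of the providers above, NewDiscardProvider is the handiest one in unit tests, since every metric it hands out is a no-op while the call sites stay identical to production wiring. A small sketch, again using the metrics3 import path being removed here:

```go
package main

import "github.com/go-kit/kit/metrics3/provider"

func main() {
	p := provider.NewDiscardProvider()
	defer p.Stop() // a no-op here, but keeps test wiring identical to production

	requests := p.NewCounter("requests_total")
	latency := p.NewHistogram("request_latency_seconds", 50)

	requests.Add(1)      // goes nowhere
	latency.Observe(0.1) // also discarded
}
```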
-func (p expvarProvider) NewGauge(name string) metrics.Gauge { - return expvar.NewGauge(name) -} - -// NewHistogram implements Provider. -func (p expvarProvider) NewHistogram(name string, buckets int) metrics.Histogram { - return expvar.NewHistogram(name, buckets) -} - -// Stop implements Provider, but is a no-op. -func (p expvarProvider) Stop() {} diff --git a/metrics3/provider/graphite.go b/metrics3/provider/graphite.go deleted file mode 100644 index 68696f2..0000000 --- a/metrics3/provider/graphite.go +++ /dev/null @@ -1,41 +0,0 @@ -package provider - -import ( - "github.com/go-kit/kit/metrics3" - "github.com/go-kit/kit/metrics3/graphite" -) - -type graphiteProvider struct { - g *graphite.Graphite - stop func() -} - -// NewGraphiteProvider wraps the given Graphite object and stop func and returns -// a Provider that produces Graphite metrics. A typical stop function would be -// ticker.Stop from the ticker passed to the SendLoop helper method. -func NewGraphiteProvider(g *graphite.Graphite, stop func()) Provider { - return &graphiteProvider{ - g: g, - stop: stop, - } -} - -// NewCounter implements Provider. -func (p *graphiteProvider) NewCounter(name string) metrics.Counter { - return p.g.NewCounter(name) -} - -// NewGauge implements Provider. -func (p *graphiteProvider) NewGauge(name string) metrics.Gauge { - return p.g.NewGauge(name) -} - -// NewHistogram implements Provider. -func (p *graphiteProvider) NewHistogram(name string, buckets int) metrics.Histogram { - return p.g.NewHistogram(name, buckets) -} - -// Stop implements Provider, invoking the stop function passed at construction. -func (p *graphiteProvider) Stop() { - p.stop() -} diff --git a/metrics3/provider/influx.go b/metrics3/provider/influx.go deleted file mode 100644 index d57f4ee..0000000 --- a/metrics3/provider/influx.go +++ /dev/null @@ -1,40 +0,0 @@ -package provider - -import ( - "github.com/go-kit/kit/metrics3" - "github.com/go-kit/kit/metrics3/influx" -) - -type influxProvider struct { - in *influx.Influx - stop func() -} - -// NewInfluxProvider takes the given Influx object and stop func, and returns -// a Provider that produces Influx metrics. -func NewInfluxProvider(in *influx.Influx, stop func()) Provider { - return &influxProvider{ - in: in, - stop: stop, - } -} - -// NewCounter implements Provider. Per-metric tags are not supported. -func (p *influxProvider) NewCounter(name string) metrics.Counter { - return p.in.NewCounter(name) -} - -// NewGauge implements Provider. Per-metric tags are not supported. -func (p *influxProvider) NewGauge(name string) metrics.Gauge { - return p.in.NewGauge(name) -} - -// NewHistogram implements Provider. Per-metric tags are not supported. -func (p *influxProvider) NewHistogram(name string, buckets int) metrics.Histogram { - return p.in.NewHistogram(name) -} - -// Stop implements Provider, invoking the stop function passed at construction. -func (p *influxProvider) Stop() { - p.stop() -} diff --git a/metrics3/provider/prometheus.go b/metrics3/provider/prometheus.go deleted file mode 100644 index e3c1189..0000000 --- a/metrics3/provider/prometheus.go +++ /dev/null @@ -1,63 +0,0 @@ -package provider - -import ( - stdprometheus "github.com/prometheus/client_golang/prometheus" - - "github.com/go-kit/kit/metrics3" - "github.com/go-kit/kit/metrics3/prometheus" -) - -type prometheusProvider struct { - namespace string - subsystem string -} - -// NewPrometheusProvider returns a Provider that produces Prometheus metrics. -// Namespace and subsystem are applied to all produced metrics. 
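The expvar-backed provider ultimately produces standard expvar variables, so exposing them is a matter of serving the standard library's /debug/vars endpoint. A sketch under that assumption (the diff does not show the expvar backend's internals, so whether its variables land in the default expvar registry is inferred from earlier go-kit releases rather than confirmed here); the metric name and port are illustrative.

```go
package main

import (
	_ "expvar" // registers the /debug/vars handler on http.DefaultServeMux
	"log"
	"net/http"

	kitexpvar "github.com/go-kit/kit/metrics3/expvar"
)

func main() {
	requests := kitexpvar.NewCounter("requests_total")
	requests.Add(1)

	// If the backend registers with the standard expvar registry (an
	// assumption, see above), requests_total shows up at /debug/vars.
	log.Fatal(http.ListenAndServe(":8080", nil))
}
```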
-func NewPrometheusProvider(namespace, subsystem string) Provider { - return &prometheusProvider{ - namespace: namespace, - subsystem: subsystem, - } -} - -// NewCounter implements Provider via prometheus.NewCounterFrom, i.e. the -// counter is registered. The metric's namespace and subsystem are taken from -// the Provider. Help is set to the name of the metric, and no const label names -// are set. -func (p *prometheusProvider) NewCounter(name string) metrics.Counter { - return prometheus.NewCounterFrom(stdprometheus.CounterOpts{ - Namespace: p.namespace, - Subsystem: p.subsystem, - Name: name, - Help: name, - }, []string{}) -} - -// NewGauge implements Provider via prometheus.NewGaugeFrom, i.e. the gauge is -// registered. The metric's namespace and subsystem are taken from the Provider. -// Help is set to the name of the metric, and no const label names are set. -func (p *prometheusProvider) NewGauge(name string) metrics.Gauge { - return prometheus.NewGaugeFrom(stdprometheus.GaugeOpts{ - Namespace: p.namespace, - Subsystem: p.subsystem, - Name: name, - Help: name, - }, []string{}) -} - -// NewGauge implements Provider via prometheus.NewSummaryFrom, i.e. the summary -// is registered. The metric's namespace and subsystem are taken from the -// Provider. Help is set to the name of the metric, and no const label names are -// set. Buckets are ignored. -func (p *prometheusProvider) NewHistogram(name string, _ int) metrics.Histogram { - return prometheus.NewSummaryFrom(stdprometheus.SummaryOpts{ - Namespace: p.namespace, - Subsystem: p.subsystem, - Name: name, - Help: name, - }, []string{}) -} - -// Stop implements Provider, but is a no-op. -func (p *prometheusProvider) Stop() {} diff --git a/metrics3/provider/provider.go b/metrics3/provider/provider.go deleted file mode 100644 index 174f237..0000000 --- a/metrics3/provider/provider.go +++ /dev/null @@ -1,42 +0,0 @@ -// Package provider provides a factory-like abstraction for metrics backends. -// This package is provided specifically for the needs of the NY Times framework -// Gizmo. Most normal Go kit users shouldn't need to use it. -// -// Normally, if your microservice needs to support different metrics backends, -// you can simply do different construction based on a flag. For example, -// -// var latency metrics.Histogram -// var requests metrics.Counter -// switch *metricsBackend { -// case "prometheus": -// latency = prometheus.NewSummaryVec(...) -// requests = prometheus.NewCounterVec(...) -// case "statsd": -// s := statsd.New(...) -// t := time.NewTicker(5*time.Second) -// go s.SendLoop(t.C, "tcp", "statsd.local:8125") -// latency = s.NewHistogram(...) -// requests = s.NewCounter(...) -// default: -// log.Fatal("unsupported metrics backend %q", *metricsBackend) -// } -// -package provider - -import ( - "github.com/go-kit/kit/metrics3" -) - -// Provider abstracts over constructors and lifecycle management functions for -// each supported metrics backend. It should only be used by those who need to -// swap out implementations dynamically. -// -// This is primarily useful for intermediating frameworks, and is likely -// unnecessary for most Go kit services. See the package-level doc comment for -// more typical usage instructions. 
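The flip side of the switch statement in the package comment is the consumer: an intermediating framework takes the Provider itself as a dependency and builds whatever metrics it needs from it. A sketch with a hypothetical serverMetrics bundle; the Provider methods and NewPrometheusProvider are the ones defined in this package, while the struct, metric names, and namespace are illustrative.

```go
package main

import (
	metrics "github.com/go-kit/kit/metrics3"
	"github.com/go-kit/kit/metrics3/provider"
)

// serverMetrics is a hypothetical bundle a framework might build from any Provider.
type serverMetrics struct {
	requests metrics.Counter
	inflight metrics.Gauge
	duration metrics.Histogram
}

func newServerMetrics(p provider.Provider) serverMetrics {
	return serverMetrics{
		requests: p.NewCounter("requests_total"),
		inflight: p.NewGauge("inflight_requests"),
		// The bucket count is ignored by the Prometheus provider, which
		// backs histograms with a Summary.
		duration: p.NewHistogram("request_duration_seconds", 50),
	}
}

func main() {
	p := provider.NewPrometheusProvider("myservice", "api")
	defer p.Stop() // a no-op for Prometheus, but uniform across backends

	m := newServerMetrics(p)
	m.requests.Add(1)
	m.inflight.Set(3)
	m.duration.Observe(0.25)
}
```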
-type Provider interface { - NewCounter(name string) metrics.Counter - NewGauge(name string) metrics.Gauge - NewHistogram(name string, buckets int) metrics.Histogram - Stop() -} diff --git a/metrics3/provider/statsd.go b/metrics3/provider/statsd.go deleted file mode 100644 index 2dc1b8c..0000000 --- a/metrics3/provider/statsd.go +++ /dev/null @@ -1,43 +0,0 @@ -package provider - -import ( - "github.com/go-kit/kit/metrics3" - "github.com/go-kit/kit/metrics3/statsd" -) - -type statsdProvider struct { - s *statsd.Statsd - stop func() -} - -// NewStatsdProvider wraps the given Statsd object and stop func and returns a -// Provider that produces Statsd metrics. A typical stop function would be -// ticker.Stop from the ticker passed to the SendLoop helper method. -func NewStatsdProvider(s *statsd.Statsd, stop func()) Provider { - return &statsdProvider{ - s: s, - stop: stop, - } -} - -// NewCounter implements Provider. -func (p *statsdProvider) NewCounter(name string) metrics.Counter { - return p.s.NewCounter(name, 1.0) -} - -// NewGauge implements Provider. -func (p *statsdProvider) NewGauge(name string) metrics.Gauge { - return p.s.NewGauge(name) -} - -// NewHistogram implements Provider, returning a StatsD Timing that accepts -// observations in milliseconds. The sample rate is fixed at 1.0. The bucket -// parameter is ignored. -func (p *statsdProvider) NewHistogram(name string, _ int) metrics.Histogram { - return p.s.NewTiming(name, 1.0) -} - -// Stop implements Provider, invoking the stop function passed at construction. -func (p *statsdProvider) Stop() { - p.stop() -} diff --git a/metrics3/statsd/statsd.go b/metrics3/statsd/statsd.go deleted file mode 100644 index 8a35c6d..0000000 --- a/metrics3/statsd/statsd.go +++ /dev/null @@ -1,232 +0,0 @@ -// Package statsd provides a StatsD backend for package metrics. StatsD has no -// concept of arbitrary key-value tagging, so label values are not supported, -// and With is a no-op on all metrics. -// -// This package batches observations and emits them on some schedule to the -// remote server. This is useful even if you connect to your StatsD server over -// UDP. Emitting one network packet per observation can quickly overwhelm even -// the fastest internal network. -package statsd - -import ( - "fmt" - "io" - "time" - - "github.com/go-kit/kit/log" - "github.com/go-kit/kit/metrics3" - "github.com/go-kit/kit/metrics3/internal/lv" - "github.com/go-kit/kit/metrics3/internal/ratemap" - "github.com/go-kit/kit/util/conn" -) - -// Statsd receives metrics observations and forwards them to a StatsD server. -// Create a Statsd object, use it to create metrics, and pass those metrics as -// dependencies to the components that will use them. -// -// All metrics are buffered until WriteTo is called. Counters and gauges are -// aggregated into a single observation per timeseries per write. Timings are -// buffered but not aggregated. -// -// To regularly report metrics to an io.Writer, use the WriteLoop helper method. -// To send to a StatsD server, use the SendLoop helper method. -type Statsd struct { - prefix string - rates *ratemap.RateMap - - // The observations are collected in an N-dimensional vector space, even - // though they only take advantage of a single dimension (name). This is an - // implementation detail born purely from convenience. It would be more - // accurate to collect them in a map[string][]float64, but we already have - // this nice data structure and helper methods. 
- counters *lv.Space - gauges *lv.Space - timings *lv.Space - - logger log.Logger -} - -// New returns a Statsd object that may be used to create metrics. Prefix is -// applied to all created metrics. Callers must ensure that regular calls to -// WriteTo are performed, either manually or with one of the helper methods. -func New(prefix string, logger log.Logger) *Statsd { - return &Statsd{ - prefix: prefix, - rates: ratemap.New(), - counters: lv.NewSpace(), - gauges: lv.NewSpace(), - timings: lv.NewSpace(), - logger: logger, - } -} - -// NewCounter returns a counter, sending observations to this Statsd object. -func (s *Statsd) NewCounter(name string, sampleRate float64) *Counter { - s.rates.Set(s.prefix+name, sampleRate) - return &Counter{ - name: s.prefix + name, - obs: s.counters.Observe, - } -} - -// NewGauge returns a gauge, sending observations to this Statsd object. -func (s *Statsd) NewGauge(name string) *Gauge { - return &Gauge{ - name: s.prefix + name, - obs: s.gauges.Observe, - } -} - -// NewTiming returns a histogram whose observations are interpreted as -// millisecond durations, and are forwarded to this Statsd object. -func (s *Statsd) NewTiming(name string, sampleRate float64) *Timing { - s.rates.Set(s.prefix+name, sampleRate) - return &Timing{ - name: s.prefix + name, - obs: s.timings.Observe, - } -} - -// WriteLoop is a helper method that invokes WriteTo to the passed writer every -// time the passed channel fires. This method blocks until the channel is -// closed, so clients probably want to run it in its own goroutine. For typical -// usage, create a time.Ticker and pass its C channel to this method. -func (s *Statsd) WriteLoop(c <-chan time.Time, w io.Writer) { - for range c { - if _, err := s.WriteTo(w); err != nil { - s.logger.Log("during", "WriteTo", "err", err) - } - } -} - -// SendLoop is a helper method that wraps WriteLoop, passing a managed -// connection to the network and address. Like WriteLoop, this method blocks -// until the channel is closed, so clients probably want to start it in its own -// goroutine. For typical usage, create a time.Ticker and pass its C channel to -// this method. -func (s *Statsd) SendLoop(c <-chan time.Time, network, address string) { - s.WriteLoop(c, conn.NewDefaultManager(network, address, s.logger)) -} - -// WriteTo flushes the buffered content of the metrics to the writer, in -// StatsD format. WriteTo abides best-effort semantics, so observations are -// lost if there is a problem with the write. Clients should be sure to call -// WriteTo regularly, ideally through the WriteLoop or SendLoop helper methods. 
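A sketch exercising two of the pieces documented above, a sampled Timing and WriteLoop against a plain io.Writer, which is handy for eyeballing the emitted lines before pointing SendLoop at a real server. Import paths are the metrics3 ones being removed; the prefix, metric name, and ticker period are illustrative. Note that in this implementation the sample rate is only recorded and appended to emitted lines; the client does not drop observations itself.

```go
package main

import (
	"os"
	"time"

	"github.com/go-kit/kit/log"
	"github.com/go-kit/kit/metrics3/statsd"
)

func main() {
	s := statsd.New("myservice.", log.NewNopLogger())

	// 25% sample rate: the rate is appended to emitted lines as "|@0.25",
	// but every Observe call is still buffered and written.
	latency := s.NewTiming("request_latency_ms", 0.25)

	flush := time.NewTicker(5 * time.Second)
	defer flush.Stop()
	go s.WriteLoop(flush.C, os.Stderr) // or s.SendLoop(flush.C, "udp", "statsd.internal:8125")

	for i := 0; i < 100; i++ {
		begin := time.Now()
		time.Sleep(100 * time.Millisecond) // stand-in for real work
		latency.Observe(float64(time.Since(begin)) / float64(time.Millisecond))
	}
}
```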
-func (s *Statsd) WriteTo(w io.Writer) (count int64, err error) { - var n int - - s.counters.Reset().Walk(func(name string, _ lv.LabelValues, values []float64) bool { - n, err = fmt.Fprintf(w, "%s:%f|c%s\n", name, sum(values), sampling(s.rates.Get(name))) - if err != nil { - return false - } - count += int64(n) - return true - }) - if err != nil { - return count, err - } - - s.gauges.Reset().Walk(func(name string, _ lv.LabelValues, values []float64) bool { - n, err = fmt.Fprintf(w, "%s:%f|g\n", name, last(values)) - if err != nil { - return false - } - count += int64(n) - return true - }) - if err != nil { - return count, err - } - - s.timings.Reset().Walk(func(name string, _ lv.LabelValues, values []float64) bool { - sampleRate := s.rates.Get(name) - for _, value := range values { - n, err = fmt.Fprintf(w, "%s:%f|ms%s\n", name, value, sampling(sampleRate)) - if err != nil { - return false - } - count += int64(n) - } - return true - }) - if err != nil { - return count, err - } - - return count, err -} - -func sum(a []float64) float64 { - var v float64 - for _, f := range a { - v += f - } - return v -} - -func last(a []float64) float64 { - return a[len(a)-1] -} - -func sampling(r float64) string { - var sv string - if r < 1.0 { - sv = fmt.Sprintf("|@%f", r) - } - return sv -} - -type observeFunc func(name string, lvs lv.LabelValues, value float64) - -// Counter is a StatsD counter. Observations are forwarded to a Statsd object, -// and aggregated (summed) per timeseries. -type Counter struct { - name string - obs observeFunc -} - -// With is a no-op. -func (c *Counter) With(...string) metrics.Counter { - return c -} - -// Add implements metrics.Counter. -func (c *Counter) Add(delta float64) { - c.obs(c.name, lv.LabelValues{}, delta) -} - -// Gauge is a StatsD gauge. Observations are forwarded to a Statsd object, and -// aggregated (the last observation selected) per timeseries. -type Gauge struct { - name string - obs observeFunc -} - -// With is a no-op. -func (g *Gauge) With(...string) metrics.Gauge { - return g -} - -// Set implements metrics.Gauge. -func (g *Gauge) Set(value float64) { - g.obs(g.name, lv.LabelValues{}, value) -} - -// Timing is a StatsD timing, or metrics.Histogram. Observations are -// forwarded to a Statsd object, and collected (but not aggregated) per -// timeseries. -type Timing struct { - name string - obs observeFunc -} - -// With is a no-op. -func (t *Timing) With(...string) metrics.Histogram { - return t -} - -// Observe implements metrics.Histogram. Value is interpreted as milliseconds. -func (t *Timing) Observe(value float64) { - t.obs(t.name, lv.LabelValues{}, value) -} diff --git a/metrics3/statsd/statsd_test.go b/metrics3/statsd/statsd_test.go deleted file mode 100644 index a09eccd..0000000 --- a/metrics3/statsd/statsd_test.go +++ /dev/null @@ -1,66 +0,0 @@ -package statsd - -import ( - "testing" - - "github.com/go-kit/kit/log" - "github.com/go-kit/kit/metrics3/teststat" -) - -func TestCounter(t *testing.T) { - prefix, name := "abc.", "def" - label, value := "label", "value" // ignored - regex := `^` + prefix + name + `:([0-9\.]+)\|c$` - s := New(prefix, log.NewNopLogger()) - counter := s.NewCounter(name, 1.0).With(label, value) - valuef := teststat.SumLines(s, regex) - if err := teststat.TestCounter(counter, valuef); err != nil { - t.Fatal(err) - } -} - -func TestCounterSampled(t *testing.T) { - // This will involve multiplying the observed sum by the inverse of the - // sample rate and checking against the expected value within some - // tolerance. 
- t.Skip("TODO") -} - -func TestGauge(t *testing.T) { - prefix, name := "ghi.", "jkl" - label, value := "xyz", "abc" // ignored - regex := `^` + prefix + name + `:([0-9\.]+)\|g$` - s := New(prefix, log.NewNopLogger()) - gauge := s.NewGauge(name).With(label, value) - valuef := teststat.LastLine(s, regex) - if err := teststat.TestGauge(gauge, valuef); err != nil { - t.Fatal(err) - } -} - -// StatsD timings just emit all observations. So, we collect them into a generic -// histogram, and run the statistics test on that. - -func TestTiming(t *testing.T) { - prefix, name := "statsd.", "timing_test" - label, value := "abc", "def" // ignored - regex := `^` + prefix + name + `:([0-9\.]+)\|ms$` - s := New(prefix, log.NewNopLogger()) - timing := s.NewTiming(name, 1.0).With(label, value) - quantiles := teststat.Quantiles(s, regex, 50) // no |@0.X - if err := teststat.TestHistogram(timing, quantiles, 0.01); err != nil { - t.Fatal(err) - } -} - -func TestTimingSampled(t *testing.T) { - prefix, name := "statsd.", "sampled_timing_test" - label, value := "foo", "bar" // ignored - regex := `^` + prefix + name + `:([0-9\.]+)\|ms\|@0\.01[0]*$` - s := New(prefix, log.NewNopLogger()) - timing := s.NewTiming(name, 0.01).With(label, value) - quantiles := teststat.Quantiles(s, regex, 50) - if err := teststat.TestHistogram(timing, quantiles, 0.02); err != nil { - t.Fatal(err) - } -} diff --git a/metrics3/teststat/buffers.go b/metrics3/teststat/buffers.go deleted file mode 100644 index 6021780..0000000 --- a/metrics3/teststat/buffers.go +++ /dev/null @@ -1,65 +0,0 @@ -package teststat - -import ( - "bufio" - "bytes" - "io" - "regexp" - "strconv" - - "github.com/go-kit/kit/metrics3/generic" -) - -// SumLines expects a regex whose first capture group can be parsed as a -// float64. It will dump the WriterTo and parse each line, expecting to find a -// match. It returns the sum of all captured floats. -func SumLines(w io.WriterTo, regex string) func() float64 { - return func() float64 { - sum, _ := stats(w, regex, nil) - return sum - } -} - -// LastLine expects a regex whose first capture group can be parsed as a -// float64. It will dump the WriterTo and parse each line, expecting to find a -// match. It returns the final captured float. -func LastLine(w io.WriterTo, regex string) func() float64 { - return func() float64 { - _, final := stats(w, regex, nil) - return final - } -} - -// Quantiles expects a regex whose first capture group can be parsed as a -// float64. It will dump the WriterTo and parse each line, expecting to find a -// match. It observes all captured floats into a generic.Histogram with the -// given number of buckets, and returns the 50th, 90th, 95th, and 99th quantiles -// from that histogram. 
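The helpers above (SumLines, LastLine, Quantiles) work by dumping any io.WriterTo, such as a *Statsd, and regexp-matching each emitted line. For orientation, this is roughly what one flush looks like, following the WriteTo format strings earlier in this patch; the prefix and metric names are illustrative.

```go
package main

import (
	"bytes"
	"fmt"

	"github.com/go-kit/kit/log"
	"github.com/go-kit/kit/metrics3/statsd"
)

func main() {
	s := statsd.New("svc.", log.NewNopLogger())
	s.NewCounter("requests", 1.0).Add(3)
	s.NewGauge("queue_depth").Set(7)
	s.NewTiming("latency_ms", 0.5).Observe(12)

	var buf bytes.Buffer
	s.WriteTo(&buf) // counters are summed, gauges keep the last value, timings are emitted per observation
	fmt.Print(buf.String())

	// Expected shape (counters, then gauges, then timings):
	//   svc.requests:3.000000|c
	//   svc.queue_depth:7.000000|g
	//   svc.latency_ms:12.000000|ms|@0.500000
}
```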
-func Quantiles(w io.WriterTo, regex string, buckets int) func() (float64, float64, float64, float64) { - return func() (float64, float64, float64, float64) { - h := generic.NewHistogram("quantile-test", buckets) - stats(w, regex, h) - return h.Quantile(0.50), h.Quantile(0.90), h.Quantile(0.95), h.Quantile(0.99) - } -} - -func stats(w io.WriterTo, regex string, h *generic.Histogram) (sum, final float64) { - re := regexp.MustCompile(regex) - buf := &bytes.Buffer{} - w.WriteTo(buf) - //fmt.Fprintf(os.Stderr, "%s\n", buf.String()) - s := bufio.NewScanner(buf) - for s.Scan() { - match := re.FindStringSubmatch(s.Text()) - f, err := strconv.ParseFloat(match[1], 64) - if err != nil { - panic(err) - } - sum += f - final = f - if h != nil { - h.Observe(f) - } - } - return sum, final -} diff --git a/metrics3/teststat/populate.go b/metrics3/teststat/populate.go deleted file mode 100644 index 64be756..0000000 --- a/metrics3/teststat/populate.go +++ /dev/null @@ -1,72 +0,0 @@ -package teststat - -import ( - "math" - "math/rand" - - "github.com/go-kit/kit/metrics3" -) - -// PopulateNormalHistogram makes a series of normal random observations into the -// histogram. The number of observations is determined by Count. The randomness -// is determined by Mean, Stdev, and the seed parameter. -// -// This is a low-level function, exported only for metrics that don't perform -// dynamic quantile computation, like a Prometheus Histogram (c.f. Summary). In -// most cases, you don't need to use this function, and can use TestHistogram -// instead. -func PopulateNormalHistogram(h metrics.Histogram, seed int) { - r := rand.New(rand.NewSource(int64(seed))) - for i := 0; i < Count; i++ { - sample := r.NormFloat64()*float64(Stdev) + float64(Mean) - if sample < 0 { - sample = 0 - } - h.Observe(sample) - } -} - -func normalQuantiles() (p50, p90, p95, p99 float64) { - return nvq(50), nvq(90), nvq(95), nvq(99) -} - -func nvq(quantile int) float64 { - // https://en.wikipedia.org/wiki/Normal_distribution#Quantile_function - return float64(Mean) + float64(Stdev)*math.Sqrt2*erfinv(2*(float64(quantile)/100)-1) -} - -func erfinv(y float64) float64 { - // https://stackoverflow.com/questions/5971830/need-code-for-inverse-error-function - if y < -1.0 || y > 1.0 { - panic("invalid input") - } - - var ( - a = [4]float64{0.886226899, -1.645349621, 0.914624893, -0.140543331} - b = [4]float64{-2.118377725, 1.442710462, -0.329097515, 0.012229801} - c = [4]float64{-1.970840454, -1.624906493, 3.429567803, 1.641345311} - d = [2]float64{3.543889200, 1.637067800} - ) - - const y0 = 0.7 - var x, z float64 - - if math.Abs(y) == 1.0 { - x = -y * math.Log(0.0) - } else if y < -y0 { - z = math.Sqrt(-math.Log((1.0 + y) / 2.0)) - x = -(((c[3]*z+c[2])*z+c[1])*z + c[0]) / ((d[1]*z+d[0])*z + 1.0) - } else { - if y < y0 { - z = y * y - x = y * (((a[3]*z+a[2])*z+a[1])*z + a[0]) / ((((b[3]*z+b[3])*z+b[1])*z+b[0])*z + 1.0) - } else { - z = math.Sqrt(-math.Log((1.0 - y) / 2.0)) - x = (((c[3]*z+c[2])*z+c[1])*z + c[0]) / ((d[1]*z+d[0])*z + 1.0) - } - x = x - (math.Erf(x)-y)/(2.0/math.SqrtPi*math.Exp(-x*x)) - x = x - (math.Erf(x)-y)/(2.0/math.SqrtPi*math.Exp(-x*x)) - } - - return x -} diff --git a/metrics3/teststat/teststat.go b/metrics3/teststat/teststat.go deleted file mode 100644 index b303c40..0000000 --- a/metrics3/teststat/teststat.go +++ /dev/null @@ -1,103 +0,0 @@ -// Package teststat provides helpers for testing metrics backends. 
-package teststat - -import ( - "errors" - "fmt" - "math" - "math/rand" - "strings" - - "github.com/go-kit/kit/metrics3" -) - -// TestCounter puts some deltas through the counter, and then calls the value -// func to check that the counter has the correct final value. -func TestCounter(counter metrics.Counter, value func() float64) error { - a := rand.Perm(100) - n := rand.Intn(len(a)) - - var want float64 - for i := 0; i < n; i++ { - f := float64(a[i]) - counter.Add(f) - want += f - } - - if have := value(); want != have { - return fmt.Errorf("want %f, have %f", want, have) - } - - return nil -} - -// TestGauge puts some values through the gauge, and then calls the value func -// to check that the gauge has the correct final value. -func TestGauge(gauge metrics.Gauge, value func() float64) error { - a := rand.Perm(100) - n := rand.Intn(len(a)) - - var want float64 - for i := 0; i < n; i++ { - f := float64(a[i]) - gauge.Set(f) - want = f - } - - if have := value(); want != have { - return fmt.Errorf("want %f, have %f", want, have) - } - - return nil -} - -// TestHistogram puts some observations through the histogram, and then calls -// the quantiles func to checks that the histogram has computed the correct -// quantiles within some tolerance -func TestHistogram(histogram metrics.Histogram, quantiles func() (p50, p90, p95, p99 float64), tolerance float64) error { - PopulateNormalHistogram(histogram, rand.Int()) - - want50, want90, want95, want99 := normalQuantiles() - have50, have90, have95, have99 := quantiles() - - var errs []string - if want, have := want50, have50; !cmp(want, have, tolerance) { - errs = append(errs, fmt.Sprintf("p50: want %f, have %f", want, have)) - } - if want, have := want90, have90; !cmp(want, have, tolerance) { - errs = append(errs, fmt.Sprintf("p90: want %f, have %f", want, have)) - } - if want, have := want95, have95; !cmp(want, have, tolerance) { - errs = append(errs, fmt.Sprintf("p95: want %f, have %f", want, have)) - } - if want, have := want99, have99; !cmp(want, have, tolerance) { - errs = append(errs, fmt.Sprintf("p99: want %f, have %f", want, have)) - } - if len(errs) > 0 { - return errors.New(strings.Join(errs, "; ")) - } - - return nil -} - -var ( - Count = 12345 - Mean = 500 - Stdev = 25 -) - -// ExpectedObservationsLessThan returns the number of observations that should -// have a value less than or equal to the given value, given a normal -// distribution of observations described by Count, Mean, and Stdev. -func ExpectedObservationsLessThan(bucket int64) int64 { - // https://code.google.com/p/gostat/source/browse/stat/normal.go - cdf := ((1.0 / 2.0) * (1 + math.Erf((float64(bucket)-float64(Mean))/(float64(Stdev)*math.Sqrt2)))) - return int64(cdf * float64(Count)) -} - -func cmp(want, have, tol float64) bool { - if (math.Abs(want-have) / want) > tol { - return false - } - return true -}
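Taken together, teststat gives backend authors a uniform harness: feed known observations in, read values back out, and compare against analytic expectations. Below is a sketch of a backend-style test driving the generic in-memory histogram (its NewHistogram constructor and Quantile method are the ones used by the Quantiles helper above); mybackend_test is a hypothetical package, and both the 0.01 tolerance and the assumption that the generic histogram satisfies metrics.Histogram are carried over from the other tests in this diff rather than verified here.

```go
package mybackend_test

import (
	"testing"

	"github.com/go-kit/kit/metrics3/generic"
	"github.com/go-kit/kit/metrics3/teststat"
)

func TestGenericHistogramQuantiles(t *testing.T) {
	h := generic.NewHistogram("test_histogram", 50) // 50 buckets, as in the StatsD tests

	quantiles := func() (p50, p90, p95, p99 float64) {
		return h.Quantile(0.50), h.Quantile(0.90), h.Quantile(0.95), h.Quantile(0.99)
	}

	// TestHistogram populates h with normally distributed observations and
	// checks the reported quantiles against the analytic values.
	if err := teststat.TestHistogram(h, quantiles, 0.01); err != nil {
		t.Fatal(err)
	}
}
```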