From 9314b5b8a0a7c268e2200b8d3b828d22426fb3ca Mon Sep 17 00:00:00 2001 From: Philip Griesbacher Date: Wed, 6 Sep 2017 15:32:04 +0200 Subject: [PATCH] added deps --- .../prometheus/client_golang/.gitignore | 26 + .../prometheus/client_golang/.travis.yml | 10 + .../prometheus/client_golang/CHANGELOG.md | 109 +++ .../prometheus/client_golang/CONTRIBUTING.md | 18 + .../prometheus/client_golang/LICENSE | 201 +++++ .../prometheus/client_golang/MAINTAINERS.md | 1 + .../prometheus/client_golang/NOTICE | 23 + .../prometheus/client_golang/README.md | 47 ++ .../prometheus/client_golang/VERSION | 1 + .../prometheus/client_golang/api/client.go | 131 +++ .../client_golang/api/client_test.go | 115 +++ .../client_golang/api/prometheus/v1/api.go | 261 ++++++ .../api/prometheus/v1/api_test.go | 381 +++++++++ .../client_golang/examples/random/main.go | 106 +++ .../client_golang/examples/simple/main.go | 31 + .../client_golang/prometheus/.gitignore | 1 + .../client_golang/prometheus/README.md | 1 + .../prometheus/benchmark_test.go | 185 +++++ .../client_golang/prometheus/collector.go | 75 ++ .../client_golang/prometheus/counter.go | 189 +++++ .../client_golang/prometheus/counter_test.go | 114 +++ .../client_golang/prometheus/desc.go | 189 +++++ .../client_golang/prometheus/desc_test.go | 17 + .../client_golang/prometheus/doc.go | 186 +++++ .../prometheus/example_clustermanager_test.go | 118 +++ .../prometheus/example_timer_complex_test.go | 71 ++ .../prometheus/example_timer_gauge_test.go | 48 ++ .../prometheus/example_timer_test.go | 40 + .../client_golang/prometheus/examples_test.go | 754 +++++++++++++++++ .../prometheus/expvar_collector.go | 119 +++ .../prometheus/expvar_collector_test.go | 97 +++ .../client_golang/prometheus/fnv.go | 29 + .../client_golang/prometheus/gauge.go | 173 ++++ .../client_golang/prometheus/gauge_test.go | 202 +++++ .../client_golang/prometheus/go_collector.go | 284 +++++++ .../prometheus/go_collector_test.go | 127 +++ .../prometheus/graphite/bridge.go | 280 +++++++ .../prometheus/graphite/bridge_test.go | 309 +++++++ .../client_golang/prometheus/histogram.go | 473 +++++++++++ .../prometheus/histogram_test.go | 348 ++++++++ .../client_golang/prometheus/http.go | 524 ++++++++++++ .../client_golang/prometheus/http_test.go | 154 ++++ .../client_golang/prometheus/labels.go | 57 ++ .../client_golang/prometheus/metric.go | 166 ++++ .../client_golang/prometheus/metric_test.go | 35 + .../client_golang/prometheus/observer.go | 50 ++ .../prometheus/process_collector.go | 140 ++++ .../prometheus/process_collector_test.go | 58 ++ .../prometheus/promhttp/delegator.go | 199 +++++ .../prometheus/promhttp/delegator_1_8.go | 181 +++++ .../prometheus/promhttp/delegator_pre_1_8.go | 44 + .../client_golang/prometheus/promhttp/http.go | 204 +++++ .../prometheus/promhttp/http_test.go | 131 +++ .../prometheus/promhttp/instrument_client.go | 98 +++ .../promhttp/instrument_client_1_8.go | 144 ++++ .../promhttp/instrument_client_1_8_test.go | 195 +++++ .../prometheus/promhttp/instrument_server.go | 440 ++++++++++ .../promhttp/instrument_server_test.go | 233 ++++++ .../push/example_add_from_gatherer_test.go | 84 ++ .../prometheus/push/examples_test.go | 36 + .../client_golang/prometheus/push/push.go | 172 ++++ .../prometheus/push/push_test.go | 176 ++++ .../client_golang/prometheus/registry.go | 762 ++++++++++++++++++ .../client_golang/prometheus/registry_test.go | 590 ++++++++++++++ .../client_golang/prometheus/summary.go | 572 +++++++++++++ .../client_golang/prometheus/summary_test.go | 388 +++++++++ 
.../client_golang/prometheus/timer.go | 51 ++ .../client_golang/prometheus/timer_test.go | 152 ++++ .../client_golang/prometheus/untyped.go | 42 + .../client_golang/prometheus/value.go | 236 ++++++ .../client_golang/prometheus/value_test.go | 43 + .../client_golang/prometheus/vec.go | 363 +++++++++ .../client_golang/prometheus/vec_test.go | 312 +++++++ .../github.com/prometheus/common/.travis.yml | 6 + .../prometheus/common/CONTRIBUTING.md | 18 + vendor/github.com/prometheus/common/LICENSE | 201 +++++ .../prometheus/common/MAINTAINERS.md | 1 + vendor/github.com/prometheus/common/NOTICE | 5 + vendor/github.com/prometheus/common/README.md | 12 + .../prometheus/common/config/config.go | 30 + .../testdata/tls_config.cert_no_key.bad.yml | 1 + .../config/testdata/tls_config.empty.good.yml | 0 .../testdata/tls_config.insecure.good.yml | 1 + .../testdata/tls_config.invalid_field.bad.yml | 1 + .../testdata/tls_config.key_no_cert.bad.yml | 1 + .../prometheus/common/config/tls_config.go | 79 ++ .../common/config/tls_config_test.go | 92 +++ .../prometheus/common/expfmt/bench_test.go | 167 ++++ .../prometheus/common/expfmt/decode.go | 429 ++++++++++ .../prometheus/common/expfmt/decode_test.go | 435 ++++++++++ .../prometheus/common/expfmt/encode.go | 88 ++ .../prometheus/common/expfmt/expfmt.go | 38 + .../prometheus/common/expfmt/fuzz.go | 36 + .../expfmt/fuzz/corpus/from_test_parse_0 | 2 + .../expfmt/fuzz/corpus/from_test_parse_1 | 6 + .../expfmt/fuzz/corpus/from_test_parse_2 | 12 + .../expfmt/fuzz/corpus/from_test_parse_3 | 22 + .../expfmt/fuzz/corpus/from_test_parse_4 | 10 + .../fuzz/corpus/from_test_parse_error_0 | 1 + .../fuzz/corpus/from_test_parse_error_1 | 1 + .../fuzz/corpus/from_test_parse_error_10 | 1 + .../fuzz/corpus/from_test_parse_error_11 | 1 + .../fuzz/corpus/from_test_parse_error_12 | 3 + .../fuzz/corpus/from_test_parse_error_13 | 3 + .../fuzz/corpus/from_test_parse_error_14 | 3 + .../fuzz/corpus/from_test_parse_error_15 | 2 + .../fuzz/corpus/from_test_parse_error_16 | 2 + .../fuzz/corpus/from_test_parse_error_17 | 1 + .../fuzz/corpus/from_test_parse_error_18 | 1 + .../fuzz/corpus/from_test_parse_error_19 | 3 + .../fuzz/corpus/from_test_parse_error_2 | 3 + .../fuzz/corpus/from_test_parse_error_3 | 1 + .../fuzz/corpus/from_test_parse_error_4 | 1 + .../fuzz/corpus/from_test_parse_error_5 | 1 + .../fuzz/corpus/from_test_parse_error_6 | 1 + .../fuzz/corpus/from_test_parse_error_7 | 3 + .../fuzz/corpus/from_test_parse_error_8 | 1 + .../fuzz/corpus/from_test_parse_error_9 | 1 + .../common/expfmt/fuzz/corpus/minimal | 1 + .../prometheus/common/expfmt/testdata/json2 | 46 ++ .../common/expfmt/testdata/json2_bad | 46 ++ .../common/expfmt/testdata/protobuf | Bin 0 -> 8239 bytes .../common/expfmt/testdata/protobuf.gz | Bin 0 -> 2097 bytes .../prometheus/common/expfmt/testdata/text | 322 ++++++++ .../prometheus/common/expfmt/testdata/text.gz | Bin 0 -> 2598 bytes .../prometheus/common/expfmt/text_create.go | 303 +++++++ .../common/expfmt/text_create_test.go | 443 ++++++++++ .../prometheus/common/expfmt/text_parse.go | 753 +++++++++++++++++ .../common/expfmt/text_parse_test.go | 588 ++++++++++++++ .../bitbucket.org/ww/goautoneg/README.txt | 67 ++ .../bitbucket.org/ww/goautoneg/autoneg.go | 162 ++++ .../ww/goautoneg/autoneg_test.go | 33 + .../common/log/eventlog_formatter.go | 89 ++ .../github.com/prometheus/common/log/log.go | 365 +++++++++ .../prometheus/common/log/log_test.go | 39 + .../prometheus/common/log/syslog_formatter.go | 126 +++ .../common/log/syslog_formatter_test.go | 52 ++ 
.../prometheus/common/model/alert.go | 136 ++++ .../prometheus/common/model/alert_test.go | 118 +++ .../prometheus/common/model/fingerprinting.go | 105 +++ .../github.com/prometheus/common/model/fnv.go | 42 + .../prometheus/common/model/labels.go | 210 +++++ .../prometheus/common/model/labels_test.go | 140 ++++ .../prometheus/common/model/labelset.go | 169 ++++ .../prometheus/common/model/metric.go | 103 +++ .../prometheus/common/model/metric_test.go | 132 +++ .../prometheus/common/model/model.go | 16 + .../prometheus/common/model/signature.go | 144 ++++ .../prometheus/common/model/signature_test.go | 314 ++++++++ .../prometheus/common/model/silence.go | 106 +++ .../prometheus/common/model/silence_test.go | 228 ++++++ .../prometheus/common/model/time.go | 249 ++++++ .../prometheus/common/model/time_test.go | 129 +++ .../prometheus/common/model/value.go | 416 ++++++++++ .../prometheus/common/model/value_test.go | 468 +++++++++++ .../prometheus/common/route/route.go | 100 +++ .../prometheus/common/route/route_test.go | 44 + .../prometheus/common/version/info.go | 89 ++ 158 files changed, 21542 insertions(+) create mode 100644 vendor/github.com/prometheus/client_golang/.gitignore create mode 100644 vendor/github.com/prometheus/client_golang/.travis.yml create mode 100644 vendor/github.com/prometheus/client_golang/CHANGELOG.md create mode 100644 vendor/github.com/prometheus/client_golang/CONTRIBUTING.md create mode 100644 vendor/github.com/prometheus/client_golang/LICENSE create mode 100644 vendor/github.com/prometheus/client_golang/MAINTAINERS.md create mode 100644 vendor/github.com/prometheus/client_golang/NOTICE create mode 100644 vendor/github.com/prometheus/client_golang/README.md create mode 100644 vendor/github.com/prometheus/client_golang/VERSION create mode 100644 vendor/github.com/prometheus/client_golang/api/client.go create mode 100644 vendor/github.com/prometheus/client_golang/api/client_test.go create mode 100644 vendor/github.com/prometheus/client_golang/api/prometheus/v1/api.go create mode 100644 vendor/github.com/prometheus/client_golang/api/prometheus/v1/api_test.go create mode 100644 vendor/github.com/prometheus/client_golang/examples/random/main.go create mode 100644 vendor/github.com/prometheus/client_golang/examples/simple/main.go create mode 100644 vendor/github.com/prometheus/client_golang/prometheus/.gitignore create mode 100644 vendor/github.com/prometheus/client_golang/prometheus/README.md create mode 100644 vendor/github.com/prometheus/client_golang/prometheus/benchmark_test.go create mode 100644 vendor/github.com/prometheus/client_golang/prometheus/collector.go create mode 100644 vendor/github.com/prometheus/client_golang/prometheus/counter.go create mode 100644 vendor/github.com/prometheus/client_golang/prometheus/counter_test.go create mode 100644 vendor/github.com/prometheus/client_golang/prometheus/desc.go create mode 100644 vendor/github.com/prometheus/client_golang/prometheus/desc_test.go create mode 100644 vendor/github.com/prometheus/client_golang/prometheus/doc.go create mode 100644 vendor/github.com/prometheus/client_golang/prometheus/example_clustermanager_test.go create mode 100644 vendor/github.com/prometheus/client_golang/prometheus/example_timer_complex_test.go create mode 100644 vendor/github.com/prometheus/client_golang/prometheus/example_timer_gauge_test.go create mode 100644 vendor/github.com/prometheus/client_golang/prometheus/example_timer_test.go create mode 100644 vendor/github.com/prometheus/client_golang/prometheus/examples_test.go create 
mode 100644 vendor/github.com/prometheus/client_golang/prometheus/expvar_collector.go create mode 100644 vendor/github.com/prometheus/client_golang/prometheus/expvar_collector_test.go create mode 100644 vendor/github.com/prometheus/client_golang/prometheus/fnv.go create mode 100644 vendor/github.com/prometheus/client_golang/prometheus/gauge.go create mode 100644 vendor/github.com/prometheus/client_golang/prometheus/gauge_test.go create mode 100644 vendor/github.com/prometheus/client_golang/prometheus/go_collector.go create mode 100644 vendor/github.com/prometheus/client_golang/prometheus/go_collector_test.go create mode 100644 vendor/github.com/prometheus/client_golang/prometheus/graphite/bridge.go create mode 100644 vendor/github.com/prometheus/client_golang/prometheus/graphite/bridge_test.go create mode 100644 vendor/github.com/prometheus/client_golang/prometheus/histogram.go create mode 100644 vendor/github.com/prometheus/client_golang/prometheus/histogram_test.go create mode 100644 vendor/github.com/prometheus/client_golang/prometheus/http.go create mode 100644 vendor/github.com/prometheus/client_golang/prometheus/http_test.go create mode 100644 vendor/github.com/prometheus/client_golang/prometheus/labels.go create mode 100644 vendor/github.com/prometheus/client_golang/prometheus/metric.go create mode 100644 vendor/github.com/prometheus/client_golang/prometheus/metric_test.go create mode 100644 vendor/github.com/prometheus/client_golang/prometheus/observer.go create mode 100644 vendor/github.com/prometheus/client_golang/prometheus/process_collector.go create mode 100644 vendor/github.com/prometheus/client_golang/prometheus/process_collector_test.go create mode 100644 vendor/github.com/prometheus/client_golang/prometheus/promhttp/delegator.go create mode 100644 vendor/github.com/prometheus/client_golang/prometheus/promhttp/delegator_1_8.go create mode 100644 vendor/github.com/prometheus/client_golang/prometheus/promhttp/delegator_pre_1_8.go create mode 100644 vendor/github.com/prometheus/client_golang/prometheus/promhttp/http.go create mode 100644 vendor/github.com/prometheus/client_golang/prometheus/promhttp/http_test.go create mode 100644 vendor/github.com/prometheus/client_golang/prometheus/promhttp/instrument_client.go create mode 100644 vendor/github.com/prometheus/client_golang/prometheus/promhttp/instrument_client_1_8.go create mode 100644 vendor/github.com/prometheus/client_golang/prometheus/promhttp/instrument_client_1_8_test.go create mode 100644 vendor/github.com/prometheus/client_golang/prometheus/promhttp/instrument_server.go create mode 100644 vendor/github.com/prometheus/client_golang/prometheus/promhttp/instrument_server_test.go create mode 100644 vendor/github.com/prometheus/client_golang/prometheus/push/example_add_from_gatherer_test.go create mode 100644 vendor/github.com/prometheus/client_golang/prometheus/push/examples_test.go create mode 100644 vendor/github.com/prometheus/client_golang/prometheus/push/push.go create mode 100644 vendor/github.com/prometheus/client_golang/prometheus/push/push_test.go create mode 100644 vendor/github.com/prometheus/client_golang/prometheus/registry.go create mode 100644 vendor/github.com/prometheus/client_golang/prometheus/registry_test.go create mode 100644 vendor/github.com/prometheus/client_golang/prometheus/summary.go create mode 100644 vendor/github.com/prometheus/client_golang/prometheus/summary_test.go create mode 100644 vendor/github.com/prometheus/client_golang/prometheus/timer.go create mode 100644 
vendor/github.com/prometheus/client_golang/prometheus/timer_test.go create mode 100644 vendor/github.com/prometheus/client_golang/prometheus/untyped.go create mode 100644 vendor/github.com/prometheus/client_golang/prometheus/value.go create mode 100644 vendor/github.com/prometheus/client_golang/prometheus/value_test.go create mode 100644 vendor/github.com/prometheus/client_golang/prometheus/vec.go create mode 100644 vendor/github.com/prometheus/client_golang/prometheus/vec_test.go create mode 100644 vendor/github.com/prometheus/common/.travis.yml create mode 100644 vendor/github.com/prometheus/common/CONTRIBUTING.md create mode 100644 vendor/github.com/prometheus/common/LICENSE create mode 100644 vendor/github.com/prometheus/common/MAINTAINERS.md create mode 100644 vendor/github.com/prometheus/common/NOTICE create mode 100644 vendor/github.com/prometheus/common/README.md create mode 100644 vendor/github.com/prometheus/common/config/config.go create mode 100644 vendor/github.com/prometheus/common/config/testdata/tls_config.cert_no_key.bad.yml create mode 100644 vendor/github.com/prometheus/common/config/testdata/tls_config.empty.good.yml create mode 100644 vendor/github.com/prometheus/common/config/testdata/tls_config.insecure.good.yml create mode 100644 vendor/github.com/prometheus/common/config/testdata/tls_config.invalid_field.bad.yml create mode 100644 vendor/github.com/prometheus/common/config/testdata/tls_config.key_no_cert.bad.yml create mode 100644 vendor/github.com/prometheus/common/config/tls_config.go create mode 100644 vendor/github.com/prometheus/common/config/tls_config_test.go create mode 100644 vendor/github.com/prometheus/common/expfmt/bench_test.go create mode 100644 vendor/github.com/prometheus/common/expfmt/decode.go create mode 100644 vendor/github.com/prometheus/common/expfmt/decode_test.go create mode 100644 vendor/github.com/prometheus/common/expfmt/encode.go create mode 100644 vendor/github.com/prometheus/common/expfmt/expfmt.go create mode 100644 vendor/github.com/prometheus/common/expfmt/fuzz.go create mode 100644 vendor/github.com/prometheus/common/expfmt/fuzz/corpus/from_test_parse_0 create mode 100644 vendor/github.com/prometheus/common/expfmt/fuzz/corpus/from_test_parse_1 create mode 100644 vendor/github.com/prometheus/common/expfmt/fuzz/corpus/from_test_parse_2 create mode 100644 vendor/github.com/prometheus/common/expfmt/fuzz/corpus/from_test_parse_3 create mode 100644 vendor/github.com/prometheus/common/expfmt/fuzz/corpus/from_test_parse_4 create mode 100644 vendor/github.com/prometheus/common/expfmt/fuzz/corpus/from_test_parse_error_0 create mode 100644 vendor/github.com/prometheus/common/expfmt/fuzz/corpus/from_test_parse_error_1 create mode 100644 vendor/github.com/prometheus/common/expfmt/fuzz/corpus/from_test_parse_error_10 create mode 100644 vendor/github.com/prometheus/common/expfmt/fuzz/corpus/from_test_parse_error_11 create mode 100644 vendor/github.com/prometheus/common/expfmt/fuzz/corpus/from_test_parse_error_12 create mode 100644 vendor/github.com/prometheus/common/expfmt/fuzz/corpus/from_test_parse_error_13 create mode 100644 vendor/github.com/prometheus/common/expfmt/fuzz/corpus/from_test_parse_error_14 create mode 100644 vendor/github.com/prometheus/common/expfmt/fuzz/corpus/from_test_parse_error_15 create mode 100644 vendor/github.com/prometheus/common/expfmt/fuzz/corpus/from_test_parse_error_16 create mode 100644 vendor/github.com/prometheus/common/expfmt/fuzz/corpus/from_test_parse_error_17 create mode 100644 
vendor/github.com/prometheus/common/expfmt/fuzz/corpus/from_test_parse_error_18 create mode 100644 vendor/github.com/prometheus/common/expfmt/fuzz/corpus/from_test_parse_error_19 create mode 100644 vendor/github.com/prometheus/common/expfmt/fuzz/corpus/from_test_parse_error_2 create mode 100644 vendor/github.com/prometheus/common/expfmt/fuzz/corpus/from_test_parse_error_3 create mode 100644 vendor/github.com/prometheus/common/expfmt/fuzz/corpus/from_test_parse_error_4 create mode 100644 vendor/github.com/prometheus/common/expfmt/fuzz/corpus/from_test_parse_error_5 create mode 100644 vendor/github.com/prometheus/common/expfmt/fuzz/corpus/from_test_parse_error_6 create mode 100644 vendor/github.com/prometheus/common/expfmt/fuzz/corpus/from_test_parse_error_7 create mode 100644 vendor/github.com/prometheus/common/expfmt/fuzz/corpus/from_test_parse_error_8 create mode 100644 vendor/github.com/prometheus/common/expfmt/fuzz/corpus/from_test_parse_error_9 create mode 100644 vendor/github.com/prometheus/common/expfmt/fuzz/corpus/minimal create mode 100644 vendor/github.com/prometheus/common/expfmt/testdata/json2 create mode 100644 vendor/github.com/prometheus/common/expfmt/testdata/json2_bad create mode 100644 vendor/github.com/prometheus/common/expfmt/testdata/protobuf create mode 100644 vendor/github.com/prometheus/common/expfmt/testdata/protobuf.gz create mode 100644 vendor/github.com/prometheus/common/expfmt/testdata/text create mode 100644 vendor/github.com/prometheus/common/expfmt/testdata/text.gz create mode 100644 vendor/github.com/prometheus/common/expfmt/text_create.go create mode 100644 vendor/github.com/prometheus/common/expfmt/text_create_test.go create mode 100644 vendor/github.com/prometheus/common/expfmt/text_parse.go create mode 100644 vendor/github.com/prometheus/common/expfmt/text_parse_test.go create mode 100644 vendor/github.com/prometheus/common/internal/bitbucket.org/ww/goautoneg/README.txt create mode 100644 vendor/github.com/prometheus/common/internal/bitbucket.org/ww/goautoneg/autoneg.go create mode 100644 vendor/github.com/prometheus/common/internal/bitbucket.org/ww/goautoneg/autoneg_test.go create mode 100644 vendor/github.com/prometheus/common/log/eventlog_formatter.go create mode 100644 vendor/github.com/prometheus/common/log/log.go create mode 100644 vendor/github.com/prometheus/common/log/log_test.go create mode 100644 vendor/github.com/prometheus/common/log/syslog_formatter.go create mode 100644 vendor/github.com/prometheus/common/log/syslog_formatter_test.go create mode 100644 vendor/github.com/prometheus/common/model/alert.go create mode 100644 vendor/github.com/prometheus/common/model/alert_test.go create mode 100644 vendor/github.com/prometheus/common/model/fingerprinting.go create mode 100644 vendor/github.com/prometheus/common/model/fnv.go create mode 100644 vendor/github.com/prometheus/common/model/labels.go create mode 100644 vendor/github.com/prometheus/common/model/labels_test.go create mode 100644 vendor/github.com/prometheus/common/model/labelset.go create mode 100644 vendor/github.com/prometheus/common/model/metric.go create mode 100644 vendor/github.com/prometheus/common/model/metric_test.go create mode 100644 vendor/github.com/prometheus/common/model/model.go create mode 100644 vendor/github.com/prometheus/common/model/signature.go create mode 100644 vendor/github.com/prometheus/common/model/signature_test.go create mode 100644 vendor/github.com/prometheus/common/model/silence.go create mode 100644 
vendor/github.com/prometheus/common/model/silence_test.go create mode 100644 vendor/github.com/prometheus/common/model/time.go create mode 100644 vendor/github.com/prometheus/common/model/time_test.go create mode 100644 vendor/github.com/prometheus/common/model/value.go create mode 100644 vendor/github.com/prometheus/common/model/value_test.go create mode 100644 vendor/github.com/prometheus/common/route/route.go create mode 100644 vendor/github.com/prometheus/common/route/route_test.go create mode 100644 vendor/github.com/prometheus/common/version/info.go diff --git a/vendor/github.com/prometheus/client_golang/.gitignore b/vendor/github.com/prometheus/client_golang/.gitignore new file mode 100644 index 0000000..f6fc2e8 --- /dev/null +++ b/vendor/github.com/prometheus/client_golang/.gitignore @@ -0,0 +1,26 @@ +# Compiled Object files, Static and Dynamic libs (Shared Objects) +*.o +*.a +*.so + +# Folders +_obj +_test + +# Architecture specific extensions/prefixes +*.[568vq] +[568vq].out + +*.cgo1.go +*.cgo2.c +_cgo_defun.c +_cgo_gotypes.go +_cgo_export.* + +_testmain.go + +*.exe + +*~ +*# +.build diff --git a/vendor/github.com/prometheus/client_golang/.travis.yml b/vendor/github.com/prometheus/client_golang/.travis.yml new file mode 100644 index 0000000..85b5115 --- /dev/null +++ b/vendor/github.com/prometheus/client_golang/.travis.yml @@ -0,0 +1,10 @@ +sudo: false +language: go + +go: + - 1.6.3 + - 1.7 + - 1.8.1 + +script: + - go test -short ./... diff --git a/vendor/github.com/prometheus/client_golang/CHANGELOG.md b/vendor/github.com/prometheus/client_golang/CHANGELOG.md new file mode 100644 index 0000000..330788a --- /dev/null +++ b/vendor/github.com/prometheus/client_golang/CHANGELOG.md @@ -0,0 +1,109 @@ +## 0.8.0 / 2016-08-17 +* [CHANGE] Registry is doing more consistency checks. This might break + existing setups that used to export inconsistent metrics. +* [CHANGE] Pushing to Pushgateway moved to package `push` and changed to allow + arbitrary grouping. +* [CHANGE] Removed `SelfCollector`. +* [CHANGE] Removed `PanicOnCollectError` and `EnableCollectChecks` methods. +* [CHANGE] Moved packages to the prometheus/common repo: `text`, `model`, + `extraction`. +* [CHANGE] Deprecated a number of functions. +* [FEATURE] Allow custom registries. Added `Registerer` and `Gatherer` + interfaces. +* [FEATURE] Separated HTTP exposition, allowing custom HTTP handlers (package + `promhttp`) and enabling the creation of other exposition mechanisms. +* [FEATURE] `MustRegister` is variadic now, allowing registration of many + collectors in one call. +* [FEATURE] Added HTTP API v1 package. +* [ENHANCEMENT] Numerous documentation improvements. +* [ENHANCEMENT] Improved metric sorting. +* [ENHANCEMENT] Inlined fnv64a hashing for improved performance. +* [ENHANCEMENT] Several test improvements. +* [BUGFIX] Handle collisions in MetricVec. + +## 0.7.0 / 2015-07-27 +* [CHANGE] Rename ExporterLabelPrefix to ExportedLabelPrefix. +* [BUGFIX] Closed gaps in metric consistency check. +* [BUGFIX] Validate LabelName/LabelSet on JSON unmarshaling. +* [ENHANCEMENT] Document the possibility to create "empty" metrics in + a metric vector. +* [ENHANCEMENT] Fix and clarify various doc comments and the README.md. +* [ENHANCEMENT] (Kind of) solve "The Proxy Problem" of http.InstrumentHandler. +* [ENHANCEMENT] Change responseWriterDelegator.written to int64. + +## 0.6.0 / 2015-06-01 +* [CHANGE] Rename process_goroutines to go_goroutines. +* [ENHANCEMENT] Validate label names during YAML decoding. 
+* [ENHANCEMENT] Add LabelName regular expression. +* [BUGFIX] Ensure alignment of struct members for 32-bit systems. + +## 0.5.0 / 2015-05-06 +* [BUGFIX] Removed a weakness in the fingerprinting aka signature code. + This makes fingerprinting slower and more allocation-heavy, but the + weakness was too severe to be tolerated. +* [CHANGE] As a result of the above, Metric.Fingerprint is now returning + a different fingerprint. To keep the same fingerprint, the new method + Metric.FastFingerprint was introduced, which will be used by the + Prometheus server for storage purposes (implying that a collision + detection has to be added, too). +* [ENHANCEMENT] The Metric.Equal and Metric.Before do not depend on + fingerprinting anymore, removing the possibility of an undetected + fingerprint collision. +* [FEATURE] The Go collector in the exposition library includes garbage + collection stats. +* [FEATURE] The exposition library allows to create constant "throw-away" + summaries and histograms. +* [CHANGE] A number of new reserved labels and prefixes. + +## 0.4.0 / 2015-04-08 +* [CHANGE] Return NaN when Summaries have no observations yet. +* [BUGFIX] Properly handle Summary decay upon Write(). +* [BUGFIX] Fix the documentation link to the consumption library. +* [FEATURE] Allow the metric family injection hook to merge with existing + metric families. +* [ENHANCEMENT] Removed cgo dependency and conditional compilation of procfs. +* [MAINTENANCE] Adjusted to changes in matttproud/golang_protobuf_extensions. + +## 0.3.2 / 2015-03-11 +* [BUGFIX] Fixed the receiver type of COWMetric.Set(). This method is + only used by the Prometheus server internally. +* [CLEANUP] Added licenses of vendored code left out by godep. + +## 0.3.1 / 2015-03-04 +* [ENHANCEMENT] Switched fingerprinting functions from own free list to + sync.Pool. +* [CHANGE] Makefile uses Go 1.4.2 now (only relevant for examples and tests). + +## 0.3.0 / 2015-03-03 +* [CHANGE] Changed the fingerprinting for metrics. THIS WILL INVALIDATE ALL + PERSISTED FINGERPRINTS. IF YOU COMPILE THE PROMETHEUS SERVER WITH THIS + VERSION, YOU HAVE TO WIPE THE PREVIOUSLY CREATED STORAGE. +* [CHANGE] LabelValuesToSignature removed. (Nobody had used it, and it was + arguably broken.) +* [CHANGE] Vendored dependencies. Those are only used by the Makefile. If + client_golang is used as a library, the vendoring will stay out of your way. +* [BUGFIX] Remove a weakness in the fingerprinting for metrics. (This made + the fingerprinting change above necessary.) +* [FEATURE] Added new fingerprinting functions SignatureForLabels and + SignatureWithoutLabels to be used by the Prometheus server. These functions + require fewer allocations than the ones currently used by the server. + +## 0.2.0 / 2015-02-23 +* [FEATURE] Introduce new Histogram metric type. +* [CHANGE] Ignore process collector errors for now (better error handling + pending). +* [CHANGE] Use clear error interface for process pidFn. +* [BUGFIX] Fix Go download links for several archs and OSes. +* [ENHANCEMENT] Massively improve Gauge and Counter performance. +* [ENHANCEMENT] Catch illegal label names for summaries in histograms. +* [ENHANCEMENT] Reduce allocations during fingerprinting. +* [ENHANCEMENT] Remove cgo dependency. procfs package will only be included if + both cgo is available and the build is for an OS with procfs. +* [CLEANUP] Clean up code style issues. +* [CLEANUP] Mark slow tests as such and exclude them from travis. +* [CLEANUP] Update protobuf library package name. 
+* [CLEANUP] Updated vendoring of beorn7/perks. + +## 0.1.0 / 2015-02-02 +* [CLEANUP] Introduced semantic versioning and changelog. From now on, + changes will be reported in this file. diff --git a/vendor/github.com/prometheus/client_golang/CONTRIBUTING.md b/vendor/github.com/prometheus/client_golang/CONTRIBUTING.md new file mode 100644 index 0000000..40503ed --- /dev/null +++ b/vendor/github.com/prometheus/client_golang/CONTRIBUTING.md @@ -0,0 +1,18 @@ +# Contributing + +Prometheus uses GitHub to manage reviews of pull requests. + +* If you have a trivial fix or improvement, go ahead and create a pull request, + addressing (with `@...`) the maintainer of this repository (see + [MAINTAINERS.md](MAINTAINERS.md)) in the description of the pull request. + +* If you plan to do something more involved, first discuss your ideas + on our [mailing list](https://groups.google.com/forum/?fromgroups#!forum/prometheus-developers). + This will avoid unnecessary work and surely give you and us a good deal + of inspiration. + +* Relevant coding style guidelines are the [Go Code Review + Comments](https://code.google.com/p/go-wiki/wiki/CodeReviewComments) + and the _Formatting and style_ section of Peter Bourgon's [Go: Best + Practices for Production + Environments](http://peter.bourgon.org/go-in-production/#formatting-and-style). diff --git a/vendor/github.com/prometheus/client_golang/LICENSE b/vendor/github.com/prometheus/client_golang/LICENSE new file mode 100644 index 0000000..261eeb9 --- /dev/null +++ b/vendor/github.com/prometheus/client_golang/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. 
For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/github.com/prometheus/client_golang/MAINTAINERS.md b/vendor/github.com/prometheus/client_golang/MAINTAINERS.md new file mode 100644 index 0000000..3ede55f --- /dev/null +++ b/vendor/github.com/prometheus/client_golang/MAINTAINERS.md @@ -0,0 +1 @@ +* Björn Rabenstein diff --git a/vendor/github.com/prometheus/client_golang/NOTICE b/vendor/github.com/prometheus/client_golang/NOTICE new file mode 100644 index 0000000..dd878a3 --- /dev/null +++ b/vendor/github.com/prometheus/client_golang/NOTICE @@ -0,0 +1,23 @@ +Prometheus instrumentation library for Go applications +Copyright 2012-2015 The Prometheus Authors + +This product includes software developed at +SoundCloud Ltd. (http://soundcloud.com/). + + +The following components are included in this product: + +perks - a fork of https://github.com/bmizerany/perks +https://github.com/beorn7/perks +Copyright 2013-2015 Blake Mizerany, Björn Rabenstein +See https://github.com/beorn7/perks/blob/master/README.md for license details. 
+ +Go support for Protocol Buffers - Google's data interchange format +http://github.com/golang/protobuf/ +Copyright 2010 The Go Authors +See source code for license details. + +Support for streaming Protocol Buffer messages for the Go language (golang). +https://github.com/matttproud/golang_protobuf_extensions +Copyright 2013 Matt T. Proud +Licensed under the Apache License, Version 2.0 diff --git a/vendor/github.com/prometheus/client_golang/README.md b/vendor/github.com/prometheus/client_golang/README.md new file mode 100644 index 0000000..479290d --- /dev/null +++ b/vendor/github.com/prometheus/client_golang/README.md @@ -0,0 +1,47 @@ +# Prometheus Go client library + +[![Build Status](https://travis-ci.org/prometheus/client_golang.svg?branch=master)](https://travis-ci.org/prometheus/client_golang) +[![Go Report Card](https://goreportcard.com/badge/github.com/prometheus/client_golang)](https://goreportcard.com/report/github.com/prometheus/client_golang) + +This is the [Go](http://golang.org) client library for +[Prometheus](http://prometheus.io). It has two separate parts, one for +instrumenting application code, and one for creating clients that talk to the +Prometheus HTTP API. + +## Instrumenting applications + +[![code-coverage](http://gocover.io/_badge/github.com/prometheus/client_golang/prometheus)](http://gocover.io/github.com/prometheus/client_golang/prometheus) [![go-doc](https://godoc.org/github.com/prometheus/client_golang/prometheus?status.svg)](https://godoc.org/github.com/prometheus/client_golang/prometheus) + +The +[`prometheus` directory](https://github.com/prometheus/client_golang/tree/master/prometheus) +contains the instrumentation library. See the +[best practices section](http://prometheus.io/docs/practices/naming/) of the +Prometheus documentation to learn more about instrumenting applications. + +The +[`examples` directory](https://github.com/prometheus/client_golang/tree/master/examples) +contains simple examples of instrumented code. + +## Client for the Prometheus HTTP API + +[![code-coverage](http://gocover.io/_badge/github.com/prometheus/client_golang/api/prometheus)](http://gocover.io/github.com/prometheus/client_golang/api/prometheus) [![go-doc](https://godoc.org/github.com/prometheus/client_golang/api/prometheus?status.svg)](https://godoc.org/github.com/prometheus/client_golang/api/prometheus) + +The +[`api/prometheus` directory](https://github.com/prometheus/client_golang/tree/master/api/prometheus) +contains the client for the +[Prometheus HTTP API](http://prometheus.io/docs/querying/api/). It allows you +to write Go applications that query time series data from a Prometheus +server. It is still in alpha stage. + +## Where is `model`, `extraction`, and `text`? + +The `model` package has been moved to +[`prometheus/common/model`](https://github.com/prometheus/common/tree/master/model). + +The `extraction` and `text` packages are now contained in +[`prometheus/common/expfmt`](https://github.com/prometheus/common/tree/master/expfmt). + +## Contributing and community + +See the [contributing guidelines](CONTRIBUTING.md) and the +[Community section](http://prometheus.io/community/) of the homepage. 
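The README above describes the HTTP API client whose implementation is vendored later in this patch (`api/client.go` and `api/prometheus/v1/api.go`). As an illustrative sketch (not itself part of the vendored files), those pieces are typically wired together as shown below; the server address and the `up` query are placeholder assumptions, while `api.NewClient`, `v1.NewAPI`, and `Query` are the functions added by this diff.

```go
package main

import (
	"context"
	"fmt"
	"log"
	"time"

	"github.com/prometheus/client_golang/api"
	v1 "github.com/prometheus/client_golang/api/prometheus/v1"
)

func main() {
	// Build a client against a placeholder Prometheus address (see api.Config in client.go).
	client, err := api.NewClient(api.Config{Address: "http://localhost:9090"})
	if err != nil {
		log.Fatalf("creating client: %v", err)
	}

	// Wrap the client with the v1 bindings and run an instant query ("up" is just an example).
	promAPI := v1.NewAPI(client)
	ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
	defer cancel()

	value, err := promAPI.Query(ctx, "up", time.Now())
	if err != nil {
		log.Fatalf("query failed: %v", err)
	}
	fmt.Println(value)
}
```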
diff --git a/vendor/github.com/prometheus/client_golang/VERSION b/vendor/github.com/prometheus/client_golang/VERSION new file mode 100644 index 0000000..a3df0a6 --- /dev/null +++ b/vendor/github.com/prometheus/client_golang/VERSION @@ -0,0 +1 @@ +0.8.0 diff --git a/vendor/github.com/prometheus/client_golang/api/client.go b/vendor/github.com/prometheus/client_golang/api/client.go new file mode 100644 index 0000000..bf26724 --- /dev/null +++ b/vendor/github.com/prometheus/client_golang/api/client.go @@ -0,0 +1,131 @@ +// Copyright 2015 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// +build go1.7 + +// Package api provides clients for the HTTP APIs. +package api + +import ( + "context" + "io/ioutil" + "net" + "net/http" + "net/url" + "path" + "strings" + "time" +) + +// DefaultRoundTripper is used if no RoundTripper is set in Config. +var DefaultRoundTripper http.RoundTripper = &http.Transport{ + Proxy: http.ProxyFromEnvironment, + Dial: (&net.Dialer{ + Timeout: 30 * time.Second, + KeepAlive: 30 * time.Second, + }).Dial, + TLSHandshakeTimeout: 10 * time.Second, +} + +// Config defines configuration parameters for a new client. +type Config struct { + // The address of the Prometheus to connect to. + Address string + + // RoundTripper is used by the Client to drive HTTP requests. If not + // provided, DefaultRoundTripper will be used. + RoundTripper http.RoundTripper +} + +func (cfg *Config) roundTripper() http.RoundTripper { + if cfg.RoundTripper == nil { + return DefaultRoundTripper + } + return cfg.RoundTripper +} + +// Client is the interface for an API client. +type Client interface { + URL(ep string, args map[string]string) *url.URL + Do(context.Context, *http.Request) (*http.Response, []byte, error) +} + +// NewClient returns a new Client. +// +// It is safe to use the returned Client from multiple goroutines. 
+func NewClient(cfg Config) (Client, error) { + u, err := url.Parse(cfg.Address) + if err != nil { + return nil, err + } + u.Path = strings.TrimRight(u.Path, "/") + + return &httpClient{ + endpoint: u, + client: http.Client{Transport: cfg.roundTripper()}, + }, nil +} + +type httpClient struct { + endpoint *url.URL + client http.Client +} + +func (c *httpClient) URL(ep string, args map[string]string) *url.URL { + p := path.Join(c.endpoint.Path, ep) + + for arg, val := range args { + arg = ":" + arg + p = strings.Replace(p, arg, val, -1) + } + + u := *c.endpoint + u.Path = p + + return &u +} + +func (c *httpClient) Do(ctx context.Context, req *http.Request) (*http.Response, []byte, error) { + if ctx != nil { + req = req.WithContext(ctx) + } + resp, err := c.client.Do(req) + defer func() { + if resp != nil { + resp.Body.Close() + } + }() + + if err != nil { + return nil, nil, err + } + + var body []byte + done := make(chan struct{}) + go func() { + body, err = ioutil.ReadAll(resp.Body) + close(done) + }() + + select { + case <-ctx.Done(): + err = resp.Body.Close() + <-done + if err == nil { + err = ctx.Err() + } + case <-done: + } + + return resp, body, err +} diff --git a/vendor/github.com/prometheus/client_golang/api/client_test.go b/vendor/github.com/prometheus/client_golang/api/client_test.go new file mode 100644 index 0000000..53226d7 --- /dev/null +++ b/vendor/github.com/prometheus/client_golang/api/client_test.go @@ -0,0 +1,115 @@ +// Copyright 2015 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +// +build go1.7 + +package api + +import ( + "net/http" + "net/url" + "testing" +) + +func TestConfig(t *testing.T) { + c := Config{} + if c.roundTripper() != DefaultRoundTripper { + t.Fatalf("expected default roundtripper for nil RoundTripper field") + } +} + +func TestClientURL(t *testing.T) { + tests := []struct { + address string + endpoint string + args map[string]string + expected string + }{ + { + address: "http://localhost:9090", + endpoint: "/test", + expected: "http://localhost:9090/test", + }, + { + address: "http://localhost", + endpoint: "/test", + expected: "http://localhost/test", + }, + { + address: "http://localhost:9090", + endpoint: "test", + expected: "http://localhost:9090/test", + }, + { + address: "http://localhost:9090/prefix", + endpoint: "/test", + expected: "http://localhost:9090/prefix/test", + }, + { + address: "https://localhost:9090/", + endpoint: "/test/", + expected: "https://localhost:9090/test", + }, + { + address: "http://localhost:9090", + endpoint: "/test/:param", + args: map[string]string{ + "param": "content", + }, + expected: "http://localhost:9090/test/content", + }, + { + address: "http://localhost:9090", + endpoint: "/test/:param/more/:param", + args: map[string]string{ + "param": "content", + }, + expected: "http://localhost:9090/test/content/more/content", + }, + { + address: "http://localhost:9090", + endpoint: "/test/:param/more/:foo", + args: map[string]string{ + "param": "content", + "foo": "bar", + }, + expected: "http://localhost:9090/test/content/more/bar", + }, + { + address: "http://localhost:9090", + endpoint: "/test/:param", + args: map[string]string{ + "nonexistant": "content", + }, + expected: "http://localhost:9090/test/:param", + }, + } + + for _, test := range tests { + ep, err := url.Parse(test.address) + if err != nil { + t.Fatal(err) + } + + hclient := &httpClient{ + endpoint: ep, + client: http.Client{Transport: DefaultRoundTripper}, + } + + u := hclient.URL(test.endpoint, test.args) + if u.String() != test.expected { + t.Errorf("unexpected result: got %s, want %s", u, test.expected) + continue + } + } +} diff --git a/vendor/github.com/prometheus/client_golang/api/prometheus/v1/api.go b/vendor/github.com/prometheus/client_golang/api/prometheus/v1/api.go new file mode 100644 index 0000000..734a12e --- /dev/null +++ b/vendor/github.com/prometheus/client_golang/api/prometheus/v1/api.go @@ -0,0 +1,261 @@ +// Copyright 2017 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +// +build go1.7 + +// Package v1 provides bindings to the Prometheus HTTP API v1: +// http://prometheus.io/docs/querying/api/ +package v1 + +import ( + "context" + "encoding/json" + "fmt" + "net/http" + "strconv" + "time" + + "github.com/prometheus/client_golang/api" + "github.com/prometheus/common/model" +) + +const ( + statusAPIError = 422 + + apiPrefix = "/api/v1" + + epQuery = apiPrefix + "/query" + epQueryRange = apiPrefix + "/query_range" + epLabelValues = apiPrefix + "/label/:name/values" + epSeries = apiPrefix + "/series" +) + +// ErrorType models the different API error types. +type ErrorType string + +// Possible values for ErrorType. +const ( + ErrBadData ErrorType = "bad_data" + ErrTimeout = "timeout" + ErrCanceled = "canceled" + ErrExec = "execution" + ErrBadResponse = "bad_response" +) + +// Error is an error returned by the API. +type Error struct { + Type ErrorType + Msg string +} + +func (e *Error) Error() string { + return fmt.Sprintf("%s: %s", e.Type, e.Msg) +} + +// Range represents a sliced time range. +type Range struct { + // The boundaries of the time range. + Start, End time.Time + // The maximum time between two slices within the boundaries. + Step time.Duration +} + +// API provides bindings for Prometheus's v1 API. +type API interface { + // Query performs a query for the given time. + Query(ctx context.Context, query string, ts time.Time) (model.Value, error) + // QueryRange performs a query for the given range. + QueryRange(ctx context.Context, query string, r Range) (model.Value, error) + // LabelValues performs a query for the values of the given label. + LabelValues(ctx context.Context, label string) (model.LabelValues, error) +} + +// queryResult contains result data for a query. +type queryResult struct { + Type model.ValueType `json:"resultType"` + Result interface{} `json:"result"` + + // The decoded value. + v model.Value +} + +func (qr *queryResult) UnmarshalJSON(b []byte) error { + v := struct { + Type model.ValueType `json:"resultType"` + Result json.RawMessage `json:"result"` + }{} + + err := json.Unmarshal(b, &v) + if err != nil { + return err + } + + switch v.Type { + case model.ValScalar: + var sv model.Scalar + err = json.Unmarshal(v.Result, &sv) + qr.v = &sv + + case model.ValVector: + var vv model.Vector + err = json.Unmarshal(v.Result, &vv) + qr.v = vv + + case model.ValMatrix: + var mv model.Matrix + err = json.Unmarshal(v.Result, &mv) + qr.v = mv + + default: + err = fmt.Errorf("unexpected value type %q", v.Type) + } + return err +} + +// NewAPI returns a new API for the client. +// +// It is safe to use the returned API from multiple goroutines. 
+func NewAPI(c api.Client) API { + return &httpAPI{client: apiClient{c}} +} + +type httpAPI struct { + client api.Client +} + +func (h *httpAPI) Query(ctx context.Context, query string, ts time.Time) (model.Value, error) { + u := h.client.URL(epQuery, nil) + q := u.Query() + + q.Set("query", query) + q.Set("time", ts.Format(time.RFC3339Nano)) + + u.RawQuery = q.Encode() + + req, err := http.NewRequest("GET", u.String(), nil) + if err != nil { + return nil, err + } + + _, body, err := h.client.Do(ctx, req) + if err != nil { + return nil, err + } + + var qres queryResult + err = json.Unmarshal(body, &qres) + + return model.Value(qres.v), err +} + +func (h *httpAPI) QueryRange(ctx context.Context, query string, r Range) (model.Value, error) { + u := h.client.URL(epQueryRange, nil) + q := u.Query() + + var ( + start = r.Start.Format(time.RFC3339Nano) + end = r.End.Format(time.RFC3339Nano) + step = strconv.FormatFloat(r.Step.Seconds(), 'f', 3, 64) + ) + + q.Set("query", query) + q.Set("start", start) + q.Set("end", end) + q.Set("step", step) + + u.RawQuery = q.Encode() + + req, err := http.NewRequest("GET", u.String(), nil) + if err != nil { + return nil, err + } + + _, body, err := h.client.Do(ctx, req) + if err != nil { + return nil, err + } + + var qres queryResult + err = json.Unmarshal(body, &qres) + + return model.Value(qres.v), err +} + +func (h *httpAPI) LabelValues(ctx context.Context, label string) (model.LabelValues, error) { + u := h.client.URL(epLabelValues, map[string]string{"name": label}) + req, err := http.NewRequest(http.MethodGet, u.String(), nil) + if err != nil { + return nil, err + } + _, body, err := h.client.Do(ctx, req) + if err != nil { + return nil, err + } + var labelValues model.LabelValues + err = json.Unmarshal(body, &labelValues) + return labelValues, err +} + +// apiClient wraps a regular client and processes successful API responses. +// Successful also includes responses that errored at the API level. 
+type apiClient struct { + api.Client +} + +type apiResponse struct { + Status string `json:"status"` + Data json.RawMessage `json:"data"` + ErrorType ErrorType `json:"errorType"` + Error string `json:"error"` +} + +func (c apiClient) Do(ctx context.Context, req *http.Request) (*http.Response, []byte, error) { + resp, body, err := c.Client.Do(ctx, req) + if err != nil { + return resp, body, err + } + + code := resp.StatusCode + + if code/100 != 2 && code != statusAPIError { + return resp, body, &Error{ + Type: ErrBadResponse, + Msg: fmt.Sprintf("bad response code %d", resp.StatusCode), + } + } + + var result apiResponse + + if err = json.Unmarshal(body, &result); err != nil { + return resp, body, &Error{ + Type: ErrBadResponse, + Msg: err.Error(), + } + } + + if (code == statusAPIError) != (result.Status == "error") { + err = &Error{ + Type: ErrBadResponse, + Msg: "inconsistent body for response code", + } + } + + if code == statusAPIError && result.Status == "error" { + err = &Error{ + Type: result.ErrorType, + Msg: result.Error, + } + } + + return resp, []byte(result.Data), err +} diff --git a/vendor/github.com/prometheus/client_golang/api/prometheus/v1/api_test.go b/vendor/github.com/prometheus/client_golang/api/prometheus/v1/api_test.go new file mode 100644 index 0000000..2c8b1b2 --- /dev/null +++ b/vendor/github.com/prometheus/client_golang/api/prometheus/v1/api_test.go @@ -0,0 +1,381 @@ +// Copyright 2017 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +// +build go1.7 + +package v1 + +import ( + "context" + "encoding/json" + "fmt" + "net/http" + "net/url" + "reflect" + "strings" + "testing" + "time" + + "github.com/prometheus/common/model" +) + +type apiTest struct { + do func() (interface{}, error) + inErr error + inRes interface{} + + reqPath string + reqParam url.Values + reqMethod string + res interface{} + err error +} + +type apiTestClient struct { + *testing.T + curTest apiTest +} + +func (c *apiTestClient) URL(ep string, args map[string]string) *url.URL { + path := ep + for k, v := range args { + path = strings.Replace(path, ":"+k, v, -1) + } + u := &url.URL{ + Host: "test:9090", + Path: path, + } + return u +} + +func (c *apiTestClient) Do(ctx context.Context, req *http.Request) (*http.Response, []byte, error) { + + test := c.curTest + + if req.URL.Path != test.reqPath { + c.Errorf("unexpected request path: want %s, got %s", test.reqPath, req.URL.Path) + } + if req.Method != test.reqMethod { + c.Errorf("unexpected request method: want %s, got %s", test.reqMethod, req.Method) + } + + b, err := json.Marshal(test.inRes) + if err != nil { + c.Fatal(err) + } + + resp := &http.Response{} + if test.inErr != nil { + resp.StatusCode = statusAPIError + } else { + resp.StatusCode = http.StatusOK + } + + return resp, b, test.inErr +} + +func TestAPIs(t *testing.T) { + + testTime := time.Now() + + client := &apiTestClient{T: t} + + queryAPI := &httpAPI{ + client: client, + } + + doQuery := func(q string, ts time.Time) func() (interface{}, error) { + return func() (interface{}, error) { + return queryAPI.Query(context.Background(), q, ts) + } + } + + doQueryRange := func(q string, rng Range) func() (interface{}, error) { + return func() (interface{}, error) { + return queryAPI.QueryRange(context.Background(), q, rng) + } + } + + doLabelValues := func(label string) func() (interface{}, error) { + return func() (interface{}, error) { + return queryAPI.LabelValues(context.Background(), label) + } + } + + queryTests := []apiTest{ + { + do: doQuery("2", testTime), + inRes: &queryResult{ + Type: model.ValScalar, + Result: &model.Scalar{ + Value: 2, + Timestamp: model.TimeFromUnix(testTime.Unix()), + }, + }, + + reqMethod: "GET", + reqPath: "/api/v1/query", + reqParam: url.Values{ + "query": []string{"2"}, + "time": []string{testTime.Format(time.RFC3339Nano)}, + }, + res: &model.Scalar{ + Value: 2, + Timestamp: model.TimeFromUnix(testTime.Unix()), + }, + }, + { + do: doQuery("2", testTime), + inErr: fmt.Errorf("some error"), + + reqMethod: "GET", + reqPath: "/api/v1/query", + reqParam: url.Values{ + "query": []string{"2"}, + "time": []string{testTime.Format(time.RFC3339Nano)}, + }, + err: fmt.Errorf("some error"), + }, + + { + do: doQueryRange("2", Range{ + Start: testTime.Add(-time.Minute), + End: testTime, + Step: time.Minute, + }), + inErr: fmt.Errorf("some error"), + + reqMethod: "GET", + reqPath: "/api/v1/query_range", + reqParam: url.Values{ + "query": []string{"2"}, + "start": []string{testTime.Add(-time.Minute).Format(time.RFC3339Nano)}, + "end": []string{testTime.Format(time.RFC3339Nano)}, + "step": []string{time.Minute.String()}, + }, + err: fmt.Errorf("some error"), + }, + + { + do: doLabelValues("mylabel"), + inRes: []string{"val1", "val2"}, + reqMethod: "GET", + reqPath: "/api/v1/label/mylabel/values", + res: model.LabelValues{"val1", "val2"}, + }, + + { + do: doLabelValues("mylabel"), + inErr: fmt.Errorf("some error"), + reqMethod: "GET", + reqPath: "/api/v1/label/mylabel/values", + err: fmt.Errorf("some error"), + }, + } + + var 
tests []apiTest + tests = append(tests, queryTests...) + + for _, test := range tests { + client.curTest = test + + res, err := test.do() + + if test.err != nil { + if err == nil { + t.Errorf("expected error %q but got none", test.err) + continue + } + if err.Error() != test.err.Error() { + t.Errorf("unexpected error: want %s, got %s", test.err, err) + } + continue + } + if err != nil { + t.Errorf("unexpected error: %s", err) + continue + } + + if !reflect.DeepEqual(res, test.res) { + t.Errorf("unexpected result: want %v, got %v", test.res, res) + } + } +} + +type testClient struct { + *testing.T + + ch chan apiClientTest + req *http.Request +} + +type apiClientTest struct { + code int + response interface{} + expected string + err *Error +} + +func (c *testClient) URL(ep string, args map[string]string) *url.URL { + return nil +} + +func (c *testClient) Do(ctx context.Context, req *http.Request) (*http.Response, []byte, error) { + if ctx == nil { + c.Fatalf("context was not passed down") + } + if req != c.req { + c.Fatalf("request was not passed down") + } + + test := <-c.ch + + var b []byte + var err error + + switch v := test.response.(type) { + case string: + b = []byte(v) + default: + b, err = json.Marshal(v) + if err != nil { + c.Fatal(err) + } + } + + resp := &http.Response{ + StatusCode: test.code, + } + + return resp, b, nil +} + +func TestAPIClientDo(t *testing.T) { + tests := []apiClientTest{ + { + response: &apiResponse{ + Status: "error", + Data: json.RawMessage(`null`), + ErrorType: ErrBadData, + Error: "failed", + }, + err: &Error{ + Type: ErrBadData, + Msg: "failed", + }, + code: statusAPIError, + expected: `null`, + }, + { + response: &apiResponse{ + Status: "error", + Data: json.RawMessage(`"test"`), + ErrorType: ErrTimeout, + Error: "timed out", + }, + err: &Error{ + Type: ErrTimeout, + Msg: "timed out", + }, + code: statusAPIError, + expected: `test`, + }, + { + response: "bad json", + err: &Error{ + Type: ErrBadResponse, + Msg: "bad response code 400", + }, + code: http.StatusBadRequest, + }, + { + response: "bad json", + err: &Error{ + Type: ErrBadResponse, + Msg: "invalid character 'b' looking for beginning of value", + }, + code: statusAPIError, + }, + { + response: &apiResponse{ + Status: "success", + Data: json.RawMessage(`"test"`), + }, + err: &Error{ + Type: ErrBadResponse, + Msg: "inconsistent body for response code", + }, + code: statusAPIError, + }, + { + response: &apiResponse{ + Status: "success", + Data: json.RawMessage(`"test"`), + ErrorType: ErrTimeout, + Error: "timed out", + }, + err: &Error{ + Type: ErrBadResponse, + Msg: "inconsistent body for response code", + }, + code: statusAPIError, + }, + { + response: &apiResponse{ + Status: "error", + Data: json.RawMessage(`"test"`), + ErrorType: ErrTimeout, + Error: "timed out", + }, + err: &Error{ + Type: ErrBadResponse, + Msg: "inconsistent body for response code", + }, + code: http.StatusOK, + }, + } + + tc := &testClient{ + T: t, + ch: make(chan apiClientTest, 1), + req: &http.Request{}, + } + client := &apiClient{tc} + + for _, test := range tests { + + tc.ch <- test + + _, body, err := client.Do(context.Background(), tc.req) + + if test.err != nil { + if err == nil { + t.Errorf("expected error %q but got none", test.err) + continue + } + if test.err.Error() != err.Error() { + t.Errorf("unexpected error: want %q, got %q", test.err, err) + } + continue + } + if err != nil { + t.Errorf("unexpeceted error %s", err) + continue + } + + want, got := test.expected, string(body) + if want != got { + 
t.Errorf("unexpected body: want %q, got %q", want, got) + } + } +} diff --git a/vendor/github.com/prometheus/client_golang/examples/random/main.go b/vendor/github.com/prometheus/client_golang/examples/random/main.go new file mode 100644 index 0000000..eef50d2 --- /dev/null +++ b/vendor/github.com/prometheus/client_golang/examples/random/main.go @@ -0,0 +1,106 @@ +// Copyright 2015 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// A simple example exposing fictional RPC latencies with different types of +// random distributions (uniform, normal, and exponential) as Prometheus +// metrics. +package main + +import ( + "flag" + "log" + "math" + "math/rand" + "net/http" + "time" + + "github.com/prometheus/client_golang/prometheus" + "github.com/prometheus/client_golang/prometheus/promhttp" +) + +var ( + addr = flag.String("listen-address", ":8080", "The address to listen on for HTTP requests.") + uniformDomain = flag.Float64("uniform.domain", 0.0002, "The domain for the uniform distribution.") + normDomain = flag.Float64("normal.domain", 0.0002, "The domain for the normal distribution.") + normMean = flag.Float64("normal.mean", 0.00001, "The mean for the normal distribution.") + oscillationPeriod = flag.Duration("oscillation-period", 10*time.Minute, "The duration of the rate oscillation period.") +) + +var ( + // Create a summary to track fictional interservice RPC latencies for three + // distinct services with different latency distributions. These services are + // differentiated via a "service" label. + rpcDurations = prometheus.NewSummaryVec( + prometheus.SummaryOpts{ + Name: "rpc_durations_seconds", + Help: "RPC latency distributions.", + Objectives: map[float64]float64{0.5: 0.05, 0.9: 0.01, 0.99: 0.001}, + }, + []string{"service"}, + ) + // The same as above, but now as a histogram, and only for the normal + // distribution. The buckets are targeted to the parameters of the + // normal distribution, with 20 buckets centered on the mean, each + // half-sigma wide. + rpcDurationsHistogram = prometheus.NewHistogram(prometheus.HistogramOpts{ + Name: "rpc_durations_histogram_seconds", + Help: "RPC latency distributions.", + Buckets: prometheus.LinearBuckets(*normMean-5**normDomain, .5**normDomain, 20), + }) +) + +func init() { + // Register the summary and the histogram with Prometheus's default registry. + prometheus.MustRegister(rpcDurations) + prometheus.MustRegister(rpcDurationsHistogram) +} + +func main() { + flag.Parse() + + start := time.Now() + + oscillationFactor := func() float64 { + return 2 + math.Sin(math.Sin(2*math.Pi*float64(time.Since(start))/float64(*oscillationPeriod))) + } + + // Periodically record some sample latencies for the three services. 
+ go func() { + for { + v := rand.Float64() * *uniformDomain + rpcDurations.WithLabelValues("uniform").Observe(v) + time.Sleep(time.Duration(100*oscillationFactor()) * time.Millisecond) + } + }() + + go func() { + for { + v := (rand.NormFloat64() * *normDomain) + *normMean + rpcDurations.WithLabelValues("normal").Observe(v) + rpcDurationsHistogram.Observe(v) + time.Sleep(time.Duration(75*oscillationFactor()) * time.Millisecond) + } + }() + + go func() { + for { + v := rand.ExpFloat64() / 1e6 + rpcDurations.WithLabelValues("exponential").Observe(v) + time.Sleep(time.Duration(50*oscillationFactor()) * time.Millisecond) + } + }() + + // Expose the registered metrics via HTTP. + http.Handle("/metrics", promhttp.Handler()) + log.Fatal(http.ListenAndServe(*addr, nil)) +} diff --git a/vendor/github.com/prometheus/client_golang/examples/simple/main.go b/vendor/github.com/prometheus/client_golang/examples/simple/main.go new file mode 100644 index 0000000..1fc2324 --- /dev/null +++ b/vendor/github.com/prometheus/client_golang/examples/simple/main.go @@ -0,0 +1,31 @@ +// Copyright 2015 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// A minimal example of how to include Prometheus instrumentation. +package main + +import ( + "flag" + "log" + "net/http" + + "github.com/prometheus/client_golang/prometheus/promhttp" +) + +var addr = flag.String("listen-address", ":8080", "The address to listen on for HTTP requests.") + +func main() { + flag.Parse() + http.Handle("/metrics", promhttp.Handler()) + log.Fatal(http.ListenAndServe(*addr, nil)) +} diff --git a/vendor/github.com/prometheus/client_golang/prometheus/.gitignore b/vendor/github.com/prometheus/client_golang/prometheus/.gitignore new file mode 100644 index 0000000..3460f03 --- /dev/null +++ b/vendor/github.com/prometheus/client_golang/prometheus/.gitignore @@ -0,0 +1 @@ +command-line-arguments.test diff --git a/vendor/github.com/prometheus/client_golang/prometheus/README.md b/vendor/github.com/prometheus/client_golang/prometheus/README.md new file mode 100644 index 0000000..44986bf --- /dev/null +++ b/vendor/github.com/prometheus/client_golang/prometheus/README.md @@ -0,0 +1 @@ +See [![go-doc](https://godoc.org/github.com/prometheus/client_golang/prometheus?status.svg)](https://godoc.org/github.com/prometheus/client_golang/prometheus). diff --git a/vendor/github.com/prometheus/client_golang/prometheus/benchmark_test.go b/vendor/github.com/prometheus/client_golang/prometheus/benchmark_test.go new file mode 100644 index 0000000..faad39b --- /dev/null +++ b/vendor/github.com/prometheus/client_golang/prometheus/benchmark_test.go @@ -0,0 +1,185 @@ +// Copyright 2014 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package prometheus + +import ( + "sync" + "testing" +) + +func BenchmarkCounterWithLabelValues(b *testing.B) { + m := NewCounterVec( + CounterOpts{ + Name: "benchmark_counter", + Help: "A counter to benchmark it.", + }, + []string{"one", "two", "three"}, + ) + b.ReportAllocs() + b.ResetTimer() + for i := 0; i < b.N; i++ { + m.WithLabelValues("eins", "zwei", "drei").Inc() + } +} + +func BenchmarkCounterWithLabelValuesConcurrent(b *testing.B) { + m := NewCounterVec( + CounterOpts{ + Name: "benchmark_counter", + Help: "A counter to benchmark it.", + }, + []string{"one", "two", "three"}, + ) + b.ReportAllocs() + b.ResetTimer() + wg := sync.WaitGroup{} + for i := 0; i < 10; i++ { + wg.Add(1) + go func() { + for j := 0; j < b.N/10; j++ { + m.WithLabelValues("eins", "zwei", "drei").Inc() + } + wg.Done() + }() + } + wg.Wait() +} + +func BenchmarkCounterWithMappedLabels(b *testing.B) { + m := NewCounterVec( + CounterOpts{ + Name: "benchmark_counter", + Help: "A counter to benchmark it.", + }, + []string{"one", "two", "three"}, + ) + b.ReportAllocs() + b.ResetTimer() + for i := 0; i < b.N; i++ { + m.With(Labels{"two": "zwei", "one": "eins", "three": "drei"}).Inc() + } +} + +func BenchmarkCounterWithPreparedMappedLabels(b *testing.B) { + m := NewCounterVec( + CounterOpts{ + Name: "benchmark_counter", + Help: "A counter to benchmark it.", + }, + []string{"one", "two", "three"}, + ) + b.ReportAllocs() + b.ResetTimer() + labels := Labels{"two": "zwei", "one": "eins", "three": "drei"} + for i := 0; i < b.N; i++ { + m.With(labels).Inc() + } +} + +func BenchmarkCounterNoLabels(b *testing.B) { + m := NewCounter(CounterOpts{ + Name: "benchmark_counter", + Help: "A counter to benchmark it.", + }) + b.ReportAllocs() + b.ResetTimer() + for i := 0; i < b.N; i++ { + m.Inc() + } +} + +func BenchmarkGaugeWithLabelValues(b *testing.B) { + m := NewGaugeVec( + GaugeOpts{ + Name: "benchmark_gauge", + Help: "A gauge to benchmark it.", + }, + []string{"one", "two", "three"}, + ) + b.ReportAllocs() + b.ResetTimer() + for i := 0; i < b.N; i++ { + m.WithLabelValues("eins", "zwei", "drei").Set(3.1415) + } +} + +func BenchmarkGaugeNoLabels(b *testing.B) { + m := NewGauge(GaugeOpts{ + Name: "benchmark_gauge", + Help: "A gauge to benchmark it.", + }) + b.ReportAllocs() + b.ResetTimer() + for i := 0; i < b.N; i++ { + m.Set(3.1415) + } +} + +func BenchmarkSummaryWithLabelValues(b *testing.B) { + m := NewSummaryVec( + SummaryOpts{ + Name: "benchmark_summary", + Help: "A summary to benchmark it.", + Objectives: map[float64]float64{0.5: 0.05, 0.9: 0.01, 0.99: 0.001}, + }, + []string{"one", "two", "three"}, + ) + b.ReportAllocs() + b.ResetTimer() + for i := 0; i < b.N; i++ { + m.WithLabelValues("eins", "zwei", "drei").Observe(3.1415) + } +} + +func BenchmarkSummaryNoLabels(b *testing.B) { + m := NewSummary(SummaryOpts{ + Name: "benchmark_summary", + Help: "A summary to benchmark it.", + Objectives: map[float64]float64{0.5: 0.05, 0.9: 0.01, 0.99: 0.001}, + }, + ) + b.ReportAllocs() + b.ResetTimer() + for i := 0; i < b.N; i++ { + m.Observe(3.1415) + } +} + +func BenchmarkHistogramWithLabelValues(b *testing.B) { + m 
:= NewHistogramVec( + HistogramOpts{ + Name: "benchmark_histogram", + Help: "A histogram to benchmark it.", + }, + []string{"one", "two", "three"}, + ) + b.ReportAllocs() + b.ResetTimer() + for i := 0; i < b.N; i++ { + m.WithLabelValues("eins", "zwei", "drei").Observe(3.1415) + } +} + +func BenchmarkHistogramNoLabels(b *testing.B) { + m := NewHistogram(HistogramOpts{ + Name: "benchmark_histogram", + Help: "A histogram to benchmark it.", + }, + ) + b.ReportAllocs() + b.ResetTimer() + for i := 0; i < b.N; i++ { + m.Observe(3.1415) + } +} diff --git a/vendor/github.com/prometheus/client_golang/prometheus/collector.go b/vendor/github.com/prometheus/client_golang/prometheus/collector.go new file mode 100644 index 0000000..623d3d8 --- /dev/null +++ b/vendor/github.com/prometheus/client_golang/prometheus/collector.go @@ -0,0 +1,75 @@ +// Copyright 2014 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package prometheus + +// Collector is the interface implemented by anything that can be used by +// Prometheus to collect metrics. A Collector has to be registered for +// collection. See Registerer.Register. +// +// The stock metrics provided by this package (Gauge, Counter, Summary, +// Histogram, Untyped) are also Collectors (which only ever collect one metric, +// namely itself). An implementer of Collector may, however, collect multiple +// metrics in a coordinated fashion and/or create metrics on the fly. Examples +// for collectors already implemented in this library are the metric vectors +// (i.e. collection of multiple instances of the same Metric but with different +// label values) like GaugeVec or SummaryVec, and the ExpvarCollector. +type Collector interface { + // Describe sends the super-set of all possible descriptors of metrics + // collected by this Collector to the provided channel and returns once + // the last descriptor has been sent. The sent descriptors fulfill the + // consistency and uniqueness requirements described in the Desc + // documentation. (It is valid if one and the same Collector sends + // duplicate descriptors. Those duplicates are simply ignored. However, + // two different Collectors must not send duplicate descriptors.) This + // method idempotently sends the same descriptors throughout the + // lifetime of the Collector. If a Collector encounters an error while + // executing this method, it must send an invalid descriptor (created + // with NewInvalidDesc) to signal the error to the registry. + Describe(chan<- *Desc) + // Collect is called by the Prometheus registry when collecting + // metrics. The implementation sends each collected metric via the + // provided channel and returns once the last metric has been sent. The + // descriptor of each sent metric is one of those returned by + // Describe. Returned metrics that share the same descriptor must differ + // in their variable label values. This method may be called + // concurrently and must therefore be implemented in a concurrency safe + // way. 
Blocking occurs at the expense of total performance of rendering + // all registered metrics. Ideally, Collector implementations support + // concurrent readers. + Collect(chan<- Metric) +} + +// selfCollector implements Collector for a single Metric so that the Metric +// collects itself. Add it as an anonymous field to a struct that implements +// Metric, and call init with the Metric itself as an argument. +type selfCollector struct { + self Metric +} + +// init provides the selfCollector with a reference to the metric it is supposed +// to collect. It is usually called within the factory function to create a +// metric. See example. +func (c *selfCollector) init(self Metric) { + c.self = self +} + +// Describe implements Collector. +func (c *selfCollector) Describe(ch chan<- *Desc) { + ch <- c.self.Desc() +} + +// Collect implements Collector. +func (c *selfCollector) Collect(ch chan<- Metric) { + ch <- c.self +} diff --git a/vendor/github.com/prometheus/client_golang/prometheus/counter.go b/vendor/github.com/prometheus/client_golang/prometheus/counter.go new file mode 100644 index 0000000..273db5f --- /dev/null +++ b/vendor/github.com/prometheus/client_golang/prometheus/counter.go @@ -0,0 +1,189 @@ +// Copyright 2014 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package prometheus + +import ( + "errors" +) + +// Counter is a Metric that represents a single numerical value that only ever +// goes up. That implies that it cannot be used to count items whose number can +// also go down, e.g. the number of currently running goroutines. Those +// "counters" are represented by Gauges. +// +// A Counter is typically used to count requests served, tasks completed, errors +// occurred, etc. +// +// To create Counter instances, use NewCounter. +type Counter interface { + Metric + Collector + + // Inc increments the counter by 1. Use Add to increment it by arbitrary + // non-negative values. + Inc() + // Add adds the given value to the counter. It panics if the value is < + // 0. + Add(float64) +} + +// CounterOpts is an alias for Opts. See there for doc comments. +type CounterOpts Opts + +// NewCounter creates a new Counter based on the provided CounterOpts. +func NewCounter(opts CounterOpts) Counter { + desc := NewDesc( + BuildFQName(opts.Namespace, opts.Subsystem, opts.Name), + opts.Help, + nil, + opts.ConstLabels, + ) + result := &counter{value: value{desc: desc, valType: CounterValue, labelPairs: desc.constLabelPairs}} + result.init(result) // Init self-collection. + return result +} + +type counter struct { + value +} + +func (c *counter) Add(v float64) { + if v < 0 { + panic(errors.New("counter cannot decrease in value")) + } + c.value.Add(v) +} + +// CounterVec is a Collector that bundles a set of Counters that all share the +// same Desc, but have different values for their variable labels. This is used +// if you want to count the same thing partitioned by various dimensions +// (e.g. number of HTTP requests, partitioned by response code and +// method). 
Create instances with NewCounterVec. +type CounterVec struct { + *metricVec +} + +// NewCounterVec creates a new CounterVec based on the provided CounterOpts and +// partitioned by the given label names. +func NewCounterVec(opts CounterOpts, labelNames []string) *CounterVec { + desc := NewDesc( + BuildFQName(opts.Namespace, opts.Subsystem, opts.Name), + opts.Help, + labelNames, + opts.ConstLabels, + ) + return &CounterVec{ + metricVec: newMetricVec(desc, func(lvs ...string) Metric { + result := &counter{value: value{ + desc: desc, + valType: CounterValue, + labelPairs: makeLabelPairs(desc, lvs), + }} + result.init(result) // Init self-collection. + return result + }), + } +} + +// GetMetricWithLabelValues returns the Counter for the given slice of label +// values (same order as the VariableLabels in Desc). If that combination of +// label values is accessed for the first time, a new Counter is created. +// +// It is possible to call this method without using the returned Counter to only +// create the new Counter but leave it at its starting value 0. See also the +// SummaryVec example. +// +// Keeping the Counter for later use is possible (and should be considered if +// performance is critical), but keep in mind that Reset, DeleteLabelValues and +// Delete can be used to delete the Counter from the CounterVec. In that case, +// the Counter will still exist, but it will not be exported anymore, even if a +// Counter with the same label values is created later. +// +// An error is returned if the number of label values is not the same as the +// number of VariableLabels in Desc. +// +// Note that for more than one label value, this method is prone to mistakes +// caused by an incorrect order of arguments. Consider GetMetricWith(Labels) as +// an alternative to avoid that type of mistake. For higher label numbers, the +// latter has a much more readable (albeit more verbose) syntax, but it comes +// with a performance overhead (for creating and processing the Labels map). +// See also the GaugeVec example. +func (m *CounterVec) GetMetricWithLabelValues(lvs ...string) (Counter, error) { + metric, err := m.metricVec.getMetricWithLabelValues(lvs...) + if metric != nil { + return metric.(Counter), err + } + return nil, err +} + +// GetMetricWith returns the Counter for the given Labels map (the label names +// must match those of the VariableLabels in Desc). If that label map is +// accessed for the first time, a new Counter is created. Implications of +// creating a Counter without using it and keeping the Counter for later use are +// the same as for GetMetricWithLabelValues. +// +// An error is returned if the number and names of the Labels are inconsistent +// with those of the VariableLabels in Desc. +// +// This method is used for the same purpose as +// GetMetricWithLabelValues(...string). See there for pros and cons of the two +// methods. +func (m *CounterVec) GetMetricWith(labels Labels) (Counter, error) { + metric, err := m.metricVec.getMetricWith(labels) + if metric != nil { + return metric.(Counter), err + } + return nil, err +} + +// WithLabelValues works as GetMetricWithLabelValues, but panics where +// GetMetricWithLabelValues would have returned an error. 
By not returning an +// error, WithLabelValues allows shortcuts like +// myVec.WithLabelValues("404", "GET").Add(42) +func (m *CounterVec) WithLabelValues(lvs ...string) Counter { + return m.metricVec.withLabelValues(lvs...).(Counter) +} + +// With works as GetMetricWith, but panics where GetMetricWithLabels would have +// returned an error. By not returning an error, With allows shortcuts like +// myVec.With(Labels{"code": "404", "method": "GET"}).Add(42) +func (m *CounterVec) With(labels Labels) Counter { + return m.metricVec.with(labels).(Counter) +} + +// CounterFunc is a Counter whose value is determined at collect time by calling a +// provided function. +// +// To create CounterFunc instances, use NewCounterFunc. +type CounterFunc interface { + Metric + Collector +} + +// NewCounterFunc creates a new CounterFunc based on the provided +// CounterOpts. The value reported is determined by calling the given function +// from within the Write method. Take into account that metric collection may +// happen concurrently. If that results in concurrent calls to Write, like in +// the case where a CounterFunc is directly registered with Prometheus, the +// provided function must be concurrency-safe. The function should also honor +// the contract for a Counter (values only go up, not down), but compliance will +// not be checked. +func NewCounterFunc(opts CounterOpts, function func() float64) CounterFunc { + return newValueFunc(NewDesc( + BuildFQName(opts.Namespace, opts.Subsystem, opts.Name), + opts.Help, + nil, + opts.ConstLabels, + ), CounterValue, function) +} diff --git a/vendor/github.com/prometheus/client_golang/prometheus/counter_test.go b/vendor/github.com/prometheus/client_golang/prometheus/counter_test.go new file mode 100644 index 0000000..8d5cd0b --- /dev/null +++ b/vendor/github.com/prometheus/client_golang/prometheus/counter_test.go @@ -0,0 +1,114 @@ +// Copyright 2014 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+
+package prometheus
+
+import (
+	"fmt"
+	"math"
+	"testing"
+
+	dto "github.com/prometheus/client_model/go"
+)
+
+func TestCounterAdd(t *testing.T) {
+	counter := NewCounter(CounterOpts{
+		Name:        "test",
+		Help:        "test help",
+		ConstLabels: Labels{"a": "1", "b": "2"},
+	}).(*counter)
+	counter.Inc()
+	if expected, got := 1., math.Float64frombits(counter.valBits); expected != got {
+		t.Errorf("Expected %f, got %f.", expected, got)
+	}
+	counter.Add(42)
+	if expected, got := 43., math.Float64frombits(counter.valBits); expected != got {
+		t.Errorf("Expected %f, got %f.", expected, got)
+	}
+
+	if expected, got := "counter cannot decrease in value", decreaseCounter(counter).Error(); expected != got {
+		t.Errorf("Expected error %q, got %q.", expected, got)
+	}
+
+	m := &dto.Metric{}
+	counter.Write(m)
+
+	if expected, got := `label:<name:"a" value:"1" > label:<name:"b" value:"2" > counter:<value:43 > `, m.String(); expected != got {
+		t.Errorf("expected %q, got %q", expected, got)
+	}
+}
+
+func decreaseCounter(c *counter) (err error) {
+	defer func() {
+		if e := recover(); e != nil {
+			err = e.(error)
+		}
+	}()
+	c.Add(-1)
+	return nil
+}
+
+func TestCounterVecGetMetricWithInvalidLabelValues(t *testing.T) {
+	testCases := []struct {
+		desc   string
+		labels Labels
+	}{
+		{
+			desc:   "non utf8 label value",
+			labels: Labels{"a": "\xFF"},
+		},
+		{
+			desc:   "not enough label values",
+			labels: Labels{},
+		},
+		{
+			desc:   "too many label values",
+			labels: Labels{"a": "1", "b": "2"},
+		},
+	}
+
+	for _, test := range testCases {
+		counterVec := NewCounterVec(CounterOpts{
+			Name: "test",
+		}, []string{"a"})
+
+		labelValues := make([]string, len(test.labels))
+		for _, val := range test.labels {
+			labelValues = append(labelValues, val)
+		}
+
+		expectPanic(t, func() {
+			counterVec.WithLabelValues(labelValues...)
+		}, fmt.Sprintf("WithLabelValues: expected panic because: %s", test.desc))
+		expectPanic(t, func() {
+			counterVec.With(test.labels)
+		}, fmt.Sprintf("WithLabelValues: expected panic because: %s", test.desc))
+
+		if _, err := counterVec.GetMetricWithLabelValues(labelValues...); err == nil {
+			t.Errorf("GetMetricWithLabelValues: expected error because: %s", test.desc)
+		}
+		if _, err := counterVec.GetMetricWith(test.labels); err == nil {
+			t.Errorf("GetMetricWith: expected error because: %s", test.desc)
+		}
+	}
+}
+
+func expectPanic(t *testing.T, op func(), errorMsg string) {
+	defer func() {
+		if err := recover(); err == nil {
+			t.Error(errorMsg)
+		}
+	}()
+
+	op()
+}
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/desc.go b/vendor/github.com/prometheus/client_golang/prometheus/desc.go
new file mode 100644
index 0000000..920abc9
--- /dev/null
+++ b/vendor/github.com/prometheus/client_golang/prometheus/desc.go
@@ -0,0 +1,189 @@
+// Copyright 2016 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+ +package prometheus + +import ( + "errors" + "fmt" + "sort" + "strings" + + "github.com/golang/protobuf/proto" + "github.com/prometheus/common/model" + + dto "github.com/prometheus/client_model/go" +) + +// Desc is the descriptor used by every Prometheus Metric. It is essentially +// the immutable meta-data of a Metric. The normal Metric implementations +// included in this package manage their Desc under the hood. Users only have to +// deal with Desc if they use advanced features like the ExpvarCollector or +// custom Collectors and Metrics. +// +// Descriptors registered with the same registry have to fulfill certain +// consistency and uniqueness criteria if they share the same fully-qualified +// name: They must have the same help string and the same label names (aka label +// dimensions) in each, constLabels and variableLabels, but they must differ in +// the values of the constLabels. +// +// Descriptors that share the same fully-qualified names and the same label +// values of their constLabels are considered equal. +// +// Use NewDesc to create new Desc instances. +type Desc struct { + // fqName has been built from Namespace, Subsystem, and Name. + fqName string + // help provides some helpful information about this metric. + help string + // constLabelPairs contains precalculated DTO label pairs based on + // the constant labels. + constLabelPairs []*dto.LabelPair + // VariableLabels contains names of labels for which the metric + // maintains variable values. + variableLabels []string + // id is a hash of the values of the ConstLabels and fqName. This + // must be unique among all registered descriptors and can therefore be + // used as an identifier of the descriptor. + id uint64 + // dimHash is a hash of the label names (preset and variable) and the + // Help string. Each Desc with the same fqName must have the same + // dimHash. + dimHash uint64 + // err is an error that occurred during construction. It is reported on + // registration time. + err error +} + +// NewDesc allocates and initializes a new Desc. Errors are recorded in the Desc +// and will be reported on registration time. variableLabels and constLabels can +// be nil if no such labels should be set. fqName and help must not be empty. +// +// variableLabels only contain the label names. Their label values are variable +// and therefore not part of the Desc. (They are managed within the Metric.) +// +// For constLabels, the label values are constant. Therefore, they are fully +// specified in the Desc. See the Opts documentation for the implications of +// constant labels. +func NewDesc(fqName, help string, variableLabels []string, constLabels Labels) *Desc { + d := &Desc{ + fqName: fqName, + help: help, + variableLabels: variableLabels, + } + if help == "" { + d.err = errors.New("empty help string") + return d + } + if !model.IsValidMetricName(model.LabelValue(fqName)) { + d.err = fmt.Errorf("%q is not a valid metric name", fqName) + return d + } + // labelValues contains the label values of const labels (in order of + // their sorted label names) plus the fqName (at position 0). + labelValues := make([]string, 1, len(constLabels)+1) + labelValues[0] = fqName + labelNames := make([]string, 0, len(constLabels)+len(variableLabels)) + labelNameSet := map[string]struct{}{} + // First add only the const label names and sort them... 
+ for labelName := range constLabels { + if !checkLabelName(labelName) { + d.err = fmt.Errorf("%q is not a valid label name", labelName) + return d + } + labelNames = append(labelNames, labelName) + labelNameSet[labelName] = struct{}{} + } + sort.Strings(labelNames) + // ... so that we can now add const label values in the order of their names. + for _, labelName := range labelNames { + labelValues = append(labelValues, constLabels[labelName]) + } + // Validate the const label values. They can't have a wrong cardinality, so + // use in len(labelValues) as expectedNumberOfValues. + if err := validateLabelValues(labelValues, len(labelValues)); err != nil { + d.err = err + return d + } + // Now add the variable label names, but prefix them with something that + // cannot be in a regular label name. That prevents matching the label + // dimension with a different mix between preset and variable labels. + for _, labelName := range variableLabels { + if !checkLabelName(labelName) { + d.err = fmt.Errorf("%q is not a valid label name", labelName) + return d + } + labelNames = append(labelNames, "$"+labelName) + labelNameSet[labelName] = struct{}{} + } + if len(labelNames) != len(labelNameSet) { + d.err = errors.New("duplicate label names") + return d + } + + vh := hashNew() + for _, val := range labelValues { + vh = hashAdd(vh, val) + vh = hashAddByte(vh, separatorByte) + } + d.id = vh + // Sort labelNames so that order doesn't matter for the hash. + sort.Strings(labelNames) + // Now hash together (in this order) the help string and the sorted + // label names. + lh := hashNew() + lh = hashAdd(lh, help) + lh = hashAddByte(lh, separatorByte) + for _, labelName := range labelNames { + lh = hashAdd(lh, labelName) + lh = hashAddByte(lh, separatorByte) + } + d.dimHash = lh + + d.constLabelPairs = make([]*dto.LabelPair, 0, len(constLabels)) + for n, v := range constLabels { + d.constLabelPairs = append(d.constLabelPairs, &dto.LabelPair{ + Name: proto.String(n), + Value: proto.String(v), + }) + } + sort.Sort(LabelPairSorter(d.constLabelPairs)) + return d +} + +// NewInvalidDesc returns an invalid descriptor, i.e. a descriptor with the +// provided error set. If a collector returning such a descriptor is registered, +// registration will fail with the provided error. NewInvalidDesc can be used by +// a Collector to signal inability to describe itself. 
+func NewInvalidDesc(err error) *Desc { + return &Desc{ + err: err, + } +} + +func (d *Desc) String() string { + lpStrings := make([]string, 0, len(d.constLabelPairs)) + for _, lp := range d.constLabelPairs { + lpStrings = append( + lpStrings, + fmt.Sprintf("%s=%q", lp.GetName(), lp.GetValue()), + ) + } + return fmt.Sprintf( + "Desc{fqName: %q, help: %q, constLabels: {%s}, variableLabels: %v}", + d.fqName, + d.help, + strings.Join(lpStrings, ","), + d.variableLabels, + ) +} diff --git a/vendor/github.com/prometheus/client_golang/prometheus/desc_test.go b/vendor/github.com/prometheus/client_golang/prometheus/desc_test.go new file mode 100644 index 0000000..2f96265 --- /dev/null +++ b/vendor/github.com/prometheus/client_golang/prometheus/desc_test.go @@ -0,0 +1,17 @@ +package prometheus + +import ( + "testing" +) + +func TestNewDescInvalidLabelValues(t *testing.T) { + desc := NewDesc( + "sample_label", + "sample label", + nil, + Labels{"a": "\xFF"}, + ) + if desc.err == nil { + t.Errorf("NewDesc: expected error because: %s", desc.err) + } +} diff --git a/vendor/github.com/prometheus/client_golang/prometheus/doc.go b/vendor/github.com/prometheus/client_golang/prometheus/doc.go new file mode 100644 index 0000000..36ef155 --- /dev/null +++ b/vendor/github.com/prometheus/client_golang/prometheus/doc.go @@ -0,0 +1,186 @@ +// Copyright 2014 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package prometheus provides metrics primitives to instrument code for +// monitoring. It also offers a registry for metrics. Sub-packages allow to +// expose the registered metrics via HTTP (package promhttp) or push them to a +// Pushgateway (package push). +// +// All exported functions and methods are safe to be used concurrently unless +// specified otherwise. +// +// A Basic Example +// +// As a starting point, a very basic usage example: +// +// package main +// +// import ( +// "log" +// "net/http" +// +// "github.com/prometheus/client_golang/prometheus" +// "github.com/prometheus/client_golang/prometheus/promhttp" +// ) +// +// var ( +// cpuTemp = prometheus.NewGauge(prometheus.GaugeOpts{ +// Name: "cpu_temperature_celsius", +// Help: "Current temperature of the CPU.", +// }) +// hdFailures = prometheus.NewCounterVec( +// prometheus.CounterOpts{ +// Name: "hd_errors_total", +// Help: "Number of hard-disk errors.", +// }, +// []string{"device"}, +// ) +// ) +// +// func init() { +// // Metrics have to be registered to be exposed: +// prometheus.MustRegister(cpuTemp) +// prometheus.MustRegister(hdFailures) +// } +// +// func main() { +// cpuTemp.Set(65.3) +// hdFailures.With(prometheus.Labels{"device":"/dev/sda"}).Inc() +// +// // The Handler function provides a default handler to expose metrics +// // via an HTTP server. "/metrics" is the usual endpoint for that. 
+// http.Handle("/metrics", promhttp.Handler()) +// log.Fatal(http.ListenAndServe(":8080", nil)) +// } +// +// +// This is a complete program that exports two metrics, a Gauge and a Counter, +// the latter with a label attached to turn it into a (one-dimensional) vector. +// +// Metrics +// +// The number of exported identifiers in this package might appear a bit +// overwhelming. However, in addition to the basic plumbing shown in the example +// above, you only need to understand the different metric types and their +// vector versions for basic usage. +// +// Above, you have already touched the Counter and the Gauge. There are two more +// advanced metric types: the Summary and Histogram. A more thorough description +// of those four metric types can be found in the Prometheus docs: +// https://prometheus.io/docs/concepts/metric_types/ +// +// A fifth "type" of metric is Untyped. It behaves like a Gauge, but signals the +// Prometheus server not to assume anything about its type. +// +// In addition to the fundamental metric types Gauge, Counter, Summary, +// Histogram, and Untyped, a very important part of the Prometheus data model is +// the partitioning of samples along dimensions called labels, which results in +// metric vectors. The fundamental types are GaugeVec, CounterVec, SummaryVec, +// HistogramVec, and UntypedVec. +// +// While only the fundamental metric types implement the Metric interface, both +// the metrics and their vector versions implement the Collector interface. A +// Collector manages the collection of a number of Metrics, but for convenience, +// a Metric can also “collect itself”. Note that Gauge, Counter, Summary, +// Histogram, and Untyped are interfaces themselves while GaugeVec, CounterVec, +// SummaryVec, HistogramVec, and UntypedVec are not. +// +// To create instances of Metrics and their vector versions, you need a suitable +// …Opts struct, i.e. GaugeOpts, CounterOpts, SummaryOpts, HistogramOpts, or +// UntypedOpts. +// +// Custom Collectors and constant Metrics +// +// While you could create your own implementations of Metric, most likely you +// will only ever implement the Collector interface on your own. At a first +// glance, a custom Collector seems handy to bundle Metrics for common +// registration (with the prime example of the different metric vectors above, +// which bundle all the metrics of the same name but with different labels). +// +// There is a more involved use case, too: If you already have metrics +// available, created outside of the Prometheus context, you don't need the +// interface of the various Metric types. You essentially want to mirror the +// existing numbers into Prometheus Metrics during collection. An own +// implementation of the Collector interface is perfect for that. You can create +// Metric instances “on the fly” using NewConstMetric, NewConstHistogram, and +// NewConstSummary (and their respective Must… versions). That will happen in +// the Collect method. The Describe method has to return separate Desc +// instances, representative of the “throw-away” metrics to be created later. +// NewDesc comes in handy to create those Desc instances. +// +// The Collector example illustrates the use case. You can also look at the +// source code of the processCollector (mirroring process metrics), the +// goCollector (mirroring Go metrics), or the expvarCollector (mirroring expvar +// metrics) as examples that are used in this package itself. 
+// +// If you just need to call a function to get a single float value to collect as +// a metric, GaugeFunc, CounterFunc, or UntypedFunc might be interesting +// shortcuts. +// +// Advanced Uses of the Registry +// +// While MustRegister is the by far most common way of registering a Collector, +// sometimes you might want to handle the errors the registration might cause. +// As suggested by the name, MustRegister panics if an error occurs. With the +// Register function, the error is returned and can be handled. +// +// An error is returned if the registered Collector is incompatible or +// inconsistent with already registered metrics. The registry aims for +// consistency of the collected metrics according to the Prometheus data model. +// Inconsistencies are ideally detected at registration time, not at collect +// time. The former will usually be detected at start-up time of a program, +// while the latter will only happen at scrape time, possibly not even on the +// first scrape if the inconsistency only becomes relevant later. That is the +// main reason why a Collector and a Metric have to describe themselves to the +// registry. +// +// So far, everything we did operated on the so-called default registry, as it +// can be found in the global DefaultRegisterer variable. With NewRegistry, you +// can create a custom registry, or you can even implement the Registerer or +// Gatherer interfaces yourself. The methods Register and Unregister work in the +// same way on a custom registry as the global functions Register and Unregister +// on the default registry. +// +// There are a number of uses for custom registries: You can use registries with +// special properties, see NewPedanticRegistry. You can avoid global state, as +// it is imposed by the DefaultRegisterer. You can use multiple registries at +// the same time to expose different metrics in different ways. You can use +// separate registries for testing purposes. +// +// Also note that the DefaultRegisterer comes registered with a Collector for Go +// runtime metrics (via NewGoCollector) and a Collector for process metrics (via +// NewProcessCollector). With a custom registry, you are in control and decide +// yourself about the Collectors to register. +// +// HTTP Exposition +// +// The Registry implements the Gatherer interface. The caller of the Gather +// method can then expose the gathered metrics in some way. Usually, the metrics +// are served via HTTP on the /metrics endpoint. That's happening in the example +// above. The tools to expose metrics via HTTP are in the promhttp sub-package. +// (The top-level functions in the prometheus package are deprecated.) +// +// Pushing to the Pushgateway +// +// Function for pushing to the Pushgateway can be found in the push sub-package. +// +// Graphite Bridge +// +// Functions and examples to push metrics from a Gatherer to Graphite can be +// found in the graphite sub-package. +// +// Other Means of Exposition +// +// More ways of exposing metrics can easily be added by following the approaches +// of the existing implementations. 
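The package documentation above describes custom registries ("Advanced Uses of the Registry") and HTTP exposition via the promhttp sub-package in prose only. As a minimal sketch tying those two paragraphs together (this is an illustration, not part of the vendored file; the metric name "example_queue_depth" and the ":8080" listen address are made up):

package main

import (
	"log"
	"net/http"

	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/client_golang/prometheus/promhttp"
)

func main() {
	// Use a custom registry instead of the global DefaultRegisterer.
	reg := prometheus.NewRegistry()

	// An illustrative metric; any Collector could be registered here.
	queueDepth := prometheus.NewGauge(prometheus.GaugeOpts{
		Name: "example_queue_depth",
		Help: "Current number of items waiting in the example queue.",
	})

	// Register (rather than MustRegister) returns the error so it can be handled.
	if err := reg.Register(queueDepth); err != nil {
		log.Fatal(err)
	}
	queueDepth.Set(7)

	// Expose only the metrics gathered by this registry via promhttp.
	http.Handle("/metrics", promhttp.HandlerFor(reg, promhttp.HandlerOpts{}))
	log.Fatal(http.ListenAndServe(":8080", nil))
}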
+package prometheus diff --git a/vendor/github.com/prometheus/client_golang/prometheus/example_clustermanager_test.go b/vendor/github.com/prometheus/client_golang/prometheus/example_clustermanager_test.go new file mode 100644 index 0000000..260c1b5 --- /dev/null +++ b/vendor/github.com/prometheus/client_golang/prometheus/example_clustermanager_test.go @@ -0,0 +1,118 @@ +// Copyright 2014 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package prometheus_test + +import "github.com/prometheus/client_golang/prometheus" + +// ClusterManager is an example for a system that might have been built without +// Prometheus in mind. It models a central manager of jobs running in a +// cluster. To turn it into something that collects Prometheus metrics, we +// simply add the two methods required for the Collector interface. +// +// An additional challenge is that multiple instances of the ClusterManager are +// run within the same binary, each in charge of a different zone. We need to +// make use of ConstLabels to be able to register each ClusterManager instance +// with Prometheus. +type ClusterManager struct { + Zone string + OOMCountDesc *prometheus.Desc + RAMUsageDesc *prometheus.Desc + // ... many more fields +} + +// ReallyExpensiveAssessmentOfTheSystemState is a mock for the data gathering a +// real cluster manager would have to do. Since it may actually be really +// expensive, it must only be called once per collection. This implementation, +// obviously, only returns some made-up data. +func (c *ClusterManager) ReallyExpensiveAssessmentOfTheSystemState() ( + oomCountByHost map[string]int, ramUsageByHost map[string]float64, +) { + // Just example fake data. + oomCountByHost = map[string]int{ + "foo.example.org": 42, + "bar.example.org": 2001, + } + ramUsageByHost = map[string]float64{ + "foo.example.org": 6.023e23, + "bar.example.org": 3.14, + } + return +} + +// Describe simply sends the two Descs in the struct to the channel. +func (c *ClusterManager) Describe(ch chan<- *prometheus.Desc) { + ch <- c.OOMCountDesc + ch <- c.RAMUsageDesc +} + +// Collect first triggers the ReallyExpensiveAssessmentOfTheSystemState. Then it +// creates constant metrics for each host on the fly based on the returned data. +// +// Note that Collect could be called concurrently, so we depend on +// ReallyExpensiveAssessmentOfTheSystemState to be concurrency-safe. +func (c *ClusterManager) Collect(ch chan<- prometheus.Metric) { + oomCountByHost, ramUsageByHost := c.ReallyExpensiveAssessmentOfTheSystemState() + for host, oomCount := range oomCountByHost { + ch <- prometheus.MustNewConstMetric( + c.OOMCountDesc, + prometheus.CounterValue, + float64(oomCount), + host, + ) + } + for host, ramUsage := range ramUsageByHost { + ch <- prometheus.MustNewConstMetric( + c.RAMUsageDesc, + prometheus.GaugeValue, + ramUsage, + host, + ) + } +} + +// NewClusterManager creates the two Descs OOMCountDesc and RAMUsageDesc. Note +// that the zone is set as a ConstLabel. 
(It's different in each instance of the +// ClusterManager, but constant over the lifetime of an instance.) Then there is +// a variable label "host", since we want to partition the collected metrics by +// host. Since all Descs created in this way are consistent across instances, +// with a guaranteed distinction by the "zone" label, we can register different +// ClusterManager instances with the same registry. +func NewClusterManager(zone string) *ClusterManager { + return &ClusterManager{ + Zone: zone, + OOMCountDesc: prometheus.NewDesc( + "clustermanager_oom_crashes_total", + "Number of OOM crashes.", + []string{"host"}, + prometheus.Labels{"zone": zone}, + ), + RAMUsageDesc: prometheus.NewDesc( + "clustermanager_ram_usage_bytes", + "RAM usage as reported to the cluster manager.", + []string{"host"}, + prometheus.Labels{"zone": zone}, + ), + } +} + +func ExampleCollector() { + workerDB := NewClusterManager("db") + workerCA := NewClusterManager("ca") + + // Since we are dealing with custom Collector implementations, it might + // be a good idea to try it out with a pedantic registry. + reg := prometheus.NewPedanticRegistry() + reg.MustRegister(workerDB) + reg.MustRegister(workerCA) +} diff --git a/vendor/github.com/prometheus/client_golang/prometheus/example_timer_complex_test.go b/vendor/github.com/prometheus/client_golang/prometheus/example_timer_complex_test.go new file mode 100644 index 0000000..c5e7de5 --- /dev/null +++ b/vendor/github.com/prometheus/client_golang/prometheus/example_timer_complex_test.go @@ -0,0 +1,71 @@ +// Copyright 2014 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package prometheus_test + +import ( + "net/http" + + "github.com/prometheus/client_golang/prometheus" +) + +var ( + // apiRequestDuration tracks the duration separate for each HTTP status + // class (1xx, 2xx, ...). This creates a fair amount of time series on + // the Prometheus server. Usually, you would track the duration of + // serving HTTP request without partitioning by outcome. Do something + // like this only if needed. Also note how only status classes are + // tracked, not every single status code. The latter would create an + // even larger amount of time series. Request counters partitioned by + // status code are usually OK as each counter only creates one time + // series. Histograms are way more expensive, so partition with care and + // only where you really need separate latency tracking. Partitioning by + // status class is only an example. In concrete cases, other partitions + // might make more sense. 
+ apiRequestDuration = prometheus.NewHistogramVec( + prometheus.HistogramOpts{ + Name: "api_request_duration_seconds", + Help: "Histogram for the request duration of the public API, partitioned by status class.", + Buckets: prometheus.ExponentialBuckets(0.1, 1.5, 5), + }, + []string{"status_class"}, + ) +) + +func handler(w http.ResponseWriter, r *http.Request) { + status := http.StatusOK + // The ObserverFunc gets called by the deferred ObserveDuration and + // decides which Histogram's Observe method is called. + timer := prometheus.NewTimer(prometheus.ObserverFunc(func(v float64) { + switch { + case status >= 500: // Server error. + apiRequestDuration.WithLabelValues("5xx").Observe(v) + case status >= 400: // Client error. + apiRequestDuration.WithLabelValues("4xx").Observe(v) + case status >= 300: // Redirection. + apiRequestDuration.WithLabelValues("3xx").Observe(v) + case status >= 200: // Success. + apiRequestDuration.WithLabelValues("2xx").Observe(v) + default: // Informational. + apiRequestDuration.WithLabelValues("1xx").Observe(v) + } + })) + defer timer.ObserveDuration() + + // Handle the request. Set status accordingly. + // ... +} + +func ExampleTimer_complex() { + http.HandleFunc("/api", handler) +} diff --git a/vendor/github.com/prometheus/client_golang/prometheus/example_timer_gauge_test.go b/vendor/github.com/prometheus/client_golang/prometheus/example_timer_gauge_test.go new file mode 100644 index 0000000..7184a0d --- /dev/null +++ b/vendor/github.com/prometheus/client_golang/prometheus/example_timer_gauge_test.go @@ -0,0 +1,48 @@ +// Copyright 2014 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package prometheus_test + +import ( + "os" + + "github.com/prometheus/client_golang/prometheus" +) + +var ( + // If a function is called rarely (i.e. not more often than scrapes + // happen) or ideally only once (like in a batch job), it can make sense + // to use a Gauge for timing the function call. For timing a batch job + // and pushing the result to a Pushgateway, see also the comprehensive + // example in the push package. + funcDuration = prometheus.NewGauge(prometheus.GaugeOpts{ + Name: "example_function_duration_seconds", + Help: "Duration of the last call of an example function.", + }) +) + +func run() error { + // The Set method of the Gauge is used to observe the duration. + timer := prometheus.NewTimer(prometheus.ObserverFunc(funcDuration.Set)) + defer timer.ObserveDuration() + + // Do something. Return errors as encountered. The use of 'defer' above + // makes sure the function is still timed properly. 
+ return nil +} + +func ExampleTimer_gauge() { + if err := run(); err != nil { + os.Exit(1) + } +} diff --git a/vendor/github.com/prometheus/client_golang/prometheus/example_timer_test.go b/vendor/github.com/prometheus/client_golang/prometheus/example_timer_test.go new file mode 100644 index 0000000..bd86bb4 --- /dev/null +++ b/vendor/github.com/prometheus/client_golang/prometheus/example_timer_test.go @@ -0,0 +1,40 @@ +// Copyright 2014 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package prometheus_test + +import ( + "math/rand" + "time" + + "github.com/prometheus/client_golang/prometheus" +) + +var ( + requestDuration = prometheus.NewHistogram(prometheus.HistogramOpts{ + Name: "example_request_duration_seconds", + Help: "Histogram for the runtime of a simple example function.", + Buckets: prometheus.LinearBuckets(0.01, 0.01, 10), + }) +) + +func ExampleTimer() { + // timer times this example function. It uses a Histogram, but a Summary + // would also work, as both implement Observer. Check out + // https://prometheus.io/docs/practices/histograms/ for differences. + timer := prometheus.NewTimer(requestDuration) + defer timer.ObserveDuration() + + // Do something here that takes time. + time.Sleep(time.Duration(rand.NormFloat64()*10000+50000) * time.Microsecond) +} diff --git a/vendor/github.com/prometheus/client_golang/prometheus/examples_test.go b/vendor/github.com/prometheus/client_golang/prometheus/examples_test.go new file mode 100644 index 0000000..45f6065 --- /dev/null +++ b/vendor/github.com/prometheus/client_golang/prometheus/examples_test.go @@ -0,0 +1,754 @@ +// Copyright 2014 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package prometheus_test + +import ( + "bytes" + "fmt" + "math" + "net/http" + "runtime" + "sort" + "strings" + + dto "github.com/prometheus/client_model/go" + "github.com/prometheus/common/expfmt" + + "github.com/golang/protobuf/proto" + + "github.com/prometheus/client_golang/prometheus" +) + +func ExampleGauge() { + opsQueued := prometheus.NewGauge(prometheus.GaugeOpts{ + Namespace: "our_company", + Subsystem: "blob_storage", + Name: "ops_queued", + Help: "Number of blob storage operations waiting to be processed.", + }) + prometheus.MustRegister(opsQueued) + + // 10 operations queued by the goroutine managing incoming requests. + opsQueued.Add(10) + // A worker goroutine has picked up a waiting operation. + opsQueued.Dec() + // And once more... 
+ opsQueued.Dec() +} + +func ExampleGaugeVec() { + opsQueued := prometheus.NewGaugeVec( + prometheus.GaugeOpts{ + Namespace: "our_company", + Subsystem: "blob_storage", + Name: "ops_queued", + Help: "Number of blob storage operations waiting to be processed, partitioned by user and type.", + }, + []string{ + // Which user has requested the operation? + "user", + // Of what type is the operation? + "type", + }, + ) + prometheus.MustRegister(opsQueued) + + // Increase a value using compact (but order-sensitive!) WithLabelValues(). + opsQueued.WithLabelValues("bob", "put").Add(4) + // Increase a value with a map using WithLabels. More verbose, but order + // doesn't matter anymore. + opsQueued.With(prometheus.Labels{"type": "delete", "user": "alice"}).Inc() +} + +func ExampleGaugeFunc() { + if err := prometheus.Register(prometheus.NewGaugeFunc( + prometheus.GaugeOpts{ + Subsystem: "runtime", + Name: "goroutines_count", + Help: "Number of goroutines that currently exist.", + }, + func() float64 { return float64(runtime.NumGoroutine()) }, + )); err == nil { + fmt.Println("GaugeFunc 'goroutines_count' registered.") + } + // Note that the count of goroutines is a gauge (and not a counter) as + // it can go up and down. + + // Output: + // GaugeFunc 'goroutines_count' registered. +} + +func ExampleCounter() { + pushCounter := prometheus.NewCounter(prometheus.CounterOpts{ + Name: "repository_pushes", // Note: No help string... + }) + err := prometheus.Register(pushCounter) // ... so this will return an error. + if err != nil { + fmt.Println("Push counter couldn't be registered, no counting will happen:", err) + return + } + + // Try it once more, this time with a help string. + pushCounter = prometheus.NewCounter(prometheus.CounterOpts{ + Name: "repository_pushes", + Help: "Number of pushes to external repository.", + }) + err = prometheus.Register(pushCounter) + if err != nil { + fmt.Println("Push counter couldn't be registered AGAIN, no counting will happen:", err) + return + } + + pushComplete := make(chan struct{}) + // TODO: Start a goroutine that performs repository pushes and reports + // each completion via the channel. + for range pushComplete { + pushCounter.Inc() + } + // Output: + // Push counter couldn't be registered, no counting will happen: descriptor Desc{fqName: "repository_pushes", help: "", constLabels: {}, variableLabels: []} is invalid: empty help string +} + +func ExampleCounterVec() { + httpReqs := prometheus.NewCounterVec( + prometheus.CounterOpts{ + Name: "http_requests_total", + Help: "How many HTTP requests processed, partitioned by status code and HTTP method.", + }, + []string{"code", "method"}, + ) + prometheus.MustRegister(httpReqs) + + httpReqs.WithLabelValues("404", "POST").Add(42) + + // If you have to access the same set of labels very frequently, it + // might be good to retrieve the metric only once and keep a handle to + // it. But beware of deletion of that metric, see below! + m := httpReqs.WithLabelValues("200", "GET") + for i := 0; i < 1000000; i++ { + m.Inc() + } + // Delete a metric from the vector. If you have previously kept a handle + // to that metric (as above), future updates via that handle will go + // unseen (even if you re-create a metric with the same label set + // later). + httpReqs.DeleteLabelValues("200", "GET") + // Same thing with the more verbose Labels syntax. 
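+ // (Editorial note, hedged: Delete and DeleteLabelValues report via their
+ // bool return value whether a metric was actually removed. The call below
+ // returns false, since the metric was already deleted above, e.g.
+ //
+ //      if !httpReqs.Delete(prometheus.Labels{"method": "GET", "code": "200"}) {
+ //              // nothing left to delete
+ //      }
+ // )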
+ httpReqs.Delete(prometheus.Labels{"method": "GET", "code": "200"}) +} + +func ExampleInstrumentHandler() { + // Handle the "/doc" endpoint with the standard http.FileServer handler. + // By wrapping the handler with InstrumentHandler, request count, + // request and response sizes, and request latency are automatically + // exported to Prometheus, partitioned by HTTP status code and method + // and by the handler name (here "fileserver"). + http.Handle("/doc", prometheus.InstrumentHandler( + "fileserver", http.FileServer(http.Dir("/usr/share/doc")), + )) + // The Prometheus handler still has to be registered to handle the + // "/metrics" endpoint. The handler returned by prometheus.Handler() is + // already instrumented - with "prometheus" as the handler name. In this + // example, we want the handler name to be "metrics", so we instrument + // the uninstrumented Prometheus handler ourselves. + http.Handle("/metrics", prometheus.InstrumentHandler( + "metrics", prometheus.UninstrumentedHandler(), + )) +} + +func ExampleLabelPairSorter() { + labelPairs := []*dto.LabelPair{ + {Name: proto.String("status"), Value: proto.String("404")}, + {Name: proto.String("method"), Value: proto.String("get")}, + } + + sort.Sort(prometheus.LabelPairSorter(labelPairs)) + + fmt.Println(labelPairs) + // Output: + // [name:"method" value:"get" name:"status" value:"404" ] +} + +func ExampleRegister() { + // Imagine you have a worker pool and want to count the tasks completed. + taskCounter := prometheus.NewCounter(prometheus.CounterOpts{ + Subsystem: "worker_pool", + Name: "completed_tasks_total", + Help: "Total number of tasks completed.", + }) + // This will register fine. + if err := prometheus.Register(taskCounter); err != nil { + fmt.Println(err) + } else { + fmt.Println("taskCounter registered.") + } + // Don't forget to tell the HTTP server about the Prometheus handler. + // (In a real program, you still need to start the HTTP server...) + http.Handle("/metrics", prometheus.Handler()) + + // Now you can start workers and give every one of them a pointer to + // taskCounter and let it increment it whenever it completes a task. + taskCounter.Inc() // This has to happen somewhere in the worker code. + + // But wait, you want to see how individual workers perform. So you need + // a vector of counters, with one element for each worker. + taskCounterVec := prometheus.NewCounterVec( + prometheus.CounterOpts{ + Subsystem: "worker_pool", + Name: "completed_tasks_total", + Help: "Total number of tasks completed.", + }, + []string{"worker_id"}, + ) + + // Registering will fail because we already have a metric of that name. + if err := prometheus.Register(taskCounterVec); err != nil { + fmt.Println("taskCounterVec not registered:", err) + } else { + fmt.Println("taskCounterVec registered.") + } + + // To fix, first unregister the old taskCounter. + if prometheus.Unregister(taskCounter) { + fmt.Println("taskCounter unregistered.") + } + + // Try registering taskCounterVec again. + if err := prometheus.Register(taskCounterVec); err != nil { + fmt.Println("taskCounterVec not registered:", err) + } else { + fmt.Println("taskCounterVec registered.") + } + // Bummer! Still doesn't work. + + // Prometheus will not allow you to ever export metrics with + // inconsistent help strings or label names. After unregistering, the + // unregistered metrics will cease to show up in the /metrics HTTP + // response, but the registry still remembers that those metrics had + // been exported before. 
For this example, we will now choose a + // different name. (In a real program, you would obviously not export + // the obsolete metric in the first place.) + taskCounterVec = prometheus.NewCounterVec( + prometheus.CounterOpts{ + Subsystem: "worker_pool", + Name: "completed_tasks_by_id", + Help: "Total number of tasks completed.", + }, + []string{"worker_id"}, + ) + if err := prometheus.Register(taskCounterVec); err != nil { + fmt.Println("taskCounterVec not registered:", err) + } else { + fmt.Println("taskCounterVec registered.") + } + // Finally it worked! + + // The workers have to tell taskCounterVec their id to increment the + // right element in the metric vector. + taskCounterVec.WithLabelValues("42").Inc() // Code from worker 42. + + // Each worker could also keep a reference to their own counter element + // around. Pick the counter at initialization time of the worker. + myCounter := taskCounterVec.WithLabelValues("42") // From worker 42 initialization code. + myCounter.Inc() // Somewhere in the code of that worker. + + // Note that something like WithLabelValues("42", "spurious arg") would + // panic (because you have provided too many label values). If you want + // to get an error instead, use GetMetricWithLabelValues(...) instead. + notMyCounter, err := taskCounterVec.GetMetricWithLabelValues("42", "spurious arg") + if err != nil { + fmt.Println("Worker initialization failed:", err) + } + if notMyCounter == nil { + fmt.Println("notMyCounter is nil.") + } + + // A different (and somewhat tricky) approach is to use + // ConstLabels. ConstLabels are pairs of label names and label values + // that never change. You might ask what those labels are good for (and + // rightfully so - if they never change, they could as well be part of + // the metric name). There are essentially two use-cases: The first is + // if labels are constant throughout the lifetime of a binary execution, + // but they vary over time or between different instances of a running + // binary. The second is what we have here: Each worker creates and + // registers an own Counter instance where the only difference is in the + // value of the ConstLabels. Those Counters can all be registered + // because the different ConstLabel values guarantee that each worker + // will increment a different Counter metric. + counterOpts := prometheus.CounterOpts{ + Subsystem: "worker_pool", + Name: "completed_tasks", + Help: "Total number of tasks completed.", + ConstLabels: prometheus.Labels{"worker_id": "42"}, + } + taskCounterForWorker42 := prometheus.NewCounter(counterOpts) + if err := prometheus.Register(taskCounterForWorker42); err != nil { + fmt.Println("taskCounterVForWorker42 not registered:", err) + } else { + fmt.Println("taskCounterForWorker42 registered.") + } + // Obviously, in real code, taskCounterForWorker42 would be a member + // variable of a worker struct, and the "42" would be retrieved with a + // GetId() method or something. The Counter would be created and + // registered in the initialization code of the worker. + + // For the creation of the next Counter, we can recycle + // counterOpts. Just change the ConstLabels. 
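+ // (Editorial note, hedged: recycling the Opts struct is safe because
+ // NewCounter copies the ConstLabels into the new metric's Desc at
+ // construction time, so changing counterOpts afterwards does not affect
+ // taskCounterForWorker42.)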
+ counterOpts.ConstLabels = prometheus.Labels{"worker_id": "2001"} + taskCounterForWorker2001 := prometheus.NewCounter(counterOpts) + if err := prometheus.Register(taskCounterForWorker2001); err != nil { + fmt.Println("taskCounterVForWorker2001 not registered:", err) + } else { + fmt.Println("taskCounterForWorker2001 registered.") + } + + taskCounterForWorker2001.Inc() + taskCounterForWorker42.Inc() + taskCounterForWorker2001.Inc() + + // Yet another approach would be to turn the workers themselves into + // Collectors and register them. See the Collector example for details. + + // Output: + // taskCounter registered. + // taskCounterVec not registered: a previously registered descriptor with the same fully-qualified name as Desc{fqName: "worker_pool_completed_tasks_total", help: "Total number of tasks completed.", constLabels: {}, variableLabels: [worker_id]} has different label names or a different help string + // taskCounter unregistered. + // taskCounterVec not registered: a previously registered descriptor with the same fully-qualified name as Desc{fqName: "worker_pool_completed_tasks_total", help: "Total number of tasks completed.", constLabels: {}, variableLabels: [worker_id]} has different label names or a different help string + // taskCounterVec registered. + // Worker initialization failed: inconsistent label cardinality + // notMyCounter is nil. + // taskCounterForWorker42 registered. + // taskCounterForWorker2001 registered. +} + +func ExampleSummary() { + temps := prometheus.NewSummary(prometheus.SummaryOpts{ + Name: "pond_temperature_celsius", + Help: "The temperature of the frog pond.", + Objectives: map[float64]float64{0.5: 0.05, 0.9: 0.01, 0.99: 0.001}, + }) + + // Simulate some observations. + for i := 0; i < 1000; i++ { + temps.Observe(30 + math.Floor(120*math.Sin(float64(i)*0.1))/10) + } + + // Just for demonstration, let's check the state of the summary by + // (ab)using its Write method (which is usually only used by Prometheus + // internally). + metric := &dto.Metric{} + temps.Write(metric) + fmt.Println(proto.MarshalTextString(metric)) + + // Output: + // summary: < + // sample_count: 1000 + // sample_sum: 29969.50000000001 + // quantile: < + // quantile: 0.5 + // value: 31.1 + // > + // quantile: < + // quantile: 0.9 + // value: 41.3 + // > + // quantile: < + // quantile: 0.99 + // value: 41.9 + // > + // > +} + +func ExampleSummaryVec() { + temps := prometheus.NewSummaryVec( + prometheus.SummaryOpts{ + Name: "pond_temperature_celsius", + Help: "The temperature of the frog pond.", + Objectives: map[float64]float64{0.5: 0.05, 0.9: 0.01, 0.99: 0.001}, + }, + []string{"species"}, + ) + + // Simulate some observations. + for i := 0; i < 1000; i++ { + temps.WithLabelValues("litoria-caerulea").Observe(30 + math.Floor(120*math.Sin(float64(i)*0.1))/10) + temps.WithLabelValues("lithobates-catesbeianus").Observe(32 + math.Floor(100*math.Cos(float64(i)*0.11))/10) + } + + // Create a Summary without any observations. + temps.WithLabelValues("leiopelma-hochstetteri") + + // Just for demonstration, let's check the state of the summary vector + // by registering it with a custom registry and then let it collect the + // metrics. 
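+ // (Editorial note, hedged: prometheus.NewPedanticRegistry() could be used
+ // instead of NewRegistry() for stricter consistency checks while developing
+ // a collector, as in ExampleCollector.)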
+ reg := prometheus.NewRegistry() + reg.MustRegister(temps) + + metricFamilies, err := reg.Gather() + if err != nil || len(metricFamilies) != 1 { + panic("unexpected behavior of custom test registry") + } + fmt.Println(proto.MarshalTextString(metricFamilies[0])) + + // Output: + // name: "pond_temperature_celsius" + // help: "The temperature of the frog pond." + // type: SUMMARY + // metric: < + // label: < + // name: "species" + // value: "leiopelma-hochstetteri" + // > + // summary: < + // sample_count: 0 + // sample_sum: 0 + // quantile: < + // quantile: 0.5 + // value: nan + // > + // quantile: < + // quantile: 0.9 + // value: nan + // > + // quantile: < + // quantile: 0.99 + // value: nan + // > + // > + // > + // metric: < + // label: < + // name: "species" + // value: "lithobates-catesbeianus" + // > + // summary: < + // sample_count: 1000 + // sample_sum: 31956.100000000017 + // quantile: < + // quantile: 0.5 + // value: 32.4 + // > + // quantile: < + // quantile: 0.9 + // value: 41.4 + // > + // quantile: < + // quantile: 0.99 + // value: 41.9 + // > + // > + // > + // metric: < + // label: < + // name: "species" + // value: "litoria-caerulea" + // > + // summary: < + // sample_count: 1000 + // sample_sum: 29969.50000000001 + // quantile: < + // quantile: 0.5 + // value: 31.1 + // > + // quantile: < + // quantile: 0.9 + // value: 41.3 + // > + // quantile: < + // quantile: 0.99 + // value: 41.9 + // > + // > + // > +} + +func ExampleNewConstSummary() { + desc := prometheus.NewDesc( + "http_request_duration_seconds", + "A summary of the HTTP request durations.", + []string{"code", "method"}, + prometheus.Labels{"owner": "example"}, + ) + + // Create a constant summary from values we got from a 3rd party telemetry system. + s := prometheus.MustNewConstSummary( + desc, + 4711, 403.34, + map[float64]float64{0.5: 42.3, 0.9: 323.3}, + "200", "get", + ) + + // Just for demonstration, let's check the state of the summary by + // (ab)using its Write method (which is usually only used by Prometheus + // internally). + metric := &dto.Metric{} + s.Write(metric) + fmt.Println(proto.MarshalTextString(metric)) + + // Output: + // label: < + // name: "code" + // value: "200" + // > + // label: < + // name: "method" + // value: "get" + // > + // label: < + // name: "owner" + // value: "example" + // > + // summary: < + // sample_count: 4711 + // sample_sum: 403.34 + // quantile: < + // quantile: 0.5 + // value: 42.3 + // > + // quantile: < + // quantile: 0.9 + // value: 323.3 + // > + // > +} + +func ExampleHistogram() { + temps := prometheus.NewHistogram(prometheus.HistogramOpts{ + Name: "pond_temperature_celsius", + Help: "The temperature of the frog pond.", // Sorry, we can't measure how badly it smells. + Buckets: prometheus.LinearBuckets(20, 5, 5), // 5 buckets, each 5 centigrade wide. + }) + + // Simulate some observations. + for i := 0; i < 1000; i++ { + temps.Observe(30 + math.Floor(120*math.Sin(float64(i)*0.1))/10) + } + + // Just for demonstration, let's check the state of the histogram by + // (ab)using its Write method (which is usually only used by Prometheus + // internally). 
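+ // (Editorial note, hedged: in a real program the histogram would simply be
+ // registered and scraped rather than written out by hand, roughly:
+ //
+ //      prometheus.MustRegister(temps)
+ //      http.Handle("/metrics", prometheus.Handler())
+ // )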
+ metric := &dto.Metric{} + temps.Write(metric) + fmt.Println(proto.MarshalTextString(metric)) + + // Output: + // histogram: < + // sample_count: 1000 + // sample_sum: 29969.50000000001 + // bucket: < + // cumulative_count: 192 + // upper_bound: 20 + // > + // bucket: < + // cumulative_count: 366 + // upper_bound: 25 + // > + // bucket: < + // cumulative_count: 501 + // upper_bound: 30 + // > + // bucket: < + // cumulative_count: 638 + // upper_bound: 35 + // > + // bucket: < + // cumulative_count: 816 + // upper_bound: 40 + // > + // > +} + +func ExampleNewConstHistogram() { + desc := prometheus.NewDesc( + "http_request_duration_seconds", + "A histogram of the HTTP request durations.", + []string{"code", "method"}, + prometheus.Labels{"owner": "example"}, + ) + + // Create a constant histogram from values we got from a 3rd party telemetry system. + h := prometheus.MustNewConstHistogram( + desc, + 4711, 403.34, + map[float64]uint64{25: 121, 50: 2403, 100: 3221, 200: 4233}, + "200", "get", + ) + + // Just for demonstration, let's check the state of the histogram by + // (ab)using its Write method (which is usually only used by Prometheus + // internally). + metric := &dto.Metric{} + h.Write(metric) + fmt.Println(proto.MarshalTextString(metric)) + + // Output: + // label: < + // name: "code" + // value: "200" + // > + // label: < + // name: "method" + // value: "get" + // > + // label: < + // name: "owner" + // value: "example" + // > + // histogram: < + // sample_count: 4711 + // sample_sum: 403.34 + // bucket: < + // cumulative_count: 121 + // upper_bound: 25 + // > + // bucket: < + // cumulative_count: 2403 + // upper_bound: 50 + // > + // bucket: < + // cumulative_count: 3221 + // upper_bound: 100 + // > + // bucket: < + // cumulative_count: 4233 + // upper_bound: 200 + // > + // > +} + +func ExampleAlreadyRegisteredError() { + reqCounter := prometheus.NewCounter(prometheus.CounterOpts{ + Name: "requests_total", + Help: "The total number of requests served.", + }) + if err := prometheus.Register(reqCounter); err != nil { + if are, ok := err.(prometheus.AlreadyRegisteredError); ok { + // A counter for that metric has been registered before. + // Use the old counter from now on. + reqCounter = are.ExistingCollector.(prometheus.Counter) + } else { + // Something else went wrong! + panic(err) + } + } + reqCounter.Inc() +} + +func ExampleGatherers() { + reg := prometheus.NewRegistry() + temp := prometheus.NewGaugeVec( + prometheus.GaugeOpts{ + Name: "temperature_kelvin", + Help: "Temperature in Kelvin.", + }, + []string{"location"}, + ) + reg.MustRegister(temp) + temp.WithLabelValues("outside").Set(273.14) + temp.WithLabelValues("inside").Set(298.44) + + var parser expfmt.TextParser + + text := ` +# TYPE humidity_percent gauge +# HELP humidity_percent Humidity in %. +humidity_percent{location="outside"} 45.4 +humidity_percent{location="inside"} 33.2 +# TYPE temperature_kelvin gauge +# HELP temperature_kelvin Temperature in Kelvin. 
+temperature_kelvin{location="somewhere else"} 4.5 +` + + parseText := func() ([]*dto.MetricFamily, error) { + parsed, err := parser.TextToMetricFamilies(strings.NewReader(text)) + if err != nil { + return nil, err + } + var result []*dto.MetricFamily + for _, mf := range parsed { + result = append(result, mf) + } + return result, nil + } + + gatherers := prometheus.Gatherers{ + reg, + prometheus.GathererFunc(parseText), + } + + gathering, err := gatherers.Gather() + if err != nil { + fmt.Println(err) + } + + out := &bytes.Buffer{} + for _, mf := range gathering { + if _, err := expfmt.MetricFamilyToText(out, mf); err != nil { + panic(err) + } + } + fmt.Print(out.String()) + fmt.Println("----------") + + // Note how the temperature_kelvin metric family has been merged from + // different sources. Now try + text = ` +# TYPE humidity_percent gauge +# HELP humidity_percent Humidity in %. +humidity_percent{location="outside"} 45.4 +humidity_percent{location="inside"} 33.2 +# TYPE temperature_kelvin gauge +# HELP temperature_kelvin Temperature in Kelvin. +# Duplicate metric: +temperature_kelvin{location="outside"} 265.3 + # Wrong labels: +temperature_kelvin 4.5 +` + + gathering, err = gatherers.Gather() + if err != nil { + fmt.Println(err) + } + // Note that still as many metrics as possible are returned: + out.Reset() + for _, mf := range gathering { + if _, err := expfmt.MetricFamilyToText(out, mf); err != nil { + panic(err) + } + } + fmt.Print(out.String()) + + // Output: + // # HELP humidity_percent Humidity in %. + // # TYPE humidity_percent gauge + // humidity_percent{location="inside"} 33.2 + // humidity_percent{location="outside"} 45.4 + // # HELP temperature_kelvin Temperature in Kelvin. + // # TYPE temperature_kelvin gauge + // temperature_kelvin{location="inside"} 298.44 + // temperature_kelvin{location="outside"} 273.14 + // temperature_kelvin{location="somewhere else"} 4.5 + // ---------- + // 2 error(s) occurred: + // * collected metric temperature_kelvin label: gauge: was collected before with the same name and label values + // * collected metric temperature_kelvin gauge: has label dimensions inconsistent with previously collected metrics in the same metric family + // # HELP humidity_percent Humidity in %. + // # TYPE humidity_percent gauge + // humidity_percent{location="inside"} 33.2 + // humidity_percent{location="outside"} 45.4 + // # HELP temperature_kelvin Temperature in Kelvin. + // # TYPE temperature_kelvin gauge + // temperature_kelvin{location="inside"} 298.44 + // temperature_kelvin{location="outside"} 273.14 +} diff --git a/vendor/github.com/prometheus/client_golang/prometheus/expvar_collector.go b/vendor/github.com/prometheus/client_golang/prometheus/expvar_collector.go new file mode 100644 index 0000000..18a99d5 --- /dev/null +++ b/vendor/github.com/prometheus/client_golang/prometheus/expvar_collector.go @@ -0,0 +1,119 @@ +// Copyright 2014 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package prometheus + +import ( + "encoding/json" + "expvar" +) + +type expvarCollector struct { + exports map[string]*Desc +} + +// NewExpvarCollector returns a newly allocated expvar Collector that still has +// to be registered with a Prometheus registry. +// +// An expvar Collector collects metrics from the expvar interface. It provides a +// quick way to expose numeric values that are already exported via expvar as +// Prometheus metrics. Note that the data models of expvar and Prometheus are +// fundamentally different, and that the expvar Collector is inherently slower +// than native Prometheus metrics. Thus, the expvar Collector is probably great +// for experiments and prototying, but you should seriously consider a more +// direct implementation of Prometheus metrics for monitoring production +// systems. +// +// The exports map has the following meaning: +// +// The keys in the map correspond to expvar keys, i.e. for every expvar key you +// want to export as Prometheus metric, you need an entry in the exports +// map. The descriptor mapped to each key describes how to export the expvar +// value. It defines the name and the help string of the Prometheus metric +// proxying the expvar value. The type will always be Untyped. +// +// For descriptors without variable labels, the expvar value must be a number or +// a bool. The number is then directly exported as the Prometheus sample +// value. (For a bool, 'false' translates to 0 and 'true' to 1). Expvar values +// that are not numbers or bools are silently ignored. +// +// If the descriptor has one variable label, the expvar value must be an expvar +// map. The keys in the expvar map become the various values of the one +// Prometheus label. The values in the expvar map must be numbers or bools again +// as above. +// +// For descriptors with more than one variable label, the expvar must be a +// nested expvar map, i.e. where the values of the topmost map are maps again +// etc. until a depth is reached that corresponds to the number of labels. The +// leaves of that structure must be numbers or bools as above to serve as the +// sample values. +// +// Anything that does not fit into the scheme above is silently ignored. +func NewExpvarCollector(exports map[string]*Desc) Collector { + return &expvarCollector{ + exports: exports, + } +} + +// Describe implements Collector. +func (e *expvarCollector) Describe(ch chan<- *Desc) { + for _, desc := range e.exports { + ch <- desc + } +} + +// Collect implements Collector. +func (e *expvarCollector) Collect(ch chan<- Metric) { + for name, desc := range e.exports { + var m Metric + expVar := expvar.Get(name) + if expVar == nil { + continue + } + var v interface{} + labels := make([]string, len(desc.variableLabels)) + if err := json.Unmarshal([]byte(expVar.String()), &v); err != nil { + ch <- NewInvalidMetric(desc, err) + continue + } + var processValue func(v interface{}, i int) + processValue = func(v interface{}, i int) { + if i >= len(labels) { + copiedLabels := append(make([]string, 0, len(labels)), labels...) + switch v := v.(type) { + case float64: + m = MustNewConstMetric(desc, UntypedValue, v, copiedLabels...) + case bool: + if v { + m = MustNewConstMetric(desc, UntypedValue, 1, copiedLabels...) + } else { + m = MustNewConstMetric(desc, UntypedValue, 0, copiedLabels...) 
+ } + default: + return + } + ch <- m + return + } + vm, ok := v.(map[string]interface{}) + if !ok { + return + } + for lv, val := range vm { + labels[i] = lv + processValue(val, i+1) + } + } + processValue(v, 0) + } +} diff --git a/vendor/github.com/prometheus/client_golang/prometheus/expvar_collector_test.go b/vendor/github.com/prometheus/client_golang/prometheus/expvar_collector_test.go new file mode 100644 index 0000000..910dac3 --- /dev/null +++ b/vendor/github.com/prometheus/client_golang/prometheus/expvar_collector_test.go @@ -0,0 +1,97 @@ +// Copyright 2014 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package prometheus_test + +import ( + "expvar" + "fmt" + "sort" + "strings" + + dto "github.com/prometheus/client_model/go" + + "github.com/prometheus/client_golang/prometheus" +) + +func ExampleNewExpvarCollector() { + expvarCollector := prometheus.NewExpvarCollector(map[string]*prometheus.Desc{ + "memstats": prometheus.NewDesc( + "expvar_memstats", + "All numeric memstats as one metric family. Not a good role-model, actually... ;-)", + []string{"type"}, nil, + ), + "lone-int": prometheus.NewDesc( + "expvar_lone_int", + "Just an expvar int as an example.", + nil, nil, + ), + "http-request-map": prometheus.NewDesc( + "expvar_http_request_total", + "How many http requests processed, partitioned by status code and http method.", + []string{"code", "method"}, nil, + ), + }) + prometheus.MustRegister(expvarCollector) + + // The Prometheus part is done here. But to show that this example is + // doing anything, we have to manually export something via expvar. In + // real-life use-cases, some library would already have exported via + // expvar what we want to re-export as Prometheus metrics. + expvar.NewInt("lone-int").Set(42) + expvarMap := expvar.NewMap("http-request-map") + var ( + expvarMap1, expvarMap2 expvar.Map + expvarInt11, expvarInt12, expvarInt21, expvarInt22 expvar.Int + ) + expvarMap1.Init() + expvarMap2.Init() + expvarInt11.Set(3) + expvarInt12.Set(13) + expvarInt21.Set(11) + expvarInt22.Set(212) + expvarMap1.Set("POST", &expvarInt11) + expvarMap1.Set("GET", &expvarInt12) + expvarMap2.Set("POST", &expvarInt21) + expvarMap2.Set("GET", &expvarInt22) + expvarMap.Set("404", &expvarMap1) + expvarMap.Set("200", &expvarMap2) + // Results in the following expvar map: + // "http-request-count": {"200": {"POST": 11, "GET": 212}, "404": {"POST": 3, "GET": 13}} + + // Let's see what the scrape would yield, but exclude the memstats metrics. 
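+ // (Editorial note, hedged: the code below drives Collect by hand over a
+ // channel and drains it in this goroutine; normally a registry's Gather
+ // method, or the HTTP handler, would do this for you.)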
+ metricStrings := []string{} + metric := dto.Metric{} + metricChan := make(chan prometheus.Metric) + go func() { + expvarCollector.Collect(metricChan) + close(metricChan) + }() + for m := range metricChan { + if strings.Index(m.Desc().String(), "expvar_memstats") == -1 { + metric.Reset() + m.Write(&metric) + metricStrings = append(metricStrings, metric.String()) + } + } + sort.Strings(metricStrings) + for _, s := range metricStrings { + fmt.Println(strings.TrimRight(s, " ")) + } + // Output: + // label: label: untyped: + // label: label: untyped: + // label: label: untyped: + // label: label: untyped: + // untyped: +} diff --git a/vendor/github.com/prometheus/client_golang/prometheus/fnv.go b/vendor/github.com/prometheus/client_golang/prometheus/fnv.go new file mode 100644 index 0000000..e3b67df --- /dev/null +++ b/vendor/github.com/prometheus/client_golang/prometheus/fnv.go @@ -0,0 +1,29 @@ +package prometheus + +// Inline and byte-free variant of hash/fnv's fnv64a. + +const ( + offset64 = 14695981039346656037 + prime64 = 1099511628211 +) + +// hashNew initializies a new fnv64a hash value. +func hashNew() uint64 { + return offset64 +} + +// hashAdd adds a string to a fnv64a hash value, returning the updated hash. +func hashAdd(h uint64, s string) uint64 { + for i := 0; i < len(s); i++ { + h ^= uint64(s[i]) + h *= prime64 + } + return h +} + +// hashAddByte adds a byte to a fnv64a hash value, returning the updated hash. +func hashAddByte(h uint64, b byte) uint64 { + h ^= uint64(b) + h *= prime64 + return h +} diff --git a/vendor/github.com/prometheus/client_golang/prometheus/gauge.go b/vendor/github.com/prometheus/client_golang/prometheus/gauge.go new file mode 100644 index 0000000..13064da --- /dev/null +++ b/vendor/github.com/prometheus/client_golang/prometheus/gauge.go @@ -0,0 +1,173 @@ +// Copyright 2014 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package prometheus + +// Gauge is a Metric that represents a single numerical value that can +// arbitrarily go up and down. +// +// A Gauge is typically used for measured values like temperatures or current +// memory usage, but also "counts" that can go up and down, like the number of +// running goroutines. +// +// To create Gauge instances, use NewGauge. +type Gauge interface { + Metric + Collector + + // Set sets the Gauge to an arbitrary value. + Set(float64) + // Inc increments the Gauge by 1. Use Add to increment it by arbitrary + // values. + Inc() + // Dec decrements the Gauge by 1. Use Sub to decrement it by arbitrary + // values. + Dec() + // Add adds the given value to the Gauge. (The value can be negative, + // resulting in a decrease of the Gauge.) + Add(float64) + // Sub subtracts the given value from the Gauge. (The value can be + // negative, resulting in an increase of the Gauge.) + Sub(float64) + + // SetToCurrentTime sets the Gauge to the current Unix time in seconds. + SetToCurrentTime() +} + +// GaugeOpts is an alias for Opts. See there for doc comments. 
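+//
+// (Illustrative sketch added editorially, not upstream documentation: a
+// typical literal looks like
+//
+//      prometheus.GaugeOpts{
+//              Namespace: "our_company",
+//              Subsystem: "blob_storage",
+//              Name:      "ops_queued",
+//              Help:      "Number of blob storage operations waiting to be processed.",
+//      }
+//
+// which BuildFQName turns into the metric name
+// "our_company_blob_storage_ops_queued".)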
+type GaugeOpts Opts + +// NewGauge creates a new Gauge based on the provided GaugeOpts. +func NewGauge(opts GaugeOpts) Gauge { + return newValue(NewDesc( + BuildFQName(opts.Namespace, opts.Subsystem, opts.Name), + opts.Help, + nil, + opts.ConstLabels, + ), GaugeValue, 0) +} + +// GaugeVec is a Collector that bundles a set of Gauges that all share the same +// Desc, but have different values for their variable labels. This is used if +// you want to count the same thing partitioned by various dimensions +// (e.g. number of operations queued, partitioned by user and operation +// type). Create instances with NewGaugeVec. +type GaugeVec struct { + *metricVec +} + +// NewGaugeVec creates a new GaugeVec based on the provided GaugeOpts and +// partitioned by the given label names. +func NewGaugeVec(opts GaugeOpts, labelNames []string) *GaugeVec { + desc := NewDesc( + BuildFQName(opts.Namespace, opts.Subsystem, opts.Name), + opts.Help, + labelNames, + opts.ConstLabels, + ) + return &GaugeVec{ + metricVec: newMetricVec(desc, func(lvs ...string) Metric { + return newValue(desc, GaugeValue, 0, lvs...) + }), + } +} + +// GetMetricWithLabelValues returns the Gauge for the given slice of label +// values (same order as the VariableLabels in Desc). If that combination of +// label values is accessed for the first time, a new Gauge is created. +// +// It is possible to call this method without using the returned Gauge to only +// create the new Gauge but leave it at its starting value 0. See also the +// SummaryVec example. +// +// Keeping the Gauge for later use is possible (and should be considered if +// performance is critical), but keep in mind that Reset, DeleteLabelValues and +// Delete can be used to delete the Gauge from the GaugeVec. In that case, the +// Gauge will still exist, but it will not be exported anymore, even if a +// Gauge with the same label values is created later. See also the CounterVec +// example. +// +// An error is returned if the number of label values is not the same as the +// number of VariableLabels in Desc. +// +// Note that for more than one label value, this method is prone to mistakes +// caused by an incorrect order of arguments. Consider GetMetricWith(Labels) as +// an alternative to avoid that type of mistake. For higher label numbers, the +// latter has a much more readable (albeit more verbose) syntax, but it comes +// with a performance overhead (for creating and processing the Labels map). +func (m *GaugeVec) GetMetricWithLabelValues(lvs ...string) (Gauge, error) { + metric, err := m.metricVec.getMetricWithLabelValues(lvs...) + if metric != nil { + return metric.(Gauge), err + } + return nil, err +} + +// GetMetricWith returns the Gauge for the given Labels map (the label names +// must match those of the VariableLabels in Desc). If that label map is +// accessed for the first time, a new Gauge is created. Implications of +// creating a Gauge without using it and keeping the Gauge for later use are +// the same as for GetMetricWithLabelValues. +// +// An error is returned if the number and names of the Labels are inconsistent +// with those of the VariableLabels in Desc. +// +// This method is used for the same purpose as +// GetMetricWithLabelValues(...string). See there for pros and cons of the two +// methods. 
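+//
+// (Hedged editorial sketch, not upstream documentation:
+//
+//      g, err := vec.GetMetricWith(prometheus.Labels{"user": "alice", "type": "delete"})
+//      if err == nil {
+//              g.Inc()
+//      }
+//
+// where vec is a *GaugeVec created with the label names "user" and "type".)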
+func (m *GaugeVec) GetMetricWith(labels Labels) (Gauge, error) { + metric, err := m.metricVec.getMetricWith(labels) + if metric != nil { + return metric.(Gauge), err + } + return nil, err +} + +// WithLabelValues works as GetMetricWithLabelValues, but panics where +// GetMetricWithLabelValues would have returned an error. By not returning an +// error, WithLabelValues allows shortcuts like +// myVec.WithLabelValues("404", "GET").Add(42) +func (m *GaugeVec) WithLabelValues(lvs ...string) Gauge { + return m.metricVec.withLabelValues(lvs...).(Gauge) +} + +// With works as GetMetricWith, but panics where GetMetricWithLabels would have +// returned an error. By not returning an error, With allows shortcuts like +// myVec.With(Labels{"code": "404", "method": "GET"}).Add(42) +func (m *GaugeVec) With(labels Labels) Gauge { + return m.metricVec.with(labels).(Gauge) +} + +// GaugeFunc is a Gauge whose value is determined at collect time by calling a +// provided function. +// +// To create GaugeFunc instances, use NewGaugeFunc. +type GaugeFunc interface { + Metric + Collector +} + +// NewGaugeFunc creates a new GaugeFunc based on the provided GaugeOpts. The +// value reported is determined by calling the given function from within the +// Write method. Take into account that metric collection may happen +// concurrently. If that results in concurrent calls to Write, like in the case +// where a GaugeFunc is directly registered with Prometheus, the provided +// function must be concurrency-safe. +func NewGaugeFunc(opts GaugeOpts, function func() float64) GaugeFunc { + return newValueFunc(NewDesc( + BuildFQName(opts.Namespace, opts.Subsystem, opts.Name), + opts.Help, + nil, + opts.ConstLabels, + ), GaugeValue, function) +} diff --git a/vendor/github.com/prometheus/client_golang/prometheus/gauge_test.go b/vendor/github.com/prometheus/client_golang/prometheus/gauge_test.go new file mode 100644 index 0000000..8e5f002 --- /dev/null +++ b/vendor/github.com/prometheus/client_golang/prometheus/gauge_test.go @@ -0,0 +1,202 @@ +// Copyright 2014 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package prometheus + +import ( + "math" + "math/rand" + "sync" + "testing" + "testing/quick" + "time" + + dto "github.com/prometheus/client_model/go" +) + +func listenGaugeStream(vals, result chan float64, done chan struct{}) { + var sum float64 +outer: + for { + select { + case <-done: + close(vals) + for v := range vals { + sum += v + } + break outer + case v := <-vals: + sum += v + } + } + result <- sum + close(result) +} + +func TestGaugeConcurrency(t *testing.T) { + it := func(n uint32) bool { + mutations := int(n % 10000) + concLevel := int(n%15 + 1) + + var start, end sync.WaitGroup + start.Add(1) + end.Add(concLevel) + + sStream := make(chan float64, mutations*concLevel) + result := make(chan float64) + done := make(chan struct{}) + + go listenGaugeStream(sStream, result, done) + go func() { + end.Wait() + close(done) + }() + + gge := NewGauge(GaugeOpts{ + Name: "test_gauge", + Help: "no help can be found here", + }) + for i := 0; i < concLevel; i++ { + vals := make([]float64, mutations) + for j := 0; j < mutations; j++ { + vals[j] = rand.Float64() - 0.5 + } + + go func(vals []float64) { + start.Wait() + for _, v := range vals { + sStream <- v + gge.Add(v) + } + end.Done() + }(vals) + } + start.Done() + + if expected, got := <-result, math.Float64frombits(gge.(*value).valBits); math.Abs(expected-got) > 0.000001 { + t.Fatalf("expected approx. %f, got %f", expected, got) + return false + } + return true + } + + if err := quick.Check(it, nil); err != nil { + t.Fatal(err) + } +} + +func TestGaugeVecConcurrency(t *testing.T) { + it := func(n uint32) bool { + mutations := int(n % 10000) + concLevel := int(n%15 + 1) + vecLength := int(n%5 + 1) + + var start, end sync.WaitGroup + start.Add(1) + end.Add(concLevel) + + sStreams := make([]chan float64, vecLength) + results := make([]chan float64, vecLength) + done := make(chan struct{}) + + for i := 0; i < vecLength; i++ { + sStreams[i] = make(chan float64, mutations*concLevel) + results[i] = make(chan float64) + go listenGaugeStream(sStreams[i], results[i], done) + } + + go func() { + end.Wait() + close(done) + }() + + gge := NewGaugeVec( + GaugeOpts{ + Name: "test_gauge", + Help: "no help can be found here", + }, + []string{"label"}, + ) + for i := 0; i < concLevel; i++ { + vals := make([]float64, mutations) + pick := make([]int, mutations) + for j := 0; j < mutations; j++ { + vals[j] = rand.Float64() - 0.5 + pick[j] = rand.Intn(vecLength) + } + + go func(vals []float64) { + start.Wait() + for i, v := range vals { + sStreams[pick[i]] <- v + gge.WithLabelValues(string('A' + pick[i])).Add(v) + } + end.Done() + }(vals) + } + start.Done() + + for i := range sStreams { + if expected, got := <-results[i], math.Float64frombits(gge.WithLabelValues(string('A'+i)).(*value).valBits); math.Abs(expected-got) > 0.000001 { + t.Fatalf("expected approx. 
%f, got %f", expected, got) + return false + } + } + return true + } + + if err := quick.Check(it, nil); err != nil { + t.Fatal(err) + } +} + +func TestGaugeFunc(t *testing.T) { + gf := NewGaugeFunc( + GaugeOpts{ + Name: "test_name", + Help: "test help", + ConstLabels: Labels{"a": "1", "b": "2"}, + }, + func() float64 { return 3.1415 }, + ) + + if expected, got := `Desc{fqName: "test_name", help: "test help", constLabels: {a="1",b="2"}, variableLabels: []}`, gf.Desc().String(); expected != got { + t.Errorf("expected %q, got %q", expected, got) + } + + m := &dto.Metric{} + gf.Write(m) + + if expected, got := `label: label: gauge: `, m.String(); expected != got { + t.Errorf("expected %q, got %q", expected, got) + } +} + +func TestGaugeSetCurrentTime(t *testing.T) { + g := NewGauge(GaugeOpts{ + Name: "test_name", + Help: "test help", + }) + g.SetToCurrentTime() + unixTime := float64(time.Now().Unix()) + + m := &dto.Metric{} + g.Write(m) + + delta := unixTime - m.GetGauge().GetValue() + // This is just a smoke test to make sure SetToCurrentTime is not + // totally off. Tests with current time involved are hard... + if math.Abs(delta) > 5 { + t.Errorf("Gauge set to current time deviates from current time by more than 5s, delta is %f seconds", delta) + } +} diff --git a/vendor/github.com/prometheus/client_golang/prometheus/go_collector.go b/vendor/github.com/prometheus/client_golang/prometheus/go_collector.go new file mode 100644 index 0000000..096454a --- /dev/null +++ b/vendor/github.com/prometheus/client_golang/prometheus/go_collector.go @@ -0,0 +1,284 @@ +package prometheus + +import ( + "fmt" + "runtime" + "runtime/debug" + "time" +) + +type goCollector struct { + goroutinesDesc *Desc + threadsDesc *Desc + gcDesc *Desc + goInfoDesc *Desc + + // metrics to describe and collect + metrics memStatsMetrics +} + +// NewGoCollector returns a collector which exports metrics about the current +// go process. 
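+//
+// (Editorial note, hedged: the default registry already registers a
+// GoCollector, so explicit registration is only needed with a custom
+// registry, roughly:
+//
+//      reg := prometheus.NewRegistry()
+//      reg.MustRegister(prometheus.NewGoCollector())
+// )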
+func NewGoCollector() Collector { + return &goCollector{ + goroutinesDesc: NewDesc( + "go_goroutines", + "Number of goroutines that currently exist.", + nil, nil), + threadsDesc: NewDesc( + "go_threads", + "Number of OS threads created.", + nil, nil), + gcDesc: NewDesc( + "go_gc_duration_seconds", + "A summary of the GC invocation durations.", + nil, nil), + goInfoDesc: NewDesc( + "go_info", + "Information about the Go environment.", + nil, Labels{"version": runtime.Version()}), + metrics: memStatsMetrics{ + { + desc: NewDesc( + memstatNamespace("alloc_bytes"), + "Number of bytes allocated and still in use.", + nil, nil, + ), + eval: func(ms *runtime.MemStats) float64 { return float64(ms.Alloc) }, + valType: GaugeValue, + }, { + desc: NewDesc( + memstatNamespace("alloc_bytes_total"), + "Total number of bytes allocated, even if freed.", + nil, nil, + ), + eval: func(ms *runtime.MemStats) float64 { return float64(ms.TotalAlloc) }, + valType: CounterValue, + }, { + desc: NewDesc( + memstatNamespace("sys_bytes"), + "Number of bytes obtained from system.", + nil, nil, + ), + eval: func(ms *runtime.MemStats) float64 { return float64(ms.Sys) }, + valType: GaugeValue, + }, { + desc: NewDesc( + memstatNamespace("lookups_total"), + "Total number of pointer lookups.", + nil, nil, + ), + eval: func(ms *runtime.MemStats) float64 { return float64(ms.Lookups) }, + valType: CounterValue, + }, { + desc: NewDesc( + memstatNamespace("mallocs_total"), + "Total number of mallocs.", + nil, nil, + ), + eval: func(ms *runtime.MemStats) float64 { return float64(ms.Mallocs) }, + valType: CounterValue, + }, { + desc: NewDesc( + memstatNamespace("frees_total"), + "Total number of frees.", + nil, nil, + ), + eval: func(ms *runtime.MemStats) float64 { return float64(ms.Frees) }, + valType: CounterValue, + }, { + desc: NewDesc( + memstatNamespace("heap_alloc_bytes"), + "Number of heap bytes allocated and still in use.", + nil, nil, + ), + eval: func(ms *runtime.MemStats) float64 { return float64(ms.HeapAlloc) }, + valType: GaugeValue, + }, { + desc: NewDesc( + memstatNamespace("heap_sys_bytes"), + "Number of heap bytes obtained from system.", + nil, nil, + ), + eval: func(ms *runtime.MemStats) float64 { return float64(ms.HeapSys) }, + valType: GaugeValue, + }, { + desc: NewDesc( + memstatNamespace("heap_idle_bytes"), + "Number of heap bytes waiting to be used.", + nil, nil, + ), + eval: func(ms *runtime.MemStats) float64 { return float64(ms.HeapIdle) }, + valType: GaugeValue, + }, { + desc: NewDesc( + memstatNamespace("heap_inuse_bytes"), + "Number of heap bytes that are in use.", + nil, nil, + ), + eval: func(ms *runtime.MemStats) float64 { return float64(ms.HeapInuse) }, + valType: GaugeValue, + }, { + desc: NewDesc( + memstatNamespace("heap_released_bytes"), + "Number of heap bytes released to OS.", + nil, nil, + ), + eval: func(ms *runtime.MemStats) float64 { return float64(ms.HeapReleased) }, + valType: GaugeValue, + }, { + desc: NewDesc( + memstatNamespace("heap_objects"), + "Number of allocated objects.", + nil, nil, + ), + eval: func(ms *runtime.MemStats) float64 { return float64(ms.HeapObjects) }, + valType: GaugeValue, + }, { + desc: NewDesc( + memstatNamespace("stack_inuse_bytes"), + "Number of bytes in use by the stack allocator.", + nil, nil, + ), + eval: func(ms *runtime.MemStats) float64 { return float64(ms.StackInuse) }, + valType: GaugeValue, + }, { + desc: NewDesc( + memstatNamespace("stack_sys_bytes"), + "Number of bytes obtained from system for stack allocator.", + nil, nil, + ), + eval: func(ms 
*runtime.MemStats) float64 { return float64(ms.StackSys) }, + valType: GaugeValue, + }, { + desc: NewDesc( + memstatNamespace("mspan_inuse_bytes"), + "Number of bytes in use by mspan structures.", + nil, nil, + ), + eval: func(ms *runtime.MemStats) float64 { return float64(ms.MSpanInuse) }, + valType: GaugeValue, + }, { + desc: NewDesc( + memstatNamespace("mspan_sys_bytes"), + "Number of bytes used for mspan structures obtained from system.", + nil, nil, + ), + eval: func(ms *runtime.MemStats) float64 { return float64(ms.MSpanSys) }, + valType: GaugeValue, + }, { + desc: NewDesc( + memstatNamespace("mcache_inuse_bytes"), + "Number of bytes in use by mcache structures.", + nil, nil, + ), + eval: func(ms *runtime.MemStats) float64 { return float64(ms.MCacheInuse) }, + valType: GaugeValue, + }, { + desc: NewDesc( + memstatNamespace("mcache_sys_bytes"), + "Number of bytes used for mcache structures obtained from system.", + nil, nil, + ), + eval: func(ms *runtime.MemStats) float64 { return float64(ms.MCacheSys) }, + valType: GaugeValue, + }, { + desc: NewDesc( + memstatNamespace("buck_hash_sys_bytes"), + "Number of bytes used by the profiling bucket hash table.", + nil, nil, + ), + eval: func(ms *runtime.MemStats) float64 { return float64(ms.BuckHashSys) }, + valType: GaugeValue, + }, { + desc: NewDesc( + memstatNamespace("gc_sys_bytes"), + "Number of bytes used for garbage collection system metadata.", + nil, nil, + ), + eval: func(ms *runtime.MemStats) float64 { return float64(ms.GCSys) }, + valType: GaugeValue, + }, { + desc: NewDesc( + memstatNamespace("other_sys_bytes"), + "Number of bytes used for other system allocations.", + nil, nil, + ), + eval: func(ms *runtime.MemStats) float64 { return float64(ms.OtherSys) }, + valType: GaugeValue, + }, { + desc: NewDesc( + memstatNamespace("next_gc_bytes"), + "Number of heap bytes when next garbage collection will take place.", + nil, nil, + ), + eval: func(ms *runtime.MemStats) float64 { return float64(ms.NextGC) }, + valType: GaugeValue, + }, { + desc: NewDesc( + memstatNamespace("last_gc_time_seconds"), + "Number of seconds since 1970 of last garbage collection.", + nil, nil, + ), + eval: func(ms *runtime.MemStats) float64 { return float64(ms.LastGC) / 1e9 }, + valType: GaugeValue, + }, { + desc: NewDesc( + memstatNamespace("gc_cpu_fraction"), + "The fraction of this program's available CPU time used by the GC since the program started.", + nil, nil, + ), + eval: func(ms *runtime.MemStats) float64 { return ms.GCCPUFraction }, + valType: GaugeValue, + }, + }, + } +} + +func memstatNamespace(s string) string { + return fmt.Sprintf("go_memstats_%s", s) +} + +// Describe returns all descriptions of the collector. +func (c *goCollector) Describe(ch chan<- *Desc) { + ch <- c.goroutinesDesc + ch <- c.threadsDesc + ch <- c.gcDesc + ch <- c.goInfoDesc + for _, i := range c.metrics { + ch <- i.desc + } +} + +// Collect returns the current state of all metrics of the collector. 
+func (c *goCollector) Collect(ch chan<- Metric) { + ch <- MustNewConstMetric(c.goroutinesDesc, GaugeValue, float64(runtime.NumGoroutine())) + n, _ := runtime.ThreadCreateProfile(nil) + ch <- MustNewConstMetric(c.threadsDesc, GaugeValue, float64(n)) + + var stats debug.GCStats + stats.PauseQuantiles = make([]time.Duration, 5) + debug.ReadGCStats(&stats) + + quantiles := make(map[float64]float64) + for idx, pq := range stats.PauseQuantiles[1:] { + quantiles[float64(idx+1)/float64(len(stats.PauseQuantiles)-1)] = pq.Seconds() + } + quantiles[0.0] = stats.PauseQuantiles[0].Seconds() + ch <- MustNewConstSummary(c.gcDesc, uint64(stats.NumGC), float64(stats.PauseTotal.Seconds()), quantiles) + + ch <- MustNewConstMetric(c.goInfoDesc, GaugeValue, 1) + + ms := &runtime.MemStats{} + runtime.ReadMemStats(ms) + for _, i := range c.metrics { + ch <- MustNewConstMetric(i.desc, i.valType, i.eval(ms)) + } +} + +// memStatsMetrics provide description, value, and value type for memstat metrics. +type memStatsMetrics []struct { + desc *Desc + eval func(*runtime.MemStats) float64 + valType ValueType +} diff --git a/vendor/github.com/prometheus/client_golang/prometheus/go_collector_test.go b/vendor/github.com/prometheus/client_golang/prometheus/go_collector_test.go new file mode 100644 index 0000000..2d05118 --- /dev/null +++ b/vendor/github.com/prometheus/client_golang/prometheus/go_collector_test.go @@ -0,0 +1,127 @@ +package prometheus + +import ( + "runtime" + "testing" + "time" + + dto "github.com/prometheus/client_model/go" +) + +func TestGoCollector(t *testing.T) { + var ( + c = NewGoCollector() + ch = make(chan Metric) + waitc = make(chan struct{}) + closec = make(chan struct{}) + old = -1 + ) + defer close(closec) + + go func() { + c.Collect(ch) + go func(c <-chan struct{}) { + <-c + }(closec) + <-waitc + c.Collect(ch) + }() + + for { + select { + case m := <-ch: + // m can be Gauge or Counter, + // currently just test the go_goroutines Gauge + // and ignore others. + if m.Desc().fqName != "go_goroutines" { + continue + } + pb := &dto.Metric{} + m.Write(pb) + if pb.GetGauge() == nil { + continue + } + + if old == -1 { + old = int(pb.GetGauge().GetValue()) + close(waitc) + continue + } + + if diff := int(pb.GetGauge().GetValue()) - old; diff != 1 { + // TODO: This is flaky in highly concurrent situations. + t.Errorf("want 1 new goroutine, got %d", diff) + } + + // GoCollector performs three sends per call. + // On line 27 we need to receive three more sends + // to shut down cleanly. 
+ <-ch + <-ch + <-ch + return + case <-time.After(1 * time.Second): + t.Fatalf("expected collect timed out") + } + } +} + +func TestGCCollector(t *testing.T) { + var ( + c = NewGoCollector() + ch = make(chan Metric) + waitc = make(chan struct{}) + closec = make(chan struct{}) + oldGC uint64 + oldPause float64 + ) + defer close(closec) + + go func() { + c.Collect(ch) + // force GC + runtime.GC() + <-waitc + c.Collect(ch) + }() + + first := true + for { + select { + case metric := <-ch: + switch m := metric.(type) { + case *constSummary, *value: + pb := &dto.Metric{} + m.Write(pb) + if pb.GetSummary() == nil { + continue + } + + if len(pb.GetSummary().Quantile) != 5 { + t.Errorf("expected 4 buckets, got %d", len(pb.GetSummary().Quantile)) + } + for idx, want := range []float64{0.0, 0.25, 0.5, 0.75, 1.0} { + if *pb.GetSummary().Quantile[idx].Quantile != want { + t.Errorf("bucket #%d is off, got %f, want %f", idx, *pb.GetSummary().Quantile[idx].Quantile, want) + } + } + if first { + first = false + oldGC = *pb.GetSummary().SampleCount + oldPause = *pb.GetSummary().SampleSum + close(waitc) + continue + } + if diff := *pb.GetSummary().SampleCount - oldGC; diff != 1 { + t.Errorf("want 1 new garbage collection run, got %d", diff) + } + if diff := *pb.GetSummary().SampleSum - oldPause; diff <= 0 { + t.Errorf("want moar pause, got %f", diff) + } + return + } + case <-time.After(1 * time.Second): + t.Fatalf("expected collect timed out") + } + } +} diff --git a/vendor/github.com/prometheus/client_golang/prometheus/graphite/bridge.go b/vendor/github.com/prometheus/client_golang/prometheus/graphite/bridge.go new file mode 100644 index 0000000..1153337 --- /dev/null +++ b/vendor/github.com/prometheus/client_golang/prometheus/graphite/bridge.go @@ -0,0 +1,280 @@ +// Copyright 2016 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package graphite provides a bridge to push Prometheus metrics to a Graphite +// server. +package graphite + +import ( + "bufio" + "errors" + "fmt" + "io" + "net" + "sort" + "time" + + "github.com/prometheus/common/expfmt" + "github.com/prometheus/common/model" + "golang.org/x/net/context" + + dto "github.com/prometheus/client_model/go" + + "github.com/prometheus/client_golang/prometheus" +) + +const ( + defaultInterval = 15 * time.Second + millisecondsPerSecond = 1000 +) + +// HandlerErrorHandling defines how a Handler serving metrics will handle +// errors. +type HandlerErrorHandling int + +// These constants cause handlers serving metrics to behave as described if +// errors are encountered. +const ( + // Ignore errors and try to push as many metrics to Graphite as possible. + ContinueOnError HandlerErrorHandling = iota + + // Abort the push to Graphite upon the first error encountered. + AbortOnError +) + +// Config defines the Graphite bridge config. +type Config struct { + // The url to push data to. Required. + URL string + + // The prefix for the pushed Graphite metrics. Defaults to empty string. 
+ Prefix string + + // The interval to use for pushing data to Graphite. Defaults to 15 seconds. + Interval time.Duration + + // The timeout for pushing metrics to Graphite. Defaults to 15 seconds. + Timeout time.Duration + + // The Gatherer to use for metrics. Defaults to prometheus.DefaultGatherer. + Gatherer prometheus.Gatherer + + // The logger that messages are written to. Defaults to no logging. + Logger Logger + + // ErrorHandling defines how errors are handled. Note that errors are + // logged regardless of the configured ErrorHandling provided Logger + // is not nil. + ErrorHandling HandlerErrorHandling +} + +// Bridge pushes metrics to the configured Graphite server. +type Bridge struct { + url string + prefix string + interval time.Duration + timeout time.Duration + + errorHandling HandlerErrorHandling + logger Logger + + g prometheus.Gatherer +} + +// Logger is the minimal interface Bridge needs for logging. Note that +// log.Logger from the standard library implements this interface, and it is +// easy to implement by custom loggers, if they don't do so already anyway. +type Logger interface { + Println(v ...interface{}) +} + +// NewBridge returns a pointer to a new Bridge struct. +func NewBridge(c *Config) (*Bridge, error) { + b := &Bridge{} + + if c.URL == "" { + return nil, errors.New("missing URL") + } + b.url = c.URL + + if c.Gatherer == nil { + b.g = prometheus.DefaultGatherer + } else { + b.g = c.Gatherer + } + + if c.Logger != nil { + b.logger = c.Logger + } + + if c.Prefix != "" { + b.prefix = c.Prefix + } + + var z time.Duration + if c.Interval == z { + b.interval = defaultInterval + } else { + b.interval = c.Interval + } + + if c.Timeout == z { + b.timeout = defaultInterval + } else { + b.timeout = c.Timeout + } + + b.errorHandling = c.ErrorHandling + + return b, nil +} + +// Run starts the event loop that pushes Prometheus metrics to Graphite at the +// configured interval. +func (b *Bridge) Run(ctx context.Context) { + ticker := time.NewTicker(b.interval) + defer ticker.Stop() + for { + select { + case <-ticker.C: + if err := b.Push(); err != nil && b.logger != nil { + b.logger.Println("error pushing to Graphite:", err) + } + case <-ctx.Done(): + return + } + } +} + +// Push pushes Prometheus metrics to the configured Graphite server. +func (b *Bridge) Push() error { + mfs, err := b.g.Gather() + if err != nil || len(mfs) == 0 { + switch b.errorHandling { + case AbortOnError: + return err + case ContinueOnError: + if b.logger != nil { + b.logger.Println("continue on error:", err) + } + default: + panic("unrecognized error handling value") + } + } + + conn, err := net.DialTimeout("tcp", b.url, b.timeout) + if err != nil { + return err + } + defer conn.Close() + + return writeMetrics(conn, mfs, b.prefix, model.Now()) +} + +func writeMetrics(w io.Writer, mfs []*dto.MetricFamily, prefix string, now model.Time) error { + vec, err := expfmt.ExtractSamples(&expfmt.DecodeOptions{ + Timestamp: now, + }, mfs...) 
+ if err != nil { + return err + } + + buf := bufio.NewWriter(w) + for _, s := range vec { + if err := writeSanitized(buf, prefix); err != nil { + return err + } + if err := buf.WriteByte('.'); err != nil { + return err + } + if err := writeMetric(buf, s.Metric); err != nil { + return err + } + if _, err := fmt.Fprintf(buf, " %g %d\n", s.Value, int64(s.Timestamp)/millisecondsPerSecond); err != nil { + return err + } + if err := buf.Flush(); err != nil { + return err + } + } + + return nil +} + +func writeMetric(buf *bufio.Writer, m model.Metric) error { + metricName, hasName := m[model.MetricNameLabel] + numLabels := len(m) - 1 + if !hasName { + numLabels = len(m) + } + + labelStrings := make([]string, 0, numLabels) + for label, value := range m { + if label != model.MetricNameLabel { + labelStrings = append(labelStrings, fmt.Sprintf("%s %s", string(label), string(value))) + } + } + + var err error + switch numLabels { + case 0: + if hasName { + return writeSanitized(buf, string(metricName)) + } + default: + sort.Strings(labelStrings) + if err = writeSanitized(buf, string(metricName)); err != nil { + return err + } + for _, s := range labelStrings { + if err = buf.WriteByte('.'); err != nil { + return err + } + if err = writeSanitized(buf, s); err != nil { + return err + } + } + } + return nil +} + +func writeSanitized(buf *bufio.Writer, s string) error { + prevUnderscore := false + + for _, c := range s { + c = replaceInvalidRune(c) + if c == '_' { + if prevUnderscore { + continue + } + prevUnderscore = true + } else { + prevUnderscore = false + } + if _, err := buf.WriteRune(c); err != nil { + return err + } + } + + return nil +} + +func replaceInvalidRune(c rune) rune { + if c == ' ' { + return '.' + } + if !((c >= 'a' && c <= 'z') || (c >= 'A' && c <= 'Z') || c == '_' || c == ':' || (c >= '0' && c <= '9')) { + return '_' + } + return c +} diff --git a/vendor/github.com/prometheus/client_golang/prometheus/graphite/bridge_test.go b/vendor/github.com/prometheus/client_golang/prometheus/graphite/bridge_test.go new file mode 100644 index 0000000..c2b274c --- /dev/null +++ b/vendor/github.com/prometheus/client_golang/prometheus/graphite/bridge_test.go @@ -0,0 +1,309 @@ +package graphite + +import ( + "bufio" + "bytes" + "io" + "log" + "net" + "os" + "regexp" + "testing" + "time" + + "github.com/prometheus/common/model" + "golang.org/x/net/context" + + "github.com/prometheus/client_golang/prometheus" +) + +func TestSanitize(t *testing.T) { + testCases := []struct { + in, out string + }{ + {in: "hello", out: "hello"}, + {in: "hE/l1o", out: "hE_l1o"}, + {in: "he,*ll(.o", out: "he_ll_o"}, + {in: "hello_there%^&", out: "hello_there_"}, + } + + var buf bytes.Buffer + w := bufio.NewWriter(&buf) + + for i, tc := range testCases { + if err := writeSanitized(w, tc.in); err != nil { + t.Fatalf("write failed: %v", err) + } + if err := w.Flush(); err != nil { + t.Fatalf("flush failed: %v", err) + } + + if want, got := tc.out, buf.String(); want != got { + t.Fatalf("test case index %d: got sanitized string %s, want %s", i, got, want) + } + + buf.Reset() + } +} + +func TestWriteSummary(t *testing.T) { + sumVec := prometheus.NewSummaryVec( + prometheus.SummaryOpts{ + Name: "name", + Help: "docstring", + ConstLabels: prometheus.Labels{"constname": "constvalue"}, + Objectives: map[float64]float64{0.5: 0.05, 0.9: 0.01, 0.99: 0.001}, + }, + []string{"labelname"}, + ) + + sumVec.WithLabelValues("val1").Observe(float64(10)) + sumVec.WithLabelValues("val1").Observe(float64(20)) + 
sumVec.WithLabelValues("val1").Observe(float64(30)) + sumVec.WithLabelValues("val2").Observe(float64(20)) + sumVec.WithLabelValues("val2").Observe(float64(30)) + sumVec.WithLabelValues("val2").Observe(float64(40)) + + reg := prometheus.NewRegistry() + reg.MustRegister(sumVec) + + mfs, err := reg.Gather() + if err != nil { + t.Fatalf("error: %v", err) + } + + now := model.Time(1477043083) + var buf bytes.Buffer + err = writeMetrics(&buf, mfs, "prefix", now) + if err != nil { + t.Fatalf("error: %v", err) + } + + want := `prefix.name.constname.constvalue.labelname.val1.quantile.0_5 20 1477043 +prefix.name.constname.constvalue.labelname.val1.quantile.0_9 30 1477043 +prefix.name.constname.constvalue.labelname.val1.quantile.0_99 30 1477043 +prefix.name_sum.constname.constvalue.labelname.val1 60 1477043 +prefix.name_count.constname.constvalue.labelname.val1 3 1477043 +prefix.name.constname.constvalue.labelname.val2.quantile.0_5 30 1477043 +prefix.name.constname.constvalue.labelname.val2.quantile.0_9 40 1477043 +prefix.name.constname.constvalue.labelname.val2.quantile.0_99 40 1477043 +prefix.name_sum.constname.constvalue.labelname.val2 90 1477043 +prefix.name_count.constname.constvalue.labelname.val2 3 1477043 +` + + if got := buf.String(); want != got { + t.Fatalf("wanted \n%s\n, got \n%s\n", want, got) + } +} + +func TestWriteHistogram(t *testing.T) { + histVec := prometheus.NewHistogramVec( + prometheus.HistogramOpts{ + Name: "name", + Help: "docstring", + ConstLabels: prometheus.Labels{"constname": "constvalue"}, + Buckets: []float64{0.01, 0.02, 0.05, 0.1}, + }, + []string{"labelname"}, + ) + + histVec.WithLabelValues("val1").Observe(float64(10)) + histVec.WithLabelValues("val1").Observe(float64(20)) + histVec.WithLabelValues("val1").Observe(float64(30)) + histVec.WithLabelValues("val2").Observe(float64(20)) + histVec.WithLabelValues("val2").Observe(float64(30)) + histVec.WithLabelValues("val2").Observe(float64(40)) + + reg := prometheus.NewRegistry() + reg.MustRegister(histVec) + + mfs, err := reg.Gather() + if err != nil { + t.Fatalf("error: %v", err) + } + + now := model.Time(1477043083) + var buf bytes.Buffer + err = writeMetrics(&buf, mfs, "prefix", now) + if err != nil { + t.Fatalf("error: %v", err) + } + + want := `prefix.name_bucket.constname.constvalue.labelname.val1.le.0_01 0 1477043 +prefix.name_bucket.constname.constvalue.labelname.val1.le.0_02 0 1477043 +prefix.name_bucket.constname.constvalue.labelname.val1.le.0_05 0 1477043 +prefix.name_bucket.constname.constvalue.labelname.val1.le.0_1 0 1477043 +prefix.name_sum.constname.constvalue.labelname.val1 60 1477043 +prefix.name_count.constname.constvalue.labelname.val1 3 1477043 +prefix.name_bucket.constname.constvalue.labelname.val1.le._Inf 3 1477043 +prefix.name_bucket.constname.constvalue.labelname.val2.le.0_01 0 1477043 +prefix.name_bucket.constname.constvalue.labelname.val2.le.0_02 0 1477043 +prefix.name_bucket.constname.constvalue.labelname.val2.le.0_05 0 1477043 +prefix.name_bucket.constname.constvalue.labelname.val2.le.0_1 0 1477043 +prefix.name_sum.constname.constvalue.labelname.val2 90 1477043 +prefix.name_count.constname.constvalue.labelname.val2 3 1477043 +prefix.name_bucket.constname.constvalue.labelname.val2.le._Inf 3 1477043 +` + if got := buf.String(); want != got { + t.Fatalf("wanted \n%s\n, got \n%s\n", want, got) + } +} + +func TestToReader(t *testing.T) { + cntVec := prometheus.NewCounterVec( + prometheus.CounterOpts{ + Name: "name", + Help: "docstring", + ConstLabels: prometheus.Labels{"constname": "constvalue"}, + 
}, + []string{"labelname"}, + ) + cntVec.WithLabelValues("val1").Inc() + cntVec.WithLabelValues("val2").Inc() + + reg := prometheus.NewRegistry() + reg.MustRegister(cntVec) + + want := `prefix.name.constname.constvalue.labelname.val1 1 1477043 +prefix.name.constname.constvalue.labelname.val2 1 1477043 +` + mfs, err := reg.Gather() + if err != nil { + t.Fatalf("error: %v", err) + } + + now := model.Time(1477043083) + var buf bytes.Buffer + err = writeMetrics(&buf, mfs, "prefix", now) + if err != nil { + t.Fatalf("error: %v", err) + } + + if got := buf.String(); want != got { + t.Fatalf("wanted \n%s\n, got \n%s\n", want, got) + } +} + +func TestPush(t *testing.T) { + reg := prometheus.NewRegistry() + cntVec := prometheus.NewCounterVec( + prometheus.CounterOpts{ + Name: "name", + Help: "docstring", + ConstLabels: prometheus.Labels{"constname": "constvalue"}, + }, + []string{"labelname"}, + ) + cntVec.WithLabelValues("val1").Inc() + cntVec.WithLabelValues("val2").Inc() + reg.MustRegister(cntVec) + + host := "localhost" + port := ":56789" + b, err := NewBridge(&Config{ + URL: host + port, + Gatherer: reg, + Prefix: "prefix", + }) + if err != nil { + t.Fatalf("error creating bridge: %v", err) + } + + nmg, err := newMockGraphite(port) + if err != nil { + t.Fatalf("error creating mock graphite: %v", err) + } + defer nmg.Close() + + err = b.Push() + if err != nil { + t.Fatalf("error pushing: %v", err) + } + + wants := []string{ + "prefix.name.constname.constvalue.labelname.val1 1", + "prefix.name.constname.constvalue.labelname.val2 1", + } + + select { + case got := <-nmg.readc: + for _, want := range wants { + matched, err := regexp.MatchString(want, got) + if err != nil { + t.Fatalf("error pushing: %v", err) + } + if !matched { + t.Fatalf("missing metric:\nno match for %s received by server:\n%s", want, got) + } + } + return + case err := <-nmg.errc: + t.Fatalf("error reading push: %v", err) + case <-time.After(50 * time.Millisecond): + t.Fatalf("no result from graphite server") + } +} + +func newMockGraphite(port string) (*mockGraphite, error) { + readc := make(chan string) + errc := make(chan error) + ln, err := net.Listen("tcp", port) + if err != nil { + return nil, err + } + + go func() { + conn, err := ln.Accept() + if err != nil { + errc <- err + } + var b bytes.Buffer + io.Copy(&b, conn) + readc <- b.String() + }() + + return &mockGraphite{ + readc: readc, + errc: errc, + Listener: ln, + }, nil +} + +type mockGraphite struct { + readc chan string + errc chan error + + net.Listener +} + +func ExampleBridge() { + b, err := NewBridge(&Config{ + URL: "graphite.example.org:3099", + Gatherer: prometheus.DefaultGatherer, + Prefix: "prefix", + Interval: 15 * time.Second, + Timeout: 10 * time.Second, + ErrorHandling: AbortOnError, + Logger: log.New(os.Stdout, "graphite bridge: ", log.Lshortfile), + }) + if err != nil { + panic(err) + } + + go func() { + // Start something in a goroutine that uses metrics. + }() + + // Push initial metrics to Graphite. Fail fast if the push fails. + if err := b.Push(); err != nil { + panic(err) + } + + // Create a Context to control stopping the Run() loop that pushes + // metrics to Graphite. + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + // Start pushing metrics to Graphite in the Run() loop. 
+ b.Run(ctx) +} diff --git a/vendor/github.com/prometheus/client_golang/prometheus/histogram.go b/vendor/github.com/prometheus/client_golang/prometheus/histogram.go new file mode 100644 index 0000000..6cc6e68 --- /dev/null +++ b/vendor/github.com/prometheus/client_golang/prometheus/histogram.go @@ -0,0 +1,473 @@ +// Copyright 2015 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package prometheus + +import ( + "fmt" + "math" + "sort" + "sync/atomic" + + "github.com/golang/protobuf/proto" + + dto "github.com/prometheus/client_model/go" +) + +// A Histogram counts individual observations from an event or sample stream in +// configurable buckets. Similar to a summary, it also provides a sum of +// observations and an observation count. +// +// On the Prometheus server, quantiles can be calculated from a Histogram using +// the histogram_quantile function in the query language. +// +// Note that Histograms, in contrast to Summaries, can be aggregated with the +// Prometheus query language (see the documentation for detailed +// procedures). However, Histograms require the user to pre-define suitable +// buckets, and they are in general less accurate. The Observe method of a +// Histogram has a very low performance overhead in comparison with the Observe +// method of a Summary. +// +// To create Histogram instances, use NewHistogram. +type Histogram interface { + Metric + Collector + + // Observe adds a single observation to the histogram. + Observe(float64) +} + +// bucketLabel is used for the label that defines the upper bound of a +// bucket of a histogram ("le" -> "less or equal"). +const bucketLabel = "le" + +// DefBuckets are the default Histogram buckets. The default buckets are +// tailored to broadly measure the response time (in seconds) of a network +// service. Most likely, however, you will be required to define buckets +// customized to your use case. +var ( + DefBuckets = []float64{.005, .01, .025, .05, .1, .25, .5, 1, 2.5, 5, 10} + + errBucketLabelNotAllowed = fmt.Errorf( + "%q is not allowed as label name in histograms", bucketLabel, + ) +) + +// LinearBuckets creates 'count' buckets, each 'width' wide, where the lowest +// bucket has an upper bound of 'start'. The final +Inf bucket is not counted +// and not included in the returned slice. The returned slice is meant to be +// used for the Buckets field of HistogramOpts. +// +// The function panics if 'count' is zero or negative. +func LinearBuckets(start, width float64, count int) []float64 { + if count < 1 { + panic("LinearBuckets needs a positive count") + } + buckets := make([]float64, count) + for i := range buckets { + buckets[i] = start + start += width + } + return buckets +} + +// ExponentialBuckets creates 'count' buckets, where the lowest bucket has an +// upper bound of 'start' and each following bucket's upper bound is 'factor' +// times the previous bucket's upper bound. The final +Inf bucket is not counted +// and not included in the returned slice. 
The returned slice is meant to be +// used for the Buckets field of HistogramOpts. +// +// The function panics if 'count' is 0 or negative, if 'start' is 0 or negative, +// or if 'factor' is less than or equal 1. +func ExponentialBuckets(start, factor float64, count int) []float64 { + if count < 1 { + panic("ExponentialBuckets needs a positive count") + } + if start <= 0 { + panic("ExponentialBuckets needs a positive start value") + } + if factor <= 1 { + panic("ExponentialBuckets needs a factor greater than 1") + } + buckets := make([]float64, count) + for i := range buckets { + buckets[i] = start + start *= factor + } + return buckets +} + +// HistogramOpts bundles the options for creating a Histogram metric. It is +// mandatory to set Name and Help to a non-empty string. All other fields are +// optional and can safely be left at their zero value. +type HistogramOpts struct { + // Namespace, Subsystem, and Name are components of the fully-qualified + // name of the Histogram (created by joining these components with + // "_"). Only Name is mandatory, the others merely help structuring the + // name. Note that the fully-qualified name of the Histogram must be a + // valid Prometheus metric name. + Namespace string + Subsystem string + Name string + + // Help provides information about this Histogram. Mandatory! + // + // Metrics with the same fully-qualified name must have the same Help + // string. + Help string + + // ConstLabels are used to attach fixed labels to this + // Histogram. Histograms with the same fully-qualified name must have the + // same label names in their ConstLabels. + // + // Note that in most cases, labels have a value that varies during the + // lifetime of a process. Those labels are usually managed with a + // HistogramVec. ConstLabels serve only special purposes. One is for the + // special case where the value of a label does not change during the + // lifetime of a process, e.g. if the revision of the running binary is + // put into a label. Another, more advanced purpose is if more than one + // Collector needs to collect Histograms with the same fully-qualified + // name. In that case, those Summaries must differ in the values of + // their ConstLabels. See the Collector examples. + // + // If the value of a label never changes (not even between binaries), + // that label most likely should not be a label at all (but part of the + // metric name). + ConstLabels Labels + + // Buckets defines the buckets into which observations are counted. Each + // element in the slice is the upper inclusive bound of a bucket. The + // values must be sorted in strictly increasing order. There is no need + // to add a highest bucket with +Inf bound, it will be added + // implicitly. The default value is DefBuckets. + Buckets []float64 +} + +// NewHistogram creates a new Histogram based on the provided HistogramOpts. It +// panics if the buckets in HistogramOpts are not in strictly increasing order. 
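+//
+// A minimal usage sketch (the metric name, help string, and bucket layout
+// below are illustrative assumptions, not taken from this package):
+//
+//    reqDur := NewHistogram(HistogramOpts{
+//        Name:    "http_request_duration_seconds",
+//        Help:    "A histogram of request latencies in seconds.",
+//        Buckets: LinearBuckets(0.05, 0.05, 10), // 0.05s, 0.10s, ..., 0.50s; +Inf is added implicitly
+//    })
+//    reqDur.Observe(0.42) // record one observation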
+func NewHistogram(opts HistogramOpts) Histogram { + return newHistogram( + NewDesc( + BuildFQName(opts.Namespace, opts.Subsystem, opts.Name), + opts.Help, + nil, + opts.ConstLabels, + ), + opts, + ) +} + +func newHistogram(desc *Desc, opts HistogramOpts, labelValues ...string) Histogram { + if len(desc.variableLabels) != len(labelValues) { + panic(errInconsistentCardinality) + } + + for _, n := range desc.variableLabels { + if n == bucketLabel { + panic(errBucketLabelNotAllowed) + } + } + for _, lp := range desc.constLabelPairs { + if lp.GetName() == bucketLabel { + panic(errBucketLabelNotAllowed) + } + } + + if len(opts.Buckets) == 0 { + opts.Buckets = DefBuckets + } + + h := &histogram{ + desc: desc, + upperBounds: opts.Buckets, + labelPairs: makeLabelPairs(desc, labelValues), + } + for i, upperBound := range h.upperBounds { + if i < len(h.upperBounds)-1 { + if upperBound >= h.upperBounds[i+1] { + panic(fmt.Errorf( + "histogram buckets must be in increasing order: %f >= %f", + upperBound, h.upperBounds[i+1], + )) + } + } else { + if math.IsInf(upperBound, +1) { + // The +Inf bucket is implicit. Remove it here. + h.upperBounds = h.upperBounds[:i] + } + } + } + // Finally we know the final length of h.upperBounds and can make counts. + h.counts = make([]uint64, len(h.upperBounds)) + + h.init(h) // Init self-collection. + return h +} + +type histogram struct { + // sumBits contains the bits of the float64 representing the sum of all + // observations. sumBits and count have to go first in the struct to + // guarantee alignment for atomic operations. + // http://golang.org/pkg/sync/atomic/#pkg-note-BUG + sumBits uint64 + count uint64 + + selfCollector + // Note that there is no mutex required. + + desc *Desc + + upperBounds []float64 + counts []uint64 + + labelPairs []*dto.LabelPair +} + +func (h *histogram) Desc() *Desc { + return h.desc +} + +func (h *histogram) Observe(v float64) { + // TODO(beorn7): For small numbers of buckets (<30), a linear search is + // slightly faster than the binary search. If we really care, we could + // switch from one search strategy to the other depending on the number + // of buckets. + // + // Microbenchmarks (BenchmarkHistogramNoLabels): + // 11 buckets: 38.3 ns/op linear - binary 48.7 ns/op + // 100 buckets: 78.1 ns/op linear - binary 54.9 ns/op + // 300 buckets: 154 ns/op linear - binary 61.6 ns/op + i := sort.SearchFloat64s(h.upperBounds, v) + if i < len(h.counts) { + atomic.AddUint64(&h.counts[i], 1) + } + atomic.AddUint64(&h.count, 1) + for { + oldBits := atomic.LoadUint64(&h.sumBits) + newBits := math.Float64bits(math.Float64frombits(oldBits) + v) + if atomic.CompareAndSwapUint64(&h.sumBits, oldBits, newBits) { + break + } + } +} + +func (h *histogram) Write(out *dto.Metric) error { + his := &dto.Histogram{} + buckets := make([]*dto.Bucket, len(h.upperBounds)) + + his.SampleSum = proto.Float64(math.Float64frombits(atomic.LoadUint64(&h.sumBits))) + his.SampleCount = proto.Uint64(atomic.LoadUint64(&h.count)) + var count uint64 + for i, upperBound := range h.upperBounds { + count += atomic.LoadUint64(&h.counts[i]) + buckets[i] = &dto.Bucket{ + CumulativeCount: proto.Uint64(count), + UpperBound: proto.Float64(upperBound), + } + } + his.Bucket = buckets + out.Histogram = his + out.Label = h.labelPairs + return nil +} + +// HistogramVec is a Collector that bundles a set of Histograms that all share the +// same Desc, but have different values for their variable labels. 
This is used +// if you want to count the same thing partitioned by various dimensions +// (e.g. HTTP request latencies, partitioned by status code and method). Create +// instances with NewHistogramVec. +type HistogramVec struct { + *metricVec +} + +// NewHistogramVec creates a new HistogramVec based on the provided HistogramOpts and +// partitioned by the given label names. +func NewHistogramVec(opts HistogramOpts, labelNames []string) *HistogramVec { + desc := NewDesc( + BuildFQName(opts.Namespace, opts.Subsystem, opts.Name), + opts.Help, + labelNames, + opts.ConstLabels, + ) + return &HistogramVec{ + metricVec: newMetricVec(desc, func(lvs ...string) Metric { + return newHistogram(desc, opts, lvs...) + }), + } +} + +// GetMetricWithLabelValues returns the Histogram for the given slice of label +// values (same order as the VariableLabels in Desc). If that combination of +// label values is accessed for the first time, a new Histogram is created. +// +// It is possible to call this method without using the returned Histogram to only +// create the new Histogram but leave it at its starting value, a Histogram without +// any observations. +// +// Keeping the Histogram for later use is possible (and should be considered if +// performance is critical), but keep in mind that Reset, DeleteLabelValues and +// Delete can be used to delete the Histogram from the HistogramVec. In that case, the +// Histogram will still exist, but it will not be exported anymore, even if a +// Histogram with the same label values is created later. See also the CounterVec +// example. +// +// An error is returned if the number of label values is not the same as the +// number of VariableLabels in Desc. +// +// Note that for more than one label value, this method is prone to mistakes +// caused by an incorrect order of arguments. Consider GetMetricWith(Labels) as +// an alternative to avoid that type of mistake. For higher label numbers, the +// latter has a much more readable (albeit more verbose) syntax, but it comes +// with a performance overhead (for creating and processing the Labels map). +// See also the GaugeVec example. +func (m *HistogramVec) GetMetricWithLabelValues(lvs ...string) (Observer, error) { + metric, err := m.metricVec.getMetricWithLabelValues(lvs...) + if metric != nil { + return metric.(Observer), err + } + return nil, err +} + +// GetMetricWith returns the Histogram for the given Labels map (the label names +// must match those of the VariableLabels in Desc). If that label map is +// accessed for the first time, a new Histogram is created. Implications of +// creating a Histogram without using it and keeping the Histogram for later use +// are the same as for GetMetricWithLabelValues. +// +// An error is returned if the number and names of the Labels are inconsistent +// with those of the VariableLabels in Desc. +// +// This method is used for the same purpose as +// GetMetricWithLabelValues(...string). See there for pros and cons of the two +// methods. +func (m *HistogramVec) GetMetricWith(labels Labels) (Observer, error) { + metric, err := m.metricVec.getMetricWith(labels) + if metric != nil { + return metric.(Observer), err + } + return nil, err +} + +// WithLabelValues works as GetMetricWithLabelValues, but panics where +// GetMetricWithLabelValues would have returned an error. 
By not returning an +// error, WithLabelValues allows shortcuts like +// myVec.WithLabelValues("404", "GET").Observe(42.21) +func (m *HistogramVec) WithLabelValues(lvs ...string) Observer { + return m.metricVec.withLabelValues(lvs...).(Observer) +} + +// With works as GetMetricWith, but panics where GetMetricWithLabels would have +// returned an error. By not returning an error, With allows shortcuts like +// myVec.With(Labels{"code": "404", "method": "GET"}).Observe(42.21) +func (m *HistogramVec) With(labels Labels) Observer { + return m.metricVec.with(labels).(Observer) +} + +type constHistogram struct { + desc *Desc + count uint64 + sum float64 + buckets map[float64]uint64 + labelPairs []*dto.LabelPair +} + +func (h *constHistogram) Desc() *Desc { + return h.desc +} + +func (h *constHistogram) Write(out *dto.Metric) error { + his := &dto.Histogram{} + buckets := make([]*dto.Bucket, 0, len(h.buckets)) + + his.SampleCount = proto.Uint64(h.count) + his.SampleSum = proto.Float64(h.sum) + + for upperBound, count := range h.buckets { + buckets = append(buckets, &dto.Bucket{ + CumulativeCount: proto.Uint64(count), + UpperBound: proto.Float64(upperBound), + }) + } + + if len(buckets) > 0 { + sort.Sort(buckSort(buckets)) + } + his.Bucket = buckets + + out.Histogram = his + out.Label = h.labelPairs + + return nil +} + +// NewConstHistogram returns a metric representing a Prometheus histogram with +// fixed values for the count, sum, and bucket counts. As those parameters +// cannot be changed, the returned value does not implement the Histogram +// interface (but only the Metric interface). Users of this package will not +// have much use for it in regular operations. However, when implementing custom +// Collectors, it is useful as a throw-away metric that is generated on the fly +// to send it to Prometheus in the Collect method. +// +// buckets is a map of upper bounds to cumulative counts, excluding the +Inf +// bucket. +// +// NewConstHistogram returns an error if the length of labelValues is not +// consistent with the variable labels in Desc. +func NewConstHistogram( + desc *Desc, + count uint64, + sum float64, + buckets map[float64]uint64, + labelValues ...string, +) (Metric, error) { + if err := validateLabelValues(labelValues, len(desc.variableLabels)); err != nil { + return nil, err + } + return &constHistogram{ + desc: desc, + count: count, + sum: sum, + buckets: buckets, + labelPairs: makeLabelPairs(desc, labelValues), + }, nil +} + +// MustNewConstHistogram is a version of NewConstHistogram that panics where +// NewConstMetric would have returned an error. +func MustNewConstHistogram( + desc *Desc, + count uint64, + sum float64, + buckets map[float64]uint64, + labelValues ...string, +) Metric { + m, err := NewConstHistogram(desc, count, sum, buckets, labelValues...) 
+ if err != nil { + panic(err) + } + return m +} + +type buckSort []*dto.Bucket + +func (s buckSort) Len() int { + return len(s) +} + +func (s buckSort) Swap(i, j int) { + s[i], s[j] = s[j], s[i] +} + +func (s buckSort) Less(i, j int) bool { + return s[i].GetUpperBound() < s[j].GetUpperBound() +} diff --git a/vendor/github.com/prometheus/client_golang/prometheus/histogram_test.go b/vendor/github.com/prometheus/client_golang/prometheus/histogram_test.go new file mode 100644 index 0000000..5a20f4b --- /dev/null +++ b/vendor/github.com/prometheus/client_golang/prometheus/histogram_test.go @@ -0,0 +1,348 @@ +// Copyright 2015 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package prometheus + +import ( + "math" + "math/rand" + "reflect" + "sort" + "sync" + "testing" + "testing/quick" + + dto "github.com/prometheus/client_model/go" +) + +func benchmarkHistogramObserve(w int, b *testing.B) { + b.StopTimer() + + wg := new(sync.WaitGroup) + wg.Add(w) + + g := new(sync.WaitGroup) + g.Add(1) + + s := NewHistogram(HistogramOpts{}) + + for i := 0; i < w; i++ { + go func() { + g.Wait() + + for i := 0; i < b.N; i++ { + s.Observe(float64(i)) + } + + wg.Done() + }() + } + + b.StartTimer() + g.Done() + wg.Wait() +} + +func BenchmarkHistogramObserve1(b *testing.B) { + benchmarkHistogramObserve(1, b) +} + +func BenchmarkHistogramObserve2(b *testing.B) { + benchmarkHistogramObserve(2, b) +} + +func BenchmarkHistogramObserve4(b *testing.B) { + benchmarkHistogramObserve(4, b) +} + +func BenchmarkHistogramObserve8(b *testing.B) { + benchmarkHistogramObserve(8, b) +} + +func benchmarkHistogramWrite(w int, b *testing.B) { + b.StopTimer() + + wg := new(sync.WaitGroup) + wg.Add(w) + + g := new(sync.WaitGroup) + g.Add(1) + + s := NewHistogram(HistogramOpts{}) + + for i := 0; i < 1000000; i++ { + s.Observe(float64(i)) + } + + for j := 0; j < w; j++ { + outs := make([]dto.Metric, b.N) + + go func(o []dto.Metric) { + g.Wait() + + for i := 0; i < b.N; i++ { + s.Write(&o[i]) + } + + wg.Done() + }(outs) + } + + b.StartTimer() + g.Done() + wg.Wait() +} + +func BenchmarkHistogramWrite1(b *testing.B) { + benchmarkHistogramWrite(1, b) +} + +func BenchmarkHistogramWrite2(b *testing.B) { + benchmarkHistogramWrite(2, b) +} + +func BenchmarkHistogramWrite4(b *testing.B) { + benchmarkHistogramWrite(4, b) +} + +func BenchmarkHistogramWrite8(b *testing.B) { + benchmarkHistogramWrite(8, b) +} + +func TestHistogramNonMonotonicBuckets(t *testing.T) { + testCases := map[string][]float64{ + "not strictly monotonic": {1, 2, 2, 3}, + "not monotonic at all": {1, 2, 4, 3, 5}, + "have +Inf in the middle": {1, 2, math.Inf(+1), 3}, + } + for name, buckets := range testCases { + func() { + defer func() { + if r := recover(); r == nil { + t.Errorf("Buckets %v are %s but NewHistogram did not panic.", buckets, name) + } + }() + _ = NewHistogram(HistogramOpts{ + Name: "test_histogram", + Help: "helpless", + Buckets: buckets, + }) + }() + } +} + +// Intentionally adding +Inf here to test if that case is handled 
correctly. +// Also, getCumulativeCounts depends on it. +var testBuckets = []float64{-2, -1, -0.5, 0, 0.5, 1, 2, math.Inf(+1)} + +func TestHistogramConcurrency(t *testing.T) { + if testing.Short() { + t.Skip("Skipping test in short mode.") + } + + rand.Seed(42) + + it := func(n uint32) bool { + mutations := int(n%1e4 + 1e4) + concLevel := int(n%5 + 1) + total := mutations * concLevel + + var start, end sync.WaitGroup + start.Add(1) + end.Add(concLevel) + + sum := NewHistogram(HistogramOpts{ + Name: "test_histogram", + Help: "helpless", + Buckets: testBuckets, + }) + + allVars := make([]float64, total) + var sampleSum float64 + for i := 0; i < concLevel; i++ { + vals := make([]float64, mutations) + for j := 0; j < mutations; j++ { + v := rand.NormFloat64() + vals[j] = v + allVars[i*mutations+j] = v + sampleSum += v + } + + go func(vals []float64) { + start.Wait() + for _, v := range vals { + sum.Observe(v) + } + end.Done() + }(vals) + } + sort.Float64s(allVars) + start.Done() + end.Wait() + + m := &dto.Metric{} + sum.Write(m) + if got, want := int(*m.Histogram.SampleCount), total; got != want { + t.Errorf("got sample count %d, want %d", got, want) + } + if got, want := *m.Histogram.SampleSum, sampleSum; math.Abs((got-want)/want) > 0.001 { + t.Errorf("got sample sum %f, want %f", got, want) + } + + wantCounts := getCumulativeCounts(allVars) + + if got, want := len(m.Histogram.Bucket), len(testBuckets)-1; got != want { + t.Errorf("got %d buckets in protobuf, want %d", got, want) + } + for i, wantBound := range testBuckets { + if i == len(testBuckets)-1 { + break // No +Inf bucket in protobuf. + } + if gotBound := *m.Histogram.Bucket[i].UpperBound; gotBound != wantBound { + t.Errorf("got bound %f, want %f", gotBound, wantBound) + } + if gotCount, wantCount := *m.Histogram.Bucket[i].CumulativeCount, wantCounts[i]; gotCount != wantCount { + t.Errorf("got count %d, want %d", gotCount, wantCount) + } + } + return true + } + + if err := quick.Check(it, nil); err != nil { + t.Error(err) + } +} + +func TestHistogramVecConcurrency(t *testing.T) { + if testing.Short() { + t.Skip("Skipping test in short mode.") + } + + rand.Seed(42) + + objectives := make([]float64, 0, len(DefObjectives)) + for qu := range DefObjectives { + + objectives = append(objectives, qu) + } + sort.Float64s(objectives) + + it := func(n uint32) bool { + mutations := int(n%1e4 + 1e4) + concLevel := int(n%7 + 1) + vecLength := int(n%3 + 1) + + var start, end sync.WaitGroup + start.Add(1) + end.Add(concLevel) + + his := NewHistogramVec( + HistogramOpts{ + Name: "test_histogram", + Help: "helpless", + Buckets: []float64{-2, -1, -0.5, 0, 0.5, 1, 2, math.Inf(+1)}, + }, + []string{"label"}, + ) + + allVars := make([][]float64, vecLength) + sampleSums := make([]float64, vecLength) + for i := 0; i < concLevel; i++ { + vals := make([]float64, mutations) + picks := make([]int, mutations) + for j := 0; j < mutations; j++ { + v := rand.NormFloat64() + vals[j] = v + pick := rand.Intn(vecLength) + picks[j] = pick + allVars[pick] = append(allVars[pick], v) + sampleSums[pick] += v + } + + go func(vals []float64) { + start.Wait() + for i, v := range vals { + his.WithLabelValues(string('A' + picks[i])).Observe(v) + } + end.Done() + }(vals) + } + for _, vars := range allVars { + sort.Float64s(vars) + } + start.Done() + end.Wait() + + for i := 0; i < vecLength; i++ { + m := &dto.Metric{} + s := his.WithLabelValues(string('A' + i)) + s.(Histogram).Write(m) + + if got, want := len(m.Histogram.Bucket), len(testBuckets)-1; got != want { + t.Errorf("got %d 
buckets in protobuf, want %d", got, want) + } + if got, want := int(*m.Histogram.SampleCount), len(allVars[i]); got != want { + t.Errorf("got sample count %d, want %d", got, want) + } + if got, want := *m.Histogram.SampleSum, sampleSums[i]; math.Abs((got-want)/want) > 0.001 { + t.Errorf("got sample sum %f, want %f", got, want) + } + + wantCounts := getCumulativeCounts(allVars[i]) + + for j, wantBound := range testBuckets { + if j == len(testBuckets)-1 { + break // No +Inf bucket in protobuf. + } + if gotBound := *m.Histogram.Bucket[j].UpperBound; gotBound != wantBound { + t.Errorf("got bound %f, want %f", gotBound, wantBound) + } + if gotCount, wantCount := *m.Histogram.Bucket[j].CumulativeCount, wantCounts[j]; gotCount != wantCount { + t.Errorf("got count %d, want %d", gotCount, wantCount) + } + } + } + return true + } + + if err := quick.Check(it, nil); err != nil { + t.Error(err) + } +} + +func getCumulativeCounts(vars []float64) []uint64 { + counts := make([]uint64, len(testBuckets)) + for _, v := range vars { + for i := len(testBuckets) - 1; i >= 0; i-- { + if v > testBuckets[i] { + break + } + counts[i]++ + } + } + return counts +} + +func TestBuckets(t *testing.T) { + got := LinearBuckets(-15, 5, 6) + want := []float64{-15, -10, -5, 0, 5, 10} + if !reflect.DeepEqual(got, want) { + t.Errorf("linear buckets: got %v, want %v", got, want) + } + + got = ExponentialBuckets(100, 1.2, 3) + want = []float64{100, 120, 144} + if !reflect.DeepEqual(got, want) { + t.Errorf("linear buckets: got %v, want %v", got, want) + } +} diff --git a/vendor/github.com/prometheus/client_golang/prometheus/http.go b/vendor/github.com/prometheus/client_golang/prometheus/http.go new file mode 100644 index 0000000..bfee5c6 --- /dev/null +++ b/vendor/github.com/prometheus/client_golang/prometheus/http.go @@ -0,0 +1,524 @@ +// Copyright 2014 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package prometheus + +import ( + "bufio" + "bytes" + "compress/gzip" + "fmt" + "io" + "net" + "net/http" + "strconv" + "strings" + "sync" + "time" + + "github.com/prometheus/common/expfmt" +) + +// TODO(beorn7): Remove this whole file. It is a partial mirror of +// promhttp/http.go (to avoid circular import chains) where everything HTTP +// related should live. The functions here are just for avoiding +// breakage. Everything is deprecated. + +const ( + contentTypeHeader = "Content-Type" + contentLengthHeader = "Content-Length" + contentEncodingHeader = "Content-Encoding" + acceptEncodingHeader = "Accept-Encoding" +) + +var bufPool sync.Pool + +func getBuf() *bytes.Buffer { + buf := bufPool.Get() + if buf == nil { + return &bytes.Buffer{} + } + return buf.(*bytes.Buffer) +} + +func giveBuf(buf *bytes.Buffer) { + buf.Reset() + bufPool.Put(buf) +} + +// Handler returns an HTTP handler for the DefaultGatherer. It is +// already instrumented with InstrumentHandler (using "prometheus" as handler +// name). +// +// Deprecated: Please note the issues described in the doc comment of +// InstrumentHandler. 
You might want to consider using promhttp.Handler instead +// (which is not instrumented, but can be instrumented with the tooling provided +// in package promhttp). +func Handler() http.Handler { + return InstrumentHandler("prometheus", UninstrumentedHandler()) +} + +// UninstrumentedHandler returns an HTTP handler for the DefaultGatherer. +// +// Deprecated: Use promhttp.Handler instead. See there for further documentation. +func UninstrumentedHandler() http.Handler { + return http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) { + mfs, err := DefaultGatherer.Gather() + if err != nil { + http.Error(w, "An error has occurred during metrics collection:\n\n"+err.Error(), http.StatusInternalServerError) + return + } + + contentType := expfmt.Negotiate(req.Header) + buf := getBuf() + defer giveBuf(buf) + writer, encoding := decorateWriter(req, buf) + enc := expfmt.NewEncoder(writer, contentType) + var lastErr error + for _, mf := range mfs { + if err := enc.Encode(mf); err != nil { + lastErr = err + http.Error(w, "An error has occurred during metrics encoding:\n\n"+err.Error(), http.StatusInternalServerError) + return + } + } + if closer, ok := writer.(io.Closer); ok { + closer.Close() + } + if lastErr != nil && buf.Len() == 0 { + http.Error(w, "No metrics encoded, last error:\n\n"+lastErr.Error(), http.StatusInternalServerError) + return + } + header := w.Header() + header.Set(contentTypeHeader, string(contentType)) + header.Set(contentLengthHeader, fmt.Sprint(buf.Len())) + if encoding != "" { + header.Set(contentEncodingHeader, encoding) + } + w.Write(buf.Bytes()) + }) +} + +// decorateWriter wraps a writer to handle gzip compression if requested. It +// returns the decorated writer and the appropriate "Content-Encoding" header +// (which is empty if no compression is enabled). +func decorateWriter(request *http.Request, writer io.Writer) (io.Writer, string) { + header := request.Header.Get(acceptEncodingHeader) + parts := strings.Split(header, ",") + for _, part := range parts { + part := strings.TrimSpace(part) + if part == "gzip" || strings.HasPrefix(part, "gzip;") { + return gzip.NewWriter(writer), "gzip" + } + } + return writer, "" +} + +var instLabels = []string{"method", "code"} + +type nower interface { + Now() time.Time +} + +type nowFunc func() time.Time + +func (n nowFunc) Now() time.Time { + return n() +} + +var now nower = nowFunc(func() time.Time { + return time.Now() +}) + +func nowSeries(t ...time.Time) nower { + return nowFunc(func() time.Time { + defer func() { + t = t[1:] + }() + + return t[0] + }) +} + +// InstrumentHandler wraps the given HTTP handler for instrumentation. It +// registers four metric collectors (if not already done) and reports HTTP +// metrics to the (newly or already) registered collectors: http_requests_total +// (CounterVec), http_request_duration_microseconds (Summary), +// http_request_size_bytes (Summary), http_response_size_bytes (Summary). Each +// has a constant label named "handler" with the provided handlerName as +// value. http_requests_total is a metric vector partitioned by HTTP method +// (label name "method") and HTTP status code (label name "code"). +// +// Deprecated: InstrumentHandler has several issues. Use the tooling provided in +// package promhttp instead. The issues are the following: +// +// - It uses Summaries rather than Histograms. Summaries are not useful if +// aggregation across multiple instances is required. +// +// - It uses microseconds as unit, which is deprecated and should be replaced by +// seconds. 
+// +// - The size of the request is calculated in a separate goroutine. Since this +// calculator requires access to the request header, it creates a race with +// any writes to the header performed during request handling. +// httputil.ReverseProxy is a prominent example for a handler +// performing such writes. +// +// - It has additional issues with HTTP/2, cf. +// https://github.com/prometheus/client_golang/issues/272. +func InstrumentHandler(handlerName string, handler http.Handler) http.HandlerFunc { + return InstrumentHandlerFunc(handlerName, handler.ServeHTTP) +} + +// InstrumentHandlerFunc wraps the given function for instrumentation. It +// otherwise works in the same way as InstrumentHandler (and shares the same +// issues). +// +// Deprecated: InstrumentHandlerFunc is deprecated for the same reasons as +// InstrumentHandler is. Use the tooling provided in package promhttp instead. +func InstrumentHandlerFunc(handlerName string, handlerFunc func(http.ResponseWriter, *http.Request)) http.HandlerFunc { + return InstrumentHandlerFuncWithOpts( + SummaryOpts{ + Subsystem: "http", + ConstLabels: Labels{"handler": handlerName}, + Objectives: map[float64]float64{0.5: 0.05, 0.9: 0.01, 0.99: 0.001}, + }, + handlerFunc, + ) +} + +// InstrumentHandlerWithOpts works like InstrumentHandler (and shares the same +// issues) but provides more flexibility (at the cost of a more complex call +// syntax). As InstrumentHandler, this function registers four metric +// collectors, but it uses the provided SummaryOpts to create them. However, the +// fields "Name" and "Help" in the SummaryOpts are ignored. "Name" is replaced +// by "requests_total", "request_duration_microseconds", "request_size_bytes", +// and "response_size_bytes", respectively. "Help" is replaced by an appropriate +// help string. The names of the variable labels of the http_requests_total +// CounterVec are "method" (get, post, etc.), and "code" (HTTP status code). +// +// If InstrumentHandlerWithOpts is called as follows, it mimics exactly the +// behavior of InstrumentHandler: +// +// prometheus.InstrumentHandlerWithOpts( +// prometheus.SummaryOpts{ +// Subsystem: "http", +// ConstLabels: prometheus.Labels{"handler": handlerName}, +// }, +// handler, +// ) +// +// Technical detail: "requests_total" is a CounterVec, not a SummaryVec, so it +// cannot use SummaryOpts. Instead, a CounterOpts struct is created internally, +// and all its fields are set to the equally named fields in the provided +// SummaryOpts. +// +// Deprecated: InstrumentHandlerWithOpts is deprecated for the same reasons as +// InstrumentHandler is. Use the tooling provided in package promhttp instead. +func InstrumentHandlerWithOpts(opts SummaryOpts, handler http.Handler) http.HandlerFunc { + return InstrumentHandlerFuncWithOpts(opts, handler.ServeHTTP) +} + +// InstrumentHandlerFuncWithOpts works like InstrumentHandlerFunc (and shares +// the same issues) but provides more flexibility (at the cost of a more complex +// call syntax). See InstrumentHandlerWithOpts for details how the provided +// SummaryOpts are used. +// +// Deprecated: InstrumentHandlerFuncWithOpts is deprecated for the same reasons +// as InstrumentHandler is. Use the tooling provided in package promhttp instead. 
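+//
+// A hedged usage sketch (the handler name, path, and options below are
+// illustrative assumptions only; prefer the promhttp tooling in new code):
+//
+//    http.Handle("/api", InstrumentHandlerFuncWithOpts(
+//        SummaryOpts{
+//            Subsystem:   "http",
+//            ConstLabels: Labels{"handler": "api"},
+//        },
+//        func(w http.ResponseWriter, r *http.Request) {
+//            w.Write([]byte("ok"))
+//        },
+//    ))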
+func InstrumentHandlerFuncWithOpts(opts SummaryOpts, handlerFunc func(http.ResponseWriter, *http.Request)) http.HandlerFunc { + reqCnt := NewCounterVec( + CounterOpts{ + Namespace: opts.Namespace, + Subsystem: opts.Subsystem, + Name: "requests_total", + Help: "Total number of HTTP requests made.", + ConstLabels: opts.ConstLabels, + }, + instLabels, + ) + if err := Register(reqCnt); err != nil { + if are, ok := err.(AlreadyRegisteredError); ok { + reqCnt = are.ExistingCollector.(*CounterVec) + } else { + panic(err) + } + } + + opts.Name = "request_duration_microseconds" + opts.Help = "The HTTP request latencies in microseconds." + reqDur := NewSummary(opts) + if err := Register(reqDur); err != nil { + if are, ok := err.(AlreadyRegisteredError); ok { + reqDur = are.ExistingCollector.(Summary) + } else { + panic(err) + } + } + + opts.Name = "request_size_bytes" + opts.Help = "The HTTP request sizes in bytes." + reqSz := NewSummary(opts) + if err := Register(reqSz); err != nil { + if are, ok := err.(AlreadyRegisteredError); ok { + reqSz = are.ExistingCollector.(Summary) + } else { + panic(err) + } + } + + opts.Name = "response_size_bytes" + opts.Help = "The HTTP response sizes in bytes." + resSz := NewSummary(opts) + if err := Register(resSz); err != nil { + if are, ok := err.(AlreadyRegisteredError); ok { + resSz = are.ExistingCollector.(Summary) + } else { + panic(err) + } + } + + return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + now := time.Now() + + delegate := &responseWriterDelegator{ResponseWriter: w} + out := computeApproximateRequestSize(r) + + _, cn := w.(http.CloseNotifier) + _, fl := w.(http.Flusher) + _, hj := w.(http.Hijacker) + _, rf := w.(io.ReaderFrom) + var rw http.ResponseWriter + if cn && fl && hj && rf { + rw = &fancyResponseWriterDelegator{delegate} + } else { + rw = delegate + } + handlerFunc(rw, r) + + elapsed := float64(time.Since(now)) / float64(time.Microsecond) + + method := sanitizeMethod(r.Method) + code := sanitizeCode(delegate.status) + reqCnt.WithLabelValues(method, code).Inc() + reqDur.Observe(elapsed) + resSz.Observe(float64(delegate.written)) + reqSz.Observe(float64(<-out)) + }) +} + +func computeApproximateRequestSize(r *http.Request) <-chan int { + // Get URL length in current go routine for avoiding a race condition. + // HandlerFunc that runs in parallel may modify the URL. + s := 0 + if r.URL != nil { + s += len(r.URL.String()) + } + + out := make(chan int, 1) + + go func() { + s += len(r.Method) + s += len(r.Proto) + for name, values := range r.Header { + s += len(name) + for _, value := range values { + s += len(value) + } + } + s += len(r.Host) + + // N.B. r.Form and r.MultipartForm are assumed to be included in r.URL. 
+ + if r.ContentLength != -1 { + s += int(r.ContentLength) + } + out <- s + close(out) + }() + + return out +} + +type responseWriterDelegator struct { + http.ResponseWriter + + handler, method string + status int + written int64 + wroteHeader bool +} + +func (r *responseWriterDelegator) WriteHeader(code int) { + r.status = code + r.wroteHeader = true + r.ResponseWriter.WriteHeader(code) +} + +func (r *responseWriterDelegator) Write(b []byte) (int, error) { + if !r.wroteHeader { + r.WriteHeader(http.StatusOK) + } + n, err := r.ResponseWriter.Write(b) + r.written += int64(n) + return n, err +} + +type fancyResponseWriterDelegator struct { + *responseWriterDelegator +} + +func (f *fancyResponseWriterDelegator) CloseNotify() <-chan bool { + return f.ResponseWriter.(http.CloseNotifier).CloseNotify() +} + +func (f *fancyResponseWriterDelegator) Flush() { + f.ResponseWriter.(http.Flusher).Flush() +} + +func (f *fancyResponseWriterDelegator) Hijack() (net.Conn, *bufio.ReadWriter, error) { + return f.ResponseWriter.(http.Hijacker).Hijack() +} + +func (f *fancyResponseWriterDelegator) ReadFrom(r io.Reader) (int64, error) { + if !f.wroteHeader { + f.WriteHeader(http.StatusOK) + } + n, err := f.ResponseWriter.(io.ReaderFrom).ReadFrom(r) + f.written += n + return n, err +} + +func sanitizeMethod(m string) string { + switch m { + case "GET", "get": + return "get" + case "PUT", "put": + return "put" + case "HEAD", "head": + return "head" + case "POST", "post": + return "post" + case "DELETE", "delete": + return "delete" + case "CONNECT", "connect": + return "connect" + case "OPTIONS", "options": + return "options" + case "NOTIFY", "notify": + return "notify" + default: + return strings.ToLower(m) + } +} + +func sanitizeCode(s int) string { + switch s { + case 100: + return "100" + case 101: + return "101" + + case 200: + return "200" + case 201: + return "201" + case 202: + return "202" + case 203: + return "203" + case 204: + return "204" + case 205: + return "205" + case 206: + return "206" + + case 300: + return "300" + case 301: + return "301" + case 302: + return "302" + case 304: + return "304" + case 305: + return "305" + case 307: + return "307" + + case 400: + return "400" + case 401: + return "401" + case 402: + return "402" + case 403: + return "403" + case 404: + return "404" + case 405: + return "405" + case 406: + return "406" + case 407: + return "407" + case 408: + return "408" + case 409: + return "409" + case 410: + return "410" + case 411: + return "411" + case 412: + return "412" + case 413: + return "413" + case 414: + return "414" + case 415: + return "415" + case 416: + return "416" + case 417: + return "417" + case 418: + return "418" + + case 500: + return "500" + case 501: + return "501" + case 502: + return "502" + case 503: + return "503" + case 504: + return "504" + case 505: + return "505" + + case 428: + return "428" + case 429: + return "429" + case 431: + return "431" + case 511: + return "511" + + default: + return strconv.Itoa(s) + } +} diff --git a/vendor/github.com/prometheus/client_golang/prometheus/http_test.go b/vendor/github.com/prometheus/client_golang/prometheus/http_test.go new file mode 100644 index 0000000..7fd4077 --- /dev/null +++ b/vendor/github.com/prometheus/client_golang/prometheus/http_test.go @@ -0,0 +1,154 @@ +// Copyright 2014 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package prometheus + +import ( + "net/http" + "net/http/httptest" + "testing" + "time" + + dto "github.com/prometheus/client_model/go" +) + +type respBody string + +func (b respBody) ServeHTTP(w http.ResponseWriter, r *http.Request) { + w.WriteHeader(http.StatusTeapot) + w.Write([]byte(b)) +} + +func TestInstrumentHandler(t *testing.T) { + defer func(n nower) { + now = n.(nower) + }(now) + + instant := time.Now() + end := instant.Add(30 * time.Second) + now = nowSeries(instant, end) + respBody := respBody("Howdy there!") + + hndlr := InstrumentHandler("test-handler", respBody) + + opts := SummaryOpts{ + Subsystem: "http", + ConstLabels: Labels{"handler": "test-handler"}, + Objectives: map[float64]float64{0.5: 0.05, 0.9: 0.01, 0.99: 0.001}, + } + + reqCnt := NewCounterVec( + CounterOpts{ + Namespace: opts.Namespace, + Subsystem: opts.Subsystem, + Name: "requests_total", + Help: "Total number of HTTP requests made.", + ConstLabels: opts.ConstLabels, + }, + instLabels, + ) + err := Register(reqCnt) + if err == nil { + t.Fatal("expected reqCnt to be registered already") + } + if are, ok := err.(AlreadyRegisteredError); ok { + reqCnt = are.ExistingCollector.(*CounterVec) + } else { + t.Fatal("unexpected registration error:", err) + } + + opts.Name = "request_duration_microseconds" + opts.Help = "The HTTP request latencies in microseconds." + reqDur := NewSummary(opts) + err = Register(reqDur) + if err == nil { + t.Fatal("expected reqDur to be registered already") + } + if are, ok := err.(AlreadyRegisteredError); ok { + reqDur = are.ExistingCollector.(Summary) + } else { + t.Fatal("unexpected registration error:", err) + } + + opts.Name = "request_size_bytes" + opts.Help = "The HTTP request sizes in bytes." + reqSz := NewSummary(opts) + err = Register(reqSz) + if err == nil { + t.Fatal("expected reqSz to be registered already") + } + if _, ok := err.(AlreadyRegisteredError); !ok { + t.Fatal("unexpected registration error:", err) + } + + opts.Name = "response_size_bytes" + opts.Help = "The HTTP response sizes in bytes." + resSz := NewSummary(opts) + err = Register(resSz) + if err == nil { + t.Fatal("expected resSz to be registered already") + } + if _, ok := err.(AlreadyRegisteredError); !ok { + t.Fatal("unexpected registration error:", err) + } + + reqCnt.Reset() + + resp := httptest.NewRecorder() + req := &http.Request{ + Method: "GET", + } + + hndlr.ServeHTTP(resp, req) + + if resp.Code != http.StatusTeapot { + t.Fatalf("expected status %d, got %d", http.StatusTeapot, resp.Code) + } + if string(resp.Body.Bytes()) != "Howdy there!" 
{ + t.Fatalf("expected body %s, got %s", "Howdy there!", string(resp.Body.Bytes())) + } + + out := &dto.Metric{} + reqDur.Write(out) + if want, got := "test-handler", out.Label[0].GetValue(); want != got { + t.Errorf("want label value %q in reqDur, got %q", want, got) + } + if want, got := uint64(1), out.Summary.GetSampleCount(); want != got { + t.Errorf("want sample count %d in reqDur, got %d", want, got) + } + + out.Reset() + if want, got := 1, len(reqCnt.children); want != got { + t.Errorf("want %d children in reqCnt, got %d", want, got) + } + cnt, err := reqCnt.GetMetricWithLabelValues("get", "418") + if err != nil { + t.Fatal(err) + } + cnt.Write(out) + if want, got := "418", out.Label[0].GetValue(); want != got { + t.Errorf("want label value %q in reqCnt, got %q", want, got) + } + if want, got := "test-handler", out.Label[1].GetValue(); want != got { + t.Errorf("want label value %q in reqCnt, got %q", want, got) + } + if want, got := "get", out.Label[2].GetValue(); want != got { + t.Errorf("want label value %q in reqCnt, got %q", want, got) + } + if out.Counter == nil { + t.Fatal("expected non-nil counter in reqCnt") + } + if want, got := 1., out.Counter.GetValue(); want != got { + t.Errorf("want reqCnt of %f, got %f", want, got) + } +} diff --git a/vendor/github.com/prometheus/client_golang/prometheus/labels.go b/vendor/github.com/prometheus/client_golang/prometheus/labels.go new file mode 100644 index 0000000..2502e37 --- /dev/null +++ b/vendor/github.com/prometheus/client_golang/prometheus/labels.go @@ -0,0 +1,57 @@ +package prometheus + +import ( + "errors" + "fmt" + "strings" + "unicode/utf8" + + "github.com/prometheus/common/model" +) + +// Labels represents a collection of label name -> value mappings. This type is +// commonly used with the With(Labels) and GetMetricWith(Labels) methods of +// metric vector Collectors, e.g.: +// myVec.With(Labels{"code": "404", "method": "GET"}).Add(42) +// +// The other use-case is the specification of constant label pairs in Opts or to +// create a Desc. +type Labels map[string]string + +// reservedLabelPrefix is a prefix which is not legal in user-supplied +// label names. +const reservedLabelPrefix = "__" + +var errInconsistentCardinality = errors.New("inconsistent label cardinality") + +func validateValuesInLabels(labels Labels, expectedNumberOfValues int) error { + if len(labels) != expectedNumberOfValues { + return errInconsistentCardinality + } + + for name, val := range labels { + if !utf8.ValidString(val) { + return fmt.Errorf("label %s: value %q is not valid UTF-8", name, val) + } + } + + return nil +} + +func validateLabelValues(vals []string, expectedNumberOfValues int) error { + if len(vals) != expectedNumberOfValues { + return errInconsistentCardinality + } + + for _, val := range vals { + if !utf8.ValidString(val) { + return fmt.Errorf("label value %q is not valid UTF-8", val) + } + } + + return nil +} + +func checkLabelName(l string) bool { + return model.LabelName(l).IsValid() && !strings.HasPrefix(l, reservedLabelPrefix) +} diff --git a/vendor/github.com/prometheus/client_golang/prometheus/metric.go b/vendor/github.com/prometheus/client_golang/prometheus/metric.go new file mode 100644 index 0000000..d4063d9 --- /dev/null +++ b/vendor/github.com/prometheus/client_golang/prometheus/metric.go @@ -0,0 +1,166 @@ +// Copyright 2014 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package prometheus + +import ( + "strings" + + dto "github.com/prometheus/client_model/go" +) + +const separatorByte byte = 255 + +// A Metric models a single sample value with its meta data being exported to +// Prometheus. Implementations of Metric in this package are Gauge, Counter, +// Histogram, Summary, and Untyped. +type Metric interface { + // Desc returns the descriptor for the Metric. This method idempotently + // returns the same descriptor throughout the lifetime of the + // Metric. The returned descriptor is immutable by contract. A Metric + // unable to describe itself must return an invalid descriptor (created + // with NewInvalidDesc). + Desc() *Desc + // Write encodes the Metric into a "Metric" Protocol Buffer data + // transmission object. + // + // Metric implementations must observe concurrency safety as reads of + // this metric may occur at any time, and any blocking occurs at the + // expense of total performance of rendering all registered + // metrics. Ideally, Metric implementations should support concurrent + // readers. + // + // While populating dto.Metric, it is the responsibility of the + // implementation to ensure validity of the Metric protobuf (like valid + // UTF-8 strings or syntactically valid metric and label names). It is + // recommended to sort labels lexicographically. (Implementers may find + // LabelPairSorter useful for that.) Callers of Write should still make + // sure of sorting if they depend on it. + Write(*dto.Metric) error + // TODO(beorn7): The original rationale of passing in a pre-allocated + // dto.Metric protobuf to save allocations has disappeared. The + // signature of this method should be changed to "Write() (*dto.Metric, + // error)". +} + +// Opts bundles the options for creating most Metric types. Each metric +// implementation XXX has its own XXXOpts type, but in most cases, it is just be +// an alias of this type (which might change when the requirement arises.) +// +// It is mandatory to set Name and Help to a non-empty string. All other fields +// are optional and can safely be left at their zero value. +type Opts struct { + // Namespace, Subsystem, and Name are components of the fully-qualified + // name of the Metric (created by joining these components with + // "_"). Only Name is mandatory, the others merely help structuring the + // name. Note that the fully-qualified name of the metric must be a + // valid Prometheus metric name. + Namespace string + Subsystem string + Name string + + // Help provides information about this metric. Mandatory! + // + // Metrics with the same fully-qualified name must have the same Help + // string. + Help string + + // ConstLabels are used to attach fixed labels to this metric. Metrics + // with the same fully-qualified name must have the same label names in + // their ConstLabels. + // + // Note that in most cases, labels have a value that varies during the + // lifetime of a process. Those labels are usually managed with a metric + // vector collector (like CounterVec, GaugeVec, UntypedVec). ConstLabels + // serve only special purposes. 
One is for the special case where the + // value of a label does not change during the lifetime of a process, + // e.g. if the revision of the running binary is put into a + // label. Another, more advanced purpose is if more than one Collector + // needs to collect Metrics with the same fully-qualified name. In that + // case, those Metrics must differ in the values of their + // ConstLabels. See the Collector examples. + // + // If the value of a label never changes (not even between binaries), + // that label most likely should not be a label at all (but part of the + // metric name). + ConstLabels Labels +} + +// BuildFQName joins the given three name components by "_". Empty name +// components are ignored. If the name parameter itself is empty, an empty +// string is returned, no matter what. Metric implementations included in this +// library use this function internally to generate the fully-qualified metric +// name from the name component in their Opts. Users of the library will only +// need this function if they implement their own Metric or instantiate a Desc +// (with NewDesc) directly. +func BuildFQName(namespace, subsystem, name string) string { + if name == "" { + return "" + } + switch { + case namespace != "" && subsystem != "": + return strings.Join([]string{namespace, subsystem, name}, "_") + case namespace != "": + return strings.Join([]string{namespace, name}, "_") + case subsystem != "": + return strings.Join([]string{subsystem, name}, "_") + } + return name +} + +// LabelPairSorter implements sort.Interface. It is used to sort a slice of +// dto.LabelPair pointers. This is useful for implementing the Write method of +// custom metrics. +type LabelPairSorter []*dto.LabelPair + +func (s LabelPairSorter) Len() int { + return len(s) +} + +func (s LabelPairSorter) Swap(i, j int) { + s[i], s[j] = s[j], s[i] +} + +func (s LabelPairSorter) Less(i, j int) bool { + return s[i].GetName() < s[j].GetName() +} + +type hashSorter []uint64 + +func (s hashSorter) Len() int { + return len(s) +} + +func (s hashSorter) Swap(i, j int) { + s[i], s[j] = s[j], s[i] +} + +func (s hashSorter) Less(i, j int) bool { + return s[i] < s[j] +} + +type invalidMetric struct { + desc *Desc + err error +} + +// NewInvalidMetric returns a metric whose Write method always returns the +// provided error. It is useful if a Collector finds itself unable to collect +// a metric and wishes to report an error to the registry. +func NewInvalidMetric(desc *Desc, err error) Metric { + return &invalidMetric{desc, err} +} + +func (m *invalidMetric) Desc() *Desc { return m.desc } + +func (m *invalidMetric) Write(*dto.Metric) error { return m.err } diff --git a/vendor/github.com/prometheus/client_golang/prometheus/metric_test.go b/vendor/github.com/prometheus/client_golang/prometheus/metric_test.go new file mode 100644 index 0000000..7145f5e --- /dev/null +++ b/vendor/github.com/prometheus/client_golang/prometheus/metric_test.go @@ -0,0 +1,35 @@ +// Copyright 2014 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
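For a concrete feel of BuildFQName, a small runnable sketch (the cases mirror the table-driven test that follows; import path as vendored above):

    package main

    import (
        "fmt"

        "github.com/prometheus/client_golang/prometheus"
    )

    func main() {
        fmt.Println(prometheus.BuildFQName("app", "http", "requests_total")) // app_http_requests_total
        fmt.Println(prometheus.BuildFQName("", "http", "requests_total"))    // http_requests_total
        fmt.Println(prometheus.BuildFQName("app", "", "requests_total"))     // app_requests_total
        fmt.Println(prometheus.BuildFQName("app", "http", ""))               // "" (empty name, no metric)
    }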
+// See the License for the specific language governing permissions and +// limitations under the License. + +package prometheus + +import "testing" + +func TestBuildFQName(t *testing.T) { + scenarios := []struct{ namespace, subsystem, name, result string }{ + {"a", "b", "c", "a_b_c"}, + {"", "b", "c", "b_c"}, + {"a", "", "c", "a_c"}, + {"", "", "c", "c"}, + {"a", "b", "", ""}, + {"a", "", "", ""}, + {"", "b", "", ""}, + {" ", "", "", ""}, + } + + for i, s := range scenarios { + if want, got := s.result, BuildFQName(s.namespace, s.subsystem, s.name); want != got { + t.Errorf("%d. want %s, got %s", i, want, got) + } + } +} diff --git a/vendor/github.com/prometheus/client_golang/prometheus/observer.go b/vendor/github.com/prometheus/client_golang/prometheus/observer.go new file mode 100644 index 0000000..b0520e8 --- /dev/null +++ b/vendor/github.com/prometheus/client_golang/prometheus/observer.go @@ -0,0 +1,50 @@ +// Copyright 2017 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package prometheus + +// Observer is the interface that wraps the Observe method, which is used by +// Histogram and Summary to add observations. +type Observer interface { + Observe(float64) +} + +// The ObserverFunc type is an adapter to allow the use of ordinary +// functions as Observers. If f is a function with the appropriate +// signature, ObserverFunc(f) is an Observer that calls f. +// +// This adapter is usually used in connection with the Timer type, and there are +// two general use cases: +// +// The most common one is to use a Gauge as the Observer for a Timer. +// See the "Gauge" Timer example. +// +// The more advanced use case is to create a function that dynamically decides +// which Observer to use for observing the duration. See the "Complex" Timer +// example. +type ObserverFunc func(float64) + +// Observe calls f(value). It implements Observer. +func (f ObserverFunc) Observe(value float64) { + f(value) +} + +// ObserverVec is an interface implemented by `HistogramVec` and `SummaryVec`. +type ObserverVec interface { + GetMetricWith(Labels) (Observer, error) + GetMetricWithLabelValues(lvs ...string) (Observer, error) + With(Labels) Observer + WithLabelValues(...string) Observer + + Collector +} diff --git a/vendor/github.com/prometheus/client_golang/prometheus/process_collector.go b/vendor/github.com/prometheus/client_golang/prometheus/process_collector.go new file mode 100644 index 0000000..94b2553 --- /dev/null +++ b/vendor/github.com/prometheus/client_golang/prometheus/process_collector.go @@ -0,0 +1,140 @@ +// Copyright 2015 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
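As a minimal sketch of the Gauge-as-Observer pattern mentioned in the ObserverFunc doc above, using the Timer type from timer.go in this package (the metric name is chosen for illustration; registration elided):

    // import "github.com/prometheus/client_golang/prometheus"
    func timedRun() {
        lastRun := prometheus.NewGauge(prometheus.GaugeOpts{
            Name: "last_run_duration_seconds",
            Help: "Duration of the last run.",
        })

        // ObserverFunc adapts the Gauge's Set method to the Observer interface,
        // so ObserveDuration writes the elapsed seconds into the Gauge.
        timer := prometheus.NewTimer(prometheus.ObserverFunc(lastRun.Set))
        defer timer.ObserveDuration()

        // ... the work being timed ...
    }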
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package prometheus + +import "github.com/prometheus/procfs" + +type processCollector struct { + pid int + collectFn func(chan<- Metric) + pidFn func() (int, error) + cpuTotal *Desc + openFDs, maxFDs *Desc + vsize, rss *Desc + startTime *Desc +} + +// NewProcessCollector returns a collector which exports the current state of +// process metrics including cpu, memory and file descriptor usage as well as +// the process start time for the given process id under the given namespace. +func NewProcessCollector(pid int, namespace string) Collector { + return NewProcessCollectorPIDFn( + func() (int, error) { return pid, nil }, + namespace, + ) +} + +// NewProcessCollectorPIDFn returns a collector which exports the current state +// of process metrics including cpu, memory and file descriptor usage as well +// as the process start time under the given namespace. The given pidFn is +// called on each collect and is used to determine the process to export +// metrics for. +func NewProcessCollectorPIDFn( + pidFn func() (int, error), + namespace string, +) Collector { + ns := "" + if len(namespace) > 0 { + ns = namespace + "_" + } + + c := processCollector{ + pidFn: pidFn, + collectFn: func(chan<- Metric) {}, + + cpuTotal: NewDesc( + ns+"process_cpu_seconds_total", + "Total user and system CPU time spent in seconds.", + nil, nil, + ), + openFDs: NewDesc( + ns+"process_open_fds", + "Number of open file descriptors.", + nil, nil, + ), + maxFDs: NewDesc( + ns+"process_max_fds", + "Maximum number of open file descriptors.", + nil, nil, + ), + vsize: NewDesc( + ns+"process_virtual_memory_bytes", + "Virtual memory size in bytes.", + nil, nil, + ), + rss: NewDesc( + ns+"process_resident_memory_bytes", + "Resident memory size in bytes.", + nil, nil, + ), + startTime: NewDesc( + ns+"process_start_time_seconds", + "Start time of the process since unix epoch in seconds.", + nil, nil, + ), + } + + // Set up process metric collection if supported by the runtime. + if _, err := procfs.NewStat(); err == nil { + c.collectFn = c.processCollect + } + + return &c +} + +// Describe returns all descriptions of the collector. +func (c *processCollector) Describe(ch chan<- *Desc) { + ch <- c.cpuTotal + ch <- c.openFDs + ch <- c.maxFDs + ch <- c.vsize + ch <- c.rss + ch <- c.startTime +} + +// Collect returns the current state of all metrics of the collector. +func (c *processCollector) Collect(ch chan<- Metric) { + c.collectFn(ch) +} + +// TODO(ts): Bring back error reporting by reverting 7faf9e7 as soon as the +// client allows users to configure the error behavior. 
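For illustration, registering both flavours of the collector documented above (the "myapp" namespace is an arbitrary example; the test file below does essentially the same):

    // import (
    //     "os"
    //     "github.com/prometheus/client_golang/prometheus"
    // )

    // Fixed PID, no namespace prefix:
    prometheus.MustRegister(prometheus.NewProcessCollector(os.Getpid(), ""))

    // PID resolved on every scrape, metrics prefixed with "myapp_":
    prometheus.MustRegister(prometheus.NewProcessCollectorPIDFn(
        func() (int, error) { return os.Getpid(), nil },
        "myapp",
    ))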
+func (c *processCollector) processCollect(ch chan<- Metric) { + pid, err := c.pidFn() + if err != nil { + return + } + + p, err := procfs.NewProc(pid) + if err != nil { + return + } + + if stat, err := p.NewStat(); err == nil { + ch <- MustNewConstMetric(c.cpuTotal, CounterValue, stat.CPUTime()) + ch <- MustNewConstMetric(c.vsize, GaugeValue, float64(stat.VirtualMemory())) + ch <- MustNewConstMetric(c.rss, GaugeValue, float64(stat.ResidentMemory())) + if startTime, err := stat.StartTime(); err == nil { + ch <- MustNewConstMetric(c.startTime, GaugeValue, startTime) + } + } + + if fds, err := p.FileDescriptorsLen(); err == nil { + ch <- MustNewConstMetric(c.openFDs, GaugeValue, float64(fds)) + } + + if limits, err := p.NewLimits(); err == nil { + ch <- MustNewConstMetric(c.maxFDs, GaugeValue, float64(limits.OpenFiles)) + } +} diff --git a/vendor/github.com/prometheus/client_golang/prometheus/process_collector_test.go b/vendor/github.com/prometheus/client_golang/prometheus/process_collector_test.go new file mode 100644 index 0000000..c7acb47 --- /dev/null +++ b/vendor/github.com/prometheus/client_golang/prometheus/process_collector_test.go @@ -0,0 +1,58 @@ +package prometheus + +import ( + "bytes" + "os" + "regexp" + "testing" + + "github.com/prometheus/common/expfmt" + "github.com/prometheus/procfs" +) + +func TestProcessCollector(t *testing.T) { + if _, err := procfs.Self(); err != nil { + t.Skipf("skipping TestProcessCollector, procfs not available: %s", err) + } + + registry := NewRegistry() + if err := registry.Register(NewProcessCollector(os.Getpid(), "")); err != nil { + t.Fatal(err) + } + if err := registry.Register(NewProcessCollectorPIDFn( + func() (int, error) { return os.Getpid(), nil }, "foobar"), + ); err != nil { + t.Fatal(err) + } + + mfs, err := registry.Gather() + if err != nil { + t.Fatal(err) + } + + var buf bytes.Buffer + for _, mf := range mfs { + if _, err := expfmt.MetricFamilyToText(&buf, mf); err != nil { + t.Fatal(err) + } + } + + for _, re := range []*regexp.Regexp{ + regexp.MustCompile("\nprocess_cpu_seconds_total [0-9]"), + regexp.MustCompile("\nprocess_max_fds [1-9]"), + regexp.MustCompile("\nprocess_open_fds [1-9]"), + regexp.MustCompile("\nprocess_virtual_memory_bytes [1-9]"), + regexp.MustCompile("\nprocess_resident_memory_bytes [1-9]"), + regexp.MustCompile("\nprocess_start_time_seconds [0-9.]{10,}"), + regexp.MustCompile("\nfoobar_process_cpu_seconds_total [0-9]"), + regexp.MustCompile("\nfoobar_process_max_fds [1-9]"), + regexp.MustCompile("\nfoobar_process_open_fds [1-9]"), + regexp.MustCompile("\nfoobar_process_virtual_memory_bytes [1-9]"), + regexp.MustCompile("\nfoobar_process_resident_memory_bytes [1-9]"), + regexp.MustCompile("\nfoobar_process_start_time_seconds [0-9.]{10,}"), + } { + if !re.Match(buf.Bytes()) { + t.Errorf("want body to match %s\n%s", re, buf.String()) + } + } +} diff --git a/vendor/github.com/prometheus/client_golang/prometheus/promhttp/delegator.go b/vendor/github.com/prometheus/client_golang/prometheus/promhttp/delegator.go new file mode 100644 index 0000000..5ee095b --- /dev/null +++ b/vendor/github.com/prometheus/client_golang/prometheus/promhttp/delegator.go @@ -0,0 +1,199 @@ +// Copyright 2017 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package promhttp + +import ( + "bufio" + "io" + "net" + "net/http" +) + +const ( + closeNotifier = 1 << iota + flusher + hijacker + readerFrom + pusher +) + +type delegator interface { + http.ResponseWriter + + Status() int + Written() int64 +} + +type responseWriterDelegator struct { + http.ResponseWriter + + handler, method string + status int + written int64 + wroteHeader bool + observeWriteHeader func(int) +} + +func (r *responseWriterDelegator) Status() int { + return r.status +} + +func (r *responseWriterDelegator) Written() int64 { + return r.written +} + +func (r *responseWriterDelegator) WriteHeader(code int) { + r.status = code + r.wroteHeader = true + r.ResponseWriter.WriteHeader(code) + if r.observeWriteHeader != nil { + r.observeWriteHeader(code) + } +} + +func (r *responseWriterDelegator) Write(b []byte) (int, error) { + if !r.wroteHeader { + r.WriteHeader(http.StatusOK) + } + n, err := r.ResponseWriter.Write(b) + r.written += int64(n) + return n, err +} + +type closeNotifierDelegator struct{ *responseWriterDelegator } +type flusherDelegator struct{ *responseWriterDelegator } +type hijackerDelegator struct{ *responseWriterDelegator } +type readerFromDelegator struct{ *responseWriterDelegator } + +func (d *closeNotifierDelegator) CloseNotify() <-chan bool { + return d.ResponseWriter.(http.CloseNotifier).CloseNotify() +} +func (d *flusherDelegator) Flush() { + d.ResponseWriter.(http.Flusher).Flush() +} +func (d *hijackerDelegator) Hijack() (net.Conn, *bufio.ReadWriter, error) { + return d.ResponseWriter.(http.Hijacker).Hijack() +} +func (d *readerFromDelegator) ReadFrom(re io.Reader) (int64, error) { + if !d.wroteHeader { + d.WriteHeader(http.StatusOK) + } + n, err := d.ResponseWriter.(io.ReaderFrom).ReadFrom(re) + d.written += n + return n, err +} + +var pickDelegator = make([]func(*responseWriterDelegator) delegator, 32) + +func init() { + // TODO(beorn7): Code generation would help here. 
+ pickDelegator[0] = func(d *responseWriterDelegator) delegator { // 0 + return d + } + pickDelegator[closeNotifier] = func(d *responseWriterDelegator) delegator { // 1 + return closeNotifierDelegator{d} + } + pickDelegator[flusher] = func(d *responseWriterDelegator) delegator { // 2 + return flusherDelegator{d} + } + pickDelegator[flusher+closeNotifier] = func(d *responseWriterDelegator) delegator { // 3 + return struct { + *responseWriterDelegator + http.Flusher + http.CloseNotifier + }{d, &flusherDelegator{d}, &closeNotifierDelegator{d}} + } + pickDelegator[hijacker] = func(d *responseWriterDelegator) delegator { // 4 + return hijackerDelegator{d} + } + pickDelegator[hijacker+closeNotifier] = func(d *responseWriterDelegator) delegator { // 5 + return struct { + *responseWriterDelegator + http.Hijacker + http.CloseNotifier + }{d, &hijackerDelegator{d}, &closeNotifierDelegator{d}} + } + pickDelegator[hijacker+flusher] = func(d *responseWriterDelegator) delegator { // 6 + return struct { + *responseWriterDelegator + http.Hijacker + http.Flusher + }{d, &hijackerDelegator{d}, &flusherDelegator{d}} + } + pickDelegator[hijacker+flusher+closeNotifier] = func(d *responseWriterDelegator) delegator { // 7 + return struct { + *responseWriterDelegator + http.Hijacker + http.Flusher + http.CloseNotifier + }{d, &hijackerDelegator{d}, &flusherDelegator{d}, &closeNotifierDelegator{d}} + } + pickDelegator[readerFrom] = func(d *responseWriterDelegator) delegator { // 8 + return readerFromDelegator{d} + } + pickDelegator[readerFrom+closeNotifier] = func(d *responseWriterDelegator) delegator { // 9 + return struct { + *responseWriterDelegator + io.ReaderFrom + http.CloseNotifier + }{d, &readerFromDelegator{d}, &closeNotifierDelegator{d}} + } + pickDelegator[readerFrom+flusher] = func(d *responseWriterDelegator) delegator { // 10 + return struct { + *responseWriterDelegator + io.ReaderFrom + http.Flusher + }{d, &readerFromDelegator{d}, &flusherDelegator{d}} + } + pickDelegator[readerFrom+flusher+closeNotifier] = func(d *responseWriterDelegator) delegator { // 11 + return struct { + *responseWriterDelegator + io.ReaderFrom + http.Flusher + http.CloseNotifier + }{d, &readerFromDelegator{d}, &flusherDelegator{d}, &closeNotifierDelegator{d}} + } + pickDelegator[readerFrom+hijacker] = func(d *responseWriterDelegator) delegator { // 12 + return struct { + *responseWriterDelegator + io.ReaderFrom + http.Hijacker + }{d, &readerFromDelegator{d}, &hijackerDelegator{d}} + } + pickDelegator[readerFrom+hijacker+closeNotifier] = func(d *responseWriterDelegator) delegator { // 13 + return struct { + *responseWriterDelegator + io.ReaderFrom + http.Hijacker + http.CloseNotifier + }{d, &readerFromDelegator{d}, &hijackerDelegator{d}, &closeNotifierDelegator{d}} + } + pickDelegator[readerFrom+hijacker+flusher] = func(d *responseWriterDelegator) delegator { // 14 + return struct { + *responseWriterDelegator + io.ReaderFrom + http.Hijacker + http.Flusher + }{d, &readerFromDelegator{d}, &hijackerDelegator{d}, &flusherDelegator{d}} + } + pickDelegator[readerFrom+hijacker+flusher+closeNotifier] = func(d *responseWriterDelegator) delegator { // 15 + return struct { + *responseWriterDelegator + io.ReaderFrom + http.Hijacker + http.Flusher + http.CloseNotifier + }{d, &readerFromDelegator{d}, &hijackerDelegator{d}, &flusherDelegator{d}, &closeNotifierDelegator{d}} + } +} diff --git a/vendor/github.com/prometheus/client_golang/prometheus/promhttp/delegator_1_8.go 
b/vendor/github.com/prometheus/client_golang/prometheus/promhttp/delegator_1_8.go new file mode 100644 index 0000000..f4d386f --- /dev/null +++ b/vendor/github.com/prometheus/client_golang/prometheus/promhttp/delegator_1_8.go @@ -0,0 +1,181 @@ +// Copyright 2017 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// +build go1.8 + +package promhttp + +import ( + "io" + "net/http" +) + +type pusherDelegator struct{ *responseWriterDelegator } + +func (d *pusherDelegator) Push(target string, opts *http.PushOptions) error { + return d.ResponseWriter.(http.Pusher).Push(target, opts) +} + +func init() { + pickDelegator[pusher] = func(d *responseWriterDelegator) delegator { // 16 + return pusherDelegator{d} + } + pickDelegator[pusher+closeNotifier] = func(d *responseWriterDelegator) delegator { // 17 + return struct { + *responseWriterDelegator + http.Pusher + http.CloseNotifier + }{d, &pusherDelegator{d}, &closeNotifierDelegator{d}} + } + pickDelegator[pusher+flusher] = func(d *responseWriterDelegator) delegator { // 18 + return struct { + *responseWriterDelegator + http.Pusher + http.Flusher + }{d, &pusherDelegator{d}, &flusherDelegator{d}} + } + pickDelegator[pusher+flusher+closeNotifier] = func(d *responseWriterDelegator) delegator { // 19 + return struct { + *responseWriterDelegator + http.Pusher + http.Flusher + http.CloseNotifier + }{d, &pusherDelegator{d}, &flusherDelegator{d}, &closeNotifierDelegator{d}} + } + pickDelegator[pusher+hijacker] = func(d *responseWriterDelegator) delegator { // 20 + return struct { + *responseWriterDelegator + http.Pusher + http.Hijacker + }{d, &pusherDelegator{d}, &hijackerDelegator{d}} + } + pickDelegator[pusher+hijacker+closeNotifier] = func(d *responseWriterDelegator) delegator { // 21 + return struct { + *responseWriterDelegator + http.Pusher + http.Hijacker + http.CloseNotifier + }{d, &pusherDelegator{d}, &hijackerDelegator{d}, &closeNotifierDelegator{d}} + } + pickDelegator[pusher+hijacker+flusher] = func(d *responseWriterDelegator) delegator { // 22 + return struct { + *responseWriterDelegator + http.Pusher + http.Hijacker + http.Flusher + }{d, &pusherDelegator{d}, &hijackerDelegator{d}, &flusherDelegator{d}} + } + pickDelegator[pusher+hijacker+flusher+closeNotifier] = func(d *responseWriterDelegator) delegator { //23 + return struct { + *responseWriterDelegator + http.Pusher + http.Hijacker + http.Flusher + http.CloseNotifier + }{d, &pusherDelegator{d}, &hijackerDelegator{d}, &flusherDelegator{d}, &closeNotifierDelegator{d}} + } + pickDelegator[pusher+readerFrom] = func(d *responseWriterDelegator) delegator { // 24 + return struct { + *responseWriterDelegator + http.Pusher + io.ReaderFrom + }{d, &pusherDelegator{d}, &readerFromDelegator{d}} + } + pickDelegator[pusher+readerFrom+closeNotifier] = func(d *responseWriterDelegator) delegator { // 25 + return struct { + *responseWriterDelegator + http.Pusher + io.ReaderFrom + http.CloseNotifier + }{d, &pusherDelegator{d}, &readerFromDelegator{d}, &closeNotifierDelegator{d}} + } + 
pickDelegator[pusher+readerFrom+flusher] = func(d *responseWriterDelegator) delegator { // 26 + return struct { + *responseWriterDelegator + http.Pusher + io.ReaderFrom + http.Flusher + }{d, &pusherDelegator{d}, &readerFromDelegator{d}, &flusherDelegator{d}} + } + pickDelegator[pusher+readerFrom+flusher+closeNotifier] = func(d *responseWriterDelegator) delegator { // 27 + return struct { + *responseWriterDelegator + http.Pusher + io.ReaderFrom + http.Flusher + http.CloseNotifier + }{d, &pusherDelegator{d}, &readerFromDelegator{d}, &flusherDelegator{d}, &closeNotifierDelegator{d}} + } + pickDelegator[pusher+readerFrom+hijacker] = func(d *responseWriterDelegator) delegator { // 28 + return struct { + *responseWriterDelegator + http.Pusher + io.ReaderFrom + http.Hijacker + }{d, &pusherDelegator{d}, &readerFromDelegator{d}, &hijackerDelegator{d}} + } + pickDelegator[pusher+readerFrom+hijacker+closeNotifier] = func(d *responseWriterDelegator) delegator { // 29 + return struct { + *responseWriterDelegator + http.Pusher + io.ReaderFrom + http.Hijacker + http.CloseNotifier + }{d, &pusherDelegator{d}, &readerFromDelegator{d}, &hijackerDelegator{d}, &closeNotifierDelegator{d}} + } + pickDelegator[pusher+readerFrom+hijacker+flusher] = func(d *responseWriterDelegator) delegator { // 30 + return struct { + *responseWriterDelegator + http.Pusher + io.ReaderFrom + http.Hijacker + http.Flusher + }{d, &pusherDelegator{d}, &readerFromDelegator{d}, &hijackerDelegator{d}, &flusherDelegator{d}} + } + pickDelegator[pusher+readerFrom+hijacker+flusher+closeNotifier] = func(d *responseWriterDelegator) delegator { // 31 + return struct { + *responseWriterDelegator + http.Pusher + io.ReaderFrom + http.Hijacker + http.Flusher + http.CloseNotifier + }{d, &pusherDelegator{d}, &readerFromDelegator{d}, &hijackerDelegator{d}, &flusherDelegator{d}, &closeNotifierDelegator{d}} + } +} + +func newDelegator(w http.ResponseWriter, observeWriteHeaderFunc func(int)) delegator { + d := &responseWriterDelegator{ + ResponseWriter: w, + observeWriteHeader: observeWriteHeaderFunc, + } + + id := 0 + if _, ok := w.(http.CloseNotifier); ok { + id += closeNotifier + } + if _, ok := w.(http.Flusher); ok { + id += flusher + } + if _, ok := w.(http.Hijacker); ok { + id += hijacker + } + if _, ok := w.(io.ReaderFrom); ok { + id += readerFrom + } + if _, ok := w.(http.Pusher); ok { + id += pusher + } + + return pickDelegator[id](d) +} diff --git a/vendor/github.com/prometheus/client_golang/prometheus/promhttp/delegator_pre_1_8.go b/vendor/github.com/prometheus/client_golang/prometheus/promhttp/delegator_pre_1_8.go new file mode 100644 index 0000000..8bb9b8b --- /dev/null +++ b/vendor/github.com/prometheus/client_golang/prometheus/promhttp/delegator_pre_1_8.go @@ -0,0 +1,44 @@ +// Copyright 2017 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
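The pickDelegator index used above (and in the pre-1.8 variant that follows) is a plain bitmask; a worked example, hedged because the exact optional interfaces depend on the server in use:

    // Flag values: closeNotifier=1, flusher=2, hijacker=4, readerFrom=8, pusher=16 (Go 1.8+ only).
    // A typical HTTP/1.x ResponseWriter from net/http implements CloseNotifier, Flusher,
    // Hijacker, and io.ReaderFrom, so newDelegator computes
    //   id = 1 + 2 + 4 + 8 = 15
    // and pickDelegator[15] wraps the writer in a delegator that keeps all four interfaces
    // available while still recording the status code and the number of bytes written.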
+ +// +build !go1.8 + +package promhttp + +import ( + "io" + "net/http" +) + +func newDelegator(w http.ResponseWriter, observeWriteHeaderFunc func(int)) delegator { + d := &responseWriterDelegator{ + ResponseWriter: w, + observeWriteHeader: observeWriteHeaderFunc, + } + + id := 0 + if _, ok := w.(http.CloseNotifier); ok { + id += closeNotifier + } + if _, ok := w.(http.Flusher); ok { + id += flusher + } + if _, ok := w.(http.Hijacker); ok { + id += hijacker + } + if _, ok := w.(io.ReaderFrom); ok { + id += readerFrom + } + + return pickDelegator[id](d) +} diff --git a/vendor/github.com/prometheus/client_golang/prometheus/promhttp/http.go b/vendor/github.com/prometheus/client_golang/prometheus/promhttp/http.go new file mode 100644 index 0000000..2d67f24 --- /dev/null +++ b/vendor/github.com/prometheus/client_golang/prometheus/promhttp/http.go @@ -0,0 +1,204 @@ +// Copyright 2016 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package promhttp provides tooling around HTTP servers and clients. +// +// First, the package allows the creation of http.Handler instances to expose +// Prometheus metrics via HTTP. promhttp.Handler acts on the +// prometheus.DefaultGatherer. With HandlerFor, you can create a handler for a +// custom registry or anything that implements the Gatherer interface. It also +// allows the creation of handlers that act differently on errors or allow to +// log errors. +// +// Second, the package provides tooling to instrument instances of http.Handler +// via middleware. Middleware wrappers follow the naming scheme +// InstrumentHandlerX, where X describes the intended use of the middleware. +// See each function's doc comment for specific details. +// +// Finally, the package allows for an http.RoundTripper to be instrumented via +// middleware. Middleware wrappers follow the naming scheme +// InstrumentRoundTripperX, where X describes the intended use of the +// middleware. See each function's doc comment for specific details. +package promhttp + +import ( + "bytes" + "compress/gzip" + "fmt" + "io" + "net/http" + "strings" + "sync" + + "github.com/prometheus/common/expfmt" + + "github.com/prometheus/client_golang/prometheus" +) + +const ( + contentTypeHeader = "Content-Type" + contentLengthHeader = "Content-Length" + contentEncodingHeader = "Content-Encoding" + acceptEncodingHeader = "Accept-Encoding" +) + +var bufPool sync.Pool + +func getBuf() *bytes.Buffer { + buf := bufPool.Get() + if buf == nil { + return &bytes.Buffer{} + } + return buf.(*bytes.Buffer) +} + +func giveBuf(buf *bytes.Buffer) { + buf.Reset() + bufPool.Put(buf) +} + +// Handler returns an HTTP handler for the prometheus.DefaultGatherer. The +// Handler uses the default HandlerOpts, i.e. report the first error as an HTTP +// error, no error logging, and compression if requested by the client. 
+// +// If you want to create a Handler for the DefaultGatherer with different +// HandlerOpts, create it with HandlerFor with prometheus.DefaultGatherer and +// your desired HandlerOpts. +func Handler() http.Handler { + return HandlerFor(prometheus.DefaultGatherer, HandlerOpts{}) +} + +// HandlerFor returns an http.Handler for the provided Gatherer. The behavior +// of the Handler is defined by the provided HandlerOpts. +func HandlerFor(reg prometheus.Gatherer, opts HandlerOpts) http.Handler { + return http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) { + mfs, err := reg.Gather() + if err != nil { + if opts.ErrorLog != nil { + opts.ErrorLog.Println("error gathering metrics:", err) + } + switch opts.ErrorHandling { + case PanicOnError: + panic(err) + case ContinueOnError: + if len(mfs) == 0 { + http.Error(w, "No metrics gathered, last error:\n\n"+err.Error(), http.StatusInternalServerError) + return + } + case HTTPErrorOnError: + http.Error(w, "An error has occurred during metrics gathering:\n\n"+err.Error(), http.StatusInternalServerError) + return + } + } + + contentType := expfmt.Negotiate(req.Header) + buf := getBuf() + defer giveBuf(buf) + writer, encoding := decorateWriter(req, buf, opts.DisableCompression) + enc := expfmt.NewEncoder(writer, contentType) + var lastErr error + for _, mf := range mfs { + if err := enc.Encode(mf); err != nil { + lastErr = err + if opts.ErrorLog != nil { + opts.ErrorLog.Println("error encoding metric family:", err) + } + switch opts.ErrorHandling { + case PanicOnError: + panic(err) + case ContinueOnError: + // Handled later. + case HTTPErrorOnError: + http.Error(w, "An error has occurred during metrics encoding:\n\n"+err.Error(), http.StatusInternalServerError) + return + } + } + } + if closer, ok := writer.(io.Closer); ok { + closer.Close() + } + if lastErr != nil && buf.Len() == 0 { + http.Error(w, "No metrics encoded, last error:\n\n"+lastErr.Error(), http.StatusInternalServerError) + return + } + header := w.Header() + header.Set(contentTypeHeader, string(contentType)) + header.Set(contentLengthHeader, fmt.Sprint(buf.Len())) + if encoding != "" { + header.Set(contentEncodingHeader, encoding) + } + w.Write(buf.Bytes()) + // TODO(beorn7): Consider streaming serving of metrics. + }) +} + +// HandlerErrorHandling defines how a Handler serving metrics will handle +// errors. +type HandlerErrorHandling int + +// These constants cause handlers serving metrics to behave as described if +// errors are encountered. +const ( + // Serve an HTTP status code 500 upon the first error + // encountered. Report the error message in the body. + HTTPErrorOnError HandlerErrorHandling = iota + // Ignore errors and try to serve as many metrics as possible. However, + // if no metrics can be served, serve an HTTP status code 500 and the + // last error message in the body. Only use this in deliberate "best + // effort" metrics collection scenarios. It is recommended to at least + // log errors (by providing an ErrorLog in HandlerOpts) to not mask + // errors completely. + ContinueOnError + // Panic upon the first error encountered (useful for "crash only" apps). + PanicOnError +) + +// Logger is the minimal interface HandlerOpts needs for logging. Note that +// log.Logger from the standard library implements this interface, and it is +// easy to implement by custom loggers, if they don't do so already anyway. +type Logger interface { + Println(v ...interface{}) +} + +// HandlerOpts specifies options how to serve metrics via an http.Handler. 
The +// zero value of HandlerOpts is a reasonable default. +type HandlerOpts struct { + // ErrorLog specifies an optional logger for errors collecting and + // serving metrics. If nil, errors are not logged at all. + ErrorLog Logger + // ErrorHandling defines how errors are handled. Note that errors are + // logged regardless of the configured ErrorHandling provided ErrorLog + // is not nil. + ErrorHandling HandlerErrorHandling + // If DisableCompression is true, the handler will never compress the + // response, even if requested by the client. + DisableCompression bool +} + +// decorateWriter wraps a writer to handle gzip compression if requested. It +// returns the decorated writer and the appropriate "Content-Encoding" header +// (which is empty if no compression is enabled). +func decorateWriter(request *http.Request, writer io.Writer, compressionDisabled bool) (io.Writer, string) { + if compressionDisabled { + return writer, "" + } + header := request.Header.Get(acceptEncodingHeader) + parts := strings.Split(header, ",") + for _, part := range parts { + part := strings.TrimSpace(part) + if part == "gzip" || strings.HasPrefix(part, "gzip;") { + return gzip.NewWriter(writer), "gzip" + } + } + return writer, "" +} diff --git a/vendor/github.com/prometheus/client_golang/prometheus/promhttp/http_test.go b/vendor/github.com/prometheus/client_golang/prometheus/promhttp/http_test.go new file mode 100644 index 0000000..413ff7b --- /dev/null +++ b/vendor/github.com/prometheus/client_golang/prometheus/promhttp/http_test.go @@ -0,0 +1,131 @@ +// Copyright 2016 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package promhttp + +import ( + "bytes" + "errors" + "log" + "net/http" + "net/http/httptest" + "testing" + + "github.com/prometheus/client_golang/prometheus" +) + +type errorCollector struct{} + +func (e errorCollector) Describe(ch chan<- *prometheus.Desc) { + ch <- prometheus.NewDesc("invalid_metric", "not helpful", nil, nil) +} + +func (e errorCollector) Collect(ch chan<- prometheus.Metric) { + ch <- prometheus.NewInvalidMetric( + prometheus.NewDesc("invalid_metric", "not helpful", nil, nil), + errors.New("collect error"), + ) +} + +func TestHandlerErrorHandling(t *testing.T) { + + // Create a registry that collects a MetricFamily with two elements, + // another with one, and reports an error. + reg := prometheus.NewRegistry() + + cnt := prometheus.NewCounter(prometheus.CounterOpts{ + Name: "the_count", + Help: "Ah-ah-ah! 
Thunder and lightning!", + }) + reg.MustRegister(cnt) + + cntVec := prometheus.NewCounterVec( + prometheus.CounterOpts{ + Name: "name", + Help: "docstring", + ConstLabels: prometheus.Labels{"constname": "constvalue"}, + }, + []string{"labelname"}, + ) + cntVec.WithLabelValues("val1").Inc() + cntVec.WithLabelValues("val2").Inc() + reg.MustRegister(cntVec) + + reg.MustRegister(errorCollector{}) + + logBuf := &bytes.Buffer{} + logger := log.New(logBuf, "", 0) + + writer := httptest.NewRecorder() + request, _ := http.NewRequest("GET", "/", nil) + request.Header.Add("Accept", "test/plain") + + errorHandler := HandlerFor(reg, HandlerOpts{ + ErrorLog: logger, + ErrorHandling: HTTPErrorOnError, + }) + continueHandler := HandlerFor(reg, HandlerOpts{ + ErrorLog: logger, + ErrorHandling: ContinueOnError, + }) + panicHandler := HandlerFor(reg, HandlerOpts{ + ErrorLog: logger, + ErrorHandling: PanicOnError, + }) + wantMsg := `error gathering metrics: error collecting metric Desc{fqName: "invalid_metric", help: "not helpful", constLabels: {}, variableLabels: []}: collect error +` + wantErrorBody := `An error has occurred during metrics gathering: + +error collecting metric Desc{fqName: "invalid_metric", help: "not helpful", constLabels: {}, variableLabels: []}: collect error +` + wantOKBody := `# HELP name docstring +# TYPE name counter +name{constname="constvalue",labelname="val1"} 1 +name{constname="constvalue",labelname="val2"} 1 +# HELP the_count Ah-ah-ah! Thunder and lightning! +# TYPE the_count counter +the_count 0 +` + + errorHandler.ServeHTTP(writer, request) + if got, want := writer.Code, http.StatusInternalServerError; got != want { + t.Errorf("got HTTP status code %d, want %d", got, want) + } + if got := logBuf.String(); got != wantMsg { + t.Errorf("got log message:\n%s\nwant log mesage:\n%s\n", got, wantMsg) + } + if got := writer.Body.String(); got != wantErrorBody { + t.Errorf("got body:\n%s\nwant body:\n%s\n", got, wantErrorBody) + } + logBuf.Reset() + writer.Body.Reset() + writer.Code = http.StatusOK + + continueHandler.ServeHTTP(writer, request) + if got, want := writer.Code, http.StatusOK; got != want { + t.Errorf("got HTTP status code %d, want %d", got, want) + } + if got := logBuf.String(); got != wantMsg { + t.Errorf("got log message %q, want %q", got, wantMsg) + } + if got := writer.Body.String(); got != wantOKBody { + t.Errorf("got body %q, want %q", got, wantOKBody) + } + + defer func() { + if err := recover(); err == nil { + t.Error("expected panic from panicHandler") + } + }() + panicHandler.ServeHTTP(writer, request) +} diff --git a/vendor/github.com/prometheus/client_golang/prometheus/promhttp/instrument_client.go b/vendor/github.com/prometheus/client_golang/prometheus/promhttp/instrument_client.go new file mode 100644 index 0000000..65f9425 --- /dev/null +++ b/vendor/github.com/prometheus/client_golang/prometheus/promhttp/instrument_client.go @@ -0,0 +1,98 @@ +// Copyright 2017 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
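Tying the pieces of http.go together, a minimal server sketch (the registry, routes, and address are arbitrary examples):

    package main

    import (
        "log"
        "net/http"
        "os"

        "github.com/prometheus/client_golang/prometheus"
        "github.com/prometheus/client_golang/prometheus/promhttp"
    )

    func main() {
        // Default registry, default HandlerOpts.
        http.Handle("/metrics", promhttp.Handler())

        // A custom registry with explicit error handling and logging.
        reg := prometheus.NewRegistry()
        http.Handle("/custom-metrics", promhttp.HandlerFor(reg, promhttp.HandlerOpts{
            ErrorLog:      log.New(os.Stderr, "promhttp: ", log.LstdFlags),
            ErrorHandling: promhttp.ContinueOnError,
        }))

        log.Fatal(http.ListenAndServe(":8080", nil))
    }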
+ +package promhttp + +import ( + "net/http" + "time" + + "github.com/prometheus/client_golang/prometheus" +) + +// The RoundTripperFunc type is an adapter to allow the use of ordinary +// functions as RoundTrippers. If f is a function with the appropriate +// signature, RountTripperFunc(f) is a RoundTripper that calls f. +type RoundTripperFunc func(req *http.Request) (*http.Response, error) + +// RoundTrip implements the RoundTripper interface. +func (rt RoundTripperFunc) RoundTrip(r *http.Request) (*http.Response, error) { + return rt(r) +} + +// InstrumentRoundTripperInFlight is a middleware that wraps the provided +// http.RoundTripper. It sets the provided prometheus.Gauge to the number of +// requests currently handled by the wrapped http.RoundTripper. +// +// See the example for ExampleInstrumentRoundTripperDuration for example usage. +func InstrumentRoundTripperInFlight(gauge prometheus.Gauge, next http.RoundTripper) RoundTripperFunc { + return RoundTripperFunc(func(r *http.Request) (*http.Response, error) { + gauge.Inc() + defer gauge.Dec() + return next.RoundTrip(r) + }) +} + +// InstrumentRoundTripperCounter is a middleware that wraps the provided +// http.RoundTripper to observe the request result with the provided CounterVec. +// The CounterVec must have zero, one, or two labels. The only allowed label +// names are "code" and "method". The function panics if any other instance +// labels are provided. Partitioning of the CounterVec happens by HTTP status +// code and/or HTTP method if the respective instance label names are present +// in the CounterVec. For unpartitioned counting, use a CounterVec with +// zero labels. +// +// If the wrapped RoundTripper panics or returns a non-nil error, the Counter +// is not incremented. +// +// See the example for ExampleInstrumentRoundTripperDuration for example usage. +func InstrumentRoundTripperCounter(counter *prometheus.CounterVec, next http.RoundTripper) RoundTripperFunc { + code, method := checkLabels(counter) + + return RoundTripperFunc(func(r *http.Request) (*http.Response, error) { + resp, err := next.RoundTrip(r) + if err == nil { + counter.With(labels(code, method, r.Method, resp.StatusCode)).Inc() + } + return resp, err + }) +} + +// InstrumentRoundTripperDuration is a middleware that wraps the provided +// http.RoundTripper to observe the request duration with the provided ObserverVec. +// The ObserverVec must have zero, one, or two labels. The only allowed label +// names are "code" and "method". The function panics if any other instance +// labels are provided. The Observe method of the Observer in the ObserverVec +// is called with the request duration in seconds. Partitioning happens by HTTP +// status code and/or HTTP method if the respective instance label names are +// present in the ObserverVec. For unpartitioned observations, use an +// ObserverVec with zero labels. Note that partitioning of Histograms is +// expensive and should be used judiciously. +// +// If the wrapped RoundTripper panics or returns a non-nil error, no values are +// reported. +// +// Note that this method is only guaranteed to never observe negative durations +// if used with Go1.9+. 
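A compact sketch of chaining the client-side middleware described above (metric names are illustrative; ExampleInstrumentRoundTripperDuration further below shows the full chain including tracing):

    // import (
    //     "net/http"
    //     "github.com/prometheus/client_golang/prometheus"
    //     "github.com/prometheus/client_golang/prometheus/promhttp"
    // )
    inFlight := prometheus.NewGauge(prometheus.GaugeOpts{
        Name: "client_in_flight_requests",
        Help: "In-flight requests of the instrumented client.",
    })
    apiCalls := prometheus.NewCounterVec(
        prometheus.CounterOpts{
            Name: "client_api_requests_total",
            Help: "Requests made by the instrumented client, by code and method.",
        },
        []string{"code", "method"},
    )
    prometheus.MustRegister(inFlight, apiCalls)

    client := &http.Client{
        Transport: promhttp.InstrumentRoundTripperInFlight(inFlight,
            promhttp.InstrumentRoundTripperCounter(apiCalls, http.DefaultTransport),
        ),
    }
    _ = client // use like any other *http.Client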
+func InstrumentRoundTripperDuration(obs prometheus.ObserverVec, next http.RoundTripper) RoundTripperFunc { + code, method := checkLabels(obs) + + return RoundTripperFunc(func(r *http.Request) (*http.Response, error) { + start := time.Now() + resp, err := next.RoundTrip(r) + if err == nil { + obs.With(labels(code, method, r.Method, resp.StatusCode)).Observe(time.Since(start).Seconds()) + } + return resp, err + }) +} diff --git a/vendor/github.com/prometheus/client_golang/prometheus/promhttp/instrument_client_1_8.go b/vendor/github.com/prometheus/client_golang/prometheus/promhttp/instrument_client_1_8.go new file mode 100644 index 0000000..0bd80c3 --- /dev/null +++ b/vendor/github.com/prometheus/client_golang/prometheus/promhttp/instrument_client_1_8.go @@ -0,0 +1,144 @@ +// Copyright 2017 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// +build go1.8 + +package promhttp + +import ( + "context" + "crypto/tls" + "net/http" + "net/http/httptrace" + "time" +) + +// InstrumentTrace is used to offer flexibility in instrumenting the available +// httptrace.ClientTrace hook functions. Each function is passed a float64 +// representing the time in seconds since the start of the http request. A user +// may choose to use separately buckets Histograms, or implement custom +// instance labels on a per function basis. +type InstrumentTrace struct { + GotConn func(float64) + PutIdleConn func(float64) + GotFirstResponseByte func(float64) + Got100Continue func(float64) + DNSStart func(float64) + DNSDone func(float64) + ConnectStart func(float64) + ConnectDone func(float64) + TLSHandshakeStart func(float64) + TLSHandshakeDone func(float64) + WroteHeaders func(float64) + Wait100Continue func(float64) + WroteRequest func(float64) +} + +// InstrumentRoundTripperTrace is a middleware that wraps the provided +// RoundTripper and reports times to hook functions provided in the +// InstrumentTrace struct. Hook functions that are not present in the provided +// InstrumentTrace struct are ignored. Times reported to the hook functions are +// time since the start of the request. Only with Go1.9+, those times are +// guaranteed to never be negative. (Earlier Go versions are not using a +// monotonic clock.) Note that partitioning of Histograms is expensive and +// should be used judiciously. +// +// For hook functions that receive an error as an argument, no observations are +// made in the event of a non-nil error value. +// +// See the example for ExampleInstrumentRoundTripperDuration for example usage. 
+func InstrumentRoundTripperTrace(it *InstrumentTrace, next http.RoundTripper) RoundTripperFunc { + return RoundTripperFunc(func(r *http.Request) (*http.Response, error) { + start := time.Now() + + trace := &httptrace.ClientTrace{ + GotConn: func(_ httptrace.GotConnInfo) { + if it.GotConn != nil { + it.GotConn(time.Since(start).Seconds()) + } + }, + PutIdleConn: func(err error) { + if err != nil { + return + } + if it.PutIdleConn != nil { + it.PutIdleConn(time.Since(start).Seconds()) + } + }, + DNSStart: func(_ httptrace.DNSStartInfo) { + if it.DNSStart != nil { + it.DNSStart(time.Since(start).Seconds()) + } + }, + DNSDone: func(_ httptrace.DNSDoneInfo) { + if it.DNSStart != nil { + it.DNSStart(time.Since(start).Seconds()) + } + }, + ConnectStart: func(_, _ string) { + if it.ConnectStart != nil { + it.ConnectStart(time.Since(start).Seconds()) + } + }, + ConnectDone: func(_, _ string, err error) { + if err != nil { + return + } + if it.ConnectDone != nil { + it.ConnectDone(time.Since(start).Seconds()) + } + }, + GotFirstResponseByte: func() { + if it.GotFirstResponseByte != nil { + it.GotFirstResponseByte(time.Since(start).Seconds()) + } + }, + Got100Continue: func() { + if it.Got100Continue != nil { + it.Got100Continue(time.Since(start).Seconds()) + } + }, + TLSHandshakeStart: func() { + if it.TLSHandshakeStart != nil { + it.TLSHandshakeStart(time.Since(start).Seconds()) + } + }, + TLSHandshakeDone: func(_ tls.ConnectionState, err error) { + if err != nil { + return + } + if it.TLSHandshakeDone != nil { + it.TLSHandshakeDone(time.Since(start).Seconds()) + } + }, + WroteHeaders: func() { + if it.WroteHeaders != nil { + it.WroteHeaders(time.Since(start).Seconds()) + } + }, + Wait100Continue: func() { + if it.Wait100Continue != nil { + it.Wait100Continue(time.Since(start).Seconds()) + } + }, + WroteRequest: func(_ httptrace.WroteRequestInfo) { + if it.WroteRequest != nil { + it.WroteRequest(time.Since(start).Seconds()) + } + }, + } + r = r.WithContext(httptrace.WithClientTrace(context.Background(), trace)) + + return next.RoundTrip(r) + }) +} diff --git a/vendor/github.com/prometheus/client_golang/prometheus/promhttp/instrument_client_1_8_test.go b/vendor/github.com/prometheus/client_golang/prometheus/promhttp/instrument_client_1_8_test.go new file mode 100644 index 0000000..7e3f522 --- /dev/null +++ b/vendor/github.com/prometheus/client_golang/prometheus/promhttp/instrument_client_1_8_test.go @@ -0,0 +1,195 @@ +// Copyright 2017 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +// +build go1.8 + +package promhttp + +import ( + "log" + "net/http" + "testing" + "time" + + "github.com/prometheus/client_golang/prometheus" +) + +func TestClientMiddlewareAPI(t *testing.T) { + client := http.DefaultClient + client.Timeout = 1 * time.Second + + reg := prometheus.NewRegistry() + + inFlightGauge := prometheus.NewGauge(prometheus.GaugeOpts{ + Name: "client_in_flight_requests", + Help: "A gauge of in-flight requests for the wrapped client.", + }) + + counter := prometheus.NewCounterVec( + prometheus.CounterOpts{ + Name: "client_api_requests_total", + Help: "A counter for requests from the wrapped client.", + }, + []string{"code", "method"}, + ) + + dnsLatencyVec := prometheus.NewHistogramVec( + prometheus.HistogramOpts{ + Name: "dns_duration_seconds", + Help: "Trace dns latency histogram.", + Buckets: []float64{.005, .01, .025, .05}, + }, + []string{"event"}, + ) + + tlsLatencyVec := prometheus.NewHistogramVec( + prometheus.HistogramOpts{ + Name: "tls_duration_seconds", + Help: "Trace tls latency histogram.", + Buckets: []float64{.05, .1, .25, .5}, + }, + []string{"event"}, + ) + + histVec := prometheus.NewHistogramVec( + prometheus.HistogramOpts{ + Name: "request_duration_seconds", + Help: "A histogram of request latencies.", + Buckets: prometheus.DefBuckets, + }, + []string{"method"}, + ) + + reg.MustRegister(counter, tlsLatencyVec, dnsLatencyVec, histVec, inFlightGauge) + + trace := &InstrumentTrace{ + DNSStart: func(t float64) { + dnsLatencyVec.WithLabelValues("dns_start") + }, + DNSDone: func(t float64) { + dnsLatencyVec.WithLabelValues("dns_done") + }, + TLSHandshakeStart: func(t float64) { + tlsLatencyVec.WithLabelValues("tls_handshake_start") + }, + TLSHandshakeDone: func(t float64) { + tlsLatencyVec.WithLabelValues("tls_handshake_done") + }, + } + + client.Transport = InstrumentRoundTripperInFlight(inFlightGauge, + InstrumentRoundTripperCounter(counter, + InstrumentRoundTripperTrace(trace, + InstrumentRoundTripperDuration(histVec, http.DefaultTransport), + ), + ), + ) + + resp, err := client.Get("http://google.com") + if err != nil { + t.Fatalf("%v", err) + } + defer resp.Body.Close() +} + +func ExampleInstrumentRoundTripperDuration() { + client := http.DefaultClient + client.Timeout = 1 * time.Second + + inFlightGauge := prometheus.NewGauge(prometheus.GaugeOpts{ + Name: "client_in_flight_requests", + Help: "A gauge of in-flight requests for the wrapped client.", + }) + + counter := prometheus.NewCounterVec( + prometheus.CounterOpts{ + Name: "client_api_requests_total", + Help: "A counter for requests from the wrapped client.", + }, + []string{"code", "method"}, + ) + + // dnsLatencyVec uses custom buckets based on expected dns durations. + // It has an instance label "event", which is set in the + // DNSStart and DNSDonehook functions defined in the + // InstrumentTrace struct below. + dnsLatencyVec := prometheus.NewHistogramVec( + prometheus.HistogramOpts{ + Name: "dns_duration_seconds", + Help: "Trace dns latency histogram.", + Buckets: []float64{.005, .01, .025, .05}, + }, + []string{"event"}, + ) + + // tlsLatencyVec uses custom buckets based on expected tls durations. + // It has an instance label "event", which is set in the + // TLSHandshakeStart and TLSHandshakeDone hook functions defined in the + // InstrumentTrace struct below. 
+ tlsLatencyVec := prometheus.NewHistogramVec( + prometheus.HistogramOpts{ + Name: "tls_duration_seconds", + Help: "Trace tls latency histogram.", + Buckets: []float64{.05, .1, .25, .5}, + }, + []string{"event"}, + ) + + // histVec has no labels, making it a zero-dimensional ObserverVec. + histVec := prometheus.NewHistogramVec( + prometheus.HistogramOpts{ + Name: "request_duration_seconds", + Help: "A histogram of request latencies.", + Buckets: prometheus.DefBuckets, + }, + []string{}, + ) + + // Register all of the metrics in the standard registry. + prometheus.MustRegister(counter, tlsLatencyVec, dnsLatencyVec, histVec, inFlightGauge) + + // Define functions for the available httptrace.ClientTrace hook + // functions that we want to instrument. + trace := &InstrumentTrace{ + DNSStart: func(t float64) { + dnsLatencyVec.WithLabelValues("dns_start") + }, + DNSDone: func(t float64) { + dnsLatencyVec.WithLabelValues("dns_done") + }, + TLSHandshakeStart: func(t float64) { + tlsLatencyVec.WithLabelValues("tls_handshake_start") + }, + TLSHandshakeDone: func(t float64) { + tlsLatencyVec.WithLabelValues("tls_handshake_done") + }, + } + + // Wrap the default RoundTripper with middleware. + roundTripper := InstrumentRoundTripperInFlight(inFlightGauge, + InstrumentRoundTripperCounter(counter, + InstrumentRoundTripperTrace(trace, + InstrumentRoundTripperDuration(histVec, http.DefaultTransport), + ), + ), + ) + + // Set the RoundTripper on our client. + client.Transport = roundTripper + + resp, err := client.Get("http://google.com") + if err != nil { + log.Printf("error: %v", err) + } + defer resp.Body.Close() +} diff --git a/vendor/github.com/prometheus/client_golang/prometheus/promhttp/instrument_server.go b/vendor/github.com/prometheus/client_golang/prometheus/promhttp/instrument_server.go new file mode 100644 index 0000000..3d145ad --- /dev/null +++ b/vendor/github.com/prometheus/client_golang/prometheus/promhttp/instrument_server.go @@ -0,0 +1,440 @@ +// Copyright 2017 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package promhttp + +import ( + "net/http" + "strconv" + "strings" + "time" + + dto "github.com/prometheus/client_model/go" + + "github.com/prometheus/client_golang/prometheus" +) + +// magicString is used for the hacky label test in checkLabels. Remove once fixed. +const magicString = "zZgWfBxLqvG8kc8IMv3POi2Bb0tZI3vAnBx+gBaFi9FyPzB/CzKUer1yufDa" + +// InstrumentHandlerInFlight is a middleware that wraps the provided +// http.Handler. It sets the provided prometheus.Gauge to the number of +// requests currently handled by the wrapped http.Handler. +// +// See the example for InstrumentHandlerDuration for example usage. 
+func InstrumentHandlerInFlight(g prometheus.Gauge, next http.Handler) http.Handler { + return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + g.Inc() + defer g.Dec() + next.ServeHTTP(w, r) + }) +} + +// InstrumentHandlerDuration is a middleware that wraps the provided +// http.Handler to observe the request duration with the provided ObserverVec. +// The ObserverVec must have zero, one, or two labels. The only allowed label +// names are "code" and "method". The function panics if any other instance +// labels are provided. The Observe method of the Observer in the ObserverVec +// is called with the request duration in seconds. Partitioning happens by HTTP +// status code and/or HTTP method if the respective instance label names are +// present in the ObserverVec. For unpartitioned observations, use an +// ObserverVec with zero labels. Note that partitioning of Histograms is +// expensive and should be used judiciously. +// +// If the wrapped Handler does not set a status code, a status code of 200 is assumed. +// +// If the wrapped Handler panics, no values are reported. +// +// Note that this method is only guaranteed to never observe negative durations +// if used with Go1.9+. +func InstrumentHandlerDuration(obs prometheus.ObserverVec, next http.Handler) http.HandlerFunc { + code, method := checkLabels(obs) + + if code { + return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + now := time.Now() + d := newDelegator(w, nil) + next.ServeHTTP(d, r) + + obs.With(labels(code, method, r.Method, d.Status())).Observe(time.Since(now).Seconds()) + }) + } + + return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + now := time.Now() + next.ServeHTTP(w, r) + obs.With(labels(code, method, r.Method, 0)).Observe(time.Since(now).Seconds()) + }) +} + +// InstrumentHandlerCounter is a middleware that wraps the provided +// http.Handler to observe the request result with the provided CounterVec. +// The CounterVec must have zero, one, or two labels. The only allowed label +// names are "code" and "method". The function panics if any other instance +// labels are provided. Partitioning of the CounterVec happens by HTTP status +// code and/or HTTP method if the respective instance label names are present +// in the CounterVec. For unpartitioned counting, use a CounterVec with +// zero labels. +// +// If the wrapped Handler does not set a status code, a status code of 200 is assumed. +// +// If the wrapped Handler panics, the Counter is not incremented. +// +// See the example for InstrumentHandlerDuration for example usage. +func InstrumentHandlerCounter(counter *prometheus.CounterVec, next http.Handler) http.HandlerFunc { + code, method := checkLabels(counter) + + if code { + return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + d := newDelegator(w, nil) + next.ServeHTTP(d, r) + counter.With(labels(code, method, r.Method, d.Status())).Inc() + }) + } + + return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + next.ServeHTTP(w, r) + counter.With(labels(code, method, r.Method, 0)).Inc() + }) +} + +// InstrumentHandlerTimeToWriteHeader is a middleware that wraps the provided +// http.Handler to observe with the provided ObserverVec the request duration +// until the response headers are written. The ObserverVec must have zero, one, +// or two labels. The only allowed label names are "code" and "method". The +// function panics if any other instance labels are provided. 
The Observe +// method of the Observer in the ObserverVec is called with the request +// duration in seconds. Partitioning happens by HTTP status code and/or HTTP +// method if the respective instance label names are present in the +// ObserverVec. For unpartitioned observations, use an ObserverVec with zero +// labels. Note that partitioning of Histograms is expensive and should be used +// judiciously. +// +// If the wrapped Handler panics before calling WriteHeader, no value is +// reported. +// +// Note that this method is only guaranteed to never observe negative durations +// if used with Go1.9+. +// +// See the example for InstrumentHandlerDuration for example usage. +func InstrumentHandlerTimeToWriteHeader(obs prometheus.ObserverVec, next http.Handler) http.HandlerFunc { + code, method := checkLabels(obs) + + return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + now := time.Now() + d := newDelegator(w, func(status int) { + obs.With(labels(code, method, r.Method, status)).Observe(time.Since(now).Seconds()) + }) + next.ServeHTTP(d, r) + }) +} + +// InstrumentHandlerRequestSize is a middleware that wraps the provided +// http.Handler to observe the request size with the provided ObserverVec. +// The ObserverVec must have zero, one, or two labels. The only allowed label +// names are "code" and "method". The function panics if any other instance +// labels are provided. The Observe method of the Observer in the ObserverVec +// is called with the request size in bytes. Partitioning happens by HTTP +// status code and/or HTTP method if the respective instance label names are +// present in the ObserverVec. For unpartitioned observations, use an +// ObserverVec with zero labels. Note that partitioning of Histograms is +// expensive and should be used judiciously. +// +// If the wrapped Handler does not set a status code, a status code of 200 is assumed. +// +// If the wrapped Handler panics, no values are reported. +// +// See the example for InstrumentHandlerDuration for example usage. +func InstrumentHandlerRequestSize(obs prometheus.ObserverVec, next http.Handler) http.HandlerFunc { + code, method := checkLabels(obs) + + if code { + return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + d := newDelegator(w, nil) + next.ServeHTTP(d, r) + size := computeApproximateRequestSize(r) + obs.With(labels(code, method, r.Method, d.Status())).Observe(float64(size)) + }) + } + + return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + next.ServeHTTP(w, r) + size := computeApproximateRequestSize(r) + obs.With(labels(code, method, r.Method, 0)).Observe(float64(size)) + }) +} + +// InstrumentHandlerResponseSize is a middleware that wraps the provided +// http.Handler to observe the response size with the provided ObserverVec. +// The ObserverVec must have zero, one, or two labels. The only allowed label +// names are "code" and "method". The function panics if any other instance +// labels are provided. The Observe method of the Observer in the ObserverVec +// is called with the response size in bytes. Partitioning happens by HTTP +// status code and/or HTTP method if the respective instance label names are +// present in the ObserverVec. For unpartitioned observations, use an +// ObserverVec with zero labels. Note that partitioning of Histograms is +// expensive and should be used judiciously. +// +// If the wrapped Handler does not set a status code, a status code of 200 is assumed. +// +// If the wrapped Handler panics, no values are reported. 
+// +// See the example for InstrumentHandlerDuration for example usage. +func InstrumentHandlerResponseSize(obs prometheus.ObserverVec, next http.Handler) http.Handler { + code, method := checkLabels(obs) + return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + d := newDelegator(w, nil) + next.ServeHTTP(d, r) + obs.With(labels(code, method, r.Method, d.Status())).Observe(float64(d.Written())) + }) +} + +func checkLabels(c prometheus.Collector) (code bool, method bool) { + // TODO(beorn7): Remove this hacky way to check for instance labels + // once Descriptors can have their dimensionality queried. + var ( + desc *prometheus.Desc + pm dto.Metric + ) + + descc := make(chan *prometheus.Desc, 1) + c.Describe(descc) + + select { + case desc = <-descc: + default: + panic("no description provided by collector") + } + select { + case <-descc: + panic("more than one description provided by collector") + default: + } + + close(descc) + + if _, err := prometheus.NewConstMetric(desc, prometheus.UntypedValue, 0); err == nil { + return + } + if m, err := prometheus.NewConstMetric(desc, prometheus.UntypedValue, 0, magicString); err == nil { + if err := m.Write(&pm); err != nil { + panic("error checking metric for labels") + } + for _, label := range pm.Label { + name, value := label.GetName(), label.GetValue() + if value != magicString { + continue + } + switch name { + case "code": + code = true + case "method": + method = true + default: + panic("metric partitioned with non-supported labels") + } + return + } + panic("previously set label not found – this must never happen") + } + if m, err := prometheus.NewConstMetric(desc, prometheus.UntypedValue, 0, magicString, magicString); err == nil { + if err := m.Write(&pm); err != nil { + panic("error checking metric for labels") + } + for _, label := range pm.Label { + name, value := label.GetName(), label.GetValue() + if value != magicString { + continue + } + if name == "code" || name == "method" { + continue + } + panic("metric partitioned with non-supported labels") + } + code = true + method = true + return + } + panic("metric partitioned with non-supported labels") +} + +// emptyLabels is a one-time allocation for non-partitioned metrics to avoid +// unnecessary allocations on each request. +var emptyLabels = prometheus.Labels{} + +func labels(code, method bool, reqMethod string, status int) prometheus.Labels { + if !(code || method) { + return emptyLabels + } + labels := prometheus.Labels{} + + if code { + labels["code"] = sanitizeCode(status) + } + if method { + labels["method"] = sanitizeMethod(reqMethod) + } + + return labels +} + +func computeApproximateRequestSize(r *http.Request) int { + s := 0 + if r.URL != nil { + s += len(r.URL.String()) + } + + s += len(r.Method) + s += len(r.Proto) + for name, values := range r.Header { + s += len(name) + for _, value := range values { + s += len(value) + } + } + s += len(r.Host) + + // N.B. r.Form and r.MultipartForm are assumed to be included in r.URL. 
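	// A ContentLength of -1 signals an unknown body length (per net/http),
	// so the body size is only counted when it is known.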
+ + if r.ContentLength != -1 { + s += int(r.ContentLength) + } + return s +} + +func sanitizeMethod(m string) string { + switch m { + case "GET", "get": + return "get" + case "PUT", "put": + return "put" + case "HEAD", "head": + return "head" + case "POST", "post": + return "post" + case "DELETE", "delete": + return "delete" + case "CONNECT", "connect": + return "connect" + case "OPTIONS", "options": + return "options" + case "NOTIFY", "notify": + return "notify" + default: + return strings.ToLower(m) + } +} + +// If the wrapped http.Handler has not set a status code, i.e. the value is +// currently 0, santizeCode will return 200, for consistency with behavior in +// the stdlib. +func sanitizeCode(s int) string { + switch s { + case 100: + return "100" + case 101: + return "101" + + case 200, 0: + return "200" + case 201: + return "201" + case 202: + return "202" + case 203: + return "203" + case 204: + return "204" + case 205: + return "205" + case 206: + return "206" + + case 300: + return "300" + case 301: + return "301" + case 302: + return "302" + case 304: + return "304" + case 305: + return "305" + case 307: + return "307" + + case 400: + return "400" + case 401: + return "401" + case 402: + return "402" + case 403: + return "403" + case 404: + return "404" + case 405: + return "405" + case 406: + return "406" + case 407: + return "407" + case 408: + return "408" + case 409: + return "409" + case 410: + return "410" + case 411: + return "411" + case 412: + return "412" + case 413: + return "413" + case 414: + return "414" + case 415: + return "415" + case 416: + return "416" + case 417: + return "417" + case 418: + return "418" + + case 500: + return "500" + case 501: + return "501" + case 502: + return "502" + case 503: + return "503" + case 504: + return "504" + case 505: + return "505" + + case 428: + return "428" + case 429: + return "429" + case 431: + return "431" + case 511: + return "511" + + default: + return strconv.Itoa(s) + } +} diff --git a/vendor/github.com/prometheus/client_golang/prometheus/promhttp/instrument_server_test.go b/vendor/github.com/prometheus/client_golang/prometheus/promhttp/instrument_server_test.go new file mode 100644 index 0000000..50f7524 --- /dev/null +++ b/vendor/github.com/prometheus/client_golang/prometheus/promhttp/instrument_server_test.go @@ -0,0 +1,233 @@ +// Copyright 2017 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package promhttp + +import ( + "io" + "log" + "net/http" + "net/http/httptest" + "testing" + + "github.com/prometheus/client_golang/prometheus" +) + +func TestMiddlewareAPI(t *testing.T) { + reg := prometheus.NewRegistry() + + inFlightGauge := prometheus.NewGauge(prometheus.GaugeOpts{ + Name: "in_flight_requests", + Help: "A gauge of requests currently being served by the wrapped handler.", + }) + + counter := prometheus.NewCounterVec( + prometheus.CounterOpts{ + Name: "api_requests_total", + Help: "A counter for requests to the wrapped handler.", + }, + []string{"code", "method"}, + ) + + histVec := prometheus.NewHistogramVec( + prometheus.HistogramOpts{ + Name: "response_duration_seconds", + Help: "A histogram of request latencies.", + Buckets: prometheus.DefBuckets, + ConstLabels: prometheus.Labels{"handler": "api"}, + }, + []string{"method"}, + ) + + writeHeaderVec := prometheus.NewHistogramVec( + prometheus.HistogramOpts{ + Name: "write_header_duration_seconds", + Help: "A histogram of time to first write latencies.", + Buckets: prometheus.DefBuckets, + ConstLabels: prometheus.Labels{"handler": "api"}, + }, + []string{}, + ) + + responseSize := prometheus.NewHistogramVec( + prometheus.HistogramOpts{ + Name: "push_request_size_bytes", + Help: "A histogram of request sizes for requests.", + Buckets: []float64{200, 500, 900, 1500}, + }, + []string{}, + ) + + handler := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + w.Write([]byte("OK")) + }) + + reg.MustRegister(inFlightGauge, counter, histVec, responseSize, writeHeaderVec) + + chain := InstrumentHandlerInFlight(inFlightGauge, + InstrumentHandlerCounter(counter, + InstrumentHandlerDuration(histVec, + InstrumentHandlerTimeToWriteHeader(writeHeaderVec, + InstrumentHandlerResponseSize(responseSize, handler), + ), + ), + ), + ) + + r, _ := http.NewRequest("GET", "www.example.com", nil) + w := httptest.NewRecorder() + chain.ServeHTTP(w, r) +} + +func TestInstrumentTimeToFirstWrite(t *testing.T) { + var i int + dobs := &responseWriterDelegator{ + ResponseWriter: httptest.NewRecorder(), + observeWriteHeader: func(status int) { + i = status + }, + } + d := newDelegator(dobs, nil) + + d.WriteHeader(http.StatusOK) + + if i != http.StatusOK { + t.Fatalf("failed to execute observeWriteHeader") + } +} + +// testResponseWriter is an http.ResponseWriter that also implements +// http.CloseNotifier, http.Flusher, and io.ReaderFrom. 
+type testResponseWriter struct { + closeNotifyCalled, flushCalled, readFromCalled bool +} + +func (t *testResponseWriter) Header() http.Header { return nil } +func (t *testResponseWriter) Write([]byte) (int, error) { return 0, nil } +func (t *testResponseWriter) WriteHeader(int) {} +func (t *testResponseWriter) CloseNotify() <-chan bool { + t.closeNotifyCalled = true + return nil +} +func (t *testResponseWriter) Flush() { t.flushCalled = true } +func (t *testResponseWriter) ReadFrom(io.Reader) (int64, error) { + t.readFromCalled = true + return 0, nil +} + +func TestInterfaceUpgrade(t *testing.T) { + w := &testResponseWriter{} + d := newDelegator(w, nil) + d.(http.CloseNotifier).CloseNotify() + if !w.closeNotifyCalled { + t.Error("CloseNotify not called") + } + d.(http.Flusher).Flush() + if !w.flushCalled { + t.Error("Flush not called") + } + d.(io.ReaderFrom).ReadFrom(nil) + if !w.readFromCalled { + t.Error("ReadFrom not called") + } + if _, ok := d.(http.Hijacker); ok { + t.Error("delegator unexpectedly implements http.Hijacker") + } +} + +func ExampleInstrumentHandlerDuration() { + inFlightGauge := prometheus.NewGauge(prometheus.GaugeOpts{ + Name: "in_flight_requests", + Help: "A gauge of requests currently being served by the wrapped handler.", + }) + + counter := prometheus.NewCounterVec( + prometheus.CounterOpts{ + Name: "api_requests_total", + Help: "A counter for requests to the wrapped handler.", + }, + []string{"code", "method"}, + ) + + // pushVec and pullVec are partitioned by the HTTP method and use custom + // buckets based on the expected request duration. ConstLabels are used + // to set a handler label to mark pushVec as tracking the durations for + // pushes and pullVec as tracking the durations for pulls. Note that + // Name, Help, and Buckets need to be the same for consistency, so we + // use the same HistogramOpts after just modifying the ConstLabels. + histogramOpts := prometheus.HistogramOpts{ + Name: "request_duration_seconds", + Help: "A histogram of latencies for requests.", + Buckets: []float64{.25, .5, 1, 2.5, 5, 10}, + ConstLabels: prometheus.Labels{"handler": "push"}, + } + pushVec := prometheus.NewHistogramVec( + histogramOpts, + []string{"method"}, + ) + histogramOpts.ConstLabels = prometheus.Labels{"handler": "pull"} + pullVec := prometheus.NewHistogramVec( + histogramOpts, + []string{"method"}, + ) + + // responseSize has no labels, making it a zero-dimensional + // ObserverVec. + responseSize := prometheus.NewHistogramVec( + prometheus.HistogramOpts{ + Name: "response_size_bytes", + Help: "A histogram of response sizes for requests.", + Buckets: []float64{200, 500, 900, 1500}, + }, + []string{}, + ) + + // Create the handlers that will be wrapped by the middleware. + pushHandler := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + w.Write([]byte("Push")) + }) + pullHandler := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + w.Write([]byte("Pull")) + }) + + // Register all of the metrics in the standard registry. + prometheus.MustRegister(inFlightGauge, counter, pullVec, pushVec, responseSize) + + // Wrap the pushHandler with our shared middleware, but use the + // endpoint-specific pushVec with InstrumentHandlerDuration. 
+ pushChain := InstrumentHandlerInFlight(inFlightGauge, + InstrumentHandlerCounter(counter, + InstrumentHandlerDuration(pushVec, + InstrumentHandlerResponseSize(responseSize, pushHandler), + ), + ), + ) + + // Wrap the pushHandler with the shared middleware, but use the + // endpoint-specific pullVec with InstrumentHandlerDuration. + pullChain := InstrumentHandlerInFlight(inFlightGauge, + InstrumentHandlerCounter(counter, + InstrumentHandlerDuration(pullVec, + InstrumentHandlerResponseSize(responseSize, pullHandler), + ), + ), + ) + + http.Handle("/metrics", Handler()) + http.Handle("/push", pushChain) + http.Handle("/pull", pullChain) + + if err := http.ListenAndServe(":3000", nil); err != nil { + log.Fatal(err) + } +} diff --git a/vendor/github.com/prometheus/client_golang/prometheus/push/example_add_from_gatherer_test.go b/vendor/github.com/prometheus/client_golang/prometheus/push/example_add_from_gatherer_test.go new file mode 100644 index 0000000..5180c07 --- /dev/null +++ b/vendor/github.com/prometheus/client_golang/prometheus/push/example_add_from_gatherer_test.go @@ -0,0 +1,84 @@ +// Copyright 2016 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Copyright (c) 2013, The Prometheus Authors +// All rights reserved. +// +// Use of this source code is governed by a BSD-style license that can be found +// in the LICENSE file. + +package push_test + +import ( + "fmt" + "time" + + "github.com/prometheus/client_golang/prometheus" + "github.com/prometheus/client_golang/prometheus/push" +) + +var ( + completionTime = prometheus.NewGauge(prometheus.GaugeOpts{ + Name: "db_backup_last_completion_timestamp_seconds", + Help: "The timestamp of the last completion of a DB backup, successful or not.", + }) + successTime = prometheus.NewGauge(prometheus.GaugeOpts{ + Name: "db_backup_last_success_timestamp_seconds", + Help: "The timestamp of the last successful completion of a DB backup.", + }) + duration = prometheus.NewGauge(prometheus.GaugeOpts{ + Name: "db_backup_duration_seconds", + Help: "The duration of the last DB backup in seconds.", + }) + records = prometheus.NewGauge(prometheus.GaugeOpts{ + Name: "db_backup_records_processed", + Help: "The number of records processed in the last DB backup.", + }) +) + +func performBackup() (int, error) { + // Perform the backup and return the number of backed up records and any + // applicable error. + // ... + return 42, nil +} + +func ExampleAddFromGatherer() { + registry := prometheus.NewRegistry() + registry.MustRegister(completionTime, duration, records) + // Note that successTime is not registered at this time. + + start := time.Now() + n, err := performBackup() + records.Set(float64(n)) + // Note that time.Since only uses a monotonic clock in Go1.9+. + duration.Set(time.Since(start).Seconds()) + completionTime.SetToCurrentTime() + if err != nil { + fmt.Println("DB backup failed:", err) + } else { + // Only now register successTime. 
+ registry.MustRegister(successTime) + successTime.SetToCurrentTime() + } + // AddFromGatherer is used here rather than FromGatherer to not delete a + // previously pushed success timestamp in case of a failure of this + // backup. + if err := push.AddFromGatherer( + "db_backup", nil, + "http://pushgateway:9091", + registry, + ); err != nil { + fmt.Println("Could not push to Pushgateway:", err) + } +} diff --git a/vendor/github.com/prometheus/client_golang/prometheus/push/examples_test.go b/vendor/github.com/prometheus/client_golang/prometheus/push/examples_test.go new file mode 100644 index 0000000..7e0ac66 --- /dev/null +++ b/vendor/github.com/prometheus/client_golang/prometheus/push/examples_test.go @@ -0,0 +1,36 @@ +// Copyright 2016 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package push_test + +import ( + "fmt" + + "github.com/prometheus/client_golang/prometheus" + "github.com/prometheus/client_golang/prometheus/push" +) + +func ExampleCollectors() { + completionTime := prometheus.NewGauge(prometheus.GaugeOpts{ + Name: "db_backup_last_completion_timestamp_seconds", + Help: "The timestamp of the last successful completion of a DB backup.", + }) + completionTime.SetToCurrentTime() + if err := push.Collectors( + "db_backup", push.HostnameGroupingKey(), + "http://pushgateway:9091", + completionTime, + ); err != nil { + fmt.Println("Could not push completion time to Pushgateway:", err) + } +} diff --git a/vendor/github.com/prometheus/client_golang/prometheus/push/push.go b/vendor/github.com/prometheus/client_golang/prometheus/push/push.go new file mode 100644 index 0000000..8fb6f5f --- /dev/null +++ b/vendor/github.com/prometheus/client_golang/prometheus/push/push.go @@ -0,0 +1,172 @@ +// Copyright 2015 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Copyright (c) 2013, The Prometheus Authors +// All rights reserved. +// +// Use of this source code is governed by a BSD-style license that can be found +// in the LICENSE file. + +// Package push provides functions to push metrics to a Pushgateway. The metrics +// to push are either collected from a provided registry, or from explicitly +// listed collectors. 
+// +// See the documentation of the Pushgateway to understand the meaning of the +// grouping parameters and the differences between push.Registry and +// push.Collectors on the one hand and push.AddRegistry and push.AddCollectors +// on the other hand: https://github.com/prometheus/pushgateway +package push + +import ( + "bytes" + "fmt" + "io/ioutil" + "net/http" + "net/url" + "os" + "strings" + + "github.com/prometheus/common/expfmt" + "github.com/prometheus/common/model" + + "github.com/prometheus/client_golang/prometheus" +) + +const contentTypeHeader = "Content-Type" + +// FromGatherer triggers a metric collection by the provided Gatherer (which is +// usually implemented by a prometheus.Registry) and pushes all gathered metrics +// to the Pushgateway specified by url, using the provided job name and the +// (optional) further grouping labels (the grouping map may be nil). See the +// Pushgateway documentation for detailed implications of the job and other +// grouping labels. Neither the job name nor any grouping label value may +// contain a "/". The metrics pushed must not contain a job label of their own +// nor any of the grouping labels. +// +// You can use just host:port or ip:port as url, in which case 'http://' is +// added automatically. You can also include the schema in the URL. However, do +// not include the '/metrics/jobs/...' part. +// +// Note that all previously pushed metrics with the same job and other grouping +// labels will be replaced with the metrics pushed by this call. (It uses HTTP +// method 'PUT' to push to the Pushgateway.) +func FromGatherer(job string, grouping map[string]string, url string, g prometheus.Gatherer) error { + return push(job, grouping, url, g, "PUT") +} + +// AddFromGatherer works like FromGatherer, but only previously pushed metrics +// with the same name (and the same job and other grouping labels) will be +// replaced. (It uses HTTP method 'POST' to push to the Pushgateway.) 
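//
// A minimal sketch of a typical call (the job name, Pushgateway URL, and the
// registry contents are placeholders, mirroring the package example):
//
//	registry := prometheus.NewRegistry()
//	registry.MustRegister(completionTime)
//	completionTime.SetToCurrentTime()
//	if err := AddFromGatherer(
//		"db_backup", nil,
//		"http://pushgateway:9091",
//		registry,
//	); err != nil {
//		fmt.Println("Could not push to Pushgateway:", err)
//	}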
+func AddFromGatherer(job string, grouping map[string]string, url string, g prometheus.Gatherer) error { + return push(job, grouping, url, g, "POST") +} + +func push(job string, grouping map[string]string, pushURL string, g prometheus.Gatherer, method string) error { + if !strings.Contains(pushURL, "://") { + pushURL = "http://" + pushURL + } + if strings.HasSuffix(pushURL, "/") { + pushURL = pushURL[:len(pushURL)-1] + } + + if strings.Contains(job, "/") { + return fmt.Errorf("job contains '/': %s", job) + } + urlComponents := []string{url.QueryEscape(job)} + for ln, lv := range grouping { + if !model.LabelName(ln).IsValid() { + return fmt.Errorf("grouping label has invalid name: %s", ln) + } + if strings.Contains(lv, "/") { + return fmt.Errorf("value of grouping label %s contains '/': %s", ln, lv) + } + urlComponents = append(urlComponents, ln, lv) + } + pushURL = fmt.Sprintf("%s/metrics/job/%s", pushURL, strings.Join(urlComponents, "/")) + + mfs, err := g.Gather() + if err != nil { + return err + } + buf := &bytes.Buffer{} + enc := expfmt.NewEncoder(buf, expfmt.FmtProtoDelim) + // Check for pre-existing grouping labels: + for _, mf := range mfs { + for _, m := range mf.GetMetric() { + for _, l := range m.GetLabel() { + if l.GetName() == "job" { + return fmt.Errorf("pushed metric %s (%s) already contains a job label", mf.GetName(), m) + } + if _, ok := grouping[l.GetName()]; ok { + return fmt.Errorf( + "pushed metric %s (%s) already contains grouping label %s", + mf.GetName(), m, l.GetName(), + ) + } + } + } + enc.Encode(mf) + } + req, err := http.NewRequest(method, pushURL, buf) + if err != nil { + return err + } + req.Header.Set(contentTypeHeader, string(expfmt.FmtProtoDelim)) + resp, err := http.DefaultClient.Do(req) + if err != nil { + return err + } + defer resp.Body.Close() + if resp.StatusCode != 202 { + body, _ := ioutil.ReadAll(resp.Body) // Ignore any further error as this is for an error message only. + return fmt.Errorf("unexpected status code %d while pushing to %s: %s", resp.StatusCode, pushURL, body) + } + return nil +} + +// Collectors works like FromGatherer, but it does not use a Gatherer. Instead, +// it collects from the provided collectors directly. It is a convenient way to +// push only a few metrics. +func Collectors(job string, grouping map[string]string, url string, collectors ...prometheus.Collector) error { + return pushCollectors(job, grouping, url, "PUT", collectors...) +} + +// AddCollectors works like AddFromGatherer, but it does not use a Gatherer. +// Instead, it collects from the provided collectors directly. It is a +// convenient way to push only a few metrics. +func AddCollectors(job string, grouping map[string]string, url string, collectors ...prometheus.Collector) error { + return pushCollectors(job, grouping, url, "POST", collectors...) +} + +func pushCollectors(job string, grouping map[string]string, url, method string, collectors ...prometheus.Collector) error { + r := prometheus.NewRegistry() + for _, collector := range collectors { + if err := r.Register(collector); err != nil { + return err + } + } + return push(job, grouping, url, r, method) +} + +// HostnameGroupingKey returns a label map with the only entry +// {instance=""}. This can be conveniently used as the grouping +// parameter if metrics should be pushed with the hostname as label. The +// returned map is created upon each call so that the caller is free to add more +// labels to the map. 
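//
// A minimal sketch of its use as the grouping argument (the URL and metric
// are placeholders, mirroring the package example):
//
//	if err := Collectors(
//		"db_backup", HostnameGroupingKey(),
//		"http://pushgateway:9091",
//		completionTime,
//	); err != nil {
//		fmt.Println("Could not push completion time to Pushgateway:", err)
//	}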
+func HostnameGroupingKey() map[string]string { + hostname, err := os.Hostname() + if err != nil { + return map[string]string{"instance": "unknown"} + } + return map[string]string{"instance": hostname} +} diff --git a/vendor/github.com/prometheus/client_golang/prometheus/push/push_test.go b/vendor/github.com/prometheus/client_golang/prometheus/push/push_test.go new file mode 100644 index 0000000..28ed9b7 --- /dev/null +++ b/vendor/github.com/prometheus/client_golang/prometheus/push/push_test.go @@ -0,0 +1,176 @@ +// Copyright 2016 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Copyright (c) 2013, The Prometheus Authors +// All rights reserved. +// +// Use of this source code is governed by a BSD-style license that can be found +// in the LICENSE file. + +package push + +import ( + "bytes" + "io/ioutil" + "net/http" + "net/http/httptest" + "os" + "testing" + + "github.com/prometheus/common/expfmt" + + "github.com/prometheus/client_golang/prometheus" +) + +func TestPush(t *testing.T) { + + var ( + lastMethod string + lastBody []byte + lastPath string + ) + + host, err := os.Hostname() + if err != nil { + t.Error(err) + } + + // Fake a Pushgateway that always responds with 202. + pgwOK := httptest.NewServer( + http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + lastMethod = r.Method + var err error + lastBody, err = ioutil.ReadAll(r.Body) + if err != nil { + t.Fatal(err) + } + lastPath = r.URL.EscapedPath() + w.Header().Set("Content-Type", `text/plain; charset=utf-8`) + w.WriteHeader(http.StatusAccepted) + }), + ) + defer pgwOK.Close() + + // Fake a Pushgateway that always responds with 500. + pgwErr := httptest.NewServer( + http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + http.Error(w, "fake error", http.StatusInternalServerError) + }), + ) + defer pgwErr.Close() + + metric1 := prometheus.NewCounter(prometheus.CounterOpts{ + Name: "testname1", + Help: "testhelp1", + }) + metric2 := prometheus.NewGauge(prometheus.GaugeOpts{ + Name: "testname2", + Help: "testhelp2", + ConstLabels: prometheus.Labels{"foo": "bar", "dings": "bums"}, + }) + + reg := prometheus.NewRegistry() + reg.MustRegister(metric1) + reg.MustRegister(metric2) + + mfs, err := reg.Gather() + if err != nil { + t.Fatal(err) + } + + buf := &bytes.Buffer{} + enc := expfmt.NewEncoder(buf, expfmt.FmtProtoDelim) + + for _, mf := range mfs { + if err := enc.Encode(mf); err != nil { + t.Fatal(err) + } + } + wantBody := buf.Bytes() + + // PushCollectors, all good. + if err := Collectors("testjob", HostnameGroupingKey(), pgwOK.URL, metric1, metric2); err != nil { + t.Fatal(err) + } + if lastMethod != "PUT" { + t.Error("want method PUT for PushCollectors, got", lastMethod) + } + if bytes.Compare(lastBody, wantBody) != 0 { + t.Errorf("got body %v, want %v", lastBody, wantBody) + } + if lastPath != "/metrics/job/testjob/instance/"+host { + t.Error("unexpected path:", lastPath) + } + + // PushAddCollectors, with nil grouping, all good. 
+ if err := AddCollectors("testjob", nil, pgwOK.URL, metric1, metric2); err != nil { + t.Fatal(err) + } + if lastMethod != "POST" { + t.Error("want method POST for PushAddCollectors, got", lastMethod) + } + if bytes.Compare(lastBody, wantBody) != 0 { + t.Errorf("got body %v, want %v", lastBody, wantBody) + } + if lastPath != "/metrics/job/testjob" { + t.Error("unexpected path:", lastPath) + } + + // PushCollectors with a broken PGW. + if err := Collectors("testjob", nil, pgwErr.URL, metric1, metric2); err == nil { + t.Error("push to broken Pushgateway succeeded") + } else { + if got, want := err.Error(), "unexpected status code 500 while pushing to "+pgwErr.URL+"/metrics/job/testjob: fake error\n"; got != want { + t.Errorf("got error %q, want %q", got, want) + } + } + + // PushCollectors with invalid grouping or job. + if err := Collectors("testjob", map[string]string{"foo": "bums"}, pgwErr.URL, metric1, metric2); err == nil { + t.Error("push with grouping contained in metrics succeeded") + } + if err := Collectors("test/job", nil, pgwErr.URL, metric1, metric2); err == nil { + t.Error("push with invalid job value succeeded") + } + if err := Collectors("testjob", map[string]string{"foo/bar": "bums"}, pgwErr.URL, metric1, metric2); err == nil { + t.Error("push with invalid grouping succeeded") + } + if err := Collectors("testjob", map[string]string{"foo-bar": "bums"}, pgwErr.URL, metric1, metric2); err == nil { + t.Error("push with invalid grouping succeeded") + } + + // Push registry, all good. + if err := FromGatherer("testjob", HostnameGroupingKey(), pgwOK.URL, reg); err != nil { + t.Fatal(err) + } + if lastMethod != "PUT" { + t.Error("want method PUT for Push, got", lastMethod) + } + if bytes.Compare(lastBody, wantBody) != 0 { + t.Errorf("got body %v, want %v", lastBody, wantBody) + } + + // PushAdd registry, all good. + if err := AddFromGatherer("testjob", map[string]string{"a": "x", "b": "y"}, pgwOK.URL, reg); err != nil { + t.Fatal(err) + } + if lastMethod != "POST" { + t.Error("want method POSTT for PushAdd, got", lastMethod) + } + if bytes.Compare(lastBody, wantBody) != 0 { + t.Errorf("got body %v, want %v", lastBody, wantBody) + } + if lastPath != "/metrics/job/testjob/a/x/b/y" && lastPath != "/metrics/job/testjob/b/y/a/x" { + t.Error("unexpected path:", lastPath) + } +} diff --git a/vendor/github.com/prometheus/client_golang/prometheus/registry.go b/vendor/github.com/prometheus/client_golang/prometheus/registry.go new file mode 100644 index 0000000..8f2094c --- /dev/null +++ b/vendor/github.com/prometheus/client_golang/prometheus/registry.go @@ -0,0 +1,762 @@ +// Copyright 2014 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package prometheus + +import ( + "bytes" + "errors" + "fmt" + "os" + "sort" + "sync" + "unicode/utf8" + + "github.com/golang/protobuf/proto" + + dto "github.com/prometheus/client_model/go" +) + +const ( + // Capacity for the channel to collect metrics and descriptors. 
+ capMetricChan = 1000 + capDescChan = 10 +) + +// DefaultRegisterer and DefaultGatherer are the implementations of the +// Registerer and Gatherer interface a number of convenience functions in this +// package act on. Initially, both variables point to the same Registry, which +// has a process collector (see NewProcessCollector) and a Go collector (see +// NewGoCollector) already registered. This approach to keep default instances +// as global state mirrors the approach of other packages in the Go standard +// library. Note that there are caveats. Change the variables with caution and +// only if you understand the consequences. Users who want to avoid global state +// altogether should not use the convenience function and act on custom +// instances instead. +var ( + defaultRegistry = NewRegistry() + DefaultRegisterer Registerer = defaultRegistry + DefaultGatherer Gatherer = defaultRegistry +) + +func init() { + MustRegister(NewProcessCollector(os.Getpid(), "")) + MustRegister(NewGoCollector()) +} + +// NewRegistry creates a new vanilla Registry without any Collectors +// pre-registered. +func NewRegistry() *Registry { + return &Registry{ + collectorsByID: map[uint64]Collector{}, + descIDs: map[uint64]struct{}{}, + dimHashesByName: map[string]uint64{}, + } +} + +// NewPedanticRegistry returns a registry that checks during collection if each +// collected Metric is consistent with its reported Desc, and if the Desc has +// actually been registered with the registry. +// +// Usually, a Registry will be happy as long as the union of all collected +// Metrics is consistent and valid even if some metrics are not consistent with +// their own Desc or a Desc provided by their registered Collector. Well-behaved +// Collectors and Metrics will only provide consistent Descs. This Registry is +// useful to test the implementation of Collectors and Metrics. +func NewPedanticRegistry() *Registry { + r := NewRegistry() + r.pedanticChecksEnabled = true + return r +} + +// Registerer is the interface for the part of a registry in charge of +// registering and unregistering. Users of custom registries should use +// Registerer as type for registration purposes (rather than the Registry type +// directly). In that way, they are free to use custom Registerer implementation +// (e.g. for testing purposes). +type Registerer interface { + // Register registers a new Collector to be included in metrics + // collection. It returns an error if the descriptors provided by the + // Collector are invalid or if they — in combination with descriptors of + // already registered Collectors — do not fulfill the consistency and + // uniqueness criteria described in the documentation of metric.Desc. + // + // If the provided Collector is equal to a Collector already registered + // (which includes the case of re-registering the same Collector), the + // returned error is an instance of AlreadyRegisteredError, which + // contains the previously registered Collector. + // + // It is in general not safe to register the same Collector multiple + // times concurrently. + Register(Collector) error + // MustRegister works like Register but registers any number of + // Collectors and panics upon the first registration that causes an + // error. + MustRegister(...Collector) + // Unregister unregisters the Collector that equals the Collector passed + // in as an argument. (Two Collectors are considered equal if their + // Describe method yields the same set of descriptors.) 
The function + // returns whether a Collector was unregistered. + // + // Note that even after unregistering, it will not be possible to + // register a new Collector that is inconsistent with the unregistered + // Collector, e.g. a Collector collecting metrics with the same name but + // a different help string. The rationale here is that the same registry + // instance must only collect consistent metrics throughout its + // lifetime. + Unregister(Collector) bool +} + +// Gatherer is the interface for the part of a registry in charge of gathering +// the collected metrics into a number of MetricFamilies. The Gatherer interface +// comes with the same general implication as described for the Registerer +// interface. +type Gatherer interface { + // Gather calls the Collect method of the registered Collectors and then + // gathers the collected metrics into a lexicographically sorted slice + // of MetricFamily protobufs. Even if an error occurs, Gather attempts + // to gather as many metrics as possible. Hence, if a non-nil error is + // returned, the returned MetricFamily slice could be nil (in case of a + // fatal error that prevented any meaningful metric collection) or + // contain a number of MetricFamily protobufs, some of which might be + // incomplete, and some might be missing altogether. The returned error + // (which might be a MultiError) explains the details. In scenarios + // where complete collection is critical, the returned MetricFamily + // protobufs should be disregarded if the returned error is non-nil. + Gather() ([]*dto.MetricFamily, error) +} + +// Register registers the provided Collector with the DefaultRegisterer. +// +// Register is a shortcut for DefaultRegisterer.Register(c). See there for more +// details. +func Register(c Collector) error { + return DefaultRegisterer.Register(c) +} + +// MustRegister registers the provided Collectors with the DefaultRegisterer and +// panics if any error occurs. +// +// MustRegister is a shortcut for DefaultRegisterer.MustRegister(cs...). See +// there for more details. +func MustRegister(cs ...Collector) { + DefaultRegisterer.MustRegister(cs...) +} + +// Unregister removes the registration of the provided Collector from the +// DefaultRegisterer. +// +// Unregister is a shortcut for DefaultRegisterer.Unregister(c). See there for +// more details. +func Unregister(c Collector) bool { + return DefaultRegisterer.Unregister(c) +} + +// GathererFunc turns a function into a Gatherer. +type GathererFunc func() ([]*dto.MetricFamily, error) + +// Gather implements Gatherer. +func (gf GathererFunc) Gather() ([]*dto.MetricFamily, error) { + return gf() +} + +// AlreadyRegisteredError is returned by the Register method if the Collector to +// be registered has already been registered before, or a different Collector +// that collects the same metrics has been registered before. Registration fails +// in that case, but you can detect from the kind of error what has +// happened. The error contains fields for the existing Collector and the +// (rejected) new Collector that equals the existing one. This can be used to +// find out if an equal Collector has been registered before and switch over to +// using the old one, as demonstrated in the example. +type AlreadyRegisteredError struct { + ExistingCollector, NewCollector Collector +} + +func (err AlreadyRegisteredError) Error() string { + return "duplicate metrics collector registration attempted" +} + +// MultiError is a slice of errors implementing the error interface. 
It is used +// by a Gatherer to report multiple errors during MetricFamily gathering. +type MultiError []error + +func (errs MultiError) Error() string { + if len(errs) == 0 { + return "" + } + buf := &bytes.Buffer{} + fmt.Fprintf(buf, "%d error(s) occurred:", len(errs)) + for _, err := range errs { + fmt.Fprintf(buf, "\n* %s", err) + } + return buf.String() +} + +// MaybeUnwrap returns nil if len(errs) is 0. It returns the first and only +// contained error as error if len(errs is 1). In all other cases, it returns +// the MultiError directly. This is helpful for returning a MultiError in a way +// that only uses the MultiError if needed. +func (errs MultiError) MaybeUnwrap() error { + switch len(errs) { + case 0: + return nil + case 1: + return errs[0] + default: + return errs + } +} + +// Registry registers Prometheus collectors, collects their metrics, and gathers +// them into MetricFamilies for exposition. It implements both Registerer and +// Gatherer. The zero value is not usable. Create instances with NewRegistry or +// NewPedanticRegistry. +type Registry struct { + mtx sync.RWMutex + collectorsByID map[uint64]Collector // ID is a hash of the descIDs. + descIDs map[uint64]struct{} + dimHashesByName map[string]uint64 + pedanticChecksEnabled bool +} + +// Register implements Registerer. +func (r *Registry) Register(c Collector) error { + var ( + descChan = make(chan *Desc, capDescChan) + newDescIDs = map[uint64]struct{}{} + newDimHashesByName = map[string]uint64{} + collectorID uint64 // Just a sum of all desc IDs. + duplicateDescErr error + ) + go func() { + c.Describe(descChan) + close(descChan) + }() + r.mtx.Lock() + defer r.mtx.Unlock() + // Conduct various tests... + for desc := range descChan { + + // Is the descriptor valid at all? + if desc.err != nil { + return fmt.Errorf("descriptor %s is invalid: %s", desc, desc.err) + } + + // Is the descID unique? + // (In other words: Is the fqName + constLabel combination unique?) + if _, exists := r.descIDs[desc.id]; exists { + duplicateDescErr = fmt.Errorf("descriptor %s already exists with the same fully-qualified name and const label values", desc) + } + // If it is not a duplicate desc in this collector, add it to + // the collectorID. (We allow duplicate descs within the same + // collector, but their existence must be a no-op.) + if _, exists := newDescIDs[desc.id]; !exists { + newDescIDs[desc.id] = struct{}{} + collectorID += desc.id + } + + // Are all the label names and the help string consistent with + // previous descriptors of the same name? + // First check existing descriptors... + if dimHash, exists := r.dimHashesByName[desc.fqName]; exists { + if dimHash != desc.dimHash { + return fmt.Errorf("a previously registered descriptor with the same fully-qualified name as %s has different label names or a different help string", desc) + } + } else { + // ...then check the new descriptors already seen. + if dimHash, exists := newDimHashesByName[desc.fqName]; exists { + if dimHash != desc.dimHash { + return fmt.Errorf("descriptors reported by collector have inconsistent label names or help strings for the same fully-qualified name, offender is %s", desc) + } + } else { + newDimHashesByName[desc.fqName] = desc.dimHash + } + } + } + // Did anything happen at all? 
+ if len(newDescIDs) == 0 { + return errors.New("collector has no descriptors") + } + if existing, exists := r.collectorsByID[collectorID]; exists { + return AlreadyRegisteredError{ + ExistingCollector: existing, + NewCollector: c, + } + } + // If the collectorID is new, but at least one of the descs existed + // before, we are in trouble. + if duplicateDescErr != nil { + return duplicateDescErr + } + + // Only after all tests have passed, actually register. + r.collectorsByID[collectorID] = c + for hash := range newDescIDs { + r.descIDs[hash] = struct{}{} + } + for name, dimHash := range newDimHashesByName { + r.dimHashesByName[name] = dimHash + } + return nil +} + +// Unregister implements Registerer. +func (r *Registry) Unregister(c Collector) bool { + var ( + descChan = make(chan *Desc, capDescChan) + descIDs = map[uint64]struct{}{} + collectorID uint64 // Just a sum of the desc IDs. + ) + go func() { + c.Describe(descChan) + close(descChan) + }() + for desc := range descChan { + if _, exists := descIDs[desc.id]; !exists { + collectorID += desc.id + descIDs[desc.id] = struct{}{} + } + } + + r.mtx.RLock() + if _, exists := r.collectorsByID[collectorID]; !exists { + r.mtx.RUnlock() + return false + } + r.mtx.RUnlock() + + r.mtx.Lock() + defer r.mtx.Unlock() + + delete(r.collectorsByID, collectorID) + for id := range descIDs { + delete(r.descIDs, id) + } + // dimHashesByName is left untouched as those must be consistent + // throughout the lifetime of a program. + return true +} + +// MustRegister implements Registerer. +func (r *Registry) MustRegister(cs ...Collector) { + for _, c := range cs { + if err := r.Register(c); err != nil { + panic(err) + } + } +} + +// Gather implements Gatherer. +func (r *Registry) Gather() ([]*dto.MetricFamily, error) { + var ( + metricChan = make(chan Metric, capMetricChan) + metricHashes = map[uint64]struct{}{} + dimHashes = map[string]uint64{} + wg sync.WaitGroup + errs MultiError // The collected errors to return in the end. + registeredDescIDs map[uint64]struct{} // Only used for pedantic checks + ) + + r.mtx.RLock() + metricFamiliesByName := make(map[string]*dto.MetricFamily, len(r.dimHashesByName)) + + // Scatter. + // (Collectors could be complex and slow, so we call them all at once.) + wg.Add(len(r.collectorsByID)) + go func() { + wg.Wait() + close(metricChan) + }() + for _, collector := range r.collectorsByID { + go func(collector Collector) { + defer wg.Done() + collector.Collect(metricChan) + }(collector) + } + + // In case pedantic checks are enabled, we have to copy the map before + // giving up the RLock. + if r.pedanticChecksEnabled { + registeredDescIDs = make(map[uint64]struct{}, len(r.descIDs)) + for id := range r.descIDs { + registeredDescIDs[id] = struct{}{} + } + } + + r.mtx.RUnlock() + + // Drain metricChan in case of premature return. + defer func() { + for range metricChan { + } + }() + + // Gather. + for metric := range metricChan { + // This could be done concurrently, too, but it required locking + // of metricFamiliesByName (and of metricHashes if checks are + // enabled). Most likely not worth it. 
+ desc := metric.Desc() + dtoMetric := &dto.Metric{} + if err := metric.Write(dtoMetric); err != nil { + errs = append(errs, fmt.Errorf( + "error collecting metric %v: %s", desc, err, + )) + continue + } + metricFamily, ok := metricFamiliesByName[desc.fqName] + if ok { + if metricFamily.GetHelp() != desc.help { + errs = append(errs, fmt.Errorf( + "collected metric %s %s has help %q but should have %q", + desc.fqName, dtoMetric, desc.help, metricFamily.GetHelp(), + )) + continue + } + // TODO(beorn7): Simplify switch once Desc has type. + switch metricFamily.GetType() { + case dto.MetricType_COUNTER: + if dtoMetric.Counter == nil { + errs = append(errs, fmt.Errorf( + "collected metric %s %s should be a Counter", + desc.fqName, dtoMetric, + )) + continue + } + case dto.MetricType_GAUGE: + if dtoMetric.Gauge == nil { + errs = append(errs, fmt.Errorf( + "collected metric %s %s should be a Gauge", + desc.fqName, dtoMetric, + )) + continue + } + case dto.MetricType_SUMMARY: + if dtoMetric.Summary == nil { + errs = append(errs, fmt.Errorf( + "collected metric %s %s should be a Summary", + desc.fqName, dtoMetric, + )) + continue + } + case dto.MetricType_UNTYPED: + if dtoMetric.Untyped == nil { + errs = append(errs, fmt.Errorf( + "collected metric %s %s should be Untyped", + desc.fqName, dtoMetric, + )) + continue + } + case dto.MetricType_HISTOGRAM: + if dtoMetric.Histogram == nil { + errs = append(errs, fmt.Errorf( + "collected metric %s %s should be a Histogram", + desc.fqName, dtoMetric, + )) + continue + } + default: + panic("encountered MetricFamily with invalid type") + } + } else { + metricFamily = &dto.MetricFamily{} + metricFamily.Name = proto.String(desc.fqName) + metricFamily.Help = proto.String(desc.help) + // TODO(beorn7): Simplify switch once Desc has type. + switch { + case dtoMetric.Gauge != nil: + metricFamily.Type = dto.MetricType_GAUGE.Enum() + case dtoMetric.Counter != nil: + metricFamily.Type = dto.MetricType_COUNTER.Enum() + case dtoMetric.Summary != nil: + metricFamily.Type = dto.MetricType_SUMMARY.Enum() + case dtoMetric.Untyped != nil: + metricFamily.Type = dto.MetricType_UNTYPED.Enum() + case dtoMetric.Histogram != nil: + metricFamily.Type = dto.MetricType_HISTOGRAM.Enum() + default: + errs = append(errs, fmt.Errorf( + "empty metric collected: %s", dtoMetric, + )) + continue + } + metricFamiliesByName[desc.fqName] = metricFamily + } + if err := checkMetricConsistency(metricFamily, dtoMetric, metricHashes, dimHashes); err != nil { + errs = append(errs, err) + continue + } + if r.pedanticChecksEnabled { + // Is the desc registered at all? + if _, exist := registeredDescIDs[desc.id]; !exist { + errs = append(errs, fmt.Errorf( + "collected metric %s %s with unregistered descriptor %s", + metricFamily.GetName(), dtoMetric, desc, + )) + continue + } + if err := checkDescConsistency(metricFamily, dtoMetric, desc); err != nil { + errs = append(errs, err) + continue + } + } + metricFamily.Metric = append(metricFamily.Metric, dtoMetric) + } + return normalizeMetricFamilies(metricFamiliesByName), errs.MaybeUnwrap() +} + +// Gatherers is a slice of Gatherer instances that implements the Gatherer +// interface itself. Its Gather method calls Gather on all Gatherers in the +// slice in order and returns the merged results. Errors returned from the +// Gather calles are all returned in a flattened MultiError. Duplicate and +// inconsistent Metrics are skipped (first occurrence in slice order wins) and +// reported in the returned error. 
+// +// Gatherers can be used to merge the Gather results from multiple +// Registries. It also provides a way to directly inject existing MetricFamily +// protobufs into the gathering by creating a custom Gatherer with a Gather +// method that simply returns the existing MetricFamily protobufs. Note that no +// registration is involved (in contrast to Collector registration), so +// obviously registration-time checks cannot happen. Any inconsistencies between +// the gathered MetricFamilies are reported as errors by the Gather method, and +// inconsistent Metrics are dropped. Invalid parts of the MetricFamilies +// (e.g. syntactically invalid metric or label names) will go undetected. +type Gatherers []Gatherer + +// Gather implements Gatherer. +func (gs Gatherers) Gather() ([]*dto.MetricFamily, error) { + var ( + metricFamiliesByName = map[string]*dto.MetricFamily{} + metricHashes = map[uint64]struct{}{} + dimHashes = map[string]uint64{} + errs MultiError // The collected errors to return in the end. + ) + + for i, g := range gs { + mfs, err := g.Gather() + if err != nil { + if multiErr, ok := err.(MultiError); ok { + for _, err := range multiErr { + errs = append(errs, fmt.Errorf("[from Gatherer #%d] %s", i+1, err)) + } + } else { + errs = append(errs, fmt.Errorf("[from Gatherer #%d] %s", i+1, err)) + } + } + for _, mf := range mfs { + existingMF, exists := metricFamiliesByName[mf.GetName()] + if exists { + if existingMF.GetHelp() != mf.GetHelp() { + errs = append(errs, fmt.Errorf( + "gathered metric family %s has help %q but should have %q", + mf.GetName(), mf.GetHelp(), existingMF.GetHelp(), + )) + continue + } + if existingMF.GetType() != mf.GetType() { + errs = append(errs, fmt.Errorf( + "gathered metric family %s has type %s but should have %s", + mf.GetName(), mf.GetType(), existingMF.GetType(), + )) + continue + } + } else { + existingMF = &dto.MetricFamily{} + existingMF.Name = mf.Name + existingMF.Help = mf.Help + existingMF.Type = mf.Type + metricFamiliesByName[mf.GetName()] = existingMF + } + for _, m := range mf.Metric { + if err := checkMetricConsistency(existingMF, m, metricHashes, dimHashes); err != nil { + errs = append(errs, err) + continue + } + existingMF.Metric = append(existingMF.Metric, m) + } + } + } + return normalizeMetricFamilies(metricFamiliesByName), errs.MaybeUnwrap() +} + +// metricSorter is a sortable slice of *dto.Metric. +type metricSorter []*dto.Metric + +func (s metricSorter) Len() int { + return len(s) +} + +func (s metricSorter) Swap(i, j int) { + s[i], s[j] = s[j], s[i] +} + +func (s metricSorter) Less(i, j int) bool { + if len(s[i].Label) != len(s[j].Label) { + // This should not happen. The metrics are + // inconsistent. However, we have to deal with the fact, as + // people might use custom collectors or metric family injection + // to create inconsistent metrics. So let's simply compare the + // number of labels in this case. That will still yield + // reproducible sorting. + return len(s[i].Label) < len(s[j].Label) + } + for n, lp := range s[i].Label { + vi := lp.GetValue() + vj := s[j].Label[n].GetValue() + if vi != vj { + return vi < vj + } + } + + // We should never arrive here. Multiple metrics with the same + // label set in the same scrape will lead to undefined ingestion + // behavior. However, as above, we have to provide stable sorting + // here, even for inconsistent metrics. So sort equal metrics + // by their timestamp, with missing timestamps (implying "now") + // coming last. 
+ if s[i].TimestampMs == nil {
+ return false
+ }
+ if s[j].TimestampMs == nil {
+ return true
+ }
+ return s[i].GetTimestampMs() < s[j].GetTimestampMs()
+}
+
+// normalizeMetricFamilies returns a MetricFamily slice with empty
+// MetricFamilies pruned and the remaining MetricFamilies sorted by name within
+// the slice, with the contained Metrics sorted within each MetricFamily.
+func normalizeMetricFamilies(metricFamiliesByName map[string]*dto.MetricFamily) []*dto.MetricFamily {
+ for _, mf := range metricFamiliesByName {
+ sort.Sort(metricSorter(mf.Metric))
+ }
+ names := make([]string, 0, len(metricFamiliesByName))
+ for name, mf := range metricFamiliesByName {
+ if len(mf.Metric) > 0 {
+ names = append(names, name)
+ }
+ }
+ sort.Strings(names)
+ result := make([]*dto.MetricFamily, 0, len(names))
+ for _, name := range names {
+ result = append(result, metricFamiliesByName[name])
+ }
+ return result
+}
+
+// checkMetricConsistency checks if the provided Metric is consistent with the
+// provided MetricFamily. It also hashes the Metric labels and the MetricFamily
+// name. If the resulting hash is already in the provided metricHashes, an error
+// is returned. If not, it is added to metricHashes. The provided dimHashes maps
+// MetricFamily names to their dimHash (hashed sorted label names). If dimHashes
+// doesn't yet contain a hash for the provided MetricFamily, it is
+// added. Otherwise, an error is returned if the existing dimHash is not equal
+// to the calculated dimHash.
+func checkMetricConsistency(
+ metricFamily *dto.MetricFamily,
+ dtoMetric *dto.Metric,
+ metricHashes map[uint64]struct{},
+ dimHashes map[string]uint64,
+) error {
+ // Type consistency with metric family.
+ if metricFamily.GetType() == dto.MetricType_GAUGE && dtoMetric.Gauge == nil ||
+ metricFamily.GetType() == dto.MetricType_COUNTER && dtoMetric.Counter == nil ||
+ metricFamily.GetType() == dto.MetricType_SUMMARY && dtoMetric.Summary == nil ||
+ metricFamily.GetType() == dto.MetricType_HISTOGRAM && dtoMetric.Histogram == nil ||
+ metricFamily.GetType() == dto.MetricType_UNTYPED && dtoMetric.Untyped == nil {
+ return fmt.Errorf(
+ "collected metric %s %s is not a %s",
+ metricFamily.GetName(), dtoMetric, metricFamily.GetType(),
+ )
+ }
+
+ for _, labelPair := range dtoMetric.GetLabel() {
+ if !utf8.ValidString(*labelPair.Value) {
+ return fmt.Errorf("collected metric's label %s is not utf8: %#v", *labelPair.Name, *labelPair.Value)
+ }
+ }
+
+ // Is the metric unique (i.e. no other metric with the same name and the same label values)?
+ h := hashNew()
+ h = hashAdd(h, metricFamily.GetName())
+ h = hashAddByte(h, separatorByte)
+ dh := hashNew()
+ // Make sure label pairs are sorted. We depend on it for the consistency
+ // check.
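+ // Note: h accumulates the metric name plus all label values (to detect
+ // exact duplicates), while dh accumulates only the label names (to detect
+ // inconsistent label dimensions within the same metric family).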
+ sort.Sort(LabelPairSorter(dtoMetric.Label)) + for _, lp := range dtoMetric.Label { + h = hashAdd(h, lp.GetValue()) + h = hashAddByte(h, separatorByte) + dh = hashAdd(dh, lp.GetName()) + dh = hashAddByte(dh, separatorByte) + } + if _, exists := metricHashes[h]; exists { + return fmt.Errorf( + "collected metric %s %s was collected before with the same name and label values", + metricFamily.GetName(), dtoMetric, + ) + } + if dimHash, ok := dimHashes[metricFamily.GetName()]; ok { + if dimHash != dh { + return fmt.Errorf( + "collected metric %s %s has label dimensions inconsistent with previously collected metrics in the same metric family", + metricFamily.GetName(), dtoMetric, + ) + } + } else { + dimHashes[metricFamily.GetName()] = dh + } + metricHashes[h] = struct{}{} + return nil +} + +func checkDescConsistency( + metricFamily *dto.MetricFamily, + dtoMetric *dto.Metric, + desc *Desc, +) error { + // Desc help consistency with metric family help. + if metricFamily.GetHelp() != desc.help { + return fmt.Errorf( + "collected metric %s %s has help %q but should have %q", + metricFamily.GetName(), dtoMetric, metricFamily.GetHelp(), desc.help, + ) + } + + // Is the desc consistent with the content of the metric? + lpsFromDesc := make([]*dto.LabelPair, 0, len(dtoMetric.Label)) + lpsFromDesc = append(lpsFromDesc, desc.constLabelPairs...) + for _, l := range desc.variableLabels { + lpsFromDesc = append(lpsFromDesc, &dto.LabelPair{ + Name: proto.String(l), + }) + } + if len(lpsFromDesc) != len(dtoMetric.Label) { + return fmt.Errorf( + "labels in collected metric %s %s are inconsistent with descriptor %s", + metricFamily.GetName(), dtoMetric, desc, + ) + } + sort.Sort(LabelPairSorter(lpsFromDesc)) + for i, lpFromDesc := range lpsFromDesc { + lpFromMetric := dtoMetric.Label[i] + if lpFromDesc.GetName() != lpFromMetric.GetName() || + lpFromDesc.Value != nil && lpFromDesc.GetValue() != lpFromMetric.GetValue() { + return fmt.Errorf( + "labels in collected metric %s %s are inconsistent with descriptor %s", + metricFamily.GetName(), dtoMetric, desc, + ) + } + } + return nil +} diff --git a/vendor/github.com/prometheus/client_golang/prometheus/registry_test.go b/vendor/github.com/prometheus/client_golang/prometheus/registry_test.go new file mode 100644 index 0000000..d136bba --- /dev/null +++ b/vendor/github.com/prometheus/client_golang/prometheus/registry_test.go @@ -0,0 +1,590 @@ +// Copyright 2014 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Copyright (c) 2013, The Prometheus Authors +// All rights reserved. +// +// Use of this source code is governed by a BSD-style license that can be found +// in the LICENSE file. 
+ +package prometheus_test + +import ( + "bytes" + "net/http" + "net/http/httptest" + "testing" + + dto "github.com/prometheus/client_model/go" + + "github.com/golang/protobuf/proto" + "github.com/prometheus/common/expfmt" + + "github.com/prometheus/client_golang/prometheus" + "github.com/prometheus/client_golang/prometheus/promhttp" +) + +func testHandler(t testing.TB) { + + metricVec := prometheus.NewCounterVec( + prometheus.CounterOpts{ + Name: "name", + Help: "docstring", + ConstLabels: prometheus.Labels{"constname": "constvalue"}, + }, + []string{"labelname"}, + ) + + metricVec.WithLabelValues("val1").Inc() + metricVec.WithLabelValues("val2").Inc() + + externalMetricFamily := &dto.MetricFamily{ + Name: proto.String("externalname"), + Help: proto.String("externaldocstring"), + Type: dto.MetricType_COUNTER.Enum(), + Metric: []*dto.Metric{ + { + Label: []*dto.LabelPair{ + { + Name: proto.String("externalconstname"), + Value: proto.String("externalconstvalue"), + }, + { + Name: proto.String("externallabelname"), + Value: proto.String("externalval1"), + }, + }, + Counter: &dto.Counter{ + Value: proto.Float64(1), + }, + }, + }, + } + externalBuf := &bytes.Buffer{} + enc := expfmt.NewEncoder(externalBuf, expfmt.FmtProtoDelim) + if err := enc.Encode(externalMetricFamily); err != nil { + t.Fatal(err) + } + externalMetricFamilyAsBytes := externalBuf.Bytes() + externalMetricFamilyAsText := []byte(`# HELP externalname externaldocstring +# TYPE externalname counter +externalname{externalconstname="externalconstvalue",externallabelname="externalval1"} 1 +`) + externalMetricFamilyAsProtoText := []byte(`name: "externalname" +help: "externaldocstring" +type: COUNTER +metric: < + label: < + name: "externalconstname" + value: "externalconstvalue" + > + label: < + name: "externallabelname" + value: "externalval1" + > + counter: < + value: 1 + > +> + +`) + externalMetricFamilyAsProtoCompactText := []byte(`name:"externalname" help:"externaldocstring" type:COUNTER metric: label: counter: > +`) + + expectedMetricFamily := &dto.MetricFamily{ + Name: proto.String("name"), + Help: proto.String("docstring"), + Type: dto.MetricType_COUNTER.Enum(), + Metric: []*dto.Metric{ + { + Label: []*dto.LabelPair{ + { + Name: proto.String("constname"), + Value: proto.String("constvalue"), + }, + { + Name: proto.String("labelname"), + Value: proto.String("val1"), + }, + }, + Counter: &dto.Counter{ + Value: proto.Float64(1), + }, + }, + { + Label: []*dto.LabelPair{ + { + Name: proto.String("constname"), + Value: proto.String("constvalue"), + }, + { + Name: proto.String("labelname"), + Value: proto.String("val2"), + }, + }, + Counter: &dto.Counter{ + Value: proto.Float64(1), + }, + }, + }, + } + buf := &bytes.Buffer{} + enc = expfmt.NewEncoder(buf, expfmt.FmtProtoDelim) + if err := enc.Encode(expectedMetricFamily); err != nil { + t.Fatal(err) + } + expectedMetricFamilyAsBytes := buf.Bytes() + expectedMetricFamilyAsText := []byte(`# HELP name docstring +# TYPE name counter +name{constname="constvalue",labelname="val1"} 1 +name{constname="constvalue",labelname="val2"} 1 +`) + expectedMetricFamilyAsProtoText := []byte(`name: "name" +help: "docstring" +type: COUNTER +metric: < + label: < + name: "constname" + value: "constvalue" + > + label: < + name: "labelname" + value: "val1" + > + counter: < + value: 1 + > +> +metric: < + label: < + name: "constname" + value: "constvalue" + > + label: < + name: "labelname" + value: "val2" + > + counter: < + value: 1 + > +> + +`) + expectedMetricFamilyAsProtoCompactText := []byte(`name:"name" 
help:"docstring" type:COUNTER metric: label: counter: > metric: label: counter: > +`) + + externalMetricFamilyWithSameName := &dto.MetricFamily{ + Name: proto.String("name"), + Help: proto.String("docstring"), + Type: dto.MetricType_COUNTER.Enum(), + Metric: []*dto.Metric{ + { + Label: []*dto.LabelPair{ + { + Name: proto.String("constname"), + Value: proto.String("constvalue"), + }, + { + Name: proto.String("labelname"), + Value: proto.String("different_val"), + }, + }, + Counter: &dto.Counter{ + Value: proto.Float64(42), + }, + }, + }, + } + + expectedMetricFamilyMergedWithExternalAsProtoCompactText := []byte(`name:"name" help:"docstring" type:COUNTER metric: label: counter: > metric: label: counter: > metric: label: counter: > +`) + + externalMetricFamilyWithInvalidLabelValue := &dto.MetricFamily{ + Name: proto.String("name"), + Help: proto.String("docstring"), + Type: dto.MetricType_COUNTER.Enum(), + Metric: []*dto.Metric{ + { + Label: []*dto.LabelPair{ + { + Name: proto.String("constname"), + Value: proto.String("\xFF"), + }, + { + Name: proto.String("labelname"), + Value: proto.String("different_val"), + }, + }, + Counter: &dto.Counter{ + Value: proto.Float64(42), + }, + }, + }, + } + + expectedMetricFamilyInvalidLabelValueAsText := []byte(`An error has occurred during metrics gathering: + +collected metric's label constname is not utf8: "\xff" +`) + + type output struct { + headers map[string]string + body []byte + } + + var scenarios = []struct { + headers map[string]string + out output + collector prometheus.Collector + externalMF []*dto.MetricFamily + }{ + { // 0 + headers: map[string]string{ + "Accept": "foo/bar;q=0.2, dings/bums;q=0.8", + }, + out: output{ + headers: map[string]string{ + "Content-Type": `text/plain; version=0.0.4`, + }, + body: []byte{}, + }, + }, + { // 1 + headers: map[string]string{ + "Accept": "foo/bar;q=0.2, application/quark;q=0.8", + }, + out: output{ + headers: map[string]string{ + "Content-Type": `text/plain; version=0.0.4`, + }, + body: []byte{}, + }, + }, + { // 2 + headers: map[string]string{ + "Accept": "foo/bar;q=0.2, application/vnd.google.protobuf;proto=io.prometheus.client.MetricFamily;encoding=bla;q=0.8", + }, + out: output{ + headers: map[string]string{ + "Content-Type": `text/plain; version=0.0.4`, + }, + body: []byte{}, + }, + }, + { // 3 + headers: map[string]string{ + "Accept": "text/plain;q=0.2, application/vnd.google.protobuf;proto=io.prometheus.client.MetricFamily;encoding=delimited;q=0.8", + }, + out: output{ + headers: map[string]string{ + "Content-Type": `application/vnd.google.protobuf; proto=io.prometheus.client.MetricFamily; encoding=delimited`, + }, + body: []byte{}, + }, + }, + { // 4 + headers: map[string]string{ + "Accept": "application/json", + }, + out: output{ + headers: map[string]string{ + "Content-Type": `text/plain; version=0.0.4`, + }, + body: expectedMetricFamilyAsText, + }, + collector: metricVec, + }, + { // 5 + headers: map[string]string{ + "Accept": "application/vnd.google.protobuf;proto=io.prometheus.client.MetricFamily;encoding=delimited", + }, + out: output{ + headers: map[string]string{ + "Content-Type": `application/vnd.google.protobuf; proto=io.prometheus.client.MetricFamily; encoding=delimited`, + }, + body: expectedMetricFamilyAsBytes, + }, + collector: metricVec, + }, + { // 6 + headers: map[string]string{ + "Accept": "application/json", + }, + out: output{ + headers: map[string]string{ + "Content-Type": `text/plain; version=0.0.4`, + }, + body: externalMetricFamilyAsText, + }, + externalMF: 
[]*dto.MetricFamily{externalMetricFamily}, + }, + { // 7 + headers: map[string]string{ + "Accept": "application/vnd.google.protobuf;proto=io.prometheus.client.MetricFamily;encoding=delimited", + }, + out: output{ + headers: map[string]string{ + "Content-Type": `application/vnd.google.protobuf; proto=io.prometheus.client.MetricFamily; encoding=delimited`, + }, + body: externalMetricFamilyAsBytes, + }, + externalMF: []*dto.MetricFamily{externalMetricFamily}, + }, + { // 8 + headers: map[string]string{ + "Accept": "application/vnd.google.protobuf;proto=io.prometheus.client.MetricFamily;encoding=delimited", + }, + out: output{ + headers: map[string]string{ + "Content-Type": `application/vnd.google.protobuf; proto=io.prometheus.client.MetricFamily; encoding=delimited`, + }, + body: bytes.Join( + [][]byte{ + externalMetricFamilyAsBytes, + expectedMetricFamilyAsBytes, + }, + []byte{}, + ), + }, + collector: metricVec, + externalMF: []*dto.MetricFamily{externalMetricFamily}, + }, + { // 9 + headers: map[string]string{ + "Accept": "text/plain", + }, + out: output{ + headers: map[string]string{ + "Content-Type": `text/plain; version=0.0.4`, + }, + body: []byte{}, + }, + }, + { // 10 + headers: map[string]string{ + "Accept": "application/vnd.google.protobuf;proto=io.prometheus.client.MetricFamily;encoding=bla;q=0.2, text/plain;q=0.5", + }, + out: output{ + headers: map[string]string{ + "Content-Type": `text/plain; version=0.0.4`, + }, + body: expectedMetricFamilyAsText, + }, + collector: metricVec, + }, + { // 11 + headers: map[string]string{ + "Accept": "application/vnd.google.protobuf;proto=io.prometheus.client.MetricFamily;encoding=bla;q=0.2, text/plain;q=0.5;version=0.0.4", + }, + out: output{ + headers: map[string]string{ + "Content-Type": `text/plain; version=0.0.4`, + }, + body: bytes.Join( + [][]byte{ + externalMetricFamilyAsText, + expectedMetricFamilyAsText, + }, + []byte{}, + ), + }, + collector: metricVec, + externalMF: []*dto.MetricFamily{externalMetricFamily}, + }, + { // 12 + headers: map[string]string{ + "Accept": "application/vnd.google.protobuf;proto=io.prometheus.client.MetricFamily;encoding=delimited;q=0.2, text/plain;q=0.5;version=0.0.2", + }, + out: output{ + headers: map[string]string{ + "Content-Type": `application/vnd.google.protobuf; proto=io.prometheus.client.MetricFamily; encoding=delimited`, + }, + body: bytes.Join( + [][]byte{ + externalMetricFamilyAsBytes, + expectedMetricFamilyAsBytes, + }, + []byte{}, + ), + }, + collector: metricVec, + externalMF: []*dto.MetricFamily{externalMetricFamily}, + }, + { // 13 + headers: map[string]string{ + "Accept": "application/vnd.google.protobuf;proto=io.prometheus.client.MetricFamily;encoding=text;q=0.5, application/vnd.google.protobuf;proto=io.prometheus.client.MetricFamily;encoding=delimited;q=0.4", + }, + out: output{ + headers: map[string]string{ + "Content-Type": `application/vnd.google.protobuf; proto=io.prometheus.client.MetricFamily; encoding=text`, + }, + body: bytes.Join( + [][]byte{ + externalMetricFamilyAsProtoText, + expectedMetricFamilyAsProtoText, + }, + []byte{}, + ), + }, + collector: metricVec, + externalMF: []*dto.MetricFamily{externalMetricFamily}, + }, + { // 14 + headers: map[string]string{ + "Accept": "application/vnd.google.protobuf;proto=io.prometheus.client.MetricFamily;encoding=compact-text", + }, + out: output{ + headers: map[string]string{ + "Content-Type": `application/vnd.google.protobuf; proto=io.prometheus.client.MetricFamily; encoding=compact-text`, + }, + body: bytes.Join( + [][]byte{ + 
externalMetricFamilyAsProtoCompactText, + expectedMetricFamilyAsProtoCompactText, + }, + []byte{}, + ), + }, + collector: metricVec, + externalMF: []*dto.MetricFamily{externalMetricFamily}, + }, + { // 15 + headers: map[string]string{ + "Accept": "application/vnd.google.protobuf;proto=io.prometheus.client.MetricFamily;encoding=compact-text", + }, + out: output{ + headers: map[string]string{ + "Content-Type": `application/vnd.google.protobuf; proto=io.prometheus.client.MetricFamily; encoding=compact-text`, + }, + body: bytes.Join( + [][]byte{ + externalMetricFamilyAsProtoCompactText, + expectedMetricFamilyMergedWithExternalAsProtoCompactText, + }, + []byte{}, + ), + }, + collector: metricVec, + externalMF: []*dto.MetricFamily{ + externalMetricFamily, + externalMetricFamilyWithSameName, + }, + }, + { // 16 + headers: map[string]string{ + "Accept": "application/vnd.google.protobuf;proto=io.prometheus.client.MetricFamily;encoding=compact-text", + }, + out: output{ + headers: map[string]string{ + "Content-Type": `text/plain; charset=utf-8`, + }, + body: expectedMetricFamilyInvalidLabelValueAsText, + }, + collector: metricVec, + externalMF: []*dto.MetricFamily{ + externalMetricFamily, + externalMetricFamilyWithInvalidLabelValue, + }, + }, + } + for i, scenario := range scenarios { + registry := prometheus.NewPedanticRegistry() + gatherer := prometheus.Gatherer(registry) + if scenario.externalMF != nil { + gatherer = prometheus.Gatherers{ + registry, + prometheus.GathererFunc(func() ([]*dto.MetricFamily, error) { + return scenario.externalMF, nil + }), + } + } + + if scenario.collector != nil { + registry.Register(scenario.collector) + } + writer := httptest.NewRecorder() + handler := prometheus.InstrumentHandler("prometheus", promhttp.HandlerFor(gatherer, promhttp.HandlerOpts{})) + request, _ := http.NewRequest("GET", "/", nil) + for key, value := range scenario.headers { + request.Header.Add(key, value) + } + handler(writer, request) + + for key, value := range scenario.out.headers { + if writer.HeaderMap.Get(key) != value { + t.Errorf( + "%d. expected %q for header %q, got %q", + i, value, key, writer.Header().Get(key), + ) + } + } + + if !bytes.Equal(scenario.out.body, writer.Body.Bytes()) { + t.Errorf( + "%d. expected body:\n%s\ngot body:\n%s\n", + i, scenario.out.body, writer.Body.Bytes(), + ) + } + } +} + +func TestHandler(t *testing.T) { + testHandler(t) +} + +func BenchmarkHandler(b *testing.B) { + for i := 0; i < b.N; i++ { + testHandler(b) + } +} + +func TestRegisterWithOrGet(t *testing.T) { + // Replace the default registerer just to be sure. This is bad, but this + // whole test will go away once RegisterOrGet is removed. 
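+ // Save the current default registerer and restore it in a deferred call,
+ // so the swap below cannot leak into other tests.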
+ oldRegisterer := prometheus.DefaultRegisterer + defer func() { + prometheus.DefaultRegisterer = oldRegisterer + }() + prometheus.DefaultRegisterer = prometheus.NewRegistry() + original := prometheus.NewCounterVec( + prometheus.CounterOpts{ + Name: "test", + Help: "help", + }, + []string{"foo", "bar"}, + ) + equalButNotSame := prometheus.NewCounterVec( + prometheus.CounterOpts{ + Name: "test", + Help: "help", + }, + []string{"foo", "bar"}, + ) + var err error + if err = prometheus.Register(original); err != nil { + t.Fatal(err) + } + if err = prometheus.Register(equalButNotSame); err == nil { + t.Fatal("expected error when registringe equal collector") + } + if are, ok := err.(prometheus.AlreadyRegisteredError); ok { + if are.ExistingCollector != original { + t.Error("expected original collector but got something else") + } + if are.ExistingCollector == equalButNotSame { + t.Error("expected original callector but got new one") + } + } else { + t.Error("unexpected error:", err) + } +} diff --git a/vendor/github.com/prometheus/client_golang/prometheus/summary.go b/vendor/github.com/prometheus/client_golang/prometheus/summary.go new file mode 100644 index 0000000..56b0663 --- /dev/null +++ b/vendor/github.com/prometheus/client_golang/prometheus/summary.go @@ -0,0 +1,572 @@ +// Copyright 2014 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package prometheus + +import ( + "fmt" + "math" + "sort" + "sync" + "time" + + "github.com/beorn7/perks/quantile" + "github.com/golang/protobuf/proto" + + dto "github.com/prometheus/client_model/go" +) + +// quantileLabel is used for the label that defines the quantile in a +// summary. +const quantileLabel = "quantile" + +// A Summary captures individual observations from an event or sample stream and +// summarizes them in a manner similar to traditional summary statistics: 1. sum +// of observations, 2. observation count, 3. rank estimations. +// +// A typical use-case is the observation of request latencies. By default, a +// Summary provides the median, the 90th and the 99th percentile of the latency +// as rank estimations. +// +// Note that the rank estimations cannot be aggregated in a meaningful way with +// the Prometheus query language (i.e. you cannot average or add them). If you +// need aggregatable quantiles (e.g. you want the 99th percentile latency of all +// queries served across all instances of a service), consider the Histogram +// metric type. See the Prometheus documentation for more details. +// +// To create Summary instances, use NewSummary. +type Summary interface { + Metric + Collector + + // Observe adds a single observation to the summary. + Observe(float64) +} + +// DefObjectives are the default Summary quantile values. +// +// Deprecated: DefObjectives will not be used as the default objectives in +// v0.10 of the library. The default Summary will have no quantiles then. 
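+//
+// To opt into that future behavior already, set SummaryOpts.Objectives
+// explicitly to an empty map (see the SummaryOpts documentation below).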
+var ( + DefObjectives = map[float64]float64{0.5: 0.05, 0.9: 0.01, 0.99: 0.001} + + errQuantileLabelNotAllowed = fmt.Errorf( + "%q is not allowed as label name in summaries", quantileLabel, + ) +) + +// Default values for SummaryOpts. +const ( + // DefMaxAge is the default duration for which observations stay + // relevant. + DefMaxAge time.Duration = 10 * time.Minute + // DefAgeBuckets is the default number of buckets used to calculate the + // age of observations. + DefAgeBuckets = 5 + // DefBufCap is the standard buffer size for collecting Summary observations. + DefBufCap = 500 +) + +// SummaryOpts bundles the options for creating a Summary metric. It is +// mandatory to set Name and Help to a non-empty string. All other fields are +// optional and can safely be left at their zero value. +type SummaryOpts struct { + // Namespace, Subsystem, and Name are components of the fully-qualified + // name of the Summary (created by joining these components with + // "_"). Only Name is mandatory, the others merely help structuring the + // name. Note that the fully-qualified name of the Summary must be a + // valid Prometheus metric name. + Namespace string + Subsystem string + Name string + + // Help provides information about this Summary. Mandatory! + // + // Metrics with the same fully-qualified name must have the same Help + // string. + Help string + + // ConstLabels are used to attach fixed labels to this + // Summary. Summaries with the same fully-qualified name must have the + // same label names in their ConstLabels. + // + // Note that in most cases, labels have a value that varies during the + // lifetime of a process. Those labels are usually managed with a + // SummaryVec. ConstLabels serve only special purposes. One is for the + // special case where the value of a label does not change during the + // lifetime of a process, e.g. if the revision of the running binary is + // put into a label. Another, more advanced purpose is if more than one + // Collector needs to collect Summaries with the same fully-qualified + // name. In that case, those Summaries must differ in the values of + // their ConstLabels. See the Collector examples. + // + // If the value of a label never changes (not even between binaries), + // that label most likely should not be a label at all (but part of the + // metric name). + ConstLabels Labels + + // Objectives defines the quantile rank estimates with their respective + // absolute error. If Objectives[q] = e, then the value reported for q + // will be the φ-quantile value for some φ between q-e and q+e. The + // default value is DefObjectives. It is used if Objectives is left at + // its zero value (i.e. nil). To create a Summary without Objectives, + // set it to an empty map (i.e. map[float64]float64{}). + // + // Deprecated: Note that the current value of DefObjectives is + // deprecated. It will be replaced by an empty map in v0.10 of the + // library. Please explicitly set Objectives to the desired value. + Objectives map[float64]float64 + + // MaxAge defines the duration for which an observation stays relevant + // for the summary. Must be positive. The default value is DefMaxAge. + MaxAge time.Duration + + // AgeBuckets is the number of buckets used to exclude observations that + // are older than MaxAge from the summary. A higher number has a + // resource penalty, so only increase it if the higher resolution is + // really required. For very high observation rates, you might want to + // reduce the number of age buckets. 
With only one age bucket, you will + // effectively see a complete reset of the summary each time MaxAge has + // passed. The default value is DefAgeBuckets. + AgeBuckets uint32 + + // BufCap defines the default sample stream buffer size. The default + // value of DefBufCap should suffice for most uses. If there is a need + // to increase the value, a multiple of 500 is recommended (because that + // is the internal buffer size of the underlying package + // "github.com/bmizerany/perks/quantile"). + BufCap uint32 +} + +// Great fuck-up with the sliding-window decay algorithm... The Merge method of +// perk/quantile is actually not working as advertised - and it might be +// unfixable, as the underlying algorithm is apparently not capable of merging +// summaries in the first place. To avoid using Merge, we are currently adding +// observations to _each_ age bucket, i.e. the effort to add a sample is +// essentially multiplied by the number of age buckets. When rotating age +// buckets, we empty the previous head stream. On scrape time, we simply take +// the quantiles from the head stream (no merging required). Result: More effort +// on observation time, less effort on scrape time, which is exactly the +// opposite of what we try to accomplish, but at least the results are correct. +// +// The quite elegant previous contraption to merge the age buckets efficiently +// on scrape time (see code up commit 6b9530d72ea715f0ba612c0120e6e09fbf1d49d0) +// can't be used anymore. + +// NewSummary creates a new Summary based on the provided SummaryOpts. +func NewSummary(opts SummaryOpts) Summary { + return newSummary( + NewDesc( + BuildFQName(opts.Namespace, opts.Subsystem, opts.Name), + opts.Help, + nil, + opts.ConstLabels, + ), + opts, + ) +} + +func newSummary(desc *Desc, opts SummaryOpts, labelValues ...string) Summary { + if len(desc.variableLabels) != len(labelValues) { + panic(errInconsistentCardinality) + } + + for _, n := range desc.variableLabels { + if n == quantileLabel { + panic(errQuantileLabelNotAllowed) + } + } + for _, lp := range desc.constLabelPairs { + if lp.GetName() == quantileLabel { + panic(errQuantileLabelNotAllowed) + } + } + + if opts.Objectives == nil { + opts.Objectives = DefObjectives + } + + if opts.MaxAge < 0 { + panic(fmt.Errorf("illegal max age MaxAge=%v", opts.MaxAge)) + } + if opts.MaxAge == 0 { + opts.MaxAge = DefMaxAge + } + + if opts.AgeBuckets == 0 { + opts.AgeBuckets = DefAgeBuckets + } + + if opts.BufCap == 0 { + opts.BufCap = DefBufCap + } + + s := &summary{ + desc: desc, + + objectives: opts.Objectives, + sortedObjectives: make([]float64, 0, len(opts.Objectives)), + + labelPairs: makeLabelPairs(desc, labelValues), + + hotBuf: make([]float64, 0, opts.BufCap), + coldBuf: make([]float64, 0, opts.BufCap), + streamDuration: opts.MaxAge / time.Duration(opts.AgeBuckets), + } + s.headStreamExpTime = time.Now().Add(s.streamDuration) + s.hotBufExpTime = s.headStreamExpTime + + for i := uint32(0); i < opts.AgeBuckets; i++ { + s.streams = append(s.streams, s.newStream()) + } + s.headStream = s.streams[0] + + for qu := range s.objectives { + s.sortedObjectives = append(s.sortedObjectives, qu) + } + sort.Float64s(s.sortedObjectives) + + s.init(s) // Init self-collection. + return s +} + +type summary struct { + selfCollector + + bufMtx sync.Mutex // Protects hotBuf and hotBufExpTime. + mtx sync.Mutex // Protects every other moving part. + // Lock bufMtx before mtx if both are needed. 
+ + desc *Desc + + objectives map[float64]float64 + sortedObjectives []float64 + + labelPairs []*dto.LabelPair + + sum float64 + cnt uint64 + + hotBuf, coldBuf []float64 + + streams []*quantile.Stream + streamDuration time.Duration + headStream *quantile.Stream + headStreamIdx int + headStreamExpTime, hotBufExpTime time.Time +} + +func (s *summary) Desc() *Desc { + return s.desc +} + +func (s *summary) Observe(v float64) { + s.bufMtx.Lock() + defer s.bufMtx.Unlock() + + now := time.Now() + if now.After(s.hotBufExpTime) { + s.asyncFlush(now) + } + s.hotBuf = append(s.hotBuf, v) + if len(s.hotBuf) == cap(s.hotBuf) { + s.asyncFlush(now) + } +} + +func (s *summary) Write(out *dto.Metric) error { + sum := &dto.Summary{} + qs := make([]*dto.Quantile, 0, len(s.objectives)) + + s.bufMtx.Lock() + s.mtx.Lock() + // Swap bufs even if hotBuf is empty to set new hotBufExpTime. + s.swapBufs(time.Now()) + s.bufMtx.Unlock() + + s.flushColdBuf() + sum.SampleCount = proto.Uint64(s.cnt) + sum.SampleSum = proto.Float64(s.sum) + + for _, rank := range s.sortedObjectives { + var q float64 + if s.headStream.Count() == 0 { + q = math.NaN() + } else { + q = s.headStream.Query(rank) + } + qs = append(qs, &dto.Quantile{ + Quantile: proto.Float64(rank), + Value: proto.Float64(q), + }) + } + + s.mtx.Unlock() + + if len(qs) > 0 { + sort.Sort(quantSort(qs)) + } + sum.Quantile = qs + + out.Summary = sum + out.Label = s.labelPairs + return nil +} + +func (s *summary) newStream() *quantile.Stream { + return quantile.NewTargeted(s.objectives) +} + +// asyncFlush needs bufMtx locked. +func (s *summary) asyncFlush(now time.Time) { + s.mtx.Lock() + s.swapBufs(now) + + // Unblock the original goroutine that was responsible for the mutation + // that triggered the compaction. But hold onto the global non-buffer + // state mutex until the operation finishes. + go func() { + s.flushColdBuf() + s.mtx.Unlock() + }() +} + +// rotateStreams needs mtx AND bufMtx locked. +func (s *summary) maybeRotateStreams() { + for !s.hotBufExpTime.Equal(s.headStreamExpTime) { + s.headStream.Reset() + s.headStreamIdx++ + if s.headStreamIdx >= len(s.streams) { + s.headStreamIdx = 0 + } + s.headStream = s.streams[s.headStreamIdx] + s.headStreamExpTime = s.headStreamExpTime.Add(s.streamDuration) + } +} + +// flushColdBuf needs mtx locked. +func (s *summary) flushColdBuf() { + for _, v := range s.coldBuf { + for _, stream := range s.streams { + stream.Insert(v) + } + s.cnt++ + s.sum += v + } + s.coldBuf = s.coldBuf[0:0] + s.maybeRotateStreams() +} + +// swapBufs needs mtx AND bufMtx locked, coldBuf must be empty. +func (s *summary) swapBufs(now time.Time) { + if len(s.coldBuf) != 0 { + panic("coldBuf is not empty") + } + s.hotBuf, s.coldBuf = s.coldBuf, s.hotBuf + // hotBuf is now empty and gets new expiration set. + for now.After(s.hotBufExpTime) { + s.hotBufExpTime = s.hotBufExpTime.Add(s.streamDuration) + } +} + +type quantSort []*dto.Quantile + +func (s quantSort) Len() int { + return len(s) +} + +func (s quantSort) Swap(i, j int) { + s[i], s[j] = s[j], s[i] +} + +func (s quantSort) Less(i, j int) bool { + return s[i].GetQuantile() < s[j].GetQuantile() +} + +// SummaryVec is a Collector that bundles a set of Summaries that all share the +// same Desc, but have different values for their variable labels. This is used +// if you want to count the same thing partitioned by various dimensions +// (e.g. HTTP request latencies, partitioned by status code and method). Create +// instances with NewSummaryVec. 
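+//
+// A short usage sketch (metric and label names are illustrative only):
+//
+//  latencies := NewSummaryVec(
+//      SummaryOpts{Name: "request_duration_seconds", Help: "Request latency."},
+//      []string{"method"},
+//  )
+//  latencies.WithLabelValues("GET").Observe(0.21)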
+type SummaryVec struct { + *metricVec +} + +// NewSummaryVec creates a new SummaryVec based on the provided SummaryOpts and +// partitioned by the given label names. +func NewSummaryVec(opts SummaryOpts, labelNames []string) *SummaryVec { + desc := NewDesc( + BuildFQName(opts.Namespace, opts.Subsystem, opts.Name), + opts.Help, + labelNames, + opts.ConstLabels, + ) + return &SummaryVec{ + metricVec: newMetricVec(desc, func(lvs ...string) Metric { + return newSummary(desc, opts, lvs...) + }), + } +} + +// GetMetricWithLabelValues returns the Summary for the given slice of label +// values (same order as the VariableLabels in Desc). If that combination of +// label values is accessed for the first time, a new Summary is created. +// +// It is possible to call this method without using the returned Summary to only +// create the new Summary but leave it at its starting value, a Summary without +// any observations. +// +// Keeping the Summary for later use is possible (and should be considered if +// performance is critical), but keep in mind that Reset, DeleteLabelValues and +// Delete can be used to delete the Summary from the SummaryVec. In that case, the +// Summary will still exist, but it will not be exported anymore, even if a +// Summary with the same label values is created later. See also the CounterVec +// example. +// +// An error is returned if the number of label values is not the same as the +// number of VariableLabels in Desc. +// +// Note that for more than one label value, this method is prone to mistakes +// caused by an incorrect order of arguments. Consider GetMetricWith(Labels) as +// an alternative to avoid that type of mistake. For higher label numbers, the +// latter has a much more readable (albeit more verbose) syntax, but it comes +// with a performance overhead (for creating and processing the Labels map). +// See also the GaugeVec example. +func (m *SummaryVec) GetMetricWithLabelValues(lvs ...string) (Observer, error) { + metric, err := m.metricVec.getMetricWithLabelValues(lvs...) + if metric != nil { + return metric.(Observer), err + } + return nil, err +} + +// GetMetricWith returns the Summary for the given Labels map (the label names +// must match those of the VariableLabels in Desc). If that label map is +// accessed for the first time, a new Summary is created. Implications of +// creating a Summary without using it and keeping the Summary for later use are +// the same as for GetMetricWithLabelValues. +// +// An error is returned if the number and names of the Labels are inconsistent +// with those of the VariableLabels in Desc. +// +// This method is used for the same purpose as +// GetMetricWithLabelValues(...string). See there for pros and cons of the two +// methods. +func (m *SummaryVec) GetMetricWith(labels Labels) (Observer, error) { + metric, err := m.metricVec.getMetricWith(labels) + if metric != nil { + return metric.(Observer), err + } + return nil, err +} + +// WithLabelValues works as GetMetricWithLabelValues, but panics where +// GetMetricWithLabelValues would have returned an error. By not returning an +// error, WithLabelValues allows shortcuts like +// myVec.WithLabelValues("404", "GET").Observe(42.21) +func (m *SummaryVec) WithLabelValues(lvs ...string) Observer { + return m.metricVec.withLabelValues(lvs...).(Observer) +} + +// With works as GetMetricWith, but panics where GetMetricWithLabels would have +// returned an error. 
By not returning an error, With allows shortcuts like +// myVec.With(Labels{"code": "404", "method": "GET"}).Observe(42.21) +func (m *SummaryVec) With(labels Labels) Observer { + return m.metricVec.with(labels).(Observer) +} + +type constSummary struct { + desc *Desc + count uint64 + sum float64 + quantiles map[float64]float64 + labelPairs []*dto.LabelPair +} + +func (s *constSummary) Desc() *Desc { + return s.desc +} + +func (s *constSummary) Write(out *dto.Metric) error { + sum := &dto.Summary{} + qs := make([]*dto.Quantile, 0, len(s.quantiles)) + + sum.SampleCount = proto.Uint64(s.count) + sum.SampleSum = proto.Float64(s.sum) + + for rank, q := range s.quantiles { + qs = append(qs, &dto.Quantile{ + Quantile: proto.Float64(rank), + Value: proto.Float64(q), + }) + } + + if len(qs) > 0 { + sort.Sort(quantSort(qs)) + } + sum.Quantile = qs + + out.Summary = sum + out.Label = s.labelPairs + + return nil +} + +// NewConstSummary returns a metric representing a Prometheus summary with fixed +// values for the count, sum, and quantiles. As those parameters cannot be +// changed, the returned value does not implement the Summary interface (but +// only the Metric interface). Users of this package will not have much use for +// it in regular operations. However, when implementing custom Collectors, it is +// useful as a throw-away metric that is generated on the fly to send it to +// Prometheus in the Collect method. +// +// quantiles maps ranks to quantile values. For example, a median latency of +// 0.23s and a 99th percentile latency of 0.56s would be expressed as: +// map[float64]float64{0.5: 0.23, 0.99: 0.56} +// +// NewConstSummary returns an error if the length of labelValues is not +// consistent with the variable labels in Desc. +func NewConstSummary( + desc *Desc, + count uint64, + sum float64, + quantiles map[float64]float64, + labelValues ...string, +) (Metric, error) { + if err := validateLabelValues(labelValues, len(desc.variableLabels)); err != nil { + return nil, err + } + return &constSummary{ + desc: desc, + count: count, + sum: sum, + quantiles: quantiles, + labelPairs: makeLabelPairs(desc, labelValues), + }, nil +} + +// MustNewConstSummary is a version of NewConstSummary that panics where +// NewConstMetric would have returned an error. +func MustNewConstSummary( + desc *Desc, + count uint64, + sum float64, + quantiles map[float64]float64, + labelValues ...string, +) Metric { + m, err := NewConstSummary(desc, count, sum, quantiles, labelValues...) + if err != nil { + panic(err) + } + return m +} diff --git a/vendor/github.com/prometheus/client_golang/prometheus/summary_test.go b/vendor/github.com/prometheus/client_golang/prometheus/summary_test.go new file mode 100644 index 0000000..b162ed9 --- /dev/null +++ b/vendor/github.com/prometheus/client_golang/prometheus/summary_test.go @@ -0,0 +1,388 @@ +// Copyright 2014 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package prometheus + +import ( + "math" + "math/rand" + "sort" + "sync" + "testing" + "testing/quick" + "time" + + dto "github.com/prometheus/client_model/go" +) + +func TestSummaryWithDefaultObjectives(t *testing.T) { + reg := NewRegistry() + summaryWithDefaultObjectives := NewSummary(SummaryOpts{ + Name: "default_objectives", + Help: "Test help.", + }) + if err := reg.Register(summaryWithDefaultObjectives); err != nil { + t.Error(err) + } + + m := &dto.Metric{} + if err := summaryWithDefaultObjectives.Write(m); err != nil { + t.Error(err) + } + if len(m.GetSummary().Quantile) != len(DefObjectives) { + t.Error("expected default objectives in summary") + } +} + +func TestSummaryWithoutObjectives(t *testing.T) { + reg := NewRegistry() + summaryWithEmptyObjectives := NewSummary(SummaryOpts{ + Name: "empty_objectives", + Help: "Test help.", + Objectives: map[float64]float64{}, + }) + if err := reg.Register(summaryWithEmptyObjectives); err != nil { + t.Error(err) + } + + m := &dto.Metric{} + if err := summaryWithEmptyObjectives.Write(m); err != nil { + t.Error(err) + } + if len(m.GetSummary().Quantile) != 0 { + t.Error("expected no objectives in summary") + } +} + +func benchmarkSummaryObserve(w int, b *testing.B) { + b.StopTimer() + + wg := new(sync.WaitGroup) + wg.Add(w) + + g := new(sync.WaitGroup) + g.Add(1) + + s := NewSummary(SummaryOpts{}) + + for i := 0; i < w; i++ { + go func() { + g.Wait() + + for i := 0; i < b.N; i++ { + s.Observe(float64(i)) + } + + wg.Done() + }() + } + + b.StartTimer() + g.Done() + wg.Wait() +} + +func BenchmarkSummaryObserve1(b *testing.B) { + benchmarkSummaryObserve(1, b) +} + +func BenchmarkSummaryObserve2(b *testing.B) { + benchmarkSummaryObserve(2, b) +} + +func BenchmarkSummaryObserve4(b *testing.B) { + benchmarkSummaryObserve(4, b) +} + +func BenchmarkSummaryObserve8(b *testing.B) { + benchmarkSummaryObserve(8, b) +} + +func benchmarkSummaryWrite(w int, b *testing.B) { + b.StopTimer() + + wg := new(sync.WaitGroup) + wg.Add(w) + + g := new(sync.WaitGroup) + g.Add(1) + + s := NewSummary(SummaryOpts{}) + + for i := 0; i < 1000000; i++ { + s.Observe(float64(i)) + } + + for j := 0; j < w; j++ { + outs := make([]dto.Metric, b.N) + + go func(o []dto.Metric) { + g.Wait() + + for i := 0; i < b.N; i++ { + s.Write(&o[i]) + } + + wg.Done() + }(outs) + } + + b.StartTimer() + g.Done() + wg.Wait() +} + +func BenchmarkSummaryWrite1(b *testing.B) { + benchmarkSummaryWrite(1, b) +} + +func BenchmarkSummaryWrite2(b *testing.B) { + benchmarkSummaryWrite(2, b) +} + +func BenchmarkSummaryWrite4(b *testing.B) { + benchmarkSummaryWrite(4, b) +} + +func BenchmarkSummaryWrite8(b *testing.B) { + benchmarkSummaryWrite(8, b) +} + +func TestSummaryConcurrency(t *testing.T) { + if testing.Short() { + t.Skip("Skipping test in short mode.") + } + + rand.Seed(42) + + it := func(n uint32) bool { + mutations := int(n%1e4 + 1e4) + concLevel := int(n%5 + 1) + total := mutations * concLevel + + var start, end sync.WaitGroup + start.Add(1) + end.Add(concLevel) + + sum := NewSummary(SummaryOpts{ + Name: "test_summary", + Help: "helpless", + Objectives: map[float64]float64{0.5: 0.05, 0.9: 0.01, 0.99: 0.001}, + }) + + allVars := make([]float64, total) + var sampleSum float64 + for i := 0; i < concLevel; i++ { + vals := make([]float64, mutations) + for j := 0; j < mutations; j++ { + v := rand.NormFloat64() + vals[j] = v + allVars[i*mutations+j] = v + sampleSum += v + } + + go func(vals []float64) { + start.Wait() + for _, v := range vals { + sum.Observe(v) + } + end.Done() + }(vals) + } + 
sort.Float64s(allVars) + start.Done() + end.Wait() + + m := &dto.Metric{} + sum.Write(m) + if got, want := int(*m.Summary.SampleCount), total; got != want { + t.Errorf("got sample count %d, want %d", got, want) + } + if got, want := *m.Summary.SampleSum, sampleSum; math.Abs((got-want)/want) > 0.001 { + t.Errorf("got sample sum %f, want %f", got, want) + } + + objectives := make([]float64, 0, len(DefObjectives)) + for qu := range DefObjectives { + objectives = append(objectives, qu) + } + sort.Float64s(objectives) + + for i, wantQ := range objectives { + ε := DefObjectives[wantQ] + gotQ := *m.Summary.Quantile[i].Quantile + gotV := *m.Summary.Quantile[i].Value + min, max := getBounds(allVars, wantQ, ε) + if gotQ != wantQ { + t.Errorf("got quantile %f, want %f", gotQ, wantQ) + } + if gotV < min || gotV > max { + t.Errorf("got %f for quantile %f, want [%f,%f]", gotV, gotQ, min, max) + } + } + return true + } + + if err := quick.Check(it, nil); err != nil { + t.Error(err) + } +} + +func TestSummaryVecConcurrency(t *testing.T) { + if testing.Short() { + t.Skip("Skipping test in short mode.") + } + + rand.Seed(42) + + objectives := make([]float64, 0, len(DefObjectives)) + for qu := range DefObjectives { + + objectives = append(objectives, qu) + } + sort.Float64s(objectives) + + it := func(n uint32) bool { + mutations := int(n%1e4 + 1e4) + concLevel := int(n%7 + 1) + vecLength := int(n%3 + 1) + + var start, end sync.WaitGroup + start.Add(1) + end.Add(concLevel) + + sum := NewSummaryVec( + SummaryOpts{ + Name: "test_summary", + Help: "helpless", + Objectives: map[float64]float64{0.5: 0.05, 0.9: 0.01, 0.99: 0.001}, + }, + []string{"label"}, + ) + + allVars := make([][]float64, vecLength) + sampleSums := make([]float64, vecLength) + for i := 0; i < concLevel; i++ { + vals := make([]float64, mutations) + picks := make([]int, mutations) + for j := 0; j < mutations; j++ { + v := rand.NormFloat64() + vals[j] = v + pick := rand.Intn(vecLength) + picks[j] = pick + allVars[pick] = append(allVars[pick], v) + sampleSums[pick] += v + } + + go func(vals []float64) { + start.Wait() + for i, v := range vals { + sum.WithLabelValues(string('A' + picks[i])).Observe(v) + } + end.Done() + }(vals) + } + for _, vars := range allVars { + sort.Float64s(vars) + } + start.Done() + end.Wait() + + for i := 0; i < vecLength; i++ { + m := &dto.Metric{} + s := sum.WithLabelValues(string('A' + i)) + s.(Summary).Write(m) + if got, want := int(*m.Summary.SampleCount), len(allVars[i]); got != want { + t.Errorf("got sample count %d for label %c, want %d", got, 'A'+i, want) + } + if got, want := *m.Summary.SampleSum, sampleSums[i]; math.Abs((got-want)/want) > 0.001 { + t.Errorf("got sample sum %f for label %c, want %f", got, 'A'+i, want) + } + for j, wantQ := range objectives { + ε := DefObjectives[wantQ] + gotQ := *m.Summary.Quantile[j].Quantile + gotV := *m.Summary.Quantile[j].Value + min, max := getBounds(allVars[i], wantQ, ε) + if gotQ != wantQ { + t.Errorf("got quantile %f for label %c, want %f", gotQ, 'A'+i, wantQ) + } + if gotV < min || gotV > max { + t.Errorf("got %f for quantile %f for label %c, want [%f,%f]", gotV, gotQ, 'A'+i, min, max) + } + } + } + return true + } + + if err := quick.Check(it, nil); err != nil { + t.Error(err) + } +} + +func TestSummaryDecay(t *testing.T) { + if testing.Short() { + t.Skip("Skipping test in short mode.") + // More because it depends on timing than because it is particularly long... 
+ } + + sum := NewSummary(SummaryOpts{ + Name: "test_summary", + Help: "helpless", + MaxAge: 100 * time.Millisecond, + Objectives: map[float64]float64{0.1: 0.001}, + AgeBuckets: 10, + }) + + m := &dto.Metric{} + i := 0 + tick := time.NewTicker(time.Millisecond) + for range tick.C { + i++ + sum.Observe(float64(i)) + if i%10 == 0 { + sum.Write(m) + if got, want := *m.Summary.Quantile[0].Value, math.Max(float64(i)/10, float64(i-90)); math.Abs(got-want) > 20 { + t.Errorf("%d. got %f, want %f", i, got, want) + } + m.Reset() + } + if i >= 1000 { + break + } + } + tick.Stop() + // Wait for MaxAge without observations and make sure quantiles are NaN. + time.Sleep(100 * time.Millisecond) + sum.Write(m) + if got := *m.Summary.Quantile[0].Value; !math.IsNaN(got) { + t.Errorf("got %f, want NaN after expiration", got) + } +} + +func getBounds(vars []float64, q, ε float64) (min, max float64) { + // TODO(beorn7): This currently tolerates an error of up to 2*ε. The + // error must be at most ε, but for some reason, it's sometimes slightly + // higher. That's a bug. + n := float64(len(vars)) + lower := int((q - 2*ε) * n) + upper := int(math.Ceil((q + 2*ε) * n)) + min = vars[0] + if lower > 1 { + min = vars[lower-1] + } + max = vars[len(vars)-1] + if upper < len(vars) { + max = vars[upper-1] + } + return +} diff --git a/vendor/github.com/prometheus/client_golang/prometheus/timer.go b/vendor/github.com/prometheus/client_golang/prometheus/timer.go new file mode 100644 index 0000000..b8fc5f1 --- /dev/null +++ b/vendor/github.com/prometheus/client_golang/prometheus/timer.go @@ -0,0 +1,51 @@ +// Copyright 2016 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package prometheus + +import "time" + +// Timer is a helper type to time functions. Use NewTimer to create new +// instances. +type Timer struct { + begin time.Time + observer Observer +} + +// NewTimer creates a new Timer. The provided Observer is used to observe a +// duration in seconds. Timer is usually used to time a function call in the +// following way: +// func TimeMe() { +// timer := NewTimer(myHistogram) +// defer timer.ObserveDuration() +// // Do actual work. +// } +func NewTimer(o Observer) *Timer { + return &Timer{ + begin: time.Now(), + observer: o, + } +} + +// ObserveDuration records the duration passed since the Timer was created with +// NewTimer. It calls the Observe method of the Observer provided during +// construction with the duration in seconds as an argument. ObserveDuration is +// usually called with a defer statement. +// +// Note that this method is only guaranteed to never observe negative durations +// if used with Go1.9+. 
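+//
+// Besides a Summary or Histogram, a Gauge can act as the Observer by wrapping
+// its Set method in an ObserverFunc, for example (the gauge name is
+// illustrative):
+//
+//  timer := NewTimer(ObserverFunc(lastDuration.Set))
+//  // ... do the work to be timed ...
+//  timer.ObserveDuration()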
+func (t *Timer) ObserveDuration() { + if t.observer != nil { + t.observer.Observe(time.Since(t.begin).Seconds()) + } +} diff --git a/vendor/github.com/prometheus/client_golang/prometheus/timer_test.go b/vendor/github.com/prometheus/client_golang/prometheus/timer_test.go new file mode 100644 index 0000000..2949020 --- /dev/null +++ b/vendor/github.com/prometheus/client_golang/prometheus/timer_test.go @@ -0,0 +1,152 @@ +// Copyright 2016 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package prometheus + +import ( + "testing" + + dto "github.com/prometheus/client_model/go" +) + +func TestTimerObserve(t *testing.T) { + var ( + his = NewHistogram(HistogramOpts{Name: "test_histogram"}) + sum = NewSummary(SummaryOpts{Name: "test_summary"}) + gauge = NewGauge(GaugeOpts{Name: "test_gauge"}) + ) + + func() { + hisTimer := NewTimer(his) + sumTimer := NewTimer(sum) + gaugeTimer := NewTimer(ObserverFunc(gauge.Set)) + defer hisTimer.ObserveDuration() + defer sumTimer.ObserveDuration() + defer gaugeTimer.ObserveDuration() + }() + + m := &dto.Metric{} + his.Write(m) + if want, got := uint64(1), m.GetHistogram().GetSampleCount(); want != got { + t.Errorf("want %d observations for histogram, got %d", want, got) + } + m.Reset() + sum.Write(m) + if want, got := uint64(1), m.GetSummary().GetSampleCount(); want != got { + t.Errorf("want %d observations for summary, got %d", want, got) + } + m.Reset() + gauge.Write(m) + if got := m.GetGauge().GetValue(); got <= 0 { + t.Errorf("want value > 0 for gauge, got %f", got) + } +} + +func TestTimerEmpty(t *testing.T) { + emptyTimer := NewTimer(nil) + emptyTimer.ObserveDuration() + // Do nothing, just demonstrate it works without panic. +} + +func TestTimerConditionalTiming(t *testing.T) { + var ( + his = NewHistogram(HistogramOpts{ + Name: "test_histogram", + }) + timeMe = true + m = &dto.Metric{} + ) + + timedFunc := func() { + timer := NewTimer(ObserverFunc(func(v float64) { + if timeMe { + his.Observe(v) + } + })) + defer timer.ObserveDuration() + } + + timedFunc() // This will time. + his.Write(m) + if want, got := uint64(1), m.GetHistogram().GetSampleCount(); want != got { + t.Errorf("want %d observations for histogram, got %d", want, got) + } + + timeMe = false + timedFunc() // This will not time again. 
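+ // The sample count must therefore still be 1 after the second call.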
+ m.Reset() + his.Write(m) + if want, got := uint64(1), m.GetHistogram().GetSampleCount(); want != got { + t.Errorf("want %d observations for histogram, got %d", want, got) + } +} + +func TestTimerByOutcome(t *testing.T) { + var ( + his = NewHistogramVec( + HistogramOpts{Name: "test_histogram"}, + []string{"outcome"}, + ) + outcome = "foo" + m = &dto.Metric{} + ) + + timedFunc := func() { + timer := NewTimer(ObserverFunc(func(v float64) { + his.WithLabelValues(outcome).Observe(v) + })) + defer timer.ObserveDuration() + + if outcome == "foo" { + outcome = "bar" + return + } + outcome = "foo" + } + + timedFunc() + his.WithLabelValues("foo").(Histogram).Write(m) + if want, got := uint64(0), m.GetHistogram().GetSampleCount(); want != got { + t.Errorf("want %d observations for 'foo' histogram, got %d", want, got) + } + m.Reset() + his.WithLabelValues("bar").(Histogram).Write(m) + if want, got := uint64(1), m.GetHistogram().GetSampleCount(); want != got { + t.Errorf("want %d observations for 'bar' histogram, got %d", want, got) + } + + timedFunc() + m.Reset() + his.WithLabelValues("foo").(Histogram).Write(m) + if want, got := uint64(1), m.GetHistogram().GetSampleCount(); want != got { + t.Errorf("want %d observations for 'foo' histogram, got %d", want, got) + } + m.Reset() + his.WithLabelValues("bar").(Histogram).Write(m) + if want, got := uint64(1), m.GetHistogram().GetSampleCount(); want != got { + t.Errorf("want %d observations for 'bar' histogram, got %d", want, got) + } + + timedFunc() + m.Reset() + his.WithLabelValues("foo").(Histogram).Write(m) + if want, got := uint64(1), m.GetHistogram().GetSampleCount(); want != got { + t.Errorf("want %d observations for 'foo' histogram, got %d", want, got) + } + m.Reset() + his.WithLabelValues("bar").(Histogram).Write(m) + if want, got := uint64(2), m.GetHistogram().GetSampleCount(); want != got { + t.Errorf("want %d observations for 'bar' histogram, got %d", want, got) + } + +} diff --git a/vendor/github.com/prometheus/client_golang/prometheus/untyped.go b/vendor/github.com/prometheus/client_golang/prometheus/untyped.go new file mode 100644 index 0000000..0f9ce63 --- /dev/null +++ b/vendor/github.com/prometheus/client_golang/prometheus/untyped.go @@ -0,0 +1,42 @@ +// Copyright 2014 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package prometheus + +// UntypedOpts is an alias for Opts. See there for doc comments. +type UntypedOpts Opts + +// UntypedFunc works like GaugeFunc but the collected metric is of type +// "Untyped". UntypedFunc is useful to mirror an external metric of unknown +// type. +// +// To create UntypedFunc instances, use NewUntypedFunc. +type UntypedFunc interface { + Metric + Collector +} + +// NewUntypedFunc creates a new UntypedFunc based on the provided +// UntypedOpts. The value reported is determined by calling the given function +// from within the Write method. Take into account that metric collection may +// happen concurrently. 
If that results in concurrent calls to Write, like in +// the case where an UntypedFunc is directly registered with Prometheus, the +// provided function must be concurrency-safe. +func NewUntypedFunc(opts UntypedOpts, function func() float64) UntypedFunc { + return newValueFunc(NewDesc( + BuildFQName(opts.Namespace, opts.Subsystem, opts.Name), + opts.Help, + nil, + opts.ConstLabels, + ), UntypedValue, function) +} diff --git a/vendor/github.com/prometheus/client_golang/prometheus/value.go b/vendor/github.com/prometheus/client_golang/prometheus/value.go new file mode 100644 index 0000000..4a9cca6 --- /dev/null +++ b/vendor/github.com/prometheus/client_golang/prometheus/value.go @@ -0,0 +1,236 @@ +// Copyright 2014 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package prometheus + +import ( + "fmt" + "math" + "sort" + "sync/atomic" + "time" + + dto "github.com/prometheus/client_model/go" + + "github.com/golang/protobuf/proto" +) + +// ValueType is an enumeration of metric types that represent a simple value. +type ValueType int + +// Possible values for the ValueType enum. +const ( + _ ValueType = iota + CounterValue + GaugeValue + UntypedValue +) + +// value is a generic metric for simple values. It implements Metric, Collector, +// Counter, Gauge, and Untyped. Its effective type is determined by +// ValueType. This is a low-level building block used by the library to back the +// implementations of Counter, Gauge, and Untyped. +type value struct { + // valBits contains the bits of the represented float64 value. It has + // to go first in the struct to guarantee alignment for atomic + // operations. http://golang.org/pkg/sync/atomic/#pkg-note-BUG + valBits uint64 + + selfCollector + + desc *Desc + valType ValueType + labelPairs []*dto.LabelPair +} + +// newValue returns a newly allocated value with the given Desc, ValueType, +// sample value and label values. It panics if the number of label +// values is different from the number of variable labels in Desc. 
+func newValue(desc *Desc, valueType ValueType, val float64, labelValues ...string) *value { + if len(labelValues) != len(desc.variableLabels) { + panic(errInconsistentCardinality) + } + result := &value{ + desc: desc, + valType: valueType, + valBits: math.Float64bits(val), + labelPairs: makeLabelPairs(desc, labelValues), + } + result.init(result) + return result +} + +func (v *value) Desc() *Desc { + return v.desc +} + +func (v *value) Set(val float64) { + atomic.StoreUint64(&v.valBits, math.Float64bits(val)) +} + +func (v *value) SetToCurrentTime() { + v.Set(float64(time.Now().UnixNano()) / 1e9) +} + +func (v *value) Inc() { + v.Add(1) +} + +func (v *value) Dec() { + v.Add(-1) +} + +func (v *value) Add(val float64) { + for { + oldBits := atomic.LoadUint64(&v.valBits) + newBits := math.Float64bits(math.Float64frombits(oldBits) + val) + if atomic.CompareAndSwapUint64(&v.valBits, oldBits, newBits) { + return + } + } +} + +func (v *value) Sub(val float64) { + v.Add(val * -1) +} + +func (v *value) Write(out *dto.Metric) error { + val := math.Float64frombits(atomic.LoadUint64(&v.valBits)) + return populateMetric(v.valType, val, v.labelPairs, out) +} + +// valueFunc is a generic metric for simple values retrieved on collect time +// from a function. It implements Metric and Collector. Its effective type is +// determined by ValueType. This is a low-level building block used by the +// library to back the implementations of CounterFunc, GaugeFunc, and +// UntypedFunc. +type valueFunc struct { + selfCollector + + desc *Desc + valType ValueType + function func() float64 + labelPairs []*dto.LabelPair +} + +// newValueFunc returns a newly allocated valueFunc with the given Desc and +// ValueType. The value reported is determined by calling the given function +// from within the Write method. Take into account that metric collection may +// happen concurrently. If that results in concurrent calls to Write, like in +// the case where a valueFunc is directly registered with Prometheus, the +// provided function must be concurrency-safe. +func newValueFunc(desc *Desc, valueType ValueType, function func() float64) *valueFunc { + result := &valueFunc{ + desc: desc, + valType: valueType, + function: function, + labelPairs: makeLabelPairs(desc, nil), + } + result.init(result) + return result +} + +func (v *valueFunc) Desc() *Desc { + return v.desc +} + +func (v *valueFunc) Write(out *dto.Metric) error { + return populateMetric(v.valType, v.function(), v.labelPairs, out) +} + +// NewConstMetric returns a metric with one fixed value that cannot be +// changed. Users of this package will not have much use for it in regular +// operations. However, when implementing custom Collectors, it is useful as a +// throw-away metric that is generated on the fly to send it to Prometheus in +// the Collect method. NewConstMetric returns an error if the length of +// labelValues is not consistent with the variable labels in Desc. +func NewConstMetric(desc *Desc, valueType ValueType, value float64, labelValues ...string) (Metric, error) { + if err := validateLabelValues(labelValues, len(desc.variableLabels)); err != nil { + return nil, err + } + return &constMetric{ + desc: desc, + valType: valueType, + val: value, + labelPairs: makeLabelPairs(desc, labelValues), + }, nil +} + +// MustNewConstMetric is a version of NewConstMetric that panics where +// NewConstMetric would have returned an error. 
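The NewConstMetric/MustNewConstMetric doc comment above describes their intended use inside a custom Collector's Collect method; the following editorial sketch shows that pattern. The queueCollector type, its label, and the length callback are made-up placeholders.

package main

import "github.com/prometheus/client_golang/prometheus"

// queueCollector emits one throw-away const metric per Collect call.
type queueCollector struct {
	desc   *prometheus.Desc
	length func() float64 // hypothetical source of the current queue length
}

func (c queueCollector) Describe(ch chan<- *prometheus.Desc) {
	ch <- c.desc
}

func (c queueCollector) Collect(ch chan<- prometheus.Metric) {
	// A fresh, immutable metric is generated on the fly for every scrape.
	ch <- prometheus.MustNewConstMetric(c.desc, prometheus.GaugeValue, c.length(), "jobs")
}

func main() {
	c := queueCollector{
		desc: prometheus.NewDesc(
			"queue_length",
			"Current length of the queue.",
			[]string{"queue"}, // one variable label, matching the single label value above
			nil,
		),
		length: func() float64 { return 7 },
	}
	prometheus.MustRegister(c)
	// In a real program, an HTTP handler would now expose the default registry.
}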
+func MustNewConstMetric(desc *Desc, valueType ValueType, value float64, labelValues ...string) Metric { + m, err := NewConstMetric(desc, valueType, value, labelValues...) + if err != nil { + panic(err) + } + return m +} + +type constMetric struct { + desc *Desc + valType ValueType + val float64 + labelPairs []*dto.LabelPair +} + +func (m *constMetric) Desc() *Desc { + return m.desc +} + +func (m *constMetric) Write(out *dto.Metric) error { + return populateMetric(m.valType, m.val, m.labelPairs, out) +} + +func populateMetric( + t ValueType, + v float64, + labelPairs []*dto.LabelPair, + m *dto.Metric, +) error { + m.Label = labelPairs + switch t { + case CounterValue: + m.Counter = &dto.Counter{Value: proto.Float64(v)} + case GaugeValue: + m.Gauge = &dto.Gauge{Value: proto.Float64(v)} + case UntypedValue: + m.Untyped = &dto.Untyped{Value: proto.Float64(v)} + default: + return fmt.Errorf("encountered unknown type %v", t) + } + return nil +} + +func makeLabelPairs(desc *Desc, labelValues []string) []*dto.LabelPair { + totalLen := len(desc.variableLabels) + len(desc.constLabelPairs) + if totalLen == 0 { + // Super fast path. + return nil + } + if len(desc.variableLabels) == 0 { + // Moderately fast path. + return desc.constLabelPairs + } + labelPairs := make([]*dto.LabelPair, 0, totalLen) + for i, n := range desc.variableLabels { + labelPairs = append(labelPairs, &dto.LabelPair{ + Name: proto.String(n), + Value: proto.String(labelValues[i]), + }) + } + for _, lp := range desc.constLabelPairs { + labelPairs = append(labelPairs, lp) + } + sort.Sort(LabelPairSorter(labelPairs)) + return labelPairs +} diff --git a/vendor/github.com/prometheus/client_golang/prometheus/value_test.go b/vendor/github.com/prometheus/client_golang/prometheus/value_test.go new file mode 100644 index 0000000..eed517e --- /dev/null +++ b/vendor/github.com/prometheus/client_golang/prometheus/value_test.go @@ -0,0 +1,43 @@ +package prometheus + +import ( + "fmt" + "testing" +) + +func TestNewConstMetricInvalidLabelValues(t *testing.T) { + testCases := []struct { + desc string + labels Labels + }{ + { + desc: "non utf8 label value", + labels: Labels{"a": "\xFF"}, + }, + { + desc: "not enough label values", + labels: Labels{}, + }, + { + desc: "too many label values", + labels: Labels{"a": "1", "b": "2"}, + }, + } + + for _, test := range testCases { + metricDesc := NewDesc( + "sample_value", + "sample value", + []string{"a"}, + Labels{}, + ) + + expectPanic(t, func() { + MustNewConstMetric(metricDesc, CounterValue, 0.3, "\xFF") + }, fmt.Sprintf("WithLabelValues: expected panic because: %s", test.desc)) + + if _, err := NewConstMetric(metricDesc, CounterValue, 0.3, "\xFF"); err == nil { + t.Errorf("NewConstMetric: expected error because: %s", test.desc) + } + } +} diff --git a/vendor/github.com/prometheus/client_golang/prometheus/vec.go b/vendor/github.com/prometheus/client_golang/prometheus/vec.go new file mode 100644 index 0000000..65d13fe --- /dev/null +++ b/vendor/github.com/prometheus/client_golang/prometheus/vec.go @@ -0,0 +1,363 @@ +// Copyright 2014 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. + +package prometheus + +import ( + "fmt" + "sync" + + "github.com/prometheus/common/model" +) + +// metricVec is a Collector to bundle metrics of the same name that differ in +// their label values. metricVec is not used directly (and therefore +// unexported). It is used as a building block for implementations of vectors of +// a given metric type, like GaugeVec, CounterVec, SummaryVec, HistogramVec, and +// UntypedVec. +type metricVec struct { + mtx sync.RWMutex // Protects the children. + children map[uint64][]metricWithLabelValues + desc *Desc + + newMetric func(labelValues ...string) Metric + hashAdd func(h uint64, s string) uint64 // replace hash function for testing collision handling + hashAddByte func(h uint64, b byte) uint64 +} + +// newMetricVec returns an initialized metricVec. +func newMetricVec(desc *Desc, newMetric func(lvs ...string) Metric) *metricVec { + return &metricVec{ + children: map[uint64][]metricWithLabelValues{}, + desc: desc, + newMetric: newMetric, + hashAdd: hashAdd, + hashAddByte: hashAddByte, + } +} + +// metricWithLabelValues provides the metric and its label values for +// disambiguation on hash collision. +type metricWithLabelValues struct { + values []string + metric Metric +} + +// Describe implements Collector. The length of the returned slice +// is always one. +func (m *metricVec) Describe(ch chan<- *Desc) { + ch <- m.desc +} + +// Collect implements Collector. +func (m *metricVec) Collect(ch chan<- Metric) { + m.mtx.RLock() + defer m.mtx.RUnlock() + + for _, metrics := range m.children { + for _, metric := range metrics { + ch <- metric.metric + } + } +} + +func (m *metricVec) getMetricWithLabelValues(lvs ...string) (Metric, error) { + h, err := m.hashLabelValues(lvs) + if err != nil { + return nil, err + } + + return m.getOrCreateMetricWithLabelValues(h, lvs), nil +} + +func (m *metricVec) getMetricWith(labels Labels) (Metric, error) { + h, err := m.hashLabels(labels) + if err != nil { + return nil, err + } + + return m.getOrCreateMetricWithLabels(h, labels), nil +} + +func (m *metricVec) withLabelValues(lvs ...string) Metric { + metric, err := m.getMetricWithLabelValues(lvs...) + if err != nil { + panic(err) + } + return metric +} + +func (m *metricVec) with(labels Labels) Metric { + metric, err := m.getMetricWith(labels) + if err != nil { + panic(err) + } + return metric +} + +// DeleteLabelValues removes the metric where the variable labels are the same +// as those passed in as labels (same order as the VariableLabels in Desc). It +// returns true if a metric was deleted. +// +// It is not an error if the number of label values is not the same as the +// number of VariableLabels in Desc. However, such inconsistent label count can +// never match an actual metric, so the method will always return false in that +// case. +// +// Note that for more than one label value, this method is prone to mistakes +// caused by an incorrect order of arguments. Consider Delete(Labels) as an +// alternative to avoid that type of mistake. For higher label numbers, the +// latter has a much more readable (albeit more verbose) syntax, but it comes +// with a performance overhead (for creating and processing the Labels map). +// See also the CounterVec example. 
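The trade-off the DeleteLabelValues comment above describes (positional arguments versus the order-insensitive Delete(Labels) form) is easiest to see in a short sketch; the gauge vector, its labels, and the values below are illustrative only.

package main

import (
	"fmt"

	"github.com/prometheus/client_golang/prometheus"
)

func main() {
	requests := prometheus.NewGaugeVec(
		prometheus.GaugeOpts{Name: "in_flight_requests", Help: "Illustrative gauge vector."},
		[]string{"method", "path"},
	)
	requests.WithLabelValues("GET", "/api").Set(3)

	// Positional form: values must follow the variable-label order ("method", "path").
	fmt.Println(requests.DeleteLabelValues("/api", "GET")) // false - wrong order, nothing matches
	fmt.Println(requests.DeleteLabelValues("GET", "/api")) // true

	requests.WithLabelValues("GET", "/api").Set(3)
	// Map form: more verbose, but immune to argument-order mistakes.
	fmt.Println(requests.Delete(prometheus.Labels{"path": "/api", "method": "GET"})) // true
}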
+func (m *metricVec) DeleteLabelValues(lvs ...string) bool { + m.mtx.Lock() + defer m.mtx.Unlock() + + h, err := m.hashLabelValues(lvs) + if err != nil { + return false + } + return m.deleteByHashWithLabelValues(h, lvs) +} + +// Delete deletes the metric where the variable labels are the same as those +// passed in as labels. It returns true if a metric was deleted. +// +// It is not an error if the number and names of the Labels are inconsistent +// with those of the VariableLabels in Desc. However, such inconsistent Labels +// can never match an actual metric, so the method will always return false in +// that case. +// +// This method is used for the same purpose as DeleteLabelValues(...string). See +// there for pros and cons of the two methods. +func (m *metricVec) Delete(labels Labels) bool { + m.mtx.Lock() + defer m.mtx.Unlock() + + h, err := m.hashLabels(labels) + if err != nil { + return false + } + + return m.deleteByHashWithLabels(h, labels) +} + +// deleteByHashWithLabelValues removes the metric from the hash bucket h. If +// there are multiple matches in the bucket, use lvs to select a metric and +// remove only that metric. +func (m *metricVec) deleteByHashWithLabelValues(h uint64, lvs []string) bool { + metrics, ok := m.children[h] + if !ok { + return false + } + + i := m.findMetricWithLabelValues(metrics, lvs) + if i >= len(metrics) { + return false + } + + if len(metrics) > 1 { + m.children[h] = append(metrics[:i], metrics[i+1:]...) + } else { + delete(m.children, h) + } + return true +} + +// deleteByHashWithLabels removes the metric from the hash bucket h. If there +// are multiple matches in the bucket, use lvs to select a metric and remove +// only that metric. +func (m *metricVec) deleteByHashWithLabels(h uint64, labels Labels) bool { + metrics, ok := m.children[h] + if !ok { + return false + } + i := m.findMetricWithLabels(metrics, labels) + if i >= len(metrics) { + return false + } + + if len(metrics) > 1 { + m.children[h] = append(metrics[:i], metrics[i+1:]...) + } else { + delete(m.children, h) + } + return true +} + +// Reset deletes all metrics in this vector. +func (m *metricVec) Reset() { + m.mtx.Lock() + defer m.mtx.Unlock() + + for h := range m.children { + delete(m.children, h) + } +} + +func (m *metricVec) hashLabelValues(vals []string) (uint64, error) { + if err := validateLabelValues(vals, len(m.desc.variableLabels)); err != nil { + return 0, err + } + + h := hashNew() + for _, val := range vals { + h = m.hashAdd(h, val) + h = m.hashAddByte(h, model.SeparatorByte) + } + return h, nil +} + +func (m *metricVec) hashLabels(labels Labels) (uint64, error) { + if err := validateValuesInLabels(labels, len(m.desc.variableLabels)); err != nil { + return 0, err + } + + h := hashNew() + for _, label := range m.desc.variableLabels { + val, ok := labels[label] + if !ok { + return 0, fmt.Errorf("label name %q missing in label map", label) + } + h = m.hashAdd(h, val) + h = m.hashAddByte(h, model.SeparatorByte) + } + return h, nil +} + +// getOrCreateMetricWithLabelValues retrieves the metric by hash and label value +// or creates it and returns the new one. +// +// This function holds the mutex. 
+func (m *metricVec) getOrCreateMetricWithLabelValues(hash uint64, lvs []string) Metric {
+	m.mtx.RLock()
+	metric, ok := m.getMetricWithHashAndLabelValues(hash, lvs)
+	m.mtx.RUnlock()
+	if ok {
+		return metric
+	}
+
+	m.mtx.Lock()
+	defer m.mtx.Unlock()
+	metric, ok = m.getMetricWithHashAndLabelValues(hash, lvs)
+	if !ok {
+		// Copy to avoid allocation in case we don't go down this code path.
+		copiedLVs := make([]string, len(lvs))
+		copy(copiedLVs, lvs)
+		metric = m.newMetric(copiedLVs...)
+		m.children[hash] = append(m.children[hash], metricWithLabelValues{values: copiedLVs, metric: metric})
+	}
+	return metric
+}
+
+// getOrCreateMetricWithLabels retrieves the metric by hash and labels or
+// creates it and returns the new one.
+//
+// This function holds the mutex.
+func (m *metricVec) getOrCreateMetricWithLabels(hash uint64, labels Labels) Metric {
+	m.mtx.RLock()
+	metric, ok := m.getMetricWithHashAndLabels(hash, labels)
+	m.mtx.RUnlock()
+	if ok {
+		return metric
+	}
+
+	m.mtx.Lock()
+	defer m.mtx.Unlock()
+	metric, ok = m.getMetricWithHashAndLabels(hash, labels)
+	if !ok {
+		lvs := m.extractLabelValues(labels)
+		metric = m.newMetric(lvs...)
+		m.children[hash] = append(m.children[hash], metricWithLabelValues{values: lvs, metric: metric})
+	}
+	return metric
+}
+
+// getMetricWithHashAndLabelValues gets a metric while handling possible
+// collisions in the hash space. Must be called while holding the read mutex.
+func (m *metricVec) getMetricWithHashAndLabelValues(h uint64, lvs []string) (Metric, bool) {
+	metrics, ok := m.children[h]
+	if ok {
+		if i := m.findMetricWithLabelValues(metrics, lvs); i < len(metrics) {
+			return metrics[i].metric, true
+		}
+	}
+	return nil, false
+}
+
+// getMetricWithHashAndLabels gets a metric while handling possible collisions in
+// the hash space. Must be called while holding the read mutex.
+func (m *metricVec) getMetricWithHashAndLabels(h uint64, labels Labels) (Metric, bool) {
+	metrics, ok := m.children[h]
+	if ok {
+		if i := m.findMetricWithLabels(metrics, labels); i < len(metrics) {
+			return metrics[i].metric, true
+		}
+	}
+	return nil, false
+}
+
+// findMetricWithLabelValues returns the index of the matching metric or
+// len(metrics) if not found.
+func (m *metricVec) findMetricWithLabelValues(metrics []metricWithLabelValues, lvs []string) int {
+	for i, metric := range metrics {
+		if m.matchLabelValues(metric.values, lvs) {
+			return i
+		}
+	}
+	return len(metrics)
+}
+
+// findMetricWithLabels returns the index of the matching metric or len(metrics)
+// if not found.
+func (m *metricVec) findMetricWithLabels(metrics []metricWithLabelValues, labels Labels) int { + for i, metric := range metrics { + if m.matchLabels(metric.values, labels) { + return i + } + } + return len(metrics) +} + +func (m *metricVec) matchLabelValues(values []string, lvs []string) bool { + if len(values) != len(lvs) { + return false + } + for i, v := range values { + if v != lvs[i] { + return false + } + } + return true +} + +func (m *metricVec) matchLabels(values []string, labels Labels) bool { + if len(labels) != len(values) { + return false + } + for i, k := range m.desc.variableLabels { + if values[i] != labels[k] { + return false + } + } + return true +} + +func (m *metricVec) extractLabelValues(labels Labels) []string { + labelValues := make([]string, len(labels)) + for i, k := range m.desc.variableLabels { + labelValues[i] = labels[k] + } + return labelValues +} diff --git a/vendor/github.com/prometheus/client_golang/prometheus/vec_test.go b/vendor/github.com/prometheus/client_golang/prometheus/vec_test.go new file mode 100644 index 0000000..f767f7a --- /dev/null +++ b/vendor/github.com/prometheus/client_golang/prometheus/vec_test.go @@ -0,0 +1,312 @@ +// Copyright 2014 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
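Before the tests, a brief editorial sketch of how the machinery in vec.go surfaces through an exported vector type. It assumes the registry API from this same package (NewRegistry, MustRegister, Gather), which is not shown in this hunk; names and values are illustrative.

package main

import (
	"fmt"
	"log"

	"github.com/prometheus/client_golang/prometheus"
)

func main() {
	// A private registry keeps the sketch self-contained.
	reg := prometheus.NewRegistry()

	temps := prometheus.NewGaugeVec(
		prometheus.GaugeOpts{Name: "room_temperature_celsius", Help: "Illustrative gauge vector."},
		[]string{"room"},
	)
	reg.MustRegister(temps)

	// Two label values -> two children, each collected as its own metric.
	temps.WithLabelValues("kitchen").Set(21.5)
	temps.WithLabelValues("office").Set(23.0)

	mfs, err := reg.Gather()
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(len(mfs[0].GetMetric())) // 2

	// Reset drops all children; the vector itself stays registered.
	temps.Reset()
	mfs, _ = reg.Gather()
	fmt.Println(len(mfs)) // 0 - nothing left to collect
}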
+ +package prometheus + +import ( + "fmt" + "testing" + + dto "github.com/prometheus/client_model/go" +) + +func TestDelete(t *testing.T) { + vec := NewGaugeVec( + GaugeOpts{ + Name: "test", + Help: "helpless", + }, + []string{"l1", "l2"}, + ) + testDelete(t, vec) +} + +func TestDeleteWithCollisions(t *testing.T) { + vec := NewGaugeVec( + GaugeOpts{ + Name: "test", + Help: "helpless", + }, + []string{"l1", "l2"}, + ) + vec.hashAdd = func(h uint64, s string) uint64 { return 1 } + vec.hashAddByte = func(h uint64, b byte) uint64 { return 1 } + testDelete(t, vec) +} + +func testDelete(t *testing.T, vec *GaugeVec) { + if got, want := vec.Delete(Labels{"l1": "v1", "l2": "v2"}), false; got != want { + t.Errorf("got %v, want %v", got, want) + } + + vec.With(Labels{"l1": "v1", "l2": "v2"}).(Gauge).Set(42) + if got, want := vec.Delete(Labels{"l1": "v1", "l2": "v2"}), true; got != want { + t.Errorf("got %v, want %v", got, want) + } + if got, want := vec.Delete(Labels{"l1": "v1", "l2": "v2"}), false; got != want { + t.Errorf("got %v, want %v", got, want) + } + + vec.With(Labels{"l1": "v1", "l2": "v2"}).(Gauge).Set(42) + if got, want := vec.Delete(Labels{"l2": "v2", "l1": "v1"}), true; got != want { + t.Errorf("got %v, want %v", got, want) + } + if got, want := vec.Delete(Labels{"l2": "v2", "l1": "v1"}), false; got != want { + t.Errorf("got %v, want %v", got, want) + } + + vec.With(Labels{"l1": "v1", "l2": "v2"}).(Gauge).Set(42) + if got, want := vec.Delete(Labels{"l2": "v1", "l1": "v2"}), false; got != want { + t.Errorf("got %v, want %v", got, want) + } + if got, want := vec.Delete(Labels{"l1": "v1"}), false; got != want { + t.Errorf("got %v, want %v", got, want) + } +} + +func TestDeleteLabelValues(t *testing.T) { + vec := NewGaugeVec( + GaugeOpts{ + Name: "test", + Help: "helpless", + }, + []string{"l1", "l2"}, + ) + testDeleteLabelValues(t, vec) +} + +func TestDeleteLabelValuesWithCollisions(t *testing.T) { + vec := NewGaugeVec( + GaugeOpts{ + Name: "test", + Help: "helpless", + }, + []string{"l1", "l2"}, + ) + vec.hashAdd = func(h uint64, s string) uint64 { return 1 } + vec.hashAddByte = func(h uint64, b byte) uint64 { return 1 } + testDeleteLabelValues(t, vec) +} + +func testDeleteLabelValues(t *testing.T, vec *GaugeVec) { + if got, want := vec.DeleteLabelValues("v1", "v2"), false; got != want { + t.Errorf("got %v, want %v", got, want) + } + + vec.With(Labels{"l1": "v1", "l2": "v2"}).(Gauge).Set(42) + vec.With(Labels{"l1": "v1", "l2": "v3"}).(Gauge).Set(42) // Add junk data for collision. + if got, want := vec.DeleteLabelValues("v1", "v2"), true; got != want { + t.Errorf("got %v, want %v", got, want) + } + if got, want := vec.DeleteLabelValues("v1", "v2"), false; got != want { + t.Errorf("got %v, want %v", got, want) + } + if got, want := vec.DeleteLabelValues("v1", "v3"), true; got != want { + t.Errorf("got %v, want %v", got, want) + } + + vec.With(Labels{"l1": "v1", "l2": "v2"}).(Gauge).Set(42) + // Delete out of order. 
+ if got, want := vec.DeleteLabelValues("v2", "v1"), false; got != want { + t.Errorf("got %v, want %v", got, want) + } + if got, want := vec.DeleteLabelValues("v1"), false; got != want { + t.Errorf("got %v, want %v", got, want) + } +} + +func TestMetricVec(t *testing.T) { + vec := NewGaugeVec( + GaugeOpts{ + Name: "test", + Help: "helpless", + }, + []string{"l1", "l2"}, + ) + testMetricVec(t, vec) +} + +func TestMetricVecWithCollisions(t *testing.T) { + vec := NewGaugeVec( + GaugeOpts{ + Name: "test", + Help: "helpless", + }, + []string{"l1", "l2"}, + ) + vec.hashAdd = func(h uint64, s string) uint64 { return 1 } + vec.hashAddByte = func(h uint64, b byte) uint64 { return 1 } + testMetricVec(t, vec) +} + +func testMetricVec(t *testing.T, vec *GaugeVec) { + vec.Reset() // Actually test Reset now! + + var pair [2]string + // Keep track of metrics. + expected := map[[2]string]int{} + + for i := 0; i < 1000; i++ { + pair[0], pair[1] = fmt.Sprint(i%4), fmt.Sprint(i%5) // Varying combinations multiples. + expected[pair]++ + vec.WithLabelValues(pair[0], pair[1]).Inc() + + expected[[2]string{"v1", "v2"}]++ + vec.WithLabelValues("v1", "v2").(Gauge).Inc() + } + + var total int + for _, metrics := range vec.children { + for _, metric := range metrics { + total++ + copy(pair[:], metric.values) + + var metricOut dto.Metric + if err := metric.metric.Write(&metricOut); err != nil { + t.Fatal(err) + } + actual := *metricOut.Gauge.Value + + var actualPair [2]string + for i, label := range metricOut.Label { + actualPair[i] = *label.Value + } + + // Test output pair against metric.values to ensure we've selected + // the right one. We check this to ensure the below check means + // anything at all. + if actualPair != pair { + t.Fatalf("unexpected pair association in metric map: %v != %v", actualPair, pair) + } + + if actual != float64(expected[pair]) { + t.Fatalf("incorrect counter value for %v: %v != %v", pair, actual, expected[pair]) + } + } + } + + if total != len(expected) { + t.Fatalf("unexpected number of metrics: %v != %v", total, len(expected)) + } + + vec.Reset() + + if len(vec.children) > 0 { + t.Fatalf("reset failed") + } +} + +func TestCounterVecEndToEndWithCollision(t *testing.T) { + vec := NewCounterVec( + CounterOpts{ + Name: "test", + Help: "helpless", + }, + []string{"labelname"}, + ) + vec.WithLabelValues("77kepQFQ8Kl").Inc() + vec.WithLabelValues("!0IC=VloaY").Add(2) + + m := &dto.Metric{} + if err := vec.WithLabelValues("77kepQFQ8Kl").Write(m); err != nil { + t.Fatal(err) + } + if got, want := m.GetLabel()[0].GetValue(), "77kepQFQ8Kl"; got != want { + t.Errorf("got label value %q, want %q", got, want) + } + if got, want := m.GetCounter().GetValue(), 1.; got != want { + t.Errorf("got value %f, want %f", got, want) + } + m.Reset() + if err := vec.WithLabelValues("!0IC=VloaY").Write(m); err != nil { + t.Fatal(err) + } + if got, want := m.GetLabel()[0].GetValue(), "!0IC=VloaY"; got != want { + t.Errorf("got label value %q, want %q", got, want) + } + if got, want := m.GetCounter().GetValue(), 2.; got != want { + t.Errorf("got value %f, want %f", got, want) + } +} + +func BenchmarkMetricVecWithLabelValuesBasic(b *testing.B) { + benchmarkMetricVecWithLabelValues(b, map[string][]string{ + "l1": {"onevalue"}, + "l2": {"twovalue"}, + }) +} + +func BenchmarkMetricVecWithLabelValues2Keys10ValueCardinality(b *testing.B) { + benchmarkMetricVecWithLabelValuesCardinality(b, 2, 10) +} + +func BenchmarkMetricVecWithLabelValues4Keys10ValueCardinality(b *testing.B) { + 
benchmarkMetricVecWithLabelValuesCardinality(b, 4, 10) +} + +func BenchmarkMetricVecWithLabelValues2Keys100ValueCardinality(b *testing.B) { + benchmarkMetricVecWithLabelValuesCardinality(b, 2, 100) +} + +func BenchmarkMetricVecWithLabelValues10Keys100ValueCardinality(b *testing.B) { + benchmarkMetricVecWithLabelValuesCardinality(b, 10, 100) +} + +func BenchmarkMetricVecWithLabelValues10Keys1000ValueCardinality(b *testing.B) { + benchmarkMetricVecWithLabelValuesCardinality(b, 10, 1000) +} + +func benchmarkMetricVecWithLabelValuesCardinality(b *testing.B, nkeys, nvalues int) { + labels := map[string][]string{} + + for i := 0; i < nkeys; i++ { + var ( + k = fmt.Sprintf("key-%v", i) + vs = make([]string, 0, nvalues) + ) + for j := 0; j < nvalues; j++ { + vs = append(vs, fmt.Sprintf("value-%v", j)) + } + labels[k] = vs + } + + benchmarkMetricVecWithLabelValues(b, labels) +} + +func benchmarkMetricVecWithLabelValues(b *testing.B, labels map[string][]string) { + var keys []string + for k := range labels { // Map order dependent, who cares though. + keys = append(keys, k) + } + + values := make([]string, len(labels)) // Value cache for permutations. + vec := NewGaugeVec( + GaugeOpts{ + Name: "test", + Help: "helpless", + }, + keys, + ) + + b.ReportAllocs() + b.ResetTimer() + for i := 0; i < b.N; i++ { + // Varies input across provide map entries based on key size. + for j, k := range keys { + candidates := labels[k] + values[j] = candidates[i%len(candidates)] + } + + vec.WithLabelValues(values...) + } +} diff --git a/vendor/github.com/prometheus/common/.travis.yml b/vendor/github.com/prometheus/common/.travis.yml new file mode 100644 index 0000000..2fe8e9a --- /dev/null +++ b/vendor/github.com/prometheus/common/.travis.yml @@ -0,0 +1,6 @@ +sudo: false + +language: go +go: + - 1.7.5 + - tip diff --git a/vendor/github.com/prometheus/common/CONTRIBUTING.md b/vendor/github.com/prometheus/common/CONTRIBUTING.md new file mode 100644 index 0000000..40503ed --- /dev/null +++ b/vendor/github.com/prometheus/common/CONTRIBUTING.md @@ -0,0 +1,18 @@ +# Contributing + +Prometheus uses GitHub to manage reviews of pull requests. + +* If you have a trivial fix or improvement, go ahead and create a pull request, + addressing (with `@...`) the maintainer of this repository (see + [MAINTAINERS.md](MAINTAINERS.md)) in the description of the pull request. + +* If you plan to do something more involved, first discuss your ideas + on our [mailing list](https://groups.google.com/forum/?fromgroups#!forum/prometheus-developers). + This will avoid unnecessary work and surely give you and us a good deal + of inspiration. + +* Relevant coding style guidelines are the [Go Code Review + Comments](https://code.google.com/p/go-wiki/wiki/CodeReviewComments) + and the _Formatting and style_ section of Peter Bourgon's [Go: Best + Practices for Production + Environments](http://peter.bourgon.org/go-in-production/#formatting-and-style). diff --git a/vendor/github.com/prometheus/common/LICENSE b/vendor/github.com/prometheus/common/LICENSE new file mode 100644 index 0000000..261eeb9 --- /dev/null +++ b/vendor/github.com/prometheus/common/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. 
+ + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. 
This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
diff --git a/vendor/github.com/prometheus/common/MAINTAINERS.md b/vendor/github.com/prometheus/common/MAINTAINERS.md new file mode 100644 index 0000000..1b31521 --- /dev/null +++ b/vendor/github.com/prometheus/common/MAINTAINERS.md @@ -0,0 +1 @@ +* Fabian Reinartz diff --git a/vendor/github.com/prometheus/common/NOTICE b/vendor/github.com/prometheus/common/NOTICE new file mode 100644 index 0000000..636a2c1 --- /dev/null +++ b/vendor/github.com/prometheus/common/NOTICE @@ -0,0 +1,5 @@ +Common libraries shared by Prometheus Go components. +Copyright 2015 The Prometheus Authors + +This product includes software developed at +SoundCloud Ltd. (http://soundcloud.com/). diff --git a/vendor/github.com/prometheus/common/README.md b/vendor/github.com/prometheus/common/README.md new file mode 100644 index 0000000..98f6ce2 --- /dev/null +++ b/vendor/github.com/prometheus/common/README.md @@ -0,0 +1,12 @@ +# Common +[![Build Status](https://travis-ci.org/prometheus/common.svg)](https://travis-ci.org/prometheus/common) + +This repository contains Go libraries that are shared across Prometheus +components and libraries. + +* **config**: Common configuration structures +* **expfmt**: Decoding and encoding for the exposition format +* **log**: A logging wrapper around [logrus](https://github.com/Sirupsen/logrus) +* **model**: Shared data structures +* **route**: A routing wrapper around [httprouter](https://github.com/julienschmidt/httprouter) using `context.Context` +* **version**: Version informations and metric diff --git a/vendor/github.com/prometheus/common/config/config.go b/vendor/github.com/prometheus/common/config/config.go new file mode 100644 index 0000000..33eb922 --- /dev/null +++ b/vendor/github.com/prometheus/common/config/config.go @@ -0,0 +1,30 @@ +// Copyright 2016 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package config + +import ( + "fmt" + "strings" +) + +func checkOverflow(m map[string]interface{}, ctx string) error { + if len(m) > 0 { + var keys []string + for k := range m { + keys = append(keys, k) + } + return fmt.Errorf("unknown fields in %s: %s", ctx, strings.Join(keys, ", ")) + } + return nil +} diff --git a/vendor/github.com/prometheus/common/config/testdata/tls_config.cert_no_key.bad.yml b/vendor/github.com/prometheus/common/config/testdata/tls_config.cert_no_key.bad.yml new file mode 100644 index 0000000..7dfdc1e --- /dev/null +++ b/vendor/github.com/prometheus/common/config/testdata/tls_config.cert_no_key.bad.yml @@ -0,0 +1 @@ +cert_file: somefile diff --git a/vendor/github.com/prometheus/common/config/testdata/tls_config.empty.good.yml b/vendor/github.com/prometheus/common/config/testdata/tls_config.empty.good.yml new file mode 100644 index 0000000..e69de29 diff --git a/vendor/github.com/prometheus/common/config/testdata/tls_config.insecure.good.yml b/vendor/github.com/prometheus/common/config/testdata/tls_config.insecure.good.yml new file mode 100644 index 0000000..d054383 --- /dev/null +++ b/vendor/github.com/prometheus/common/config/testdata/tls_config.insecure.good.yml @@ -0,0 +1 @@ +insecure_skip_verify: true diff --git a/vendor/github.com/prometheus/common/config/testdata/tls_config.invalid_field.bad.yml b/vendor/github.com/prometheus/common/config/testdata/tls_config.invalid_field.bad.yml new file mode 100644 index 0000000..12cbaac --- /dev/null +++ b/vendor/github.com/prometheus/common/config/testdata/tls_config.invalid_field.bad.yml @@ -0,0 +1 @@ +something_invalid: true diff --git a/vendor/github.com/prometheus/common/config/testdata/tls_config.key_no_cert.bad.yml b/vendor/github.com/prometheus/common/config/testdata/tls_config.key_no_cert.bad.yml new file mode 100644 index 0000000..cec045e --- /dev/null +++ b/vendor/github.com/prometheus/common/config/testdata/tls_config.key_no_cert.bad.yml @@ -0,0 +1 @@ +key_file: somefile diff --git a/vendor/github.com/prometheus/common/config/tls_config.go b/vendor/github.com/prometheus/common/config/tls_config.go new file mode 100644 index 0000000..7c7e7cb --- /dev/null +++ b/vendor/github.com/prometheus/common/config/tls_config.go @@ -0,0 +1,79 @@ +// Copyright 2016 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package config + +import ( + "crypto/tls" + "crypto/x509" + "fmt" + "io/ioutil" +) + +// TLSConfig configures the options for TLS connections. +type TLSConfig struct { + // The CA cert to use for the targets. + CAFile string `yaml:"ca_file,omitempty"` + // The client cert file for the targets. + CertFile string `yaml:"cert_file,omitempty"` + // The client key file for the targets. + KeyFile string `yaml:"key_file,omitempty"` + // Disable target certificate validation. + InsecureSkipVerify bool `yaml:"insecure_skip_verify"` + + // Catches all undefined fields and must be empty after parsing. 
+ XXX map[string]interface{} `yaml:",inline"` +} + +// UnmarshalYAML implements the yaml.Unmarshaler interface. +func (c *TLSConfig) UnmarshalYAML(unmarshal func(interface{}) error) error { + type plain TLSConfig + if err := unmarshal((*plain)(c)); err != nil { + return err + } + return checkOverflow(c.XXX, "TLS config") +} + +// GenerateConfig produces a tls.Config based on TLS connection options. +// It loads certificate files from disk if they are defined. +func (c *TLSConfig) GenerateConfig() (*tls.Config, error) { + tlsConfig := &tls.Config{InsecureSkipVerify: c.InsecureSkipVerify} + + // If a CA cert is provided then let's read it in so we can validate the + // scrape target's certificate properly. + if len(c.CAFile) > 0 { + caCertPool := x509.NewCertPool() + // Load CA cert. + caCert, err := ioutil.ReadFile(c.CAFile) + if err != nil { + return nil, fmt.Errorf("unable to use specified CA cert %s: %s", c.CAFile, err) + } + caCertPool.AppendCertsFromPEM(caCert) + tlsConfig.RootCAs = caCertPool + } + + if len(c.CertFile) > 0 && len(c.KeyFile) == 0 { + return nil, fmt.Errorf("client cert file %q specified without client key file", c.CertFile) + } else if len(c.KeyFile) > 0 && len(c.CertFile) == 0 { + return nil, fmt.Errorf("client key file %q specified without client cert file", c.KeyFile) + } else if len(c.CertFile) > 0 && len(c.KeyFile) > 0 { + cert, err := tls.LoadX509KeyPair(c.CertFile, c.KeyFile) + if err != nil { + return nil, fmt.Errorf("unable to use specified client cert (%s) & key (%s): %s", c.CertFile, c.KeyFile, err) + } + tlsConfig.Certificates = []tls.Certificate{cert} + } + tlsConfig.BuildNameToCertificate() + + return tlsConfig, nil +} diff --git a/vendor/github.com/prometheus/common/config/tls_config_test.go b/vendor/github.com/prometheus/common/config/tls_config_test.go new file mode 100644 index 0000000..4443035 --- /dev/null +++ b/vendor/github.com/prometheus/common/config/tls_config_test.go @@ -0,0 +1,92 @@ +// Copyright 2016 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package config + +import ( + "crypto/tls" + "io/ioutil" + "reflect" + "strings" + "testing" + + "gopkg.in/yaml.v2" +) + +// LoadTLSConfig parses the given YAML file into a tls.Config. 
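An editorial sketch of how the TLSConfig type above is meant to be consumed, essentially what the LoadTLSConfig helper below does in the tests: unmarshal YAML (unknown fields are rejected via checkOverflow) and then call GenerateConfig, which loads the CA/cert/key files and insists that cert_file and key_file come as a pair. The file name and paths are hypothetical.

package main

import (
	"fmt"
	"io/ioutil"
	"log"

	"github.com/prometheus/common/config"
	"gopkg.in/yaml.v2"
)

func main() {
	// Hypothetical file, e.g.:
	//   ca_file: /etc/prometheus/ca.crt
	//   cert_file: /etc/prometheus/client.crt
	//   key_file: /etc/prometheus/client.key
	//   insecure_skip_verify: false
	content, err := ioutil.ReadFile("tls_config.yml")
	if err != nil {
		log.Fatal(err)
	}

	cfg := &config.TLSConfig{}
	// Unknown YAML fields are rejected here by the strict UnmarshalYAML above.
	if err := yaml.Unmarshal(content, cfg); err != nil {
		log.Fatal(err)
	}

	// Loads the referenced files and validates that cert and key come as a pair.
	tlsCfg, err := cfg.GenerateConfig()
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("insecure_skip_verify:", tlsCfg.InsecureSkipVerify)
}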
+func LoadTLSConfig(filename string) (*tls.Config, error) { + content, err := ioutil.ReadFile(filename) + if err != nil { + return nil, err + } + cfg := &TLSConfig{} + if err = yaml.Unmarshal(content, cfg); err != nil { + return nil, err + } + return cfg.GenerateConfig() +} + +var expectedTLSConfigs = []struct { + filename string + config *tls.Config +}{ + { + filename: "tls_config.empty.good.yml", + config: &tls.Config{}, + }, { + filename: "tls_config.insecure.good.yml", + config: &tls.Config{InsecureSkipVerify: true}, + }, +} + +func TestValidTLSConfig(t *testing.T) { + for _, cfg := range expectedTLSConfigs { + cfg.config.BuildNameToCertificate() + got, err := LoadTLSConfig("testdata/" + cfg.filename) + if err != nil { + t.Errorf("Error parsing %s: %s", cfg.filename, err) + } + if !reflect.DeepEqual(*got, *cfg.config) { + t.Fatalf("%s: unexpected config result: \n\n%s\n expected\n\n%s", cfg.filename, got, cfg.config) + } + } +} + +var expectedTLSConfigErrors = []struct { + filename string + errMsg string +}{ + { + filename: "tls_config.invalid_field.bad.yml", + errMsg: "unknown fields in", + }, { + filename: "tls_config.cert_no_key.bad.yml", + errMsg: "specified without client key file", + }, { + filename: "tls_config.key_no_cert.bad.yml", + errMsg: "specified without client cert file", + }, +} + +func TestBadTLSConfigs(t *testing.T) { + for _, ee := range expectedTLSConfigErrors { + _, err := LoadTLSConfig("testdata/" + ee.filename) + if err == nil { + t.Errorf("Expected error parsing %s but got none", ee.filename) + continue + } + if !strings.Contains(err.Error(), ee.errMsg) { + t.Errorf("Expected error for %s to contain %q but got: %s", ee.filename, ee.errMsg, err) + } + } +} diff --git a/vendor/github.com/prometheus/common/expfmt/bench_test.go b/vendor/github.com/prometheus/common/expfmt/bench_test.go new file mode 100644 index 0000000..e539bfc --- /dev/null +++ b/vendor/github.com/prometheus/common/expfmt/bench_test.go @@ -0,0 +1,167 @@ +// Copyright 2015 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package expfmt + +import ( + "bytes" + "compress/gzip" + "io" + "io/ioutil" + "testing" + + "github.com/matttproud/golang_protobuf_extensions/pbutil" + + dto "github.com/prometheus/client_model/go" +) + +var parser TextParser + +// Benchmarks to show how much penalty text format parsing actually inflicts. +// +// Example results on Linux 3.13.0, Intel(R) Core(TM) i7-4700MQ CPU @ 2.40GHz, go1.4. +// +// BenchmarkParseText 1000 1188535 ns/op 205085 B/op 6135 allocs/op +// BenchmarkParseTextGzip 1000 1376567 ns/op 246224 B/op 6151 allocs/op +// BenchmarkParseProto 10000 172790 ns/op 52258 B/op 1160 allocs/op +// BenchmarkParseProtoGzip 5000 324021 ns/op 94931 B/op 1211 allocs/op +// BenchmarkParseProtoMap 10000 187946 ns/op 58714 B/op 1203 allocs/op +// +// CONCLUSION: The overhead for the map is negligible. Text format needs ~5x more allocations. 
+// Without compression, it needs ~7x longer, but with compression (the more relevant scenario), +// the difference becomes less relevant, only ~4x. +// +// The test data contains 248 samples. + +// BenchmarkParseText benchmarks the parsing of a text-format scrape into metric +// family DTOs. +func BenchmarkParseText(b *testing.B) { + b.StopTimer() + data, err := ioutil.ReadFile("testdata/text") + if err != nil { + b.Fatal(err) + } + b.StartTimer() + + for i := 0; i < b.N; i++ { + if _, err := parser.TextToMetricFamilies(bytes.NewReader(data)); err != nil { + b.Fatal(err) + } + } +} + +// BenchmarkParseTextGzip benchmarks the parsing of a gzipped text-format scrape +// into metric family DTOs. +func BenchmarkParseTextGzip(b *testing.B) { + b.StopTimer() + data, err := ioutil.ReadFile("testdata/text.gz") + if err != nil { + b.Fatal(err) + } + b.StartTimer() + + for i := 0; i < b.N; i++ { + in, err := gzip.NewReader(bytes.NewReader(data)) + if err != nil { + b.Fatal(err) + } + if _, err := parser.TextToMetricFamilies(in); err != nil { + b.Fatal(err) + } + } +} + +// BenchmarkParseProto benchmarks the parsing of a protobuf-format scrape into +// metric family DTOs. Note that this does not build a map of metric families +// (as the text version does), because it is not required for Prometheus +// ingestion either. (However, it is required for the text-format parsing, as +// the metric family might be sprinkled all over the text, while the +// protobuf-format guarantees bundling at one place.) +func BenchmarkParseProto(b *testing.B) { + b.StopTimer() + data, err := ioutil.ReadFile("testdata/protobuf") + if err != nil { + b.Fatal(err) + } + b.StartTimer() + + for i := 0; i < b.N; i++ { + family := &dto.MetricFamily{} + in := bytes.NewReader(data) + for { + family.Reset() + if _, err := pbutil.ReadDelimited(in, family); err != nil { + if err == io.EOF { + break + } + b.Fatal(err) + } + } + } +} + +// BenchmarkParseProtoGzip is like BenchmarkParseProto above, but parses gzipped +// protobuf format. +func BenchmarkParseProtoGzip(b *testing.B) { + b.StopTimer() + data, err := ioutil.ReadFile("testdata/protobuf.gz") + if err != nil { + b.Fatal(err) + } + b.StartTimer() + + for i := 0; i < b.N; i++ { + family := &dto.MetricFamily{} + in, err := gzip.NewReader(bytes.NewReader(data)) + if err != nil { + b.Fatal(err) + } + for { + family.Reset() + if _, err := pbutil.ReadDelimited(in, family); err != nil { + if err == io.EOF { + break + } + b.Fatal(err) + } + } + } +} + +// BenchmarkParseProtoMap is like BenchmarkParseProto but DOES put the parsed +// metric family DTOs into a map. This is not happening during Prometheus +// ingestion. It is just here to measure the overhead of that map creation and +// separate it from the overhead of the text format parsing. 
+func BenchmarkParseProtoMap(b *testing.B) { + b.StopTimer() + data, err := ioutil.ReadFile("testdata/protobuf") + if err != nil { + b.Fatal(err) + } + b.StartTimer() + + for i := 0; i < b.N; i++ { + families := map[string]*dto.MetricFamily{} + in := bytes.NewReader(data) + for { + family := &dto.MetricFamily{} + if _, err := pbutil.ReadDelimited(in, family); err != nil { + if err == io.EOF { + break + } + b.Fatal(err) + } + families[family.GetName()] = family + } + } +} diff --git a/vendor/github.com/prometheus/common/expfmt/decode.go b/vendor/github.com/prometheus/common/expfmt/decode.go new file mode 100644 index 0000000..a7a42d5 --- /dev/null +++ b/vendor/github.com/prometheus/common/expfmt/decode.go @@ -0,0 +1,429 @@ +// Copyright 2015 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package expfmt + +import ( + "fmt" + "io" + "math" + "mime" + "net/http" + + dto "github.com/prometheus/client_model/go" + + "github.com/matttproud/golang_protobuf_extensions/pbutil" + "github.com/prometheus/common/model" +) + +// Decoder types decode an input stream into metric families. +type Decoder interface { + Decode(*dto.MetricFamily) error +} + +// DecodeOptions contains options used by the Decoder and in sample extraction. +type DecodeOptions struct { + // Timestamp is added to each value from the stream that has no explicit timestamp set. + Timestamp model.Time +} + +// ResponseFormat extracts the correct format from a HTTP response header. +// If no matching format can be found FormatUnknown is returned. +func ResponseFormat(h http.Header) Format { + ct := h.Get(hdrContentType) + + mediatype, params, err := mime.ParseMediaType(ct) + if err != nil { + return FmtUnknown + } + + const textType = "text/plain" + + switch mediatype { + case ProtoType: + if p, ok := params["proto"]; ok && p != ProtoProtocol { + return FmtUnknown + } + if e, ok := params["encoding"]; ok && e != "delimited" { + return FmtUnknown + } + return FmtProtoDelim + + case textType: + if v, ok := params["version"]; ok && v != TextVersion { + return FmtUnknown + } + return FmtText + } + + return FmtUnknown +} + +// NewDecoder returns a new decoder based on the given input format. +// If the input format does not imply otherwise, a text format decoder is returned. +func NewDecoder(r io.Reader, format Format) Decoder { + switch format { + case FmtProtoDelim: + return &protoDecoder{r: r} + } + return &textDecoder{r: r} +} + +// protoDecoder implements the Decoder interface for protocol buffers. +type protoDecoder struct { + r io.Reader +} + +// Decode implements the Decoder interface. 
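To make the ResponseFormat/NewDecoder plumbing above concrete, here is an editorial sketch that scrapes an arbitrary endpoint and decodes whatever format the Content-Type announces; the target URL is a placeholder.

package main

import (
	"fmt"
	"io"
	"log"
	"net/http"

	dto "github.com/prometheus/client_model/go"
	"github.com/prometheus/common/expfmt"
)

func main() {
	// Hypothetical target; any endpoint exposing Prometheus metrics works.
	resp, err := http.Get("http://localhost:9090/metrics")
	if err != nil {
		log.Fatal(err)
	}
	defer resp.Body.Close()

	// Pick the decoder matching the Content-Type header (text or delimited protobuf).
	dec := expfmt.NewDecoder(resp.Body, expfmt.ResponseFormat(resp.Header))
	for {
		mf := &dto.MetricFamily{}
		if err := dec.Decode(mf); err != nil {
			if err == io.EOF {
				break
			}
			log.Fatal(err)
		}
		fmt.Println(mf.GetName(), len(mf.GetMetric()))
	}
}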
+func (d *protoDecoder) Decode(v *dto.MetricFamily) error { + _, err := pbutil.ReadDelimited(d.r, v) + if err != nil { + return err + } + if !model.IsValidMetricName(model.LabelValue(v.GetName())) { + return fmt.Errorf("invalid metric name %q", v.GetName()) + } + for _, m := range v.GetMetric() { + if m == nil { + continue + } + for _, l := range m.GetLabel() { + if l == nil { + continue + } + if !model.LabelValue(l.GetValue()).IsValid() { + return fmt.Errorf("invalid label value %q", l.GetValue()) + } + if !model.LabelName(l.GetName()).IsValid() { + return fmt.Errorf("invalid label name %q", l.GetName()) + } + } + } + return nil +} + +// textDecoder implements the Decoder interface for the text protocol. +type textDecoder struct { + r io.Reader + p TextParser + fams []*dto.MetricFamily +} + +// Decode implements the Decoder interface. +func (d *textDecoder) Decode(v *dto.MetricFamily) error { + // TODO(fabxc): Wrap this as a line reader to make streaming safer. + if len(d.fams) == 0 { + // No cached metric families, read everything and parse metrics. + fams, err := d.p.TextToMetricFamilies(d.r) + if err != nil { + return err + } + if len(fams) == 0 { + return io.EOF + } + d.fams = make([]*dto.MetricFamily, 0, len(fams)) + for _, f := range fams { + d.fams = append(d.fams, f) + } + } + + *v = *d.fams[0] + d.fams = d.fams[1:] + + return nil +} + +// SampleDecoder wraps a Decoder to extract samples from the metric families +// decoded by the wrapped Decoder. +type SampleDecoder struct { + Dec Decoder + Opts *DecodeOptions + + f dto.MetricFamily +} + +// Decode calls the Decode method of the wrapped Decoder and then extracts the +// samples from the decoded MetricFamily into the provided model.Vector. +func (sd *SampleDecoder) Decode(s *model.Vector) error { + err := sd.Dec.Decode(&sd.f) + if err != nil { + return err + } + *s, err = extractSamples(&sd.f, sd.Opts) + return err +} + +// ExtractSamples builds a slice of samples from the provided metric +// families. If an error occurs during sample extraction, it continues to +// extract from the remaining metric families. The returned error is the last +// error that has occured. +func ExtractSamples(o *DecodeOptions, fams ...*dto.MetricFamily) (model.Vector, error) { + var ( + all model.Vector + lastErr error + ) + for _, f := range fams { + some, err := extractSamples(f, o) + if err != nil { + lastErr = err + continue + } + all = append(all, some...) 
+ } + return all, lastErr +} + +func extractSamples(f *dto.MetricFamily, o *DecodeOptions) (model.Vector, error) { + switch f.GetType() { + case dto.MetricType_COUNTER: + return extractCounter(o, f), nil + case dto.MetricType_GAUGE: + return extractGauge(o, f), nil + case dto.MetricType_SUMMARY: + return extractSummary(o, f), nil + case dto.MetricType_UNTYPED: + return extractUntyped(o, f), nil + case dto.MetricType_HISTOGRAM: + return extractHistogram(o, f), nil + } + return nil, fmt.Errorf("expfmt.extractSamples: unknown metric family type %v", f.GetType()) +} + +func extractCounter(o *DecodeOptions, f *dto.MetricFamily) model.Vector { + samples := make(model.Vector, 0, len(f.Metric)) + + for _, m := range f.Metric { + if m.Counter == nil { + continue + } + + lset := make(model.LabelSet, len(m.Label)+1) + for _, p := range m.Label { + lset[model.LabelName(p.GetName())] = model.LabelValue(p.GetValue()) + } + lset[model.MetricNameLabel] = model.LabelValue(f.GetName()) + + smpl := &model.Sample{ + Metric: model.Metric(lset), + Value: model.SampleValue(m.Counter.GetValue()), + } + + if m.TimestampMs != nil { + smpl.Timestamp = model.TimeFromUnixNano(*m.TimestampMs * 1000000) + } else { + smpl.Timestamp = o.Timestamp + } + + samples = append(samples, smpl) + } + + return samples +} + +func extractGauge(o *DecodeOptions, f *dto.MetricFamily) model.Vector { + samples := make(model.Vector, 0, len(f.Metric)) + + for _, m := range f.Metric { + if m.Gauge == nil { + continue + } + + lset := make(model.LabelSet, len(m.Label)+1) + for _, p := range m.Label { + lset[model.LabelName(p.GetName())] = model.LabelValue(p.GetValue()) + } + lset[model.MetricNameLabel] = model.LabelValue(f.GetName()) + + smpl := &model.Sample{ + Metric: model.Metric(lset), + Value: model.SampleValue(m.Gauge.GetValue()), + } + + if m.TimestampMs != nil { + smpl.Timestamp = model.TimeFromUnixNano(*m.TimestampMs * 1000000) + } else { + smpl.Timestamp = o.Timestamp + } + + samples = append(samples, smpl) + } + + return samples +} + +func extractUntyped(o *DecodeOptions, f *dto.MetricFamily) model.Vector { + samples := make(model.Vector, 0, len(f.Metric)) + + for _, m := range f.Metric { + if m.Untyped == nil { + continue + } + + lset := make(model.LabelSet, len(m.Label)+1) + for _, p := range m.Label { + lset[model.LabelName(p.GetName())] = model.LabelValue(p.GetValue()) + } + lset[model.MetricNameLabel] = model.LabelValue(f.GetName()) + + smpl := &model.Sample{ + Metric: model.Metric(lset), + Value: model.SampleValue(m.Untyped.GetValue()), + } + + if m.TimestampMs != nil { + smpl.Timestamp = model.TimeFromUnixNano(*m.TimestampMs * 1000000) + } else { + smpl.Timestamp = o.Timestamp + } + + samples = append(samples, smpl) + } + + return samples +} + +func extractSummary(o *DecodeOptions, f *dto.MetricFamily) model.Vector { + samples := make(model.Vector, 0, len(f.Metric)) + + for _, m := range f.Metric { + if m.Summary == nil { + continue + } + + timestamp := o.Timestamp + if m.TimestampMs != nil { + timestamp = model.TimeFromUnixNano(*m.TimestampMs * 1000000) + } + + for _, q := range m.Summary.Quantile { + lset := make(model.LabelSet, len(m.Label)+2) + for _, p := range m.Label { + lset[model.LabelName(p.GetName())] = model.LabelValue(p.GetValue()) + } + // BUG(matt): Update other names to "quantile". 
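+// Each quantile of the summary becomes its own sample, keyed by the
+// "quantile" label; the _sum and _count series are emitted separately below.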
+ lset[model.LabelName(model.QuantileLabel)] = model.LabelValue(fmt.Sprint(q.GetQuantile())) + lset[model.MetricNameLabel] = model.LabelValue(f.GetName()) + + samples = append(samples, &model.Sample{ + Metric: model.Metric(lset), + Value: model.SampleValue(q.GetValue()), + Timestamp: timestamp, + }) + } + + lset := make(model.LabelSet, len(m.Label)+1) + for _, p := range m.Label { + lset[model.LabelName(p.GetName())] = model.LabelValue(p.GetValue()) + } + lset[model.MetricNameLabel] = model.LabelValue(f.GetName() + "_sum") + + samples = append(samples, &model.Sample{ + Metric: model.Metric(lset), + Value: model.SampleValue(m.Summary.GetSampleSum()), + Timestamp: timestamp, + }) + + lset = make(model.LabelSet, len(m.Label)+1) + for _, p := range m.Label { + lset[model.LabelName(p.GetName())] = model.LabelValue(p.GetValue()) + } + lset[model.MetricNameLabel] = model.LabelValue(f.GetName() + "_count") + + samples = append(samples, &model.Sample{ + Metric: model.Metric(lset), + Value: model.SampleValue(m.Summary.GetSampleCount()), + Timestamp: timestamp, + }) + } + + return samples +} + +func extractHistogram(o *DecodeOptions, f *dto.MetricFamily) model.Vector { + samples := make(model.Vector, 0, len(f.Metric)) + + for _, m := range f.Metric { + if m.Histogram == nil { + continue + } + + timestamp := o.Timestamp + if m.TimestampMs != nil { + timestamp = model.TimeFromUnixNano(*m.TimestampMs * 1000000) + } + + infSeen := false + + for _, q := range m.Histogram.Bucket { + lset := make(model.LabelSet, len(m.Label)+2) + for _, p := range m.Label { + lset[model.LabelName(p.GetName())] = model.LabelValue(p.GetValue()) + } + lset[model.LabelName(model.BucketLabel)] = model.LabelValue(fmt.Sprint(q.GetUpperBound())) + lset[model.MetricNameLabel] = model.LabelValue(f.GetName() + "_bucket") + + if math.IsInf(q.GetUpperBound(), +1) { + infSeen = true + } + + samples = append(samples, &model.Sample{ + Metric: model.Metric(lset), + Value: model.SampleValue(q.GetCumulativeCount()), + Timestamp: timestamp, + }) + } + + lset := make(model.LabelSet, len(m.Label)+1) + for _, p := range m.Label { + lset[model.LabelName(p.GetName())] = model.LabelValue(p.GetValue()) + } + lset[model.MetricNameLabel] = model.LabelValue(f.GetName() + "_sum") + + samples = append(samples, &model.Sample{ + Metric: model.Metric(lset), + Value: model.SampleValue(m.Histogram.GetSampleSum()), + Timestamp: timestamp, + }) + + lset = make(model.LabelSet, len(m.Label)+1) + for _, p := range m.Label { + lset[model.LabelName(p.GetName())] = model.LabelValue(p.GetValue()) + } + lset[model.MetricNameLabel] = model.LabelValue(f.GetName() + "_count") + + count := &model.Sample{ + Metric: model.Metric(lset), + Value: model.SampleValue(m.Histogram.GetSampleCount()), + Timestamp: timestamp, + } + samples = append(samples, count) + + if !infSeen { + // Append an infinity bucket sample. 
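+// The exposition may omit the implicit +Inf bucket. Its cumulative count
+// equals the overall sample count, so it is reconstructed here from the
+// _count sample built above.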
+ lset := make(model.LabelSet, len(m.Label)+2) + for _, p := range m.Label { + lset[model.LabelName(p.GetName())] = model.LabelValue(p.GetValue()) + } + lset[model.LabelName(model.BucketLabel)] = model.LabelValue("+Inf") + lset[model.MetricNameLabel] = model.LabelValue(f.GetName() + "_bucket") + + samples = append(samples, &model.Sample{ + Metric: model.Metric(lset), + Value: count.Value, + Timestamp: timestamp, + }) + } + } + + return samples +} diff --git a/vendor/github.com/prometheus/common/expfmt/decode_test.go b/vendor/github.com/prometheus/common/expfmt/decode_test.go new file mode 100644 index 0000000..82c1130 --- /dev/null +++ b/vendor/github.com/prometheus/common/expfmt/decode_test.go @@ -0,0 +1,435 @@ +// Copyright 2015 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package expfmt + +import ( + "io" + "net/http" + "reflect" + "sort" + "strings" + "testing" + + "github.com/golang/protobuf/proto" + dto "github.com/prometheus/client_model/go" + + "github.com/prometheus/common/model" +) + +func TestTextDecoder(t *testing.T) { + var ( + ts = model.Now() + in = ` +# Only a quite simple scenario with two metric families. +# More complicated tests of the parser itself can be found in the text package. +# TYPE mf2 counter +mf2 3 +mf1{label="value1"} -3.14 123456 +mf1{label="value2"} 42 +mf2 4 +` + out = model.Vector{ + &model.Sample{ + Metric: model.Metric{ + model.MetricNameLabel: "mf1", + "label": "value1", + }, + Value: -3.14, + Timestamp: 123456, + }, + &model.Sample{ + Metric: model.Metric{ + model.MetricNameLabel: "mf1", + "label": "value2", + }, + Value: 42, + Timestamp: ts, + }, + &model.Sample{ + Metric: model.Metric{ + model.MetricNameLabel: "mf2", + }, + Value: 3, + Timestamp: ts, + }, + &model.Sample{ + Metric: model.Metric{ + model.MetricNameLabel: "mf2", + }, + Value: 4, + Timestamp: ts, + }, + } + ) + + dec := &SampleDecoder{ + Dec: &textDecoder{r: strings.NewReader(in)}, + Opts: &DecodeOptions{ + Timestamp: ts, + }, + } + var all model.Vector + for { + var smpls model.Vector + err := dec.Decode(&smpls) + if err == io.EOF { + break + } + if err != nil { + t.Fatal(err) + } + all = append(all, smpls...) 
+ } + sort.Sort(all) + sort.Sort(out) + if !reflect.DeepEqual(all, out) { + t.Fatalf("output does not match") + } +} + +func TestProtoDecoder(t *testing.T) { + + var testTime = model.Now() + + scenarios := []struct { + in string + expected model.Vector + fail bool + }{ + { + in: "", + }, + { + in: "\x8f\x01\n\rrequest_count\x12\x12Number of requests\x18\x00\"0\n#\n\x0fsome_!abel_name\x12\x10some_label_value\x1a\t\t\x00\x00\x00\x00\x00\x00E\xc0\"6\n)\n\x12another_label_name\x12\x13another_label_value\x1a\t\t\x00\x00\x00\x00\x00\x00U@", + fail: true, + }, + { + in: "\x8f\x01\n\rrequest_count\x12\x12Number of requests\x18\x00\"0\n#\n\x0fsome_label_name\x12\x10some_label_value\x1a\t\t\x00\x00\x00\x00\x00\x00E\xc0\"6\n)\n\x12another_label_name\x12\x13another_label_value\x1a\t\t\x00\x00\x00\x00\x00\x00U@", + expected: model.Vector{ + &model.Sample{ + Metric: model.Metric{ + model.MetricNameLabel: "request_count", + "some_label_name": "some_label_value", + }, + Value: -42, + Timestamp: testTime, + }, + &model.Sample{ + Metric: model.Metric{ + model.MetricNameLabel: "request_count", + "another_label_name": "another_label_value", + }, + Value: 84, + Timestamp: testTime, + }, + }, + }, + { + in: "\xb9\x01\n\rrequest_count\x12\x12Number of requests\x18\x02\"O\n#\n\x0fsome_label_name\x12\x10some_label_value\"(\x1a\x12\t\xaeG\xe1z\x14\xae\xef?\x11\x00\x00\x00\x00\x00\x00E\xc0\x1a\x12\t+\x87\x16\xd9\xce\xf7\xef?\x11\x00\x00\x00\x00\x00\x00U\xc0\"A\n)\n\x12another_label_name\x12\x13another_label_value\"\x14\x1a\x12\t\x00\x00\x00\x00\x00\x00\xe0?\x11\x00\x00\x00\x00\x00\x00$@", + expected: model.Vector{ + &model.Sample{ + Metric: model.Metric{ + model.MetricNameLabel: "request_count_count", + "some_label_name": "some_label_value", + }, + Value: 0, + Timestamp: testTime, + }, + &model.Sample{ + Metric: model.Metric{ + model.MetricNameLabel: "request_count_sum", + "some_label_name": "some_label_value", + }, + Value: 0, + Timestamp: testTime, + }, + &model.Sample{ + Metric: model.Metric{ + model.MetricNameLabel: "request_count", + "some_label_name": "some_label_value", + "quantile": "0.99", + }, + Value: -42, + Timestamp: testTime, + }, + &model.Sample{ + Metric: model.Metric{ + model.MetricNameLabel: "request_count", + "some_label_name": "some_label_value", + "quantile": "0.999", + }, + Value: -84, + Timestamp: testTime, + }, + &model.Sample{ + Metric: model.Metric{ + model.MetricNameLabel: "request_count_count", + "another_label_name": "another_label_value", + }, + Value: 0, + Timestamp: testTime, + }, + &model.Sample{ + Metric: model.Metric{ + model.MetricNameLabel: "request_count_sum", + "another_label_name": "another_label_value", + }, + Value: 0, + Timestamp: testTime, + }, + &model.Sample{ + Metric: model.Metric{ + model.MetricNameLabel: "request_count", + "another_label_name": "another_label_value", + "quantile": "0.5", + }, + Value: 10, + Timestamp: testTime, + }, + }, + }, + { + in: "\x8d\x01\n\x1drequest_duration_microseconds\x12\x15The response latency.\x18\x04\"S:Q\b\x85\x15\x11\xcd\xcc\xccL\x8f\xcb:A\x1a\v\b{\x11\x00\x00\x00\x00\x00\x00Y@\x1a\f\b\x9c\x03\x11\x00\x00\x00\x00\x00\x00^@\x1a\f\b\xd0\x04\x11\x00\x00\x00\x00\x00\x00b@\x1a\f\b\xf4\v\x11\x9a\x99\x99\x99\x99\x99e@\x1a\f\b\x85\x15\x11\x00\x00\x00\x00\x00\x00\xf0\u007f", + expected: model.Vector{ + &model.Sample{ + Metric: model.Metric{ + model.MetricNameLabel: "request_duration_microseconds_bucket", + "le": "100", + }, + Value: 123, + Timestamp: testTime, + }, + &model.Sample{ + Metric: model.Metric{ + model.MetricNameLabel: 
"request_duration_microseconds_bucket", + "le": "120", + }, + Value: 412, + Timestamp: testTime, + }, + &model.Sample{ + Metric: model.Metric{ + model.MetricNameLabel: "request_duration_microseconds_bucket", + "le": "144", + }, + Value: 592, + Timestamp: testTime, + }, + &model.Sample{ + Metric: model.Metric{ + model.MetricNameLabel: "request_duration_microseconds_bucket", + "le": "172.8", + }, + Value: 1524, + Timestamp: testTime, + }, + &model.Sample{ + Metric: model.Metric{ + model.MetricNameLabel: "request_duration_microseconds_bucket", + "le": "+Inf", + }, + Value: 2693, + Timestamp: testTime, + }, + &model.Sample{ + Metric: model.Metric{ + model.MetricNameLabel: "request_duration_microseconds_sum", + }, + Value: 1756047.3, + Timestamp: testTime, + }, + &model.Sample{ + Metric: model.Metric{ + model.MetricNameLabel: "request_duration_microseconds_count", + }, + Value: 2693, + Timestamp: testTime, + }, + }, + }, + { + // The metric type is unset in this protobuf, which needs to be handled + // correctly by the decoder. + in: "\x1c\n\rrequest_count\"\v\x1a\t\t\x00\x00\x00\x00\x00\x00\xf0?", + expected: model.Vector{ + &model.Sample{ + Metric: model.Metric{ + model.MetricNameLabel: "request_count", + }, + Value: 1, + Timestamp: testTime, + }, + }, + }, + } + + for i, scenario := range scenarios { + dec := &SampleDecoder{ + Dec: &protoDecoder{r: strings.NewReader(scenario.in)}, + Opts: &DecodeOptions{ + Timestamp: testTime, + }, + } + + var all model.Vector + for { + var smpls model.Vector + err := dec.Decode(&smpls) + if err == io.EOF { + break + } + if scenario.fail { + if err == nil { + t.Fatal("Expected error but got none") + } + break + } + if err != nil { + t.Fatal(err) + } + all = append(all, smpls...) + } + sort.Sort(all) + sort.Sort(scenario.expected) + if !reflect.DeepEqual(all, scenario.expected) { + t.Fatalf("%d. output does not match, want: %#v, got %#v", i, scenario.expected, all) + } + } +} + +func testDiscriminatorHTTPHeader(t testing.TB) { + var scenarios = []struct { + input map[string]string + output Format + err error + }{ + { + input: map[string]string{"Content-Type": `application/vnd.google.protobuf; proto="io.prometheus.client.MetricFamily"; encoding="delimited"`}, + output: FmtProtoDelim, + }, + { + input: map[string]string{"Content-Type": `application/vnd.google.protobuf; proto="illegal"; encoding="delimited"`}, + output: FmtUnknown, + }, + { + input: map[string]string{"Content-Type": `application/vnd.google.protobuf; proto="io.prometheus.client.MetricFamily"; encoding="illegal"`}, + output: FmtUnknown, + }, + { + input: map[string]string{"Content-Type": `text/plain; version=0.0.4`}, + output: FmtText, + }, + { + input: map[string]string{"Content-Type": `text/plain`}, + output: FmtText, + }, + { + input: map[string]string{"Content-Type": `text/plain; version=0.0.3`}, + output: FmtUnknown, + }, + } + + for i, scenario := range scenarios { + var header http.Header + + if len(scenario.input) > 0 { + header = http.Header{} + } + + for key, value := range scenario.input { + header.Add(key, value) + } + + actual := ResponseFormat(header) + + if scenario.output != actual { + t.Errorf("%d. 
expected %s, got %s", i, scenario.output, actual) + } + } +} + +func TestDiscriminatorHTTPHeader(t *testing.T) { + testDiscriminatorHTTPHeader(t) +} + +func BenchmarkDiscriminatorHTTPHeader(b *testing.B) { + for i := 0; i < b.N; i++ { + testDiscriminatorHTTPHeader(b) + } +} + +func TestExtractSamples(t *testing.T) { + var ( + goodMetricFamily1 = &dto.MetricFamily{ + Name: proto.String("foo"), + Help: proto.String("Help for foo."), + Type: dto.MetricType_COUNTER.Enum(), + Metric: []*dto.Metric{ + &dto.Metric{ + Counter: &dto.Counter{ + Value: proto.Float64(4711), + }, + }, + }, + } + goodMetricFamily2 = &dto.MetricFamily{ + Name: proto.String("bar"), + Help: proto.String("Help for bar."), + Type: dto.MetricType_GAUGE.Enum(), + Metric: []*dto.Metric{ + &dto.Metric{ + Gauge: &dto.Gauge{ + Value: proto.Float64(3.14), + }, + }, + }, + } + badMetricFamily = &dto.MetricFamily{ + Name: proto.String("bad"), + Help: proto.String("Help for bad."), + Type: dto.MetricType(42).Enum(), + Metric: []*dto.Metric{ + &dto.Metric{ + Gauge: &dto.Gauge{ + Value: proto.Float64(2.7), + }, + }, + }, + } + + opts = &DecodeOptions{ + Timestamp: 42, + } + ) + + got, err := ExtractSamples(opts, goodMetricFamily1, goodMetricFamily2) + if err != nil { + t.Error("Unexpected error from ExtractSamples:", err) + } + want := model.Vector{ + &model.Sample{Metric: model.Metric{model.MetricNameLabel: "foo"}, Value: 4711, Timestamp: 42}, + &model.Sample{Metric: model.Metric{model.MetricNameLabel: "bar"}, Value: 3.14, Timestamp: 42}, + } + if !reflect.DeepEqual(got, want) { + t.Errorf("unexpected samples extracted, got: %v, want: %v", got, want) + } + + got, err = ExtractSamples(opts, goodMetricFamily1, badMetricFamily, goodMetricFamily2) + if err == nil { + t.Error("Expected error from ExtractSamples") + } + if !reflect.DeepEqual(got, want) { + t.Errorf("unexpected samples extracted, got: %v, want: %v", got, want) + } +} diff --git a/vendor/github.com/prometheus/common/expfmt/encode.go b/vendor/github.com/prometheus/common/expfmt/encode.go new file mode 100644 index 0000000..11839ed --- /dev/null +++ b/vendor/github.com/prometheus/common/expfmt/encode.go @@ -0,0 +1,88 @@ +// Copyright 2015 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package expfmt + +import ( + "fmt" + "io" + "net/http" + + "github.com/golang/protobuf/proto" + "github.com/matttproud/golang_protobuf_extensions/pbutil" + "github.com/prometheus/common/internal/bitbucket.org/ww/goautoneg" + + dto "github.com/prometheus/client_model/go" +) + +// Encoder types encode metric families into an underlying wire protocol. +type Encoder interface { + Encode(*dto.MetricFamily) error +} + +type encoder func(*dto.MetricFamily) error + +func (e encoder) Encode(v *dto.MetricFamily) error { + return e(v) +} + +// Negotiate returns the Content-Type based on the given Accept header. +// If no appropriate accepted type is found, FmtText is returned. 
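+// Editor's sketch (not part of the upstream file): typical use of Negotiate
+// together with NewEncoder in an HTTP handler. w, req and mfs are assumed to
+// be the handler's ResponseWriter, *http.Request and a []*dto.MetricFamily
+// gathered elsewhere.
+//
+//    format := expfmt.Negotiate(req.Header)
+//    w.Header().Set("Content-Type", string(format))
+//    enc := expfmt.NewEncoder(w, format)
+//    for _, mf := range mfs {
+//        if err := enc.Encode(mf); err != nil {
+//            break // handle the encode error as appropriate
+//        }
+//    }
+//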
+func Negotiate(h http.Header) Format { + for _, ac := range goautoneg.ParseAccept(h.Get(hdrAccept)) { + // Check for protocol buffer + if ac.Type+"/"+ac.SubType == ProtoType && ac.Params["proto"] == ProtoProtocol { + switch ac.Params["encoding"] { + case "delimited": + return FmtProtoDelim + case "text": + return FmtProtoText + case "compact-text": + return FmtProtoCompact + } + } + // Check for text format. + ver := ac.Params["version"] + if ac.Type == "text" && ac.SubType == "plain" && (ver == TextVersion || ver == "") { + return FmtText + } + } + return FmtText +} + +// NewEncoder returns a new encoder based on content type negotiation. +func NewEncoder(w io.Writer, format Format) Encoder { + switch format { + case FmtProtoDelim: + return encoder(func(v *dto.MetricFamily) error { + _, err := pbutil.WriteDelimited(w, v) + return err + }) + case FmtProtoCompact: + return encoder(func(v *dto.MetricFamily) error { + _, err := fmt.Fprintln(w, v.String()) + return err + }) + case FmtProtoText: + return encoder(func(v *dto.MetricFamily) error { + _, err := fmt.Fprintln(w, proto.MarshalTextString(v)) + return err + }) + case FmtText: + return encoder(func(v *dto.MetricFamily) error { + _, err := MetricFamilyToText(w, v) + return err + }) + } + panic("expfmt.NewEncoder: unknown format") +} diff --git a/vendor/github.com/prometheus/common/expfmt/expfmt.go b/vendor/github.com/prometheus/common/expfmt/expfmt.go new file mode 100644 index 0000000..371ac75 --- /dev/null +++ b/vendor/github.com/prometheus/common/expfmt/expfmt.go @@ -0,0 +1,38 @@ +// Copyright 2015 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package expfmt contains tools for reading and writing Prometheus metrics. +package expfmt + +// Format specifies the HTTP content type of the different wire protocols. +type Format string + +// Constants to assemble the Content-Type values for the different wire protocols. +const ( + TextVersion = "0.0.4" + ProtoType = `application/vnd.google.protobuf` + ProtoProtocol = `io.prometheus.client.MetricFamily` + ProtoFmt = ProtoType + "; proto=" + ProtoProtocol + ";" + + // The Content-Type values for the different wire protocols. + FmtUnknown Format = `` + FmtText Format = `text/plain; version=` + TextVersion + FmtProtoDelim Format = ProtoFmt + ` encoding=delimited` + FmtProtoText Format = ProtoFmt + ` encoding=text` + FmtProtoCompact Format = ProtoFmt + ` encoding=compact-text` +) + +const ( + hdrContentType = "Content-Type" + hdrAccept = "Accept" +) diff --git a/vendor/github.com/prometheus/common/expfmt/fuzz.go b/vendor/github.com/prometheus/common/expfmt/fuzz.go new file mode 100644 index 0000000..dc2eede --- /dev/null +++ b/vendor/github.com/prometheus/common/expfmt/fuzz.go @@ -0,0 +1,36 @@ +// Copyright 2014 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Build only when actually fuzzing +// +build gofuzz + +package expfmt + +import "bytes" + +// Fuzz text metric parser with github.com/dvyukov/go-fuzz: +// +// go-fuzz-build github.com/prometheus/common/expfmt +// go-fuzz -bin expfmt-fuzz.zip -workdir fuzz +// +// Further input samples should go in the folder fuzz/corpus. +func Fuzz(in []byte) int { + parser := TextParser{} + _, err := parser.TextToMetricFamilies(bytes.NewReader(in)) + + if err != nil { + return 0 + } + + return 1 +} diff --git a/vendor/github.com/prometheus/common/expfmt/fuzz/corpus/from_test_parse_0 b/vendor/github.com/prometheus/common/expfmt/fuzz/corpus/from_test_parse_0 new file mode 100644 index 0000000..139597f --- /dev/null +++ b/vendor/github.com/prometheus/common/expfmt/fuzz/corpus/from_test_parse_0 @@ -0,0 +1,2 @@ + + diff --git a/vendor/github.com/prometheus/common/expfmt/fuzz/corpus/from_test_parse_1 b/vendor/github.com/prometheus/common/expfmt/fuzz/corpus/from_test_parse_1 new file mode 100644 index 0000000..2ae8706 --- /dev/null +++ b/vendor/github.com/prometheus/common/expfmt/fuzz/corpus/from_test_parse_1 @@ -0,0 +1,6 @@ + +minimal_metric 1.234 +another_metric -3e3 103948 +# Even that: +no_labels{} 3 +# HELP line for non-existing metric will be ignored. diff --git a/vendor/github.com/prometheus/common/expfmt/fuzz/corpus/from_test_parse_2 b/vendor/github.com/prometheus/common/expfmt/fuzz/corpus/from_test_parse_2 new file mode 100644 index 0000000..5c351db --- /dev/null +++ b/vendor/github.com/prometheus/common/expfmt/fuzz/corpus/from_test_parse_2 @@ -0,0 +1,12 @@ + +# A normal comment. +# +# TYPE name counter +name{labelname="val1",basename="basevalue"} NaN +name {labelname="val2",basename="base\"v\\al\nue"} 0.23 1234567890 +# HELP name two-line\n doc str\\ing + + # HELP name2 doc str"ing 2 + # TYPE name2 gauge +name2{labelname="val2" ,basename = "basevalue2" } +Inf 54321 +name2{ labelname = "val1" , }-Inf diff --git a/vendor/github.com/prometheus/common/expfmt/fuzz/corpus/from_test_parse_3 b/vendor/github.com/prometheus/common/expfmt/fuzz/corpus/from_test_parse_3 new file mode 100644 index 0000000..0b3c345 --- /dev/null +++ b/vendor/github.com/prometheus/common/expfmt/fuzz/corpus/from_test_parse_3 @@ -0,0 +1,22 @@ + +# TYPE my_summary summary +my_summary{n1="val1",quantile="0.5"} 110 +decoy -1 -2 +my_summary{n1="val1",quantile="0.9"} 140 1 +my_summary_count{n1="val1"} 42 +# Latest timestamp wins in case of a summary.
+my_summary_sum{n1="val1"} 4711 2 +fake_sum{n1="val1"} 2001 +# TYPE another_summary summary +another_summary_count{n2="val2",n1="val1"} 20 +my_summary_count{n2="val2",n1="val1"} 5 5 +another_summary{n1="val1",n2="val2",quantile=".3"} -1.2 +my_summary_sum{n1="val2"} 08 15 +my_summary{n1="val3", quantile="0.2"} 4711 + my_summary{n1="val1",n2="val2",quantile="-12.34",} NaN +# some +# funny comments +# HELP +# HELP +# HELP my_summary +# HELP my_summary diff --git a/vendor/github.com/prometheus/common/expfmt/fuzz/corpus/from_test_parse_4 b/vendor/github.com/prometheus/common/expfmt/fuzz/corpus/from_test_parse_4 new file mode 100644 index 0000000..bde0a38 --- /dev/null +++ b/vendor/github.com/prometheus/common/expfmt/fuzz/corpus/from_test_parse_4 @@ -0,0 +1,10 @@ + +# HELP request_duration_microseconds The response latency. +# TYPE request_duration_microseconds histogram +request_duration_microseconds_bucket{le="100"} 123 +request_duration_microseconds_bucket{le="120"} 412 +request_duration_microseconds_bucket{le="144"} 592 +request_duration_microseconds_bucket{le="172.8"} 1524 +request_duration_microseconds_bucket{le="+Inf"} 2693 +request_duration_microseconds_sum 1.7560473e+06 +request_duration_microseconds_count 2693 diff --git a/vendor/github.com/prometheus/common/expfmt/fuzz/corpus/from_test_parse_error_0 b/vendor/github.com/prometheus/common/expfmt/fuzz/corpus/from_test_parse_error_0 new file mode 100644 index 0000000..4c67f9a --- /dev/null +++ b/vendor/github.com/prometheus/common/expfmt/fuzz/corpus/from_test_parse_error_0 @@ -0,0 +1 @@ +bla 3.14 \ No newline at end of file diff --git a/vendor/github.com/prometheus/common/expfmt/fuzz/corpus/from_test_parse_error_1 b/vendor/github.com/prometheus/common/expfmt/fuzz/corpus/from_test_parse_error_1 new file mode 100644 index 0000000..b853478 --- /dev/null +++ b/vendor/github.com/prometheus/common/expfmt/fuzz/corpus/from_test_parse_error_1 @@ -0,0 +1 @@ +metric{label="\t"} 3.14 \ No newline at end of file diff --git a/vendor/github.com/prometheus/common/expfmt/fuzz/corpus/from_test_parse_error_10 b/vendor/github.com/prometheus/common/expfmt/fuzz/corpus/from_test_parse_error_10 new file mode 100644 index 0000000..b5fe5f5 --- /dev/null +++ b/vendor/github.com/prometheus/common/expfmt/fuzz/corpus/from_test_parse_error_10 @@ -0,0 +1 @@ +metric{label="bla"} 3.14 2 3 diff --git a/vendor/github.com/prometheus/common/expfmt/fuzz/corpus/from_test_parse_error_11 b/vendor/github.com/prometheus/common/expfmt/fuzz/corpus/from_test_parse_error_11 new file mode 100644 index 0000000..57c7fbc --- /dev/null +++ b/vendor/github.com/prometheus/common/expfmt/fuzz/corpus/from_test_parse_error_11 @@ -0,0 +1 @@ +metric{label="bla"} blubb diff --git a/vendor/github.com/prometheus/common/expfmt/fuzz/corpus/from_test_parse_error_12 b/vendor/github.com/prometheus/common/expfmt/fuzz/corpus/from_test_parse_error_12 new file mode 100644 index 0000000..0a9df79 --- /dev/null +++ b/vendor/github.com/prometheus/common/expfmt/fuzz/corpus/from_test_parse_error_12 @@ -0,0 +1,3 @@ + +# HELP metric one +# HELP metric two diff --git a/vendor/github.com/prometheus/common/expfmt/fuzz/corpus/from_test_parse_error_13 b/vendor/github.com/prometheus/common/expfmt/fuzz/corpus/from_test_parse_error_13 new file mode 100644 index 0000000..5bc7427 --- /dev/null +++ b/vendor/github.com/prometheus/common/expfmt/fuzz/corpus/from_test_parse_error_13 @@ -0,0 +1,3 @@ + +# TYPE metric counter +# TYPE metric untyped diff --git 
a/vendor/github.com/prometheus/common/expfmt/fuzz/corpus/from_test_parse_error_14 b/vendor/github.com/prometheus/common/expfmt/fuzz/corpus/from_test_parse_error_14 new file mode 100644 index 0000000..a9a2426 --- /dev/null +++ b/vendor/github.com/prometheus/common/expfmt/fuzz/corpus/from_test_parse_error_14 @@ -0,0 +1,3 @@ + +metric 4.12 +# TYPE metric counter diff --git a/vendor/github.com/prometheus/common/expfmt/fuzz/corpus/from_test_parse_error_15 b/vendor/github.com/prometheus/common/expfmt/fuzz/corpus/from_test_parse_error_15 new file mode 100644 index 0000000..7e95ca8 --- /dev/null +++ b/vendor/github.com/prometheus/common/expfmt/fuzz/corpus/from_test_parse_error_15 @@ -0,0 +1,2 @@ + +# TYPE metric bla diff --git a/vendor/github.com/prometheus/common/expfmt/fuzz/corpus/from_test_parse_error_16 b/vendor/github.com/prometheus/common/expfmt/fuzz/corpus/from_test_parse_error_16 new file mode 100644 index 0000000..7825f88 --- /dev/null +++ b/vendor/github.com/prometheus/common/expfmt/fuzz/corpus/from_test_parse_error_16 @@ -0,0 +1,2 @@ + +# TYPE met-ric diff --git a/vendor/github.com/prometheus/common/expfmt/fuzz/corpus/from_test_parse_error_17 b/vendor/github.com/prometheus/common/expfmt/fuzz/corpus/from_test_parse_error_17 new file mode 100644 index 0000000..8f35cae --- /dev/null +++ b/vendor/github.com/prometheus/common/expfmt/fuzz/corpus/from_test_parse_error_17 @@ -0,0 +1 @@ +@invalidmetric{label="bla"} 3.14 2 \ No newline at end of file diff --git a/vendor/github.com/prometheus/common/expfmt/fuzz/corpus/from_test_parse_error_18 b/vendor/github.com/prometheus/common/expfmt/fuzz/corpus/from_test_parse_error_18 new file mode 100644 index 0000000..7ca2cc2 --- /dev/null +++ b/vendor/github.com/prometheus/common/expfmt/fuzz/corpus/from_test_parse_error_18 @@ -0,0 +1 @@ +{label="bla"} 3.14 2 \ No newline at end of file diff --git a/vendor/github.com/prometheus/common/expfmt/fuzz/corpus/from_test_parse_error_19 b/vendor/github.com/prometheus/common/expfmt/fuzz/corpus/from_test_parse_error_19 new file mode 100644 index 0000000..7a6ccc0 --- /dev/null +++ b/vendor/github.com/prometheus/common/expfmt/fuzz/corpus/from_test_parse_error_19 @@ -0,0 +1,3 @@ + +# TYPE metric histogram +metric_bucket{le="bla"} 3.14 diff --git a/vendor/github.com/prometheus/common/expfmt/fuzz/corpus/from_test_parse_error_2 b/vendor/github.com/prometheus/common/expfmt/fuzz/corpus/from_test_parse_error_2 new file mode 100644 index 0000000..726d001 --- /dev/null +++ b/vendor/github.com/prometheus/common/expfmt/fuzz/corpus/from_test_parse_error_2 @@ -0,0 +1,3 @@ + +metric{label="new +line"} 3.14 diff --git a/vendor/github.com/prometheus/common/expfmt/fuzz/corpus/from_test_parse_error_3 b/vendor/github.com/prometheus/common/expfmt/fuzz/corpus/from_test_parse_error_3 new file mode 100644 index 0000000..6aa9e30 --- /dev/null +++ b/vendor/github.com/prometheus/common/expfmt/fuzz/corpus/from_test_parse_error_3 @@ -0,0 +1 @@ +metric{@="bla"} 3.14 \ No newline at end of file diff --git a/vendor/github.com/prometheus/common/expfmt/fuzz/corpus/from_test_parse_error_4 b/vendor/github.com/prometheus/common/expfmt/fuzz/corpus/from_test_parse_error_4 new file mode 100644 index 0000000..d112cb9 --- /dev/null +++ b/vendor/github.com/prometheus/common/expfmt/fuzz/corpus/from_test_parse_error_4 @@ -0,0 +1 @@ +metric{__name__="bla"} 3.14 \ No newline at end of file diff --git a/vendor/github.com/prometheus/common/expfmt/fuzz/corpus/from_test_parse_error_5 b/vendor/github.com/prometheus/common/expfmt/fuzz/corpus/from_test_parse_error_5 new 
file mode 100644 index 0000000..b34554a --- /dev/null +++ b/vendor/github.com/prometheus/common/expfmt/fuzz/corpus/from_test_parse_error_5 @@ -0,0 +1 @@ +metric{label+="bla"} 3.14 \ No newline at end of file diff --git a/vendor/github.com/prometheus/common/expfmt/fuzz/corpus/from_test_parse_error_6 b/vendor/github.com/prometheus/common/expfmt/fuzz/corpus/from_test_parse_error_6 new file mode 100644 index 0000000..c4d7df3 --- /dev/null +++ b/vendor/github.com/prometheus/common/expfmt/fuzz/corpus/from_test_parse_error_6 @@ -0,0 +1 @@ +metric{label=bla} 3.14 \ No newline at end of file diff --git a/vendor/github.com/prometheus/common/expfmt/fuzz/corpus/from_test_parse_error_7 b/vendor/github.com/prometheus/common/expfmt/fuzz/corpus/from_test_parse_error_7 new file mode 100644 index 0000000..97eafc4 --- /dev/null +++ b/vendor/github.com/prometheus/common/expfmt/fuzz/corpus/from_test_parse_error_7 @@ -0,0 +1,3 @@ + +# TYPE metric summary +metric{quantile="bla"} 3.14 diff --git a/vendor/github.com/prometheus/common/expfmt/fuzz/corpus/from_test_parse_error_8 b/vendor/github.com/prometheus/common/expfmt/fuzz/corpus/from_test_parse_error_8 new file mode 100644 index 0000000..fc70649 --- /dev/null +++ b/vendor/github.com/prometheus/common/expfmt/fuzz/corpus/from_test_parse_error_8 @@ -0,0 +1 @@ +metric{label="bla"+} 3.14 \ No newline at end of file diff --git a/vendor/github.com/prometheus/common/expfmt/fuzz/corpus/from_test_parse_error_9 b/vendor/github.com/prometheus/common/expfmt/fuzz/corpus/from_test_parse_error_9 new file mode 100644 index 0000000..57b4879 --- /dev/null +++ b/vendor/github.com/prometheus/common/expfmt/fuzz/corpus/from_test_parse_error_9 @@ -0,0 +1 @@ +metric{label="bla"} 3.14 2.72 diff --git a/vendor/github.com/prometheus/common/expfmt/fuzz/corpus/minimal b/vendor/github.com/prometheus/common/expfmt/fuzz/corpus/minimal new file mode 100644 index 0000000..be1e6a3 --- /dev/null +++ b/vendor/github.com/prometheus/common/expfmt/fuzz/corpus/minimal @@ -0,0 +1 @@ +m{} 0 diff --git a/vendor/github.com/prometheus/common/expfmt/testdata/json2 b/vendor/github.com/prometheus/common/expfmt/testdata/json2 new file mode 100644 index 0000000..b914c93 --- /dev/null +++ b/vendor/github.com/prometheus/common/expfmt/testdata/json2 @@ -0,0 +1,46 @@ +[ + { + "baseLabels": { + "__name__": "rpc_calls_total", + "job": "batch_job" + }, + "docstring": "RPC calls.", + "metric": { + "type": "counter", + "value": [ + { + "labels": { + "service": "zed" + }, + "value": 25 + }, + { + "labels": { + "service": "bar" + }, + "value": 24 + } + ] + } + }, + { + "baseLabels": { + "__name__": "rpc_latency_microseconds" + }, + "docstring": "RPC latency.", + "metric": { + "type": "histogram", + "value": [ + { + "labels": { + "service": "foo" + }, + "value": { + "0.010000": 15, + "0.990000": 17 + } + } + ] + } + } +] diff --git a/vendor/github.com/prometheus/common/expfmt/testdata/json2_bad b/vendor/github.com/prometheus/common/expfmt/testdata/json2_bad new file mode 100644 index 0000000..cc6ac97 --- /dev/null +++ b/vendor/github.com/prometheus/common/expfmt/testdata/json2_bad @@ -0,0 +1,46 @@ +[ + { + "baseLabels": { + "__name__": "rpc_calls_total", + "job": "batch_job" + }, + "docstring": "RPC calls.", + "metric": { + "type": "counter", + "value": [ + { + "labels": { + "servic|e": "zed" + }, + "value": 25 + }, + { + "labels": { + "service": "bar" + }, + "value": 24 + } + ] + } + }, + { + "baseLabels": { + "__name__": "rpc_latency_microseconds" + }, + "docstring": "RPC latency.", + "metric": { + "type": "histogram", + 
"value": [ + { + "labels": { + "service": "foo" + }, + "value": { + "0.010000": 15, + "0.990000": 17 + } + } + ] + } + } +] diff --git a/vendor/github.com/prometheus/common/expfmt/testdata/protobuf b/vendor/github.com/prometheus/common/expfmt/testdata/protobuf new file mode 100644 index 0000000000000000000000000000000000000000..b2d018a7c1e3687e4fa1e149808beb95a048b907 GIT binary patch literal 8239 zcmeHMO^h5z74F&f&urOoyd4LxH?}Fa6=&^uXV!rbfk?Jj4oWb=BzQ?cIJA4aYi1hH zbhoRjvpXgS6Cxp`pacmC@vBIH00~(>a>IfgIDkMxfIvcWgoJV+!ioeZ4!ruC?wXzT zuA&iKb`O@Ps=D5*_g;PPy|11^hjW@mgt6d-gz_Tn20hZtXg^r#bHdys&#Fq%7MSR<2RSSUqc^+-Fk#~NWp?`7QuFmXEbb+AnK6la-JnA58{G` zJjv3amvp-cBT<&d%zbXTL$H~`b0DNTeG(@e^DMgz|KM^j&}Ck$24ZOvF&w5b?vOBs zRYiK4Rh1Nq{;Y-`-cZS$P?j(rbVE+ks1FK8G&2gGS;DO7^+MXmSqrNr31PUEQLH~q z(l#bx)KRhAvETe!{QYBb zy7RTSUtGMBh_UNWym|G&^?xo-AKLNkzPDfgf&Y2b+*I)^1u4MRVID>a@4Iutraa?e z7n?DmJNUoZPz`0Uf4!aV5_%yNxm!T3A5$ zqTL;cWH6hHN>8A>&@OSkve=z$6Ar@V$!R4uF`}9TwwjJ zbpq#x|Cbti?}ie(ByIDK`=i>~bw)}c_@rUPZM~B`{1JgTVmXeoUQPi~0R_8VqYx%* zIe=IUppCN>@F1iRJ2+(Wmx4{)3IT2V0J{y+09Pm|)ldNkBRGYB;5L?sFCyYUb>vlJ zz6$LX{U7P_xI#r{%4KtsTU@yQusEftw#^vIC4pwM6$0N@xy>LQvh zt+lJ!reWS0pN&3?_LsZwY(iO+k8ktOp?k{B>rKWXzei&B!a_1X=|!(r`42G20-pu7?_>nTPrbqg$Y= zaa}8)w(VmSTn!U0I!&MlRs=->M<=lj^N!O-_gAD@?WirJX$u+EzCz*3O=UhT=R7)M zG7}{%iJ|?md6lD|)We$+E6$CtEOpQWo2YHNd$%DJH@{4`GD>}EQ(N!P_&=?olNCvJ zIH^u5$Oe)6ej!V-3uVCQehad$B6*l-JHSv0#k`-(R`))%5A6^?++7hRz#Eu87UID> zg^uXSdwU_UMI;T=hy)3+MUt2rtUckMMf;7FMUs~V06sqm-7O^kNBwtdXl}z3%RaSo z;P;I>uxe)C)p>8cwF-TJ({HHl+%sV4Iz9n8Gqdq<)0R5Mc?Dsv{N z12ccS?|ZYr6{z{0MQVThMh$&!1{{ za|CjTv0>zJT4-VOLn!1h_rX0r>UIniA8{pq=l^-ax)Q2IfpFx6z5w*50wu#FxyP0w zL81cwsCkHf!dHo42O?+<2+)xe-b*#`dEG2=C(k_0ysW-g;pr_RaPkWW#Cc!V3C1}s z#uU%=6Kl*)L{3Aj8^T>VHe|hq?|rcbx3E-V;*8LtP&!=u*a#h@XNGi;V?#QO^5E^o z^&Y&vcn;mw&VqJEvjU(SD4bK};+*R^N-2+bNOfIf!uKrnU# zuYBNw7*i~sp+8mQ00|G!1s7?uhDn|Y9;L$XDYD~Hba4KyGk;k6ZO!7o2)MrpfxW7d z9%Zn*GEWWoY(WwWPDXzP>^Bps3X)V*a6rWg?+V!epw6jt{}cNsR%FwnOM+6ppcN+F z0OB)v9o4})T}N;z!lpKmUS&1d1#Be@Csm_M>XTG*RLke`VWd(F_~r@&5vR CKJmZ+ literal 0 HcmV?d00001 diff --git a/vendor/github.com/prometheus/common/expfmt/testdata/protobuf.gz b/vendor/github.com/prometheus/common/expfmt/testdata/protobuf.gz new file mode 100644 index 0000000000000000000000000000000000000000..7622adb1c8a1166549e339920a5823cf5493d0a6 GIT binary patch literal 2097 zcmV-12+sE(iwFoAHid)vHwry&KEBu?IG@6G5K^8#imAXN ziq63wT<`*2W;4}5ETb;RPUztdaXg?(!gHFE^7l&v-91vtm@rPcuzg36(CvbPE{k#n zk1t@J1c3vxq83g`DB-vjF`O-=VH*?2?WD3BQ)9K0wFj%0jLCo1&%7}G;qR^#=pZmSlyIpe^dEli?-fluqw! 
z52vHR&j|(fGrMpSdeSJ4Pnf_OA6$n|I6v1<6p6>8I3}J>bX3~#q1pr0%4B|kWfPXh zdL}H5^-P$Lok4qZcX#8&&Xl*6h0M7{WvGM`rksd(xgY^P@yJ)PpaH==Cd%P(wyZH# zscoB{nE1($#l=T(PJ{!;Ba+j7XdBw( zMjkQ8@0y!Kx1p^aPy)U`ar3p}u=GbE5C|sCnh#Nx00Y0N>9| zr{AT^)}IaZ$&nI#61GLheAPHHpg0BRQ+lr@4}U=-jwHu!6vPbfB%q-0_s){6!~kL* zfHsaoIC6)9?_h_kuM{?M%Yp0M1=wwnMh{A}%>o~SL-=#FA58yUMYXcem!Z9coOjBj zDZ70MJr&LuwDP_`ji1My#Vtok5d@ge}cBbK=hWasLL| zRdC!q}ZUNHNzvQgYAPGu18bR2`Eyzg90Y?Y2E7sqt zXpaVDGSSU)U_5|ca*~(Q;0?&Sem~fNtPRMzVdno+$WkPKylM?dm}Ay}gmg@o0ApX) zazPKODegg58=XtyJT2cZ;iiDE`%j&oJ$*mUXv*n$rLxcZm0qaz=lvd8fkfV3a)jGa zkjBl0N3{Yir@@LQL$-P$K2HUqTue!N6`7HnZO|a;Sii{!4FY2UYOJllmMhQb_Q@4o zcBm@9MTD0~x-D6~==o|J9UfWMYC@X2kYVL(IWzBdXU-ceU$@W$%fZ# ztW++4f1!i!SVQezrP5GUP+zq&O#LBxf8Bb+Ku1R&M_NhQQO{F9J+y#K>n(`7ERzIf z2e6|Ii*Bq+-P_Slv`K!LTUWIvGQFyvShXqH z6;QL|F%5e&&snttfa2-Qgn%%|W$PfHzt**C(`ZLFzc~r@wQm~e^VbU+Lz_MK=opeh zM-NH*YP(|H0CL%_xf1VMe;U)SL>ljH>6q+EkeK2{-bpeK zF1V^)n$Lqb5V1^TweCWkO_RlE3^?;@PGXK9znj;h>h^SJSBb>QZ||1-eKz0ij2uh* zOs{KWeb#FX!QI9X+-+F3?-<~fg%V@Or4C6t|MVA!=%7xaPlxM0!E5t_I$WDSiFULj zyB)D80q6!)JXT!ZVieBE5qV?B?_xMBF7z(Wqn3-Nb0~nMfC*c6?0Tp2nbUGC?c(L_ z&y*T)7tjSKAzi^Fj+7f0{k>Giy=c$u`^SI3@Q$JNz6h8talqk5^s{HQezv?PvL}lV%q!#=sDv?ZLc(bJSl z<^3v}%E~Jq!`4Z;zB*W{uhQ9#0_!8m*eI}W!de>z*8f$3_12M{3j-3yE)K@E-4_Sr z8op@##z6P1k`@3|QO}MU@gv9Uf2rvXgSd>gc(7rlMHtIU4F|I7a8`vRnarojZv!uO zD8QF|i5NYy4w)5GpIcSOyl~6vsa4)3E0tHqw4|>; zWDO6*hXw<28o2cMD_wI!aa37zRYd|&3|y0!!}5(fzg2V4C(05g7%0d*T}0z1L{u$X bD-^Z%LVr9zyh@v4WE}qw(Kt;cFCYK_^s^0< literal 0 HcmV?d00001 diff --git a/vendor/github.com/prometheus/common/expfmt/testdata/text b/vendor/github.com/prometheus/common/expfmt/testdata/text new file mode 100644 index 0000000..f3d8c37 --- /dev/null +++ b/vendor/github.com/prometheus/common/expfmt/testdata/text @@ -0,0 +1,322 @@ +# HELP http_request_duration_microseconds The HTTP request latencies in microseconds. 
+# TYPE http_request_duration_microseconds summary +http_request_duration_microseconds{handler="/",quantile="0.5"} 0 +http_request_duration_microseconds{handler="/",quantile="0.9"} 0 +http_request_duration_microseconds{handler="/",quantile="0.99"} 0 +http_request_duration_microseconds_sum{handler="/"} 0 +http_request_duration_microseconds_count{handler="/"} 0 +http_request_duration_microseconds{handler="/alerts",quantile="0.5"} 0 +http_request_duration_microseconds{handler="/alerts",quantile="0.9"} 0 +http_request_duration_microseconds{handler="/alerts",quantile="0.99"} 0 +http_request_duration_microseconds_sum{handler="/alerts"} 0 +http_request_duration_microseconds_count{handler="/alerts"} 0 +http_request_duration_microseconds{handler="/api/metrics",quantile="0.5"} 0 +http_request_duration_microseconds{handler="/api/metrics",quantile="0.9"} 0 +http_request_duration_microseconds{handler="/api/metrics",quantile="0.99"} 0 +http_request_duration_microseconds_sum{handler="/api/metrics"} 0 +http_request_duration_microseconds_count{handler="/api/metrics"} 0 +http_request_duration_microseconds{handler="/api/query",quantile="0.5"} 0 +http_request_duration_microseconds{handler="/api/query",quantile="0.9"} 0 +http_request_duration_microseconds{handler="/api/query",quantile="0.99"} 0 +http_request_duration_microseconds_sum{handler="/api/query"} 0 +http_request_duration_microseconds_count{handler="/api/query"} 0 +http_request_duration_microseconds{handler="/api/query_range",quantile="0.5"} 0 +http_request_duration_microseconds{handler="/api/query_range",quantile="0.9"} 0 +http_request_duration_microseconds{handler="/api/query_range",quantile="0.99"} 0 +http_request_duration_microseconds_sum{handler="/api/query_range"} 0 +http_request_duration_microseconds_count{handler="/api/query_range"} 0 +http_request_duration_microseconds{handler="/api/targets",quantile="0.5"} 0 +http_request_duration_microseconds{handler="/api/targets",quantile="0.9"} 0 +http_request_duration_microseconds{handler="/api/targets",quantile="0.99"} 0 +http_request_duration_microseconds_sum{handler="/api/targets"} 0 +http_request_duration_microseconds_count{handler="/api/targets"} 0 +http_request_duration_microseconds{handler="/consoles/",quantile="0.5"} 0 +http_request_duration_microseconds{handler="/consoles/",quantile="0.9"} 0 +http_request_duration_microseconds{handler="/consoles/",quantile="0.99"} 0 +http_request_duration_microseconds_sum{handler="/consoles/"} 0 +http_request_duration_microseconds_count{handler="/consoles/"} 0 +http_request_duration_microseconds{handler="/graph",quantile="0.5"} 0 +http_request_duration_microseconds{handler="/graph",quantile="0.9"} 0 +http_request_duration_microseconds{handler="/graph",quantile="0.99"} 0 +http_request_duration_microseconds_sum{handler="/graph"} 0 +http_request_duration_microseconds_count{handler="/graph"} 0 +http_request_duration_microseconds{handler="/heap",quantile="0.5"} 0 +http_request_duration_microseconds{handler="/heap",quantile="0.9"} 0 +http_request_duration_microseconds{handler="/heap",quantile="0.99"} 0 +http_request_duration_microseconds_sum{handler="/heap"} 0 +http_request_duration_microseconds_count{handler="/heap"} 0 +http_request_duration_microseconds{handler="/static/",quantile="0.5"} 0 +http_request_duration_microseconds{handler="/static/",quantile="0.9"} 0 +http_request_duration_microseconds{handler="/static/",quantile="0.99"} 0 +http_request_duration_microseconds_sum{handler="/static/"} 0 +http_request_duration_microseconds_count{handler="/static/"} 0 
+http_request_duration_microseconds{handler="prometheus",quantile="0.5"} 1307.275 +http_request_duration_microseconds{handler="prometheus",quantile="0.9"} 1858.632 +http_request_duration_microseconds{handler="prometheus",quantile="0.99"} 3087.384 +http_request_duration_microseconds_sum{handler="prometheus"} 179886.5000000001 +http_request_duration_microseconds_count{handler="prometheus"} 119 +# HELP http_request_size_bytes The HTTP request sizes in bytes. +# TYPE http_request_size_bytes summary +http_request_size_bytes{handler="/",quantile="0.5"} 0 +http_request_size_bytes{handler="/",quantile="0.9"} 0 +http_request_size_bytes{handler="/",quantile="0.99"} 0 +http_request_size_bytes_sum{handler="/"} 0 +http_request_size_bytes_count{handler="/"} 0 +http_request_size_bytes{handler="/alerts",quantile="0.5"} 0 +http_request_size_bytes{handler="/alerts",quantile="0.9"} 0 +http_request_size_bytes{handler="/alerts",quantile="0.99"} 0 +http_request_size_bytes_sum{handler="/alerts"} 0 +http_request_size_bytes_count{handler="/alerts"} 0 +http_request_size_bytes{handler="/api/metrics",quantile="0.5"} 0 +http_request_size_bytes{handler="/api/metrics",quantile="0.9"} 0 +http_request_size_bytes{handler="/api/metrics",quantile="0.99"} 0 +http_request_size_bytes_sum{handler="/api/metrics"} 0 +http_request_size_bytes_count{handler="/api/metrics"} 0 +http_request_size_bytes{handler="/api/query",quantile="0.5"} 0 +http_request_size_bytes{handler="/api/query",quantile="0.9"} 0 +http_request_size_bytes{handler="/api/query",quantile="0.99"} 0 +http_request_size_bytes_sum{handler="/api/query"} 0 +http_request_size_bytes_count{handler="/api/query"} 0 +http_request_size_bytes{handler="/api/query_range",quantile="0.5"} 0 +http_request_size_bytes{handler="/api/query_range",quantile="0.9"} 0 +http_request_size_bytes{handler="/api/query_range",quantile="0.99"} 0 +http_request_size_bytes_sum{handler="/api/query_range"} 0 +http_request_size_bytes_count{handler="/api/query_range"} 0 +http_request_size_bytes{handler="/api/targets",quantile="0.5"} 0 +http_request_size_bytes{handler="/api/targets",quantile="0.9"} 0 +http_request_size_bytes{handler="/api/targets",quantile="0.99"} 0 +http_request_size_bytes_sum{handler="/api/targets"} 0 +http_request_size_bytes_count{handler="/api/targets"} 0 +http_request_size_bytes{handler="/consoles/",quantile="0.5"} 0 +http_request_size_bytes{handler="/consoles/",quantile="0.9"} 0 +http_request_size_bytes{handler="/consoles/",quantile="0.99"} 0 +http_request_size_bytes_sum{handler="/consoles/"} 0 +http_request_size_bytes_count{handler="/consoles/"} 0 +http_request_size_bytes{handler="/graph",quantile="0.5"} 0 +http_request_size_bytes{handler="/graph",quantile="0.9"} 0 +http_request_size_bytes{handler="/graph",quantile="0.99"} 0 +http_request_size_bytes_sum{handler="/graph"} 0 +http_request_size_bytes_count{handler="/graph"} 0 +http_request_size_bytes{handler="/heap",quantile="0.5"} 0 +http_request_size_bytes{handler="/heap",quantile="0.9"} 0 +http_request_size_bytes{handler="/heap",quantile="0.99"} 0 +http_request_size_bytes_sum{handler="/heap"} 0 +http_request_size_bytes_count{handler="/heap"} 0 +http_request_size_bytes{handler="/static/",quantile="0.5"} 0 +http_request_size_bytes{handler="/static/",quantile="0.9"} 0 +http_request_size_bytes{handler="/static/",quantile="0.99"} 0 +http_request_size_bytes_sum{handler="/static/"} 0 +http_request_size_bytes_count{handler="/static/"} 0 +http_request_size_bytes{handler="prometheus",quantile="0.5"} 291 
+http_request_size_bytes{handler="prometheus",quantile="0.9"} 291 +http_request_size_bytes{handler="prometheus",quantile="0.99"} 291 +http_request_size_bytes_sum{handler="prometheus"} 34488 +http_request_size_bytes_count{handler="prometheus"} 119 +# HELP http_requests_total Total number of HTTP requests made. +# TYPE http_requests_total counter +http_requests_total{code="200",handler="prometheus",method="get"} 119 +# HELP http_response_size_bytes The HTTP response sizes in bytes. +# TYPE http_response_size_bytes summary +http_response_size_bytes{handler="/",quantile="0.5"} 0 +http_response_size_bytes{handler="/",quantile="0.9"} 0 +http_response_size_bytes{handler="/",quantile="0.99"} 0 +http_response_size_bytes_sum{handler="/"} 0 +http_response_size_bytes_count{handler="/"} 0 +http_response_size_bytes{handler="/alerts",quantile="0.5"} 0 +http_response_size_bytes{handler="/alerts",quantile="0.9"} 0 +http_response_size_bytes{handler="/alerts",quantile="0.99"} 0 +http_response_size_bytes_sum{handler="/alerts"} 0 +http_response_size_bytes_count{handler="/alerts"} 0 +http_response_size_bytes{handler="/api/metrics",quantile="0.5"} 0 +http_response_size_bytes{handler="/api/metrics",quantile="0.9"} 0 +http_response_size_bytes{handler="/api/metrics",quantile="0.99"} 0 +http_response_size_bytes_sum{handler="/api/metrics"} 0 +http_response_size_bytes_count{handler="/api/metrics"} 0 +http_response_size_bytes{handler="/api/query",quantile="0.5"} 0 +http_response_size_bytes{handler="/api/query",quantile="0.9"} 0 +http_response_size_bytes{handler="/api/query",quantile="0.99"} 0 +http_response_size_bytes_sum{handler="/api/query"} 0 +http_response_size_bytes_count{handler="/api/query"} 0 +http_response_size_bytes{handler="/api/query_range",quantile="0.5"} 0 +http_response_size_bytes{handler="/api/query_range",quantile="0.9"} 0 +http_response_size_bytes{handler="/api/query_range",quantile="0.99"} 0 +http_response_size_bytes_sum{handler="/api/query_range"} 0 +http_response_size_bytes_count{handler="/api/query_range"} 0 +http_response_size_bytes{handler="/api/targets",quantile="0.5"} 0 +http_response_size_bytes{handler="/api/targets",quantile="0.9"} 0 +http_response_size_bytes{handler="/api/targets",quantile="0.99"} 0 +http_response_size_bytes_sum{handler="/api/targets"} 0 +http_response_size_bytes_count{handler="/api/targets"} 0 +http_response_size_bytes{handler="/consoles/",quantile="0.5"} 0 +http_response_size_bytes{handler="/consoles/",quantile="0.9"} 0 +http_response_size_bytes{handler="/consoles/",quantile="0.99"} 0 +http_response_size_bytes_sum{handler="/consoles/"} 0 +http_response_size_bytes_count{handler="/consoles/"} 0 +http_response_size_bytes{handler="/graph",quantile="0.5"} 0 +http_response_size_bytes{handler="/graph",quantile="0.9"} 0 +http_response_size_bytes{handler="/graph",quantile="0.99"} 0 +http_response_size_bytes_sum{handler="/graph"} 0 +http_response_size_bytes_count{handler="/graph"} 0 +http_response_size_bytes{handler="/heap",quantile="0.5"} 0 +http_response_size_bytes{handler="/heap",quantile="0.9"} 0 +http_response_size_bytes{handler="/heap",quantile="0.99"} 0 +http_response_size_bytes_sum{handler="/heap"} 0 +http_response_size_bytes_count{handler="/heap"} 0 +http_response_size_bytes{handler="/static/",quantile="0.5"} 0 +http_response_size_bytes{handler="/static/",quantile="0.9"} 0 +http_response_size_bytes{handler="/static/",quantile="0.99"} 0 +http_response_size_bytes_sum{handler="/static/"} 0 +http_response_size_bytes_count{handler="/static/"} 0 
+http_response_size_bytes{handler="prometheus",quantile="0.5"} 2049 +http_response_size_bytes{handler="prometheus",quantile="0.9"} 2058 +http_response_size_bytes{handler="prometheus",quantile="0.99"} 2064 +http_response_size_bytes_sum{handler="prometheus"} 247001 +http_response_size_bytes_count{handler="prometheus"} 119 +# HELP process_cpu_seconds_total Total user and system CPU time spent in seconds. +# TYPE process_cpu_seconds_total counter +process_cpu_seconds_total 0.55 +# HELP go_goroutines Number of goroutines that currently exist. +# TYPE go_goroutines gauge +go_goroutines 70 +# HELP process_max_fds Maximum number of open file descriptors. +# TYPE process_max_fds gauge +process_max_fds 8192 +# HELP process_open_fds Number of open file descriptors. +# TYPE process_open_fds gauge +process_open_fds 29 +# HELP process_resident_memory_bytes Resident memory size in bytes. +# TYPE process_resident_memory_bytes gauge +process_resident_memory_bytes 5.3870592e+07 +# HELP process_start_time_seconds Start time of the process since unix epoch in seconds. +# TYPE process_start_time_seconds gauge +process_start_time_seconds 1.42236894836e+09 +# HELP process_virtual_memory_bytes Virtual memory size in bytes. +# TYPE process_virtual_memory_bytes gauge +process_virtual_memory_bytes 5.41478912e+08 +# HELP prometheus_dns_sd_lookup_failures_total The number of DNS-SD lookup failures. +# TYPE prometheus_dns_sd_lookup_failures_total counter +prometheus_dns_sd_lookup_failures_total 0 +# HELP prometheus_dns_sd_lookups_total The number of DNS-SD lookups. +# TYPE prometheus_dns_sd_lookups_total counter +prometheus_dns_sd_lookups_total 7 +# HELP prometheus_evaluator_duration_milliseconds The duration for all evaluations to execute. +# TYPE prometheus_evaluator_duration_milliseconds summary +prometheus_evaluator_duration_milliseconds{quantile="0.01"} 0 +prometheus_evaluator_duration_milliseconds{quantile="0.05"} 0 +prometheus_evaluator_duration_milliseconds{quantile="0.5"} 0 +prometheus_evaluator_duration_milliseconds{quantile="0.9"} 1 +prometheus_evaluator_duration_milliseconds{quantile="0.99"} 1 +prometheus_evaluator_duration_milliseconds_sum 12 +prometheus_evaluator_duration_milliseconds_count 23 +# HELP prometheus_local_storage_checkpoint_duration_milliseconds The duration (in milliseconds) it took to checkpoint in-memory metrics and head chunks. +# TYPE prometheus_local_storage_checkpoint_duration_milliseconds gauge +prometheus_local_storage_checkpoint_duration_milliseconds 0 +# HELP prometheus_local_storage_chunk_ops_total The total number of chunk operations by their type. +# TYPE prometheus_local_storage_chunk_ops_total counter +prometheus_local_storage_chunk_ops_total{type="create"} 598 +prometheus_local_storage_chunk_ops_total{type="persist"} 174 +prometheus_local_storage_chunk_ops_total{type="pin"} 920 +prometheus_local_storage_chunk_ops_total{type="transcode"} 415 +prometheus_local_storage_chunk_ops_total{type="unpin"} 920 +# HELP prometheus_local_storage_indexing_batch_latency_milliseconds Quantiles for batch indexing latencies in milliseconds. 
+# TYPE prometheus_local_storage_indexing_batch_latency_milliseconds summary +prometheus_local_storage_indexing_batch_latency_milliseconds{quantile="0.5"} 0 +prometheus_local_storage_indexing_batch_latency_milliseconds{quantile="0.9"} 0 +prometheus_local_storage_indexing_batch_latency_milliseconds{quantile="0.99"} 0 +prometheus_local_storage_indexing_batch_latency_milliseconds_sum 0 +prometheus_local_storage_indexing_batch_latency_milliseconds_count 1 +# HELP prometheus_local_storage_indexing_batch_sizes Quantiles for indexing batch sizes (number of metrics per batch). +# TYPE prometheus_local_storage_indexing_batch_sizes summary +prometheus_local_storage_indexing_batch_sizes{quantile="0.5"} 2 +prometheus_local_storage_indexing_batch_sizes{quantile="0.9"} 2 +prometheus_local_storage_indexing_batch_sizes{quantile="0.99"} 2 +prometheus_local_storage_indexing_batch_sizes_sum 2 +prometheus_local_storage_indexing_batch_sizes_count 1 +# HELP prometheus_local_storage_indexing_queue_capacity The capacity of the indexing queue. +# TYPE prometheus_local_storage_indexing_queue_capacity gauge +prometheus_local_storage_indexing_queue_capacity 16384 +# HELP prometheus_local_storage_indexing_queue_length The number of metrics waiting to be indexed. +# TYPE prometheus_local_storage_indexing_queue_length gauge +prometheus_local_storage_indexing_queue_length 0 +# HELP prometheus_local_storage_ingested_samples_total The total number of samples ingested. +# TYPE prometheus_local_storage_ingested_samples_total counter +prometheus_local_storage_ingested_samples_total 30473 +# HELP prometheus_local_storage_invalid_preload_requests_total The total number of preload requests referring to a non-existent series. This is an indication of outdated label indexes. +# TYPE prometheus_local_storage_invalid_preload_requests_total counter +prometheus_local_storage_invalid_preload_requests_total 0 +# HELP prometheus_local_storage_memory_chunkdescs The current number of chunk descriptors in memory. +# TYPE prometheus_local_storage_memory_chunkdescs gauge +prometheus_local_storage_memory_chunkdescs 1059 +# HELP prometheus_local_storage_memory_chunks The current number of chunks in memory, excluding cloned chunks (i.e. chunks without a descriptor). +# TYPE prometheus_local_storage_memory_chunks gauge +prometheus_local_storage_memory_chunks 1020 +# HELP prometheus_local_storage_memory_series The current number of series in memory. +# TYPE prometheus_local_storage_memory_series gauge +prometheus_local_storage_memory_series 424 +# HELP prometheus_local_storage_persist_latency_microseconds A summary of latencies for persisting each chunk. +# TYPE prometheus_local_storage_persist_latency_microseconds summary +prometheus_local_storage_persist_latency_microseconds{quantile="0.5"} 30.377 +prometheus_local_storage_persist_latency_microseconds{quantile="0.9"} 203.539 +prometheus_local_storage_persist_latency_microseconds{quantile="0.99"} 2626.463 +prometheus_local_storage_persist_latency_microseconds_sum 20424.415 +prometheus_local_storage_persist_latency_microseconds_count 174 +# HELP prometheus_local_storage_persist_queue_capacity The total capacity of the persist queue. +# TYPE prometheus_local_storage_persist_queue_capacity gauge +prometheus_local_storage_persist_queue_capacity 1024 +# HELP prometheus_local_storage_persist_queue_length The current number of chunks waiting in the persist queue. 
+# TYPE prometheus_local_storage_persist_queue_length gauge +prometheus_local_storage_persist_queue_length 0 +# HELP prometheus_local_storage_series_ops_total The total number of series operations by their type. +# TYPE prometheus_local_storage_series_ops_total counter +prometheus_local_storage_series_ops_total{type="create"} 2 +prometheus_local_storage_series_ops_total{type="maintenance_in_memory"} 11 +# HELP prometheus_notifications_latency_milliseconds Latency quantiles for sending alert notifications (not including dropped notifications). +# TYPE prometheus_notifications_latency_milliseconds summary +prometheus_notifications_latency_milliseconds{quantile="0.5"} 0 +prometheus_notifications_latency_milliseconds{quantile="0.9"} 0 +prometheus_notifications_latency_milliseconds{quantile="0.99"} 0 +prometheus_notifications_latency_milliseconds_sum 0 +prometheus_notifications_latency_milliseconds_count 0 +# HELP prometheus_notifications_queue_capacity The capacity of the alert notifications queue. +# TYPE prometheus_notifications_queue_capacity gauge +prometheus_notifications_queue_capacity 100 +# HELP prometheus_notifications_queue_length The number of alert notifications in the queue. +# TYPE prometheus_notifications_queue_length gauge +prometheus_notifications_queue_length 0 +# HELP prometheus_rule_evaluation_duration_milliseconds The duration for a rule to execute. +# TYPE prometheus_rule_evaluation_duration_milliseconds summary +prometheus_rule_evaluation_duration_milliseconds{rule_type="alerting",quantile="0.5"} 0 +prometheus_rule_evaluation_duration_milliseconds{rule_type="alerting",quantile="0.9"} 0 +prometheus_rule_evaluation_duration_milliseconds{rule_type="alerting",quantile="0.99"} 2 +prometheus_rule_evaluation_duration_milliseconds_sum{rule_type="alerting"} 12 +prometheus_rule_evaluation_duration_milliseconds_count{rule_type="alerting"} 115 +prometheus_rule_evaluation_duration_milliseconds{rule_type="recording",quantile="0.5"} 0 +prometheus_rule_evaluation_duration_milliseconds{rule_type="recording",quantile="0.9"} 0 +prometheus_rule_evaluation_duration_milliseconds{rule_type="recording",quantile="0.99"} 3 +prometheus_rule_evaluation_duration_milliseconds_sum{rule_type="recording"} 15 +prometheus_rule_evaluation_duration_milliseconds_count{rule_type="recording"} 115 +# HELP prometheus_rule_evaluation_failures_total The total number of rule evaluation failures. +# TYPE prometheus_rule_evaluation_failures_total counter +prometheus_rule_evaluation_failures_total 0 +# HELP prometheus_samples_queue_capacity Capacity of the queue for unwritten samples. +# TYPE prometheus_samples_queue_capacity gauge +prometheus_samples_queue_capacity 4096 +# HELP prometheus_samples_queue_length Current number of items in the queue for unwritten samples. Each item comprises all samples exposed by one target as one metric family (i.e. metrics of the same name). +# TYPE prometheus_samples_queue_length gauge +prometheus_samples_queue_length 0 +# HELP prometheus_target_interval_length_seconds Actual intervals between scrapes. 
+# TYPE prometheus_target_interval_length_seconds summary
+prometheus_target_interval_length_seconds{interval="15s",quantile="0.01"} 14
+prometheus_target_interval_length_seconds{interval="15s",quantile="0.05"} 14
+prometheus_target_interval_length_seconds{interval="15s",quantile="0.5"} 15
+prometheus_target_interval_length_seconds{interval="15s",quantile="0.9"} 15
+prometheus_target_interval_length_seconds{interval="15s",quantile="0.99"} 15
+prometheus_target_interval_length_seconds_sum{interval="15s"} 175
+prometheus_target_interval_length_seconds_count{interval="15s"} 12
+prometheus_target_interval_length_seconds{interval="1s",quantile="0.01"} 0
+prometheus_target_interval_length_seconds{interval="1s",quantile="0.05"} 0
+prometheus_target_interval_length_seconds{interval="1s",quantile="0.5"} 0
+prometheus_target_interval_length_seconds{interval="1s",quantile="0.9"} 1
+prometheus_target_interval_length_seconds{interval="1s",quantile="0.99"} 1
+prometheus_target_interval_length_seconds_sum{interval="1s"} 55
+prometheus_target_interval_length_seconds_count{interval="1s"} 117
diff --git a/vendor/github.com/prometheus/common/expfmt/testdata/text.gz b/vendor/github.com/prometheus/common/expfmt/testdata/text.gz
new file mode 100644
index 0000000000000000000000000000000000000000..b7658c84d7d63018b286611cec48a8362bd7c71d
GIT binary patch
literal 2598
[base85-encoded gzip data omitted]
diff --git a/vendor/github.com/prometheus/common/expfmt/text_parse.go b/vendor/github.com/prometheus/common/expfmt/text_parse.go
new file mode 100644
--- /dev/null
+++ b/vendor/github.com/prometheus/common/expfmt/text_parse.go
@@ -0,0 +1,753 @@
+	if p.summaries == nil || len(p.summaries) > 0 {
+		p.summaries = map[uint64]*dto.Metric{}
+	}
+	if p.histograms == nil ||
len(p.histograms) > 0 { + p.histograms = map[uint64]*dto.Metric{} + } + p.currentQuantile = math.NaN() + p.currentBucket = math.NaN() +} + +// startOfLine represents the state where the next byte read from p.buf is the +// start of a line (or whitespace leading up to it). +func (p *TextParser) startOfLine() stateFn { + p.lineCount++ + if p.skipBlankTab(); p.err != nil { + // End of input reached. This is the only case where + // that is not an error but a signal that we are done. + p.err = nil + return nil + } + switch p.currentByte { + case '#': + return p.startComment + case '\n': + return p.startOfLine // Empty line, start the next one. + } + return p.readingMetricName +} + +// startComment represents the state where the next byte read from p.buf is the +// start of a comment (or whitespace leading up to it). +func (p *TextParser) startComment() stateFn { + if p.skipBlankTab(); p.err != nil { + return nil // Unexpected end of input. + } + if p.currentByte == '\n' { + return p.startOfLine + } + if p.readTokenUntilWhitespace(); p.err != nil { + return nil // Unexpected end of input. + } + // If we have hit the end of line already, there is nothing left + // to do. This is not considered a syntax error. + if p.currentByte == '\n' { + return p.startOfLine + } + keyword := p.currentToken.String() + if keyword != "HELP" && keyword != "TYPE" { + // Generic comment, ignore by fast forwarding to end of line. + for p.currentByte != '\n' { + if p.currentByte, p.err = p.buf.ReadByte(); p.err != nil { + return nil // Unexpected end of input. + } + } + return p.startOfLine + } + // There is something. Next has to be a metric name. + if p.skipBlankTab(); p.err != nil { + return nil // Unexpected end of input. + } + if p.readTokenAsMetricName(); p.err != nil { + return nil // Unexpected end of input. + } + if p.currentByte == '\n' { + // At the end of the line already. + // Again, this is not considered a syntax error. + return p.startOfLine + } + if !isBlankOrTab(p.currentByte) { + p.parseError("invalid metric name in comment") + return nil + } + p.setOrCreateCurrentMF() + if p.skipBlankTab(); p.err != nil { + return nil // Unexpected end of input. + } + if p.currentByte == '\n' { + // At the end of the line already. + // Again, this is not considered a syntax error. + return p.startOfLine + } + switch keyword { + case "HELP": + return p.readingHelp + case "TYPE": + return p.readingType + } + panic(fmt.Sprintf("code error: unexpected keyword %q", keyword)) +} + +// readingMetricName represents the state where the last byte read (now in +// p.currentByte) is the first byte of a metric name. +func (p *TextParser) readingMetricName() stateFn { + if p.readTokenAsMetricName(); p.err != nil { + return nil + } + if p.currentToken.Len() == 0 { + p.parseError("invalid metric name") + return nil + } + p.setOrCreateCurrentMF() + // Now is the time to fix the type if it hasn't happened yet. + if p.currentMF.Type == nil { + p.currentMF.Type = dto.MetricType_UNTYPED.Enum() + } + p.currentMetric = &dto.Metric{} + // Do not append the newly created currentMetric to + // currentMF.Metric right now. First wait if this is a summary, + // and the metric exists already, which we can only know after + // having read all the labels. + if p.skipBlankTabIfCurrentBlankTab(); p.err != nil { + return nil // Unexpected end of input. + } + return p.readingLabels +} + +// readingLabels represents the state where the last byte read (now in +// p.currentByte) is either the first byte of the label set (i.e. 
a '{'), or the +// first byte of the value (otherwise). +func (p *TextParser) readingLabels() stateFn { + // Summaries/histograms are special. We have to reset the + // currentLabels map, currentQuantile and currentBucket before starting to + // read labels. + if p.currentMF.GetType() == dto.MetricType_SUMMARY || p.currentMF.GetType() == dto.MetricType_HISTOGRAM { + p.currentLabels = map[string]string{} + p.currentLabels[string(model.MetricNameLabel)] = p.currentMF.GetName() + p.currentQuantile = math.NaN() + p.currentBucket = math.NaN() + } + if p.currentByte != '{' { + return p.readingValue + } + return p.startLabelName +} + +// startLabelName represents the state where the next byte read from p.buf is +// the start of a label name (or whitespace leading up to it). +func (p *TextParser) startLabelName() stateFn { + if p.skipBlankTab(); p.err != nil { + return nil // Unexpected end of input. + } + if p.currentByte == '}' { + if p.skipBlankTab(); p.err != nil { + return nil // Unexpected end of input. + } + return p.readingValue + } + if p.readTokenAsLabelName(); p.err != nil { + return nil // Unexpected end of input. + } + if p.currentToken.Len() == 0 { + p.parseError(fmt.Sprintf("invalid label name for metric %q", p.currentMF.GetName())) + return nil + } + p.currentLabelPair = &dto.LabelPair{Name: proto.String(p.currentToken.String())} + if p.currentLabelPair.GetName() == string(model.MetricNameLabel) { + p.parseError(fmt.Sprintf("label name %q is reserved", model.MetricNameLabel)) + return nil + } + // Special summary/histogram treatment. Don't add 'quantile' and 'le' + // labels to 'real' labels. + if !(p.currentMF.GetType() == dto.MetricType_SUMMARY && p.currentLabelPair.GetName() == model.QuantileLabel) && + !(p.currentMF.GetType() == dto.MetricType_HISTOGRAM && p.currentLabelPair.GetName() == model.BucketLabel) { + p.currentMetric.Label = append(p.currentMetric.Label, p.currentLabelPair) + } + if p.skipBlankTabIfCurrentBlankTab(); p.err != nil { + return nil // Unexpected end of input. + } + if p.currentByte != '=' { + p.parseError(fmt.Sprintf("expected '=' after label name, found %q", p.currentByte)) + return nil + } + return p.startLabelValue +} + +// startLabelValue represents the state where the next byte read from p.buf is +// the start of a (quoted) label value (or whitespace leading up to it). +func (p *TextParser) startLabelValue() stateFn { + if p.skipBlankTab(); p.err != nil { + return nil // Unexpected end of input. + } + if p.currentByte != '"' { + p.parseError(fmt.Sprintf("expected '\"' at start of label value, found %q", p.currentByte)) + return nil + } + if p.readTokenAsLabelValue(); p.err != nil { + return nil + } + p.currentLabelPair.Value = proto.String(p.currentToken.String()) + // Special treatment of summaries: + // - Quantile labels are special, will result in dto.Quantile later. + // - Other labels have to be added to currentLabels for signature calculation. + if p.currentMF.GetType() == dto.MetricType_SUMMARY { + if p.currentLabelPair.GetName() == model.QuantileLabel { + if p.currentQuantile, p.err = strconv.ParseFloat(p.currentLabelPair.GetValue(), 64); p.err != nil { + // Create a more helpful error message. + p.parseError(fmt.Sprintf("expected float as value for 'quantile' label, got %q", p.currentLabelPair.GetValue())) + return nil + } + } else { + p.currentLabels[p.currentLabelPair.GetName()] = p.currentLabelPair.GetValue() + } + } + // Similar special treatment of histograms. 
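+	// For example, in a sample line such as
+	//
+	//	request_duration_seconds_bucket{le="0.5",method="GET"} 24
+	//
+	// the value of the special "le" label is parsed into currentBucket
+	// (the bucket's upper bound), while "method" remains an ordinary label.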
+ if p.currentMF.GetType() == dto.MetricType_HISTOGRAM { + if p.currentLabelPair.GetName() == model.BucketLabel { + if p.currentBucket, p.err = strconv.ParseFloat(p.currentLabelPair.GetValue(), 64); p.err != nil { + // Create a more helpful error message. + p.parseError(fmt.Sprintf("expected float as value for 'le' label, got %q", p.currentLabelPair.GetValue())) + return nil + } + } else { + p.currentLabels[p.currentLabelPair.GetName()] = p.currentLabelPair.GetValue() + } + } + if p.skipBlankTab(); p.err != nil { + return nil // Unexpected end of input. + } + switch p.currentByte { + case ',': + return p.startLabelName + + case '}': + if p.skipBlankTab(); p.err != nil { + return nil // Unexpected end of input. + } + return p.readingValue + default: + p.parseError(fmt.Sprintf("unexpected end of label value %q", p.currentLabelPair.Value)) + return nil + } +} + +// readingValue represents the state where the last byte read (now in +// p.currentByte) is the first byte of the sample value (i.e. a float). +func (p *TextParser) readingValue() stateFn { + // When we are here, we have read all the labels, so for the + // special case of a summary/histogram, we can finally find out + // if the metric already exists. + if p.currentMF.GetType() == dto.MetricType_SUMMARY { + signature := model.LabelsToSignature(p.currentLabels) + if summary := p.summaries[signature]; summary != nil { + p.currentMetric = summary + } else { + p.summaries[signature] = p.currentMetric + p.currentMF.Metric = append(p.currentMF.Metric, p.currentMetric) + } + } else if p.currentMF.GetType() == dto.MetricType_HISTOGRAM { + signature := model.LabelsToSignature(p.currentLabels) + if histogram := p.histograms[signature]; histogram != nil { + p.currentMetric = histogram + } else { + p.histograms[signature] = p.currentMetric + p.currentMF.Metric = append(p.currentMF.Metric, p.currentMetric) + } + } else { + p.currentMF.Metric = append(p.currentMF.Metric, p.currentMetric) + } + if p.readTokenUntilWhitespace(); p.err != nil { + return nil // Unexpected end of input. + } + value, err := strconv.ParseFloat(p.currentToken.String(), 64) + if err != nil { + // Create a more helpful error message. 
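+		// (A sample line like `some_metric forty-two` ends up here,
+		// because "forty-two" cannot be parsed as a float.)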
+		p.parseError(fmt.Sprintf("expected float as value, got %q", p.currentToken.String()))
+		return nil
+	}
+	switch p.currentMF.GetType() {
+	case dto.MetricType_COUNTER:
+		p.currentMetric.Counter = &dto.Counter{Value: proto.Float64(value)}
+	case dto.MetricType_GAUGE:
+		p.currentMetric.Gauge = &dto.Gauge{Value: proto.Float64(value)}
+	case dto.MetricType_UNTYPED:
+		p.currentMetric.Untyped = &dto.Untyped{Value: proto.Float64(value)}
+	case dto.MetricType_SUMMARY:
+		// *sigh*
+		if p.currentMetric.Summary == nil {
+			p.currentMetric.Summary = &dto.Summary{}
+		}
+		switch {
+		case p.currentIsSummaryCount:
+			p.currentMetric.Summary.SampleCount = proto.Uint64(uint64(value))
+		case p.currentIsSummarySum:
+			p.currentMetric.Summary.SampleSum = proto.Float64(value)
+		case !math.IsNaN(p.currentQuantile):
+			p.currentMetric.Summary.Quantile = append(
+				p.currentMetric.Summary.Quantile,
+				&dto.Quantile{
+					Quantile: proto.Float64(p.currentQuantile),
+					Value: proto.Float64(value),
+				},
+			)
+		}
+	case dto.MetricType_HISTOGRAM:
+		// *sigh*
+		if p.currentMetric.Histogram == nil {
+			p.currentMetric.Histogram = &dto.Histogram{}
+		}
+		switch {
+		case p.currentIsHistogramCount:
+			p.currentMetric.Histogram.SampleCount = proto.Uint64(uint64(value))
+		case p.currentIsHistogramSum:
+			p.currentMetric.Histogram.SampleSum = proto.Float64(value)
+		case !math.IsNaN(p.currentBucket):
+			p.currentMetric.Histogram.Bucket = append(
+				p.currentMetric.Histogram.Bucket,
+				&dto.Bucket{
+					UpperBound: proto.Float64(p.currentBucket),
+					CumulativeCount: proto.Uint64(uint64(value)),
+				},
+			)
+		}
+	default:
+		p.err = fmt.Errorf("unexpected type for metric name %q", p.currentMF.GetName())
+	}
+	if p.currentByte == '\n' {
+		return p.startOfLine
+	}
+	return p.startTimestamp
+}
+
+// startTimestamp represents the state where the next byte read from p.buf is
+// the start of the timestamp (or whitespace leading up to it).
+func (p *TextParser) startTimestamp() stateFn {
+	if p.skipBlankTab(); p.err != nil {
+		return nil // Unexpected end of input.
+	}
+	if p.readTokenUntilWhitespace(); p.err != nil {
+		return nil // Unexpected end of input.
+	}
+	timestamp, err := strconv.ParseInt(p.currentToken.String(), 10, 64)
+	if err != nil {
+		// Create a more helpful error message.
+		p.parseError(fmt.Sprintf("expected integer as timestamp, got %q", p.currentToken.String()))
+		return nil
+	}
+	p.currentMetric.TimestampMs = proto.Int64(timestamp)
+	if p.readTokenUntilNewline(false); p.err != nil {
+		return nil // Unexpected end of input.
+	}
+	if p.currentToken.Len() > 0 {
+		p.parseError(fmt.Sprintf("spurious string after timestamp: %q", p.currentToken.String()))
+		return nil
+	}
+	return p.startOfLine
+}
+
+// readingHelp represents the state where the last byte read (now in
+// p.currentByte) is the first byte of the docstring after 'HELP'.
+func (p *TextParser) readingHelp() stateFn {
+	if p.currentMF.Help != nil {
+		p.parseError(fmt.Sprintf("second HELP line for metric name %q", p.currentMF.GetName()))
+		return nil
+	}
+	// Rest of line is the docstring.
+	if p.readTokenUntilNewline(true); p.err != nil {
+		return nil // Unexpected end of input.
+	}
+	p.currentMF.Help = proto.String(p.currentToken.String())
+	return p.startOfLine
+}
+
+// readingType represents the state where the last byte read (now in
+// p.currentByte) is the first byte of the type hint after 'TYPE'.
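+// A well-formed type line is, for example:
+//
+//	# TYPE http_requests_total counter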
+func (p *TextParser) readingType() stateFn {
+	if p.currentMF.Type != nil {
+		p.parseError(fmt.Sprintf("second TYPE line for metric name %q, or TYPE reported after samples", p.currentMF.GetName()))
+		return nil
+	}
+	// Rest of line is the type.
+	if p.readTokenUntilNewline(false); p.err != nil {
+		return nil // Unexpected end of input.
+	}
+	metricType, ok := dto.MetricType_value[strings.ToUpper(p.currentToken.String())]
+	if !ok {
+		p.parseError(fmt.Sprintf("unknown metric type %q", p.currentToken.String()))
+		return nil
+	}
+	p.currentMF.Type = dto.MetricType(metricType).Enum()
+	return p.startOfLine
+}
+
+// parseError sets p.err to a ParseError at the current line with the given
+// message.
+func (p *TextParser) parseError(msg string) {
+	p.err = ParseError{
+		Line: p.lineCount,
+		Msg: msg,
+	}
+}
+
+// skipBlankTab reads (and discards) bytes from p.buf until it encounters a byte
+// that is neither ' ' nor '\t'. That byte is left in p.currentByte.
+func (p *TextParser) skipBlankTab() {
+	for {
+		if p.currentByte, p.err = p.buf.ReadByte(); p.err != nil || !isBlankOrTab(p.currentByte) {
+			return
+		}
+	}
+}
+
+// skipBlankTabIfCurrentBlankTab works exactly as skipBlankTab but doesn't do
+// anything if p.currentByte is neither ' ' nor '\t'.
+func (p *TextParser) skipBlankTabIfCurrentBlankTab() {
+	if isBlankOrTab(p.currentByte) {
+		p.skipBlankTab()
+	}
+}
+
+// readTokenUntilWhitespace copies bytes from p.buf into p.currentToken. The
+// first byte considered is the byte already read (now in p.currentByte). The
+// first whitespace byte encountered is still copied into p.currentByte, but not
+// into p.currentToken.
+func (p *TextParser) readTokenUntilWhitespace() {
+	p.currentToken.Reset()
+	for p.err == nil && !isBlankOrTab(p.currentByte) && p.currentByte != '\n' {
+		p.currentToken.WriteByte(p.currentByte)
+		p.currentByte, p.err = p.buf.ReadByte()
+	}
+}
+
+// readTokenUntilNewline copies bytes from p.buf into p.currentToken. The first
+// byte considered is the byte already read (now in p.currentByte). The first
+// newline byte encountered is still copied into p.currentByte, but not into
+// p.currentToken. If recognizeEscapeSequence is true, two escape sequences are
+// recognized: '\\' translates into '\', and '\n' into a line-feed character. All
+// other escape sequences are invalid and cause an error.
+func (p *TextParser) readTokenUntilNewline(recognizeEscapeSequence bool) {
+	p.currentToken.Reset()
+	escaped := false
+	for p.err == nil {
+		if recognizeEscapeSequence && escaped {
+			switch p.currentByte {
+			case '\\':
+				p.currentToken.WriteByte(p.currentByte)
+			case 'n':
+				p.currentToken.WriteByte('\n')
+			default:
+				p.parseError(fmt.Sprintf("invalid escape sequence '\\%c'", p.currentByte))
+				return
+			}
+			escaped = false
+		} else {
+			switch p.currentByte {
+			case '\n':
+				return
+			case '\\':
+				escaped = true
+			default:
+				p.currentToken.WriteByte(p.currentByte)
+			}
+		}
+		p.currentByte, p.err = p.buf.ReadByte()
+	}
+}
+
+// readTokenAsMetricName copies a metric name from p.buf into p.currentToken.
+// The first byte considered is the byte already read (now in p.currentByte).
+// The first byte not part of a metric name is still copied into p.currentByte,
+// but not into p.currentToken.
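+// Metric names follow the pattern [a-zA-Z_:][a-zA-Z0-9_:]*, as encoded in
+// isValidMetricNameStart and isValidMetricNameContinuation below.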
+func (p *TextParser) readTokenAsMetricName() { + p.currentToken.Reset() + if !isValidMetricNameStart(p.currentByte) { + return + } + for { + p.currentToken.WriteByte(p.currentByte) + p.currentByte, p.err = p.buf.ReadByte() + if p.err != nil || !isValidMetricNameContinuation(p.currentByte) { + return + } + } +} + +// readTokenAsLabelName copies a label name from p.buf into p.currentToken. +// The first byte considered is the byte already read (now in p.currentByte). +// The first byte not part of a label name is still copied into p.currentByte, +// but not into p.currentToken. +func (p *TextParser) readTokenAsLabelName() { + p.currentToken.Reset() + if !isValidLabelNameStart(p.currentByte) { + return + } + for { + p.currentToken.WriteByte(p.currentByte) + p.currentByte, p.err = p.buf.ReadByte() + if p.err != nil || !isValidLabelNameContinuation(p.currentByte) { + return + } + } +} + +// readTokenAsLabelValue copies a label value from p.buf into p.currentToken. +// In contrast to the other 'readTokenAs...' functions, which start with the +// last read byte in p.currentByte, this method ignores p.currentByte and starts +// with reading a new byte from p.buf. The first byte not part of a label value +// is still copied into p.currentByte, but not into p.currentToken. +func (p *TextParser) readTokenAsLabelValue() { + p.currentToken.Reset() + escaped := false + for { + if p.currentByte, p.err = p.buf.ReadByte(); p.err != nil { + return + } + if escaped { + switch p.currentByte { + case '"', '\\': + p.currentToken.WriteByte(p.currentByte) + case 'n': + p.currentToken.WriteByte('\n') + default: + p.parseError(fmt.Sprintf("invalid escape sequence '\\%c'", p.currentByte)) + return + } + escaped = false + continue + } + switch p.currentByte { + case '"': + return + case '\n': + p.parseError(fmt.Sprintf("label value %q contains unescaped new-line", p.currentToken.String())) + return + case '\\': + escaped = true + default: + p.currentToken.WriteByte(p.currentByte) + } + } +} + +func (p *TextParser) setOrCreateCurrentMF() { + p.currentIsSummaryCount = false + p.currentIsSummarySum = false + p.currentIsHistogramCount = false + p.currentIsHistogramSum = false + name := p.currentToken.String() + if p.currentMF = p.metricFamiliesByName[name]; p.currentMF != nil { + return + } + // Try out if this is a _sum or _count for a summary/histogram. 
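+	// For example, the sample lines "rpc_duration_seconds_sum" and
+	// "rpc_duration_seconds_count" (a hypothetical summary) both belong to
+	// the metric family "rpc_duration_seconds"; stripping the suffix lets
+	// them be folded into that family below.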
+ summaryName := summaryMetricName(name) + if p.currentMF = p.metricFamiliesByName[summaryName]; p.currentMF != nil { + if p.currentMF.GetType() == dto.MetricType_SUMMARY { + if isCount(name) { + p.currentIsSummaryCount = true + } + if isSum(name) { + p.currentIsSummarySum = true + } + return + } + } + histogramName := histogramMetricName(name) + if p.currentMF = p.metricFamiliesByName[histogramName]; p.currentMF != nil { + if p.currentMF.GetType() == dto.MetricType_HISTOGRAM { + if isCount(name) { + p.currentIsHistogramCount = true + } + if isSum(name) { + p.currentIsHistogramSum = true + } + return + } + } + p.currentMF = &dto.MetricFamily{Name: proto.String(name)} + p.metricFamiliesByName[name] = p.currentMF +} + +func isValidLabelNameStart(b byte) bool { + return (b >= 'a' && b <= 'z') || (b >= 'A' && b <= 'Z') || b == '_' +} + +func isValidLabelNameContinuation(b byte) bool { + return isValidLabelNameStart(b) || (b >= '0' && b <= '9') +} + +func isValidMetricNameStart(b byte) bool { + return isValidLabelNameStart(b) || b == ':' +} + +func isValidMetricNameContinuation(b byte) bool { + return isValidLabelNameContinuation(b) || b == ':' +} + +func isBlankOrTab(b byte) bool { + return b == ' ' || b == '\t' +} + +func isCount(name string) bool { + return len(name) > 6 && name[len(name)-6:] == "_count" +} + +func isSum(name string) bool { + return len(name) > 4 && name[len(name)-4:] == "_sum" +} + +func isBucket(name string) bool { + return len(name) > 7 && name[len(name)-7:] == "_bucket" +} + +func summaryMetricName(name string) string { + switch { + case isCount(name): + return name[:len(name)-6] + case isSum(name): + return name[:len(name)-4] + default: + return name + } +} + +func histogramMetricName(name string) string { + switch { + case isCount(name): + return name[:len(name)-6] + case isSum(name): + return name[:len(name)-4] + case isBucket(name): + return name[:len(name)-7] + default: + return name + } +} diff --git a/vendor/github.com/prometheus/common/expfmt/text_parse_test.go b/vendor/github.com/prometheus/common/expfmt/text_parse_test.go new file mode 100644 index 0000000..7e7388c --- /dev/null +++ b/vendor/github.com/prometheus/common/expfmt/text_parse_test.go @@ -0,0 +1,588 @@ +// Copyright 2014 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package expfmt + +import ( + "math" + "strings" + "testing" + + "github.com/golang/protobuf/proto" + dto "github.com/prometheus/client_model/go" +) + +func testTextParse(t testing.TB) { + var scenarios = []struct { + in string + out []*dto.MetricFamily + }{ + // 0: Empty lines as input. + { + in: ` + +`, + out: []*dto.MetricFamily{}, + }, + // 1: Minimal case. + { + in: ` +minimal_metric 1.234 +another_metric -3e3 103948 +# Even that: +no_labels{} 3 +# HELP line for non-existing metric will be ignored. 
+`, + out: []*dto.MetricFamily{ + &dto.MetricFamily{ + Name: proto.String("minimal_metric"), + Type: dto.MetricType_UNTYPED.Enum(), + Metric: []*dto.Metric{ + &dto.Metric{ + Untyped: &dto.Untyped{ + Value: proto.Float64(1.234), + }, + }, + }, + }, + &dto.MetricFamily{ + Name: proto.String("another_metric"), + Type: dto.MetricType_UNTYPED.Enum(), + Metric: []*dto.Metric{ + &dto.Metric{ + Untyped: &dto.Untyped{ + Value: proto.Float64(-3e3), + }, + TimestampMs: proto.Int64(103948), + }, + }, + }, + &dto.MetricFamily{ + Name: proto.String("no_labels"), + Type: dto.MetricType_UNTYPED.Enum(), + Metric: []*dto.Metric{ + &dto.Metric{ + Untyped: &dto.Untyped{ + Value: proto.Float64(3), + }, + }, + }, + }, + }, + }, + // 2: Counters & gauges, docstrings, various whitespace, escape sequences. + { + in: ` +# A normal comment. +# +# TYPE name counter +name{labelname="val1",basename="basevalue"} NaN +name {labelname="val2",basename="base\"v\\al\nue"} 0.23 1234567890 +# HELP name two-line\n doc str\\ing + + # HELP name2 doc str"ing 2 + # TYPE name2 gauge +name2{labelname="val2" ,basename = "basevalue2" } +Inf 54321 +name2{ labelname = "val1" , }-Inf +`, + out: []*dto.MetricFamily{ + &dto.MetricFamily{ + Name: proto.String("name"), + Help: proto.String("two-line\n doc str\\ing"), + Type: dto.MetricType_COUNTER.Enum(), + Metric: []*dto.Metric{ + &dto.Metric{ + Label: []*dto.LabelPair{ + &dto.LabelPair{ + Name: proto.String("labelname"), + Value: proto.String("val1"), + }, + &dto.LabelPair{ + Name: proto.String("basename"), + Value: proto.String("basevalue"), + }, + }, + Counter: &dto.Counter{ + Value: proto.Float64(math.NaN()), + }, + }, + &dto.Metric{ + Label: []*dto.LabelPair{ + &dto.LabelPair{ + Name: proto.String("labelname"), + Value: proto.String("val2"), + }, + &dto.LabelPair{ + Name: proto.String("basename"), + Value: proto.String("base\"v\\al\nue"), + }, + }, + Counter: &dto.Counter{ + Value: proto.Float64(.23), + }, + TimestampMs: proto.Int64(1234567890), + }, + }, + }, + &dto.MetricFamily{ + Name: proto.String("name2"), + Help: proto.String("doc str\"ing 2"), + Type: dto.MetricType_GAUGE.Enum(), + Metric: []*dto.Metric{ + &dto.Metric{ + Label: []*dto.LabelPair{ + &dto.LabelPair{ + Name: proto.String("labelname"), + Value: proto.String("val2"), + }, + &dto.LabelPair{ + Name: proto.String("basename"), + Value: proto.String("basevalue2"), + }, + }, + Gauge: &dto.Gauge{ + Value: proto.Float64(math.Inf(+1)), + }, + TimestampMs: proto.Int64(54321), + }, + &dto.Metric{ + Label: []*dto.LabelPair{ + &dto.LabelPair{ + Name: proto.String("labelname"), + Value: proto.String("val1"), + }, + }, + Gauge: &dto.Gauge{ + Value: proto.Float64(math.Inf(-1)), + }, + }, + }, + }, + }, + }, + // 3: The evil summary, mixed with other types and funny comments. + { + in: ` +# TYPE my_summary summary +my_summary{n1="val1",quantile="0.5"} 110 +decoy -1 -2 +my_summary{n1="val1",quantile="0.9"} 140 1 +my_summary_count{n1="val1"} 42 +# Latest timestamp wins in case of a summary. 
+my_summary_sum{n1="val1"} 4711 2 +fake_sum{n1="val1"} 2001 +# TYPE another_summary summary +another_summary_count{n2="val2",n1="val1"} 20 +my_summary_count{n2="val2",n1="val1"} 5 5 +another_summary{n1="val1",n2="val2",quantile=".3"} -1.2 +my_summary_sum{n1="val2"} 08 15 +my_summary{n1="val3", quantile="0.2"} 4711 + my_summary{n1="val1",n2="val2",quantile="-12.34",} NaN +# some +# funny comments +# HELP +# HELP +# HELP my_summary +# HELP my_summary +`, + out: []*dto.MetricFamily{ + &dto.MetricFamily{ + Name: proto.String("fake_sum"), + Type: dto.MetricType_UNTYPED.Enum(), + Metric: []*dto.Metric{ + &dto.Metric{ + Label: []*dto.LabelPair{ + &dto.LabelPair{ + Name: proto.String("n1"), + Value: proto.String("val1"), + }, + }, + Untyped: &dto.Untyped{ + Value: proto.Float64(2001), + }, + }, + }, + }, + &dto.MetricFamily{ + Name: proto.String("decoy"), + Type: dto.MetricType_UNTYPED.Enum(), + Metric: []*dto.Metric{ + &dto.Metric{ + Untyped: &dto.Untyped{ + Value: proto.Float64(-1), + }, + TimestampMs: proto.Int64(-2), + }, + }, + }, + &dto.MetricFamily{ + Name: proto.String("my_summary"), + Type: dto.MetricType_SUMMARY.Enum(), + Metric: []*dto.Metric{ + &dto.Metric{ + Label: []*dto.LabelPair{ + &dto.LabelPair{ + Name: proto.String("n1"), + Value: proto.String("val1"), + }, + }, + Summary: &dto.Summary{ + SampleCount: proto.Uint64(42), + SampleSum: proto.Float64(4711), + Quantile: []*dto.Quantile{ + &dto.Quantile{ + Quantile: proto.Float64(0.5), + Value: proto.Float64(110), + }, + &dto.Quantile{ + Quantile: proto.Float64(0.9), + Value: proto.Float64(140), + }, + }, + }, + TimestampMs: proto.Int64(2), + }, + &dto.Metric{ + Label: []*dto.LabelPair{ + &dto.LabelPair{ + Name: proto.String("n2"), + Value: proto.String("val2"), + }, + &dto.LabelPair{ + Name: proto.String("n1"), + Value: proto.String("val1"), + }, + }, + Summary: &dto.Summary{ + SampleCount: proto.Uint64(5), + Quantile: []*dto.Quantile{ + &dto.Quantile{ + Quantile: proto.Float64(-12.34), + Value: proto.Float64(math.NaN()), + }, + }, + }, + TimestampMs: proto.Int64(5), + }, + &dto.Metric{ + Label: []*dto.LabelPair{ + &dto.LabelPair{ + Name: proto.String("n1"), + Value: proto.String("val2"), + }, + }, + Summary: &dto.Summary{ + SampleSum: proto.Float64(8), + }, + TimestampMs: proto.Int64(15), + }, + &dto.Metric{ + Label: []*dto.LabelPair{ + &dto.LabelPair{ + Name: proto.String("n1"), + Value: proto.String("val3"), + }, + }, + Summary: &dto.Summary{ + Quantile: []*dto.Quantile{ + &dto.Quantile{ + Quantile: proto.Float64(0.2), + Value: proto.Float64(4711), + }, + }, + }, + }, + }, + }, + &dto.MetricFamily{ + Name: proto.String("another_summary"), + Type: dto.MetricType_SUMMARY.Enum(), + Metric: []*dto.Metric{ + &dto.Metric{ + Label: []*dto.LabelPair{ + &dto.LabelPair{ + Name: proto.String("n2"), + Value: proto.String("val2"), + }, + &dto.LabelPair{ + Name: proto.String("n1"), + Value: proto.String("val1"), + }, + }, + Summary: &dto.Summary{ + SampleCount: proto.Uint64(20), + Quantile: []*dto.Quantile{ + &dto.Quantile{ + Quantile: proto.Float64(0.3), + Value: proto.Float64(-1.2), + }, + }, + }, + }, + }, + }, + }, + }, + // 4: The histogram. + { + in: ` +# HELP request_duration_microseconds The response latency. 
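+# (Buckets are cumulative: each "le" bucket counts all observations with a
+# value less than or equal to its upper bound, so the "+Inf" bucket always
+# equals the total sample count.)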
+# TYPE request_duration_microseconds histogram +request_duration_microseconds_bucket{le="100"} 123 +request_duration_microseconds_bucket{le="120"} 412 +request_duration_microseconds_bucket{le="144"} 592 +request_duration_microseconds_bucket{le="172.8"} 1524 +request_duration_microseconds_bucket{le="+Inf"} 2693 +request_duration_microseconds_sum 1.7560473e+06 +request_duration_microseconds_count 2693 +`, + out: []*dto.MetricFamily{ + { + Name: proto.String("request_duration_microseconds"), + Help: proto.String("The response latency."), + Type: dto.MetricType_HISTOGRAM.Enum(), + Metric: []*dto.Metric{ + &dto.Metric{ + Histogram: &dto.Histogram{ + SampleCount: proto.Uint64(2693), + SampleSum: proto.Float64(1756047.3), + Bucket: []*dto.Bucket{ + &dto.Bucket{ + UpperBound: proto.Float64(100), + CumulativeCount: proto.Uint64(123), + }, + &dto.Bucket{ + UpperBound: proto.Float64(120), + CumulativeCount: proto.Uint64(412), + }, + &dto.Bucket{ + UpperBound: proto.Float64(144), + CumulativeCount: proto.Uint64(592), + }, + &dto.Bucket{ + UpperBound: proto.Float64(172.8), + CumulativeCount: proto.Uint64(1524), + }, + &dto.Bucket{ + UpperBound: proto.Float64(math.Inf(+1)), + CumulativeCount: proto.Uint64(2693), + }, + }, + }, + }, + }, + }, + }, + }, + } + + for i, scenario := range scenarios { + out, err := parser.TextToMetricFamilies(strings.NewReader(scenario.in)) + if err != nil { + t.Errorf("%d. error: %s", i, err) + continue + } + if expected, got := len(scenario.out), len(out); expected != got { + t.Errorf( + "%d. expected %d MetricFamilies, got %d", + i, expected, got, + ) + } + for _, expected := range scenario.out { + got, ok := out[expected.GetName()] + if !ok { + t.Errorf( + "%d. expected MetricFamily %q, found none", + i, expected.GetName(), + ) + continue + } + if expected.String() != got.String() { + t.Errorf( + "%d. expected MetricFamily %s, got %s", + i, expected, got, + ) + } + } + } +} + +func TestTextParse(t *testing.T) { + testTextParse(t) +} + +func BenchmarkTextParse(b *testing.B) { + for i := 0; i < b.N; i++ { + testTextParse(b) + } +} + +func testTextParseError(t testing.TB) { + var scenarios = []struct { + in string + err string + }{ + // 0: No new-line at end of input. + { + in: ` +bla 3.14 +blubber 42`, + err: "text format parsing error in line 3: unexpected end of input stream", + }, + // 1: Invalid escape sequence in label value. + { + in: `metric{label="\t"} 3.14`, + err: "text format parsing error in line 1: invalid escape sequence", + }, + // 2: Newline in label value. 
+ { + in: ` +metric{label="new +line"} 3.14 +`, + err: `text format parsing error in line 2: label value "new" contains unescaped new-line`, + }, + // 3: + { + in: `metric{@="bla"} 3.14`, + err: "text format parsing error in line 1: invalid label name for metric", + }, + // 4: + { + in: `metric{__name__="bla"} 3.14`, + err: `text format parsing error in line 1: label name "__name__" is reserved`, + }, + // 5: + { + in: `metric{label+="bla"} 3.14`, + err: "text format parsing error in line 1: expected '=' after label name", + }, + // 6: + { + in: `metric{label=bla} 3.14`, + err: "text format parsing error in line 1: expected '\"' at start of label value", + }, + // 7: + { + in: ` +# TYPE metric summary +metric{quantile="bla"} 3.14 +`, + err: "text format parsing error in line 3: expected float as value for 'quantile' label", + }, + // 8: + { + in: `metric{label="bla"+} 3.14`, + err: "text format parsing error in line 1: unexpected end of label value", + }, + // 9: + { + in: `metric{label="bla"} 3.14 2.72 +`, + err: "text format parsing error in line 1: expected integer as timestamp", + }, + // 10: + { + in: `metric{label="bla"} 3.14 2 3 +`, + err: "text format parsing error in line 1: spurious string after timestamp", + }, + // 11: + { + in: `metric{label="bla"} blubb +`, + err: "text format parsing error in line 1: expected float as value", + }, + // 12: + { + in: ` +# HELP metric one +# HELP metric two +`, + err: "text format parsing error in line 3: second HELP line for metric name", + }, + // 13: + { + in: ` +# TYPE metric counter +# TYPE metric untyped +`, + err: `text format parsing error in line 3: second TYPE line for metric name "metric", or TYPE reported after samples`, + }, + // 14: + { + in: ` +metric 4.12 +# TYPE metric counter +`, + err: `text format parsing error in line 3: second TYPE line for metric name "metric", or TYPE reported after samples`, + }, + // 14: + { + in: ` +# TYPE metric bla +`, + err: "text format parsing error in line 2: unknown metric type", + }, + // 15: + { + in: ` +# TYPE met-ric +`, + err: "text format parsing error in line 2: invalid metric name in comment", + }, + // 16: + { + in: `@invalidmetric{label="bla"} 3.14 2`, + err: "text format parsing error in line 1: invalid metric name", + }, + // 17: + { + in: `{label="bla"} 3.14 2`, + err: "text format parsing error in line 1: invalid metric name", + }, + // 18: + { + in: ` +# TYPE metric histogram +metric_bucket{le="bla"} 3.14 +`, + err: "text format parsing error in line 3: expected float as value for 'le' label", + }, + } + + for i, scenario := range scenarios { + _, err := parser.TextToMetricFamilies(strings.NewReader(scenario.in)) + if err == nil { + t.Errorf("%d. expected error, got nil", i) + continue + } + if expected, got := scenario.err, err.Error(); strings.Index(got, expected) != 0 { + t.Errorf( + "%d. 
expected error starting with %q, got %q", + i, expected, got, + ) + } + } + +} + +func TestTextParseError(t *testing.T) { + testTextParseError(t) +} + +func BenchmarkParseError(b *testing.B) { + for i := 0; i < b.N; i++ { + testTextParseError(b) + } +} diff --git a/vendor/github.com/prometheus/common/internal/bitbucket.org/ww/goautoneg/README.txt b/vendor/github.com/prometheus/common/internal/bitbucket.org/ww/goautoneg/README.txt new file mode 100644 index 0000000..7723656 --- /dev/null +++ b/vendor/github.com/prometheus/common/internal/bitbucket.org/ww/goautoneg/README.txt @@ -0,0 +1,67 @@ +PACKAGE + +package goautoneg +import "bitbucket.org/ww/goautoneg" + +HTTP Content-Type Autonegotiation. + +The functions in this package implement the behaviour specified in +http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html + +Copyright (c) 2011, Open Knowledge Foundation Ltd. +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + + Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in + the documentation and/or other materials provided with the + distribution. + + Neither the name of the Open Knowledge Foundation Ltd. nor the + names of its contributors may be used to endorse or promote + products derived from this software without specific prior written + permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + + +FUNCTIONS + +func Negotiate(header string, alternatives []string) (content_type string) +Negotiate the most appropriate content_type given the accept header +and a list of alternatives. + +func ParseAccept(header string) (accept []Accept) +Parse an Accept Header string returning a sorted list +of clauses + + +TYPES + +type Accept struct { + Type, SubType string + Q float32 + Params map[string]string +} +Structure to represent a clause in an HTTP Accept Header + + +SUBDIRECTORIES + + .hg diff --git a/vendor/github.com/prometheus/common/internal/bitbucket.org/ww/goautoneg/autoneg.go b/vendor/github.com/prometheus/common/internal/bitbucket.org/ww/goautoneg/autoneg.go new file mode 100644 index 0000000..648b38c --- /dev/null +++ b/vendor/github.com/prometheus/common/internal/bitbucket.org/ww/goautoneg/autoneg.go @@ -0,0 +1,162 @@ +/* +HTTP Content-Type Autonegotiation. + +The functions in this package implement the behaviour specified in +http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html + +Copyright (c) 2011, Open Knowledge Foundation Ltd. +All rights reserved. 
+ +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + + Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in + the documentation and/or other materials provided with the + distribution. + + Neither the name of the Open Knowledge Foundation Ltd. nor the + names of its contributors may be used to endorse or promote + products derived from this software without specific prior written + permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + + +*/ +package goautoneg + +import ( + "sort" + "strconv" + "strings" +) + +// Structure to represent a clause in an HTTP Accept Header +type Accept struct { + Type, SubType string + Q float64 + Params map[string]string +} + +// For internal use, so that we can use the sort interface +type accept_slice []Accept + +func (accept accept_slice) Len() int { + slice := []Accept(accept) + return len(slice) +} + +func (accept accept_slice) Less(i, j int) bool { + slice := []Accept(accept) + ai, aj := slice[i], slice[j] + if ai.Q > aj.Q { + return true + } + if ai.Type != "*" && aj.Type == "*" { + return true + } + if ai.SubType != "*" && aj.SubType == "*" { + return true + } + return false +} + +func (accept accept_slice) Swap(i, j int) { + slice := []Accept(accept) + slice[i], slice[j] = slice[j], slice[i] +} + +// Parse an Accept Header string returning a sorted list +// of clauses +func ParseAccept(header string) (accept []Accept) { + parts := strings.Split(header, ",") + accept = make([]Accept, 0, len(parts)) + for _, part := range parts { + part := strings.Trim(part, " ") + + a := Accept{} + a.Params = make(map[string]string) + a.Q = 1.0 + + mrp := strings.Split(part, ";") + + media_range := mrp[0] + sp := strings.Split(media_range, "/") + a.Type = strings.Trim(sp[0], " ") + + switch { + case len(sp) == 1 && a.Type == "*": + a.SubType = "*" + case len(sp) == 2: + a.SubType = strings.Trim(sp[1], " ") + default: + continue + } + + if len(mrp) == 1 { + accept = append(accept, a) + continue + } + + for _, param := range mrp[1:] { + sp := strings.SplitN(param, "=", 2) + if len(sp) != 2 { + continue + } + token := strings.Trim(sp[0], " ") + if token == "q" { + a.Q, _ = strconv.ParseFloat(sp[1], 32) + } else { + a.Params[token] = strings.Trim(sp[1], " ") + } + } + + accept = append(accept, a) + } + + slice := accept_slice(accept) + sort.Sort(slice) + + return +} + +// Negotiate the most appropriate content_type given the accept header +// and a list of alternatives. 
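+//
+// An illustrative use (hypothetical header and alternatives):
+//
+//	accept := "application/xml,application/xhtml+xml,text/html;q=0.9,*/*;q=0.5"
+//	ct := Negotiate(accept, []string{"text/html", "application/json"})
+//	// ct == "text/html"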
+func Negotiate(header string, alternatives []string) (content_type string) { + asp := make([][]string, 0, len(alternatives)) + for _, ctype := range alternatives { + asp = append(asp, strings.SplitN(ctype, "/", 2)) + } + for _, clause := range ParseAccept(header) { + for i, ctsp := range asp { + if clause.Type == ctsp[0] && clause.SubType == ctsp[1] { + content_type = alternatives[i] + return + } + if clause.Type == ctsp[0] && clause.SubType == "*" { + content_type = alternatives[i] + return + } + if clause.Type == "*" && clause.SubType == "*" { + content_type = alternatives[i] + return + } + } + } + return +} diff --git a/vendor/github.com/prometheus/common/internal/bitbucket.org/ww/goautoneg/autoneg_test.go b/vendor/github.com/prometheus/common/internal/bitbucket.org/ww/goautoneg/autoneg_test.go new file mode 100644 index 0000000..41d328f --- /dev/null +++ b/vendor/github.com/prometheus/common/internal/bitbucket.org/ww/goautoneg/autoneg_test.go @@ -0,0 +1,33 @@ +package goautoneg + +import ( + "testing" +) + +var chrome = "application/xml,application/xhtml+xml,text/html;q=0.9,text/plain;q=0.8,image/png,*/*;q=0.5" + +func TestParseAccept(t *testing.T) { + alternatives := []string{"text/html", "image/png"} + content_type := Negotiate(chrome, alternatives) + if content_type != "image/png" { + t.Errorf("got %s expected image/png", content_type) + } + + alternatives = []string{"text/html", "text/plain", "text/n3"} + content_type = Negotiate(chrome, alternatives) + if content_type != "text/html" { + t.Errorf("got %s expected text/html", content_type) + } + + alternatives = []string{"text/n3", "text/plain"} + content_type = Negotiate(chrome, alternatives) + if content_type != "text/plain" { + t.Errorf("got %s expected text/plain", content_type) + } + + alternatives = []string{"text/n3", "application/rdf+xml"} + content_type = Negotiate(chrome, alternatives) + if content_type != "text/n3" { + t.Errorf("got %s expected text/n3", content_type) + } +} diff --git a/vendor/github.com/prometheus/common/log/eventlog_formatter.go b/vendor/github.com/prometheus/common/log/eventlog_formatter.go new file mode 100644 index 0000000..6d41284 --- /dev/null +++ b/vendor/github.com/prometheus/common/log/eventlog_formatter.go @@ -0,0 +1,89 @@ +// Copyright 2015 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +// +build windows + +package log + +import ( + "fmt" + "os" + + "golang.org/x/sys/windows/svc/eventlog" + + "github.com/Sirupsen/logrus" +) + +func init() { + setEventlogFormatter = func(name string, debugAsInfo bool) error { + if name == "" { + return fmt.Errorf("missing name parameter") + } + + fmter, err := newEventlogger(name, debugAsInfo, origLogger.Formatter) + if err != nil { + fmt.Fprintf(os.Stderr, "error creating eventlog formatter: %v\n", err) + origLogger.Errorf("can't connect logger to eventlog: %v", err) + return err + } + origLogger.Formatter = fmter + return nil + } +} + +type eventlogger struct { + log *eventlog.Log + debugAsInfo bool + wrap logrus.Formatter +} + +func newEventlogger(name string, debugAsInfo bool, fmter logrus.Formatter) (*eventlogger, error) { + logHandle, err := eventlog.Open(name) + if err != nil { + return nil, err + } + return &eventlogger{log: logHandle, debugAsInfo: debugAsInfo, wrap: fmter}, nil +} + +func (s *eventlogger) Format(e *logrus.Entry) ([]byte, error) { + data, err := s.wrap.Format(e) + if err != nil { + fmt.Fprintf(os.Stderr, "eventlogger: can't format entry: %v\n", err) + return data, err + } + + switch e.Level { + case logrus.PanicLevel: + fallthrough + case logrus.FatalLevel: + fallthrough + case logrus.ErrorLevel: + err = s.log.Error(102, e.Message) + case logrus.WarnLevel: + err = s.log.Warning(101, e.Message) + case logrus.InfoLevel: + err = s.log.Info(100, e.Message) + case logrus.DebugLevel: + if s.debugAsInfo { + err = s.log.Info(100, e.Message) + } + default: + err = s.log.Info(100, e.Message) + } + + if err != nil { + fmt.Fprintf(os.Stderr, "eventlogger: can't send log to eventlog: %v\n", err) + } + + return data, err +} diff --git a/vendor/github.com/prometheus/common/log/log.go b/vendor/github.com/prometheus/common/log/log.go new file mode 100644 index 0000000..0a74a7f --- /dev/null +++ b/vendor/github.com/prometheus/common/log/log.go @@ -0,0 +1,365 @@ +// Copyright 2015 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package log + +import ( + "flag" + "fmt" + "io" + "io/ioutil" + "log" + "net/url" + "os" + "runtime" + "strconv" + "strings" + + "github.com/Sirupsen/logrus" +) + +type levelFlag string + +// String implements flag.Value. +func (f levelFlag) String() string { + return fmt.Sprintf("%q", origLogger.Level.String()) +} + +// Set implements flag.Value. +func (f levelFlag) Set(level string) error { + l, err := logrus.ParseLevel(level) + if err != nil { + return err + } + origLogger.Level = l + return nil +} + +// setSyslogFormatter is nil if the target architecture does not support syslog. +var setSyslogFormatter func(string, string) error + +// setEventlogFormatter is nil if the target OS does not support Eventlog (i.e., is not Windows). +var setEventlogFormatter func(string, bool) error + +func setJSONFormatter() { + origLogger.Formatter = &logrus.JSONFormatter{} +} + +type logFormatFlag url.URL + +// String implements flag.Value. 
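+// Values use the "logger:<target>?<params>" form, for example
+// "logger:stderr" or "logger:syslog?appname=bob&local=7" (see AddFlags
+// below for the full description).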
+func (f logFormatFlag) String() string { + u := url.URL(f) + return fmt.Sprintf("%q", u.String()) +} + +// Set implements flag.Value. +func (f logFormatFlag) Set(format string) error { + u, err := url.Parse(format) + if err != nil { + return err + } + if u.Scheme != "logger" { + return fmt.Errorf("invalid scheme %s", u.Scheme) + } + jsonq := u.Query().Get("json") + if jsonq == "true" { + setJSONFormatter() + } + + switch u.Opaque { + case "syslog": + if setSyslogFormatter == nil { + return fmt.Errorf("system does not support syslog") + } + appname := u.Query().Get("appname") + facility := u.Query().Get("local") + return setSyslogFormatter(appname, facility) + case "eventlog": + if setEventlogFormatter == nil { + return fmt.Errorf("system does not support eventlog") + } + name := u.Query().Get("name") + debugAsInfo := false + debugAsInfoRaw := u.Query().Get("debugAsInfo") + if parsedDebugAsInfo, err := strconv.ParseBool(debugAsInfoRaw); err == nil { + debugAsInfo = parsedDebugAsInfo + } + return setEventlogFormatter(name, debugAsInfo) + case "stdout": + origLogger.Out = os.Stdout + case "stderr": + origLogger.Out = os.Stderr + default: + return fmt.Errorf("unsupported logger %q", u.Opaque) + } + return nil +} + +func init() { + AddFlags(flag.CommandLine) +} + +// AddFlags adds the flags used by this package to the given FlagSet. That's +// useful if working with a custom FlagSet. The init function of this package +// adds the flags to flag.CommandLine anyway. Thus, it's usually enough to call +// flag.Parse() to make the logging flags take effect. +func AddFlags(fs *flag.FlagSet) { + fs.Var( + levelFlag(origLogger.Level.String()), + "log.level", + "Only log messages with the given severity or above. Valid levels: [debug, info, warn, error, fatal]", + ) + fs.Var( + logFormatFlag(url.URL{Scheme: "logger", Opaque: "stderr"}), + "log.format", + `Set the log target and format. Example: "logger:syslog?appname=bob&local=7" or "logger:stdout?json=true"`, + ) +} + +// Logger is the interface for loggers used in the Prometheus components. +type Logger interface { + Debug(...interface{}) + Debugln(...interface{}) + Debugf(string, ...interface{}) + + Info(...interface{}) + Infoln(...interface{}) + Infof(string, ...interface{}) + + Warn(...interface{}) + Warnln(...interface{}) + Warnf(string, ...interface{}) + + Error(...interface{}) + Errorln(...interface{}) + Errorf(string, ...interface{}) + + Fatal(...interface{}) + Fatalln(...interface{}) + Fatalf(string, ...interface{}) + + With(key string, value interface{}) Logger +} + +type logger struct { + entry *logrus.Entry +} + +func (l logger) With(key string, value interface{}) Logger { + return logger{l.entry.WithField(key, value)} +} + +// Debug logs a message at level Debug on the standard logger. +func (l logger) Debug(args ...interface{}) { + l.sourced().Debug(args...) +} + +// Debug logs a message at level Debug on the standard logger. +func (l logger) Debugln(args ...interface{}) { + l.sourced().Debugln(args...) +} + +// Debugf logs a message at level Debug on the standard logger. +func (l logger) Debugf(format string, args ...interface{}) { + l.sourced().Debugf(format, args...) +} + +// Info logs a message at level Info on the standard logger. +func (l logger) Info(args ...interface{}) { + l.sourced().Info(args...) +} + +// Info logs a message at level Info on the standard logger. +func (l logger) Infoln(args ...interface{}) { + l.sourced().Infoln(args...) +} + +// Infof logs a message at level Info on the standard logger. 
+func (l logger) Infof(format string, args ...interface{}) { + l.sourced().Infof(format, args...) +} + +// Warn logs a message at level Warn on the standard logger. +func (l logger) Warn(args ...interface{}) { + l.sourced().Warn(args...) +} + +// Warn logs a message at level Warn on the standard logger. +func (l logger) Warnln(args ...interface{}) { + l.sourced().Warnln(args...) +} + +// Warnf logs a message at level Warn on the standard logger. +func (l logger) Warnf(format string, args ...interface{}) { + l.sourced().Warnf(format, args...) +} + +// Error logs a message at level Error on the standard logger. +func (l logger) Error(args ...interface{}) { + l.sourced().Error(args...) +} + +// Error logs a message at level Error on the standard logger. +func (l logger) Errorln(args ...interface{}) { + l.sourced().Errorln(args...) +} + +// Errorf logs a message at level Error on the standard logger. +func (l logger) Errorf(format string, args ...interface{}) { + l.sourced().Errorf(format, args...) +} + +// Fatal logs a message at level Fatal on the standard logger. +func (l logger) Fatal(args ...interface{}) { + l.sourced().Fatal(args...) +} + +// Fatal logs a message at level Fatal on the standard logger. +func (l logger) Fatalln(args ...interface{}) { + l.sourced().Fatalln(args...) +} + +// Fatalf logs a message at level Fatal on the standard logger. +func (l logger) Fatalf(format string, args ...interface{}) { + l.sourced().Fatalf(format, args...) +} + +// sourced adds a source field to the logger that contains +// the file name and line where the logging happened. +func (l logger) sourced() *logrus.Entry { + _, file, line, ok := runtime.Caller(2) + if !ok { + file = "" + line = 1 + } else { + slash := strings.LastIndex(file, "/") + file = file[slash+1:] + } + return l.entry.WithField("source", fmt.Sprintf("%s:%d", file, line)) +} + +var origLogger = logrus.New() +var baseLogger = logger{entry: logrus.NewEntry(origLogger)} + +// Base returns the default Logger logging to +func Base() Logger { + return baseLogger +} + +// NewLogger returns a new Logger logging to out. +func NewLogger(w io.Writer) Logger { + l := logrus.New() + l.Out = w + return logger{entry: logrus.NewEntry(l)} +} + +// NewNopLogger returns a logger that discards all log messages. +func NewNopLogger() Logger { + l := logrus.New() + l.Out = ioutil.Discard + return logger{entry: logrus.NewEntry(l)} +} + +// With adds a field to the logger. +func With(key string, value interface{}) Logger { + return baseLogger.With(key, value) +} + +// Debug logs a message at level Debug on the standard logger. +func Debug(args ...interface{}) { + baseLogger.sourced().Debug(args...) +} + +// Debugln logs a message at level Debug on the standard logger. +func Debugln(args ...interface{}) { + baseLogger.sourced().Debugln(args...) +} + +// Debugf logs a message at level Debug on the standard logger. +func Debugf(format string, args ...interface{}) { + baseLogger.sourced().Debugf(format, args...) +} + +// Info logs a message at level Info on the standard logger. +func Info(args ...interface{}) { + baseLogger.sourced().Info(args...) +} + +// Infoln logs a message at level Info on the standard logger. +func Infoln(args ...interface{}) { + baseLogger.sourced().Infoln(args...) +} + +// Infof logs a message at level Info on the standard logger. +func Infof(format string, args ...interface{}) { + baseLogger.sourced().Infof(format, args...) +} + +// Warn logs a message at level Warn on the standard logger. 
+func Warn(args ...interface{}) { + baseLogger.sourced().Warn(args...) +} + +// Warnln logs a message at level Warn on the standard logger. +func Warnln(args ...interface{}) { + baseLogger.sourced().Warnln(args...) +} + +// Warnf logs a message at level Warn on the standard logger. +func Warnf(format string, args ...interface{}) { + baseLogger.sourced().Warnf(format, args...) +} + +// Error logs a message at level Error on the standard logger. +func Error(args ...interface{}) { + baseLogger.sourced().Error(args...) +} + +// Errorln logs a message at level Error on the standard logger. +func Errorln(args ...interface{}) { + baseLogger.sourced().Errorln(args...) +} + +// Errorf logs a message at level Error on the standard logger. +func Errorf(format string, args ...interface{}) { + baseLogger.sourced().Errorf(format, args...) +} + +// Fatal logs a message at level Fatal on the standard logger. +func Fatal(args ...interface{}) { + baseLogger.sourced().Fatal(args...) +} + +// Fatalln logs a message at level Fatal on the standard logger. +func Fatalln(args ...interface{}) { + baseLogger.sourced().Fatalln(args...) +} + +// Fatalf logs a message at level Fatal on the standard logger. +func Fatalf(format string, args ...interface{}) { + baseLogger.sourced().Fatalf(format, args...) +} + +type errorLogWriter struct{} + +func (errorLogWriter) Write(b []byte) (int, error) { + baseLogger.sourced().Error(string(b)) + return len(b), nil +} + +// NewErrorLogger returns a log.Logger that is meant to be used +// in the ErrorLog field of an http.Server to log HTTP server errors. +func NewErrorLogger() *log.Logger { + return log.New(&errorLogWriter{}, "", 0) +} diff --git a/vendor/github.com/prometheus/common/log/log_test.go b/vendor/github.com/prometheus/common/log/log_test.go new file mode 100644 index 0000000..953adb7 --- /dev/null +++ b/vendor/github.com/prometheus/common/log/log_test.go @@ -0,0 +1,39 @@ +// Copyright 2015 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package log + +import ( + "bytes" + "regexp" + "testing" + + "github.com/Sirupsen/logrus" +) + +func TestFileLineLogging(t *testing.T) { + var buf bytes.Buffer + origLogger.Out = &buf + origLogger.Formatter = &logrus.TextFormatter{ + DisableColors: true, + } + + // The default logging level should be "info". + Debug("This debug-level line should not show up in the output.") + Infof("This %s-level line should show up in the output.", "info") + + re := `^time=".*" level=info msg="This info-level line should show up in the output." 
source="log_test.go:33" \n$` + if !regexp.MustCompile(re).Match(buf.Bytes()) { + t.Fatalf("%q did not match expected regex %q", buf.String(), re) + } +} diff --git a/vendor/github.com/prometheus/common/log/syslog_formatter.go b/vendor/github.com/prometheus/common/log/syslog_formatter.go new file mode 100644 index 0000000..64f5fda --- /dev/null +++ b/vendor/github.com/prometheus/common/log/syslog_formatter.go @@ -0,0 +1,126 @@ +// Copyright 2015 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// +build !windows,!nacl,!plan9 + +package log + +import ( + "fmt" + "log/syslog" + "os" + + "github.com/Sirupsen/logrus" +) + +var _ logrus.Formatter = (*syslogger)(nil) + +func init() { + setSyslogFormatter = func(appname, local string) error { + if appname == "" { + return fmt.Errorf("missing appname parameter") + } + if local == "" { + return fmt.Errorf("missing local parameter") + } + + fmter, err := newSyslogger(appname, local, origLogger.Formatter) + if err != nil { + fmt.Fprintf(os.Stderr, "error creating syslog formatter: %v\n", err) + origLogger.Errorf("can't connect logger to syslog: %v", err) + return err + } + origLogger.Formatter = fmter + return nil + } +} + +var prefixTag []byte + +type syslogger struct { + wrap logrus.Formatter + out *syslog.Writer +} + +func newSyslogger(appname string, facility string, fmter logrus.Formatter) (*syslogger, error) { + priority, err := getFacility(facility) + if err != nil { + return nil, err + } + out, err := syslog.New(priority, appname) + _, isJSON := fmter.(*logrus.JSONFormatter) + if isJSON { + // add cee tag to json formatted syslogs + prefixTag = []byte("@cee:") + } + return &syslogger{ + out: out, + wrap: fmter, + }, err +} + +func getFacility(facility string) (syslog.Priority, error) { + switch facility { + case "0": + return syslog.LOG_LOCAL0, nil + case "1": + return syslog.LOG_LOCAL1, nil + case "2": + return syslog.LOG_LOCAL2, nil + case "3": + return syslog.LOG_LOCAL3, nil + case "4": + return syslog.LOG_LOCAL4, nil + case "5": + return syslog.LOG_LOCAL5, nil + case "6": + return syslog.LOG_LOCAL6, nil + case "7": + return syslog.LOG_LOCAL7, nil + } + return syslog.LOG_LOCAL0, fmt.Errorf("invalid local(%s) for syslog", facility) +} + +func (s *syslogger) Format(e *logrus.Entry) ([]byte, error) { + data, err := s.wrap.Format(e) + if err != nil { + fmt.Fprintf(os.Stderr, "syslogger: can't format entry: %v\n", err) + return data, err + } + // only append tag to data sent to syslog (line), not to what + // is returned + line := string(append(prefixTag, data...)) + + switch e.Level { + case logrus.PanicLevel: + err = s.out.Crit(line) + case logrus.FatalLevel: + err = s.out.Crit(line) + case logrus.ErrorLevel: + err = s.out.Err(line) + case logrus.WarnLevel: + err = s.out.Warning(line) + case logrus.InfoLevel: + err = s.out.Info(line) + case logrus.DebugLevel: + err = s.out.Debug(line) + default: + err = s.out.Notice(line) + } + + if err != nil { + fmt.Fprintf(os.Stderr, "syslogger: can't send log to syslog: %v\n", 
err) + } + + return data, err +} diff --git a/vendor/github.com/prometheus/common/log/syslog_formatter_test.go b/vendor/github.com/prometheus/common/log/syslog_formatter_test.go new file mode 100644 index 0000000..b7e6884 --- /dev/null +++ b/vendor/github.com/prometheus/common/log/syslog_formatter_test.go @@ -0,0 +1,52 @@ +// Copyright 2015 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// +build !windows,!nacl,!plan9 + +package log + +import ( + "errors" + "log/syslog" + "testing" +) + +func TestGetFacility(t *testing.T) { + testCases := []struct { + facility string + expectedPriority syslog.Priority + expectedErr error + }{ + {"0", syslog.LOG_LOCAL0, nil}, + {"1", syslog.LOG_LOCAL1, nil}, + {"2", syslog.LOG_LOCAL2, nil}, + {"3", syslog.LOG_LOCAL3, nil}, + {"4", syslog.LOG_LOCAL4, nil}, + {"5", syslog.LOG_LOCAL5, nil}, + {"6", syslog.LOG_LOCAL6, nil}, + {"7", syslog.LOG_LOCAL7, nil}, + {"8", syslog.LOG_LOCAL0, errors.New("invalid local(8) for syslog")}, + } + for _, tc := range testCases { + priority, err := getFacility(tc.facility) + if err != tc.expectedErr { + if err.Error() != tc.expectedErr.Error() { + t.Errorf("want %s, got %s", tc.expectedErr.Error(), err.Error()) + } + } + + if priority != tc.expectedPriority { + t.Errorf("want %q, got %q", tc.expectedPriority, priority) + } + } +} diff --git a/vendor/github.com/prometheus/common/model/alert.go b/vendor/github.com/prometheus/common/model/alert.go new file mode 100644 index 0000000..35e739c --- /dev/null +++ b/vendor/github.com/prometheus/common/model/alert.go @@ -0,0 +1,136 @@ +// Copyright 2013 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package model + +import ( + "fmt" + "time" +) + +type AlertStatus string + +const ( + AlertFiring AlertStatus = "firing" + AlertResolved AlertStatus = "resolved" +) + +// Alert is a generic representation of an alert in the Prometheus eco-system. +type Alert struct { + // Label value pairs for purpose of aggregation, matching, and disposition + // dispatching. This must minimally include an "alertname" label. + Labels LabelSet `json:"labels"` + + // Extra key/value information which does not define alert identity. + Annotations LabelSet `json:"annotations"` + + // The known time range for this alert. Both ends are optional. + StartsAt time.Time `json:"startsAt,omitempty"` + EndsAt time.Time `json:"endsAt,omitempty"` + GeneratorURL string `json:"generatorURL"` +} + +// Name returns the name of the alert. 
It is equivalent to the "alertname" label. +func (a *Alert) Name() string { + return string(a.Labels[AlertNameLabel]) +} + +// Fingerprint returns a unique hash for the alert. It is equivalent to +// the fingerprint of the alert's label set. +func (a *Alert) Fingerprint() Fingerprint { + return a.Labels.Fingerprint() +} + +func (a *Alert) String() string { + s := fmt.Sprintf("%s[%s]", a.Name(), a.Fingerprint().String()[:7]) + if a.Resolved() { + return s + "[resolved]" + } + return s + "[active]" +} + +// Resolved returns true iff the activity interval ended in the past. +func (a *Alert) Resolved() bool { + return a.ResolvedAt(time.Now()) +} + +// ResolvedAt returns true off the activity interval ended before +// the given timestamp. +func (a *Alert) ResolvedAt(ts time.Time) bool { + if a.EndsAt.IsZero() { + return false + } + return !a.EndsAt.After(ts) +} + +// Status returns the status of the alert. +func (a *Alert) Status() AlertStatus { + if a.Resolved() { + return AlertResolved + } + return AlertFiring +} + +// Validate checks whether the alert data is inconsistent. +func (a *Alert) Validate() error { + if a.StartsAt.IsZero() { + return fmt.Errorf("start time missing") + } + if !a.EndsAt.IsZero() && a.EndsAt.Before(a.StartsAt) { + return fmt.Errorf("start time must be before end time") + } + if err := a.Labels.Validate(); err != nil { + return fmt.Errorf("invalid label set: %s", err) + } + if len(a.Labels) == 0 { + return fmt.Errorf("at least one label pair required") + } + if err := a.Annotations.Validate(); err != nil { + return fmt.Errorf("invalid annotations: %s", err) + } + return nil +} + +// Alert is a list of alerts that can be sorted in chronological order. +type Alerts []*Alert + +func (as Alerts) Len() int { return len(as) } +func (as Alerts) Swap(i, j int) { as[i], as[j] = as[j], as[i] } + +func (as Alerts) Less(i, j int) bool { + if as[i].StartsAt.Before(as[j].StartsAt) { + return true + } + if as[i].EndsAt.Before(as[j].EndsAt) { + return true + } + return as[i].Fingerprint() < as[j].Fingerprint() +} + +// HasFiring returns true iff one of the alerts is not resolved. +func (as Alerts) HasFiring() bool { + for _, a := range as { + if !a.Resolved() { + return true + } + } + return false +} + +// Status returns StatusFiring iff at least one of the alerts is firing. +func (as Alerts) Status() AlertStatus { + if as.HasFiring() { + return AlertFiring + } + return AlertResolved +} diff --git a/vendor/github.com/prometheus/common/model/alert_test.go b/vendor/github.com/prometheus/common/model/alert_test.go new file mode 100644 index 0000000..9692bca --- /dev/null +++ b/vendor/github.com/prometheus/common/model/alert_test.go @@ -0,0 +1,118 @@ +// Copyright 2013 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
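The Alert type defined above is plain data plus a Validate method. A minimal sketch of how a consumer of this vendored package might construct and check one (the label values and times are illustrative, not taken from this patch):

package main

import (
	"fmt"
	"time"

	"github.com/prometheus/common/model"
)

func main() {
	a := &model.Alert{
		Labels:      model.LabelSet{model.AlertNameLabel: "HighErrorRate", "job": "api"},
		Annotations: model.LabelSet{"summary": "error rate above 5% for 10 minutes"},
		StartsAt:    time.Now().Add(-10 * time.Minute),
		// EndsAt left at its zero value: the alert is still firing.
	}
	if err := a.Validate(); err != nil {
		fmt.Println("invalid alert:", err)
		return
	}
	fmt.Println(a.Status())      // "firing" while EndsAt is unset
	fmt.Println(a.Fingerprint()) // hex fingerprint of the alert's label set
}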
+ +package model + +import ( + "strings" + "testing" + "time" +) + +func TestAlertValidate(t *testing.T) { + ts := time.Now() + + var cases = []struct { + alert *Alert + err string + }{ + { + alert: &Alert{ + Labels: LabelSet{"a": "b"}, + StartsAt: ts, + }, + }, + { + alert: &Alert{ + Labels: LabelSet{"a": "b"}, + }, + err: "start time missing", + }, + { + alert: &Alert{ + Labels: LabelSet{"a": "b"}, + StartsAt: ts, + EndsAt: ts, + }, + }, + { + alert: &Alert{ + Labels: LabelSet{"a": "b"}, + StartsAt: ts, + EndsAt: ts.Add(1 * time.Minute), + }, + }, + { + alert: &Alert{ + Labels: LabelSet{"a": "b"}, + StartsAt: ts, + EndsAt: ts.Add(-1 * time.Minute), + }, + err: "start time must be before end time", + }, + { + alert: &Alert{ + StartsAt: ts, + }, + err: "at least one label pair required", + }, + { + alert: &Alert{ + Labels: LabelSet{"a": "b", "!bad": "label"}, + StartsAt: ts, + }, + err: "invalid label set: invalid name", + }, + { + alert: &Alert{ + Labels: LabelSet{"a": "b", "bad": "\xfflabel"}, + StartsAt: ts, + }, + err: "invalid label set: invalid value", + }, + { + alert: &Alert{ + Labels: LabelSet{"a": "b"}, + Annotations: LabelSet{"!bad": "label"}, + StartsAt: ts, + }, + err: "invalid annotations: invalid name", + }, + { + alert: &Alert{ + Labels: LabelSet{"a": "b"}, + Annotations: LabelSet{"bad": "\xfflabel"}, + StartsAt: ts, + }, + err: "invalid annotations: invalid value", + }, + } + + for i, c := range cases { + err := c.alert.Validate() + if err == nil { + if c.err == "" { + continue + } + t.Errorf("%d. Expected error %q but got none", i, c.err) + continue + } + if c.err == "" && err != nil { + t.Errorf("%d. Expected no error but got %q", i, err) + continue + } + if !strings.Contains(err.Error(), c.err) { + t.Errorf("%d. Expected error to contain %q but got %q", i, c.err, err) + } + } +} diff --git a/vendor/github.com/prometheus/common/model/fingerprinting.go b/vendor/github.com/prometheus/common/model/fingerprinting.go new file mode 100644 index 0000000..fc4de41 --- /dev/null +++ b/vendor/github.com/prometheus/common/model/fingerprinting.go @@ -0,0 +1,105 @@ +// Copyright 2013 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package model + +import ( + "fmt" + "strconv" +) + +// Fingerprint provides a hash-capable representation of a Metric. +// For our purposes, FNV-1A 64-bit is used. +type Fingerprint uint64 + +// FingerprintFromString transforms a string representation into a Fingerprint. +func FingerprintFromString(s string) (Fingerprint, error) { + num, err := strconv.ParseUint(s, 16, 64) + return Fingerprint(num), err +} + +// ParseFingerprint parses the input string into a fingerprint. +func ParseFingerprint(s string) (Fingerprint, error) { + num, err := strconv.ParseUint(s, 16, 64) + if err != nil { + return 0, err + } + return Fingerprint(num), nil +} + +func (f Fingerprint) String() string { + return fmt.Sprintf("%016x", uint64(f)) +} + +// Fingerprints represents a collection of Fingerprint subject to a given +// natural sorting scheme. 
It implements sort.Interface. +type Fingerprints []Fingerprint + +// Len implements sort.Interface. +func (f Fingerprints) Len() int { + return len(f) +} + +// Less implements sort.Interface. +func (f Fingerprints) Less(i, j int) bool { + return f[i] < f[j] +} + +// Swap implements sort.Interface. +func (f Fingerprints) Swap(i, j int) { + f[i], f[j] = f[j], f[i] +} + +// FingerprintSet is a set of Fingerprints. +type FingerprintSet map[Fingerprint]struct{} + +// Equal returns true if both sets contain the same elements (and not more). +func (s FingerprintSet) Equal(o FingerprintSet) bool { + if len(s) != len(o) { + return false + } + + for k := range s { + if _, ok := o[k]; !ok { + return false + } + } + + return true +} + +// Intersection returns the elements contained in both sets. +func (s FingerprintSet) Intersection(o FingerprintSet) FingerprintSet { + myLength, otherLength := len(s), len(o) + if myLength == 0 || otherLength == 0 { + return FingerprintSet{} + } + + subSet := s + superSet := o + + if otherLength < myLength { + subSet = o + superSet = s + } + + out := FingerprintSet{} + + for k := range subSet { + if _, ok := superSet[k]; ok { + out[k] = struct{}{} + } + } + + return out +} diff --git a/vendor/github.com/prometheus/common/model/fnv.go b/vendor/github.com/prometheus/common/model/fnv.go new file mode 100644 index 0000000..038fc1c --- /dev/null +++ b/vendor/github.com/prometheus/common/model/fnv.go @@ -0,0 +1,42 @@ +// Copyright 2015 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package model + +// Inline and byte-free variant of hash/fnv's fnv64a. + +const ( + offset64 = 14695981039346656037 + prime64 = 1099511628211 +) + +// hashNew initializies a new fnv64a hash value. +func hashNew() uint64 { + return offset64 +} + +// hashAdd adds a string to a fnv64a hash value, returning the updated hash. +func hashAdd(h uint64, s string) uint64 { + for i := 0; i < len(s); i++ { + h ^= uint64(s[i]) + h *= prime64 + } + return h +} + +// hashAddByte adds a byte to a fnv64a hash value, returning the updated hash. +func hashAddByte(h uint64, b byte) uint64 { + h ^= uint64(b) + h *= prime64 + return h +} diff --git a/vendor/github.com/prometheus/common/model/labels.go b/vendor/github.com/prometheus/common/model/labels.go new file mode 100644 index 0000000..41051a0 --- /dev/null +++ b/vendor/github.com/prometheus/common/model/labels.go @@ -0,0 +1,210 @@ +// Copyright 2013 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
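The inlined FNV helpers above compute the same 64-bit FNV-1a values as the standard library's hash/fnv. A small sketch for comparison (the label name hashed here is illustrative):

package main

import (
	"fmt"
	"hash/fnv"
)

func main() {
	// hashAdd(hashNew(), "alertname") in this package yields the same value
	// as writing the same bytes to the standard 64-bit FNV-1a hash.
	h := fnv.New64a()
	h.Write([]byte("alertname"))
	fmt.Printf("%016x\n", h.Sum64()) // same %016x formatting as Fingerprint.String
}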
+ +package model + +import ( + "encoding/json" + "fmt" + "regexp" + "strings" + "unicode/utf8" +) + +const ( + // AlertNameLabel is the name of the label containing the an alert's name. + AlertNameLabel = "alertname" + + // ExportedLabelPrefix is the prefix to prepend to the label names present in + // exported metrics if a label of the same name is added by the server. + ExportedLabelPrefix = "exported_" + + // MetricNameLabel is the label name indicating the metric name of a + // timeseries. + MetricNameLabel = "__name__" + + // SchemeLabel is the name of the label that holds the scheme on which to + // scrape a target. + SchemeLabel = "__scheme__" + + // AddressLabel is the name of the label that holds the address of + // a scrape target. + AddressLabel = "__address__" + + // MetricsPathLabel is the name of the label that holds the path on which to + // scrape a target. + MetricsPathLabel = "__metrics_path__" + + // ReservedLabelPrefix is a prefix which is not legal in user-supplied + // label names. + ReservedLabelPrefix = "__" + + // MetaLabelPrefix is a prefix for labels that provide meta information. + // Labels with this prefix are used for intermediate label processing and + // will not be attached to time series. + MetaLabelPrefix = "__meta_" + + // TmpLabelPrefix is a prefix for temporary labels as part of relabelling. + // Labels with this prefix are used for intermediate label processing and + // will not be attached to time series. This is reserved for use in + // Prometheus configuration files by users. + TmpLabelPrefix = "__tmp_" + + // ParamLabelPrefix is a prefix for labels that provide URL parameters + // used to scrape a target. + ParamLabelPrefix = "__param_" + + // JobLabel is the label name indicating the job from which a timeseries + // was scraped. + JobLabel = "job" + + // InstanceLabel is the label name used for the instance label. + InstanceLabel = "instance" + + // BucketLabel is used for the label that defines the upper bound of a + // bucket of a histogram ("le" -> "less or equal"). + BucketLabel = "le" + + // QuantileLabel is used for the label that defines the quantile in a + // summary. + QuantileLabel = "quantile" +) + +// LabelNameRE is a regular expression matching valid label names. Note that the +// IsValid method of LabelName performs the same check but faster than a match +// with this regular expression. +var LabelNameRE = regexp.MustCompile("^[a-zA-Z_][a-zA-Z0-9_]*$") + +// A LabelName is a key for a LabelSet or Metric. It has a value associated +// therewith. +type LabelName string + +// IsValid is true iff the label name matches the pattern of LabelNameRE. This +// method, however, does not use LabelNameRE for the check but a much faster +// hardcoded implementation. +func (ln LabelName) IsValid() bool { + if len(ln) == 0 { + return false + } + for i, b := range ln { + if !((b >= 'a' && b <= 'z') || (b >= 'A' && b <= 'Z') || b == '_' || (b >= '0' && b <= '9' && i > 0)) { + return false + } + } + return true +} + +// UnmarshalYAML implements the yaml.Unmarshaler interface. +func (ln *LabelName) UnmarshalYAML(unmarshal func(interface{}) error) error { + var s string + if err := unmarshal(&s); err != nil { + return err + } + if !LabelName(s).IsValid() { + return fmt.Errorf("%q is not a valid label name", s) + } + *ln = LabelName(s) + return nil +} + +// UnmarshalJSON implements the json.Unmarshaler interface. 
+func (ln *LabelName) UnmarshalJSON(b []byte) error { + var s string + if err := json.Unmarshal(b, &s); err != nil { + return err + } + if !LabelName(s).IsValid() { + return fmt.Errorf("%q is not a valid label name", s) + } + *ln = LabelName(s) + return nil +} + +// LabelNames is a sortable LabelName slice. In implements sort.Interface. +type LabelNames []LabelName + +func (l LabelNames) Len() int { + return len(l) +} + +func (l LabelNames) Less(i, j int) bool { + return l[i] < l[j] +} + +func (l LabelNames) Swap(i, j int) { + l[i], l[j] = l[j], l[i] +} + +func (l LabelNames) String() string { + labelStrings := make([]string, 0, len(l)) + for _, label := range l { + labelStrings = append(labelStrings, string(label)) + } + return strings.Join(labelStrings, ", ") +} + +// A LabelValue is an associated value for a LabelName. +type LabelValue string + +// IsValid returns true iff the string is a valid UTF8. +func (lv LabelValue) IsValid() bool { + return utf8.ValidString(string(lv)) +} + +// LabelValues is a sortable LabelValue slice. It implements sort.Interface. +type LabelValues []LabelValue + +func (l LabelValues) Len() int { + return len(l) +} + +func (l LabelValues) Less(i, j int) bool { + return string(l[i]) < string(l[j]) +} + +func (l LabelValues) Swap(i, j int) { + l[i], l[j] = l[j], l[i] +} + +// LabelPair pairs a name with a value. +type LabelPair struct { + Name LabelName + Value LabelValue +} + +// LabelPairs is a sortable slice of LabelPair pointers. It implements +// sort.Interface. +type LabelPairs []*LabelPair + +func (l LabelPairs) Len() int { + return len(l) +} + +func (l LabelPairs) Less(i, j int) bool { + switch { + case l[i].Name > l[j].Name: + return false + case l[i].Name < l[j].Name: + return true + case l[i].Value > l[j].Value: + return false + case l[i].Value < l[j].Value: + return true + default: + return false + } +} + +func (l LabelPairs) Swap(i, j int) { + l[i], l[j] = l[j], l[i] +} diff --git a/vendor/github.com/prometheus/common/model/labels_test.go b/vendor/github.com/prometheus/common/model/labels_test.go new file mode 100644 index 0000000..e8df28f --- /dev/null +++ b/vendor/github.com/prometheus/common/model/labels_test.go @@ -0,0 +1,140 @@ +// Copyright 2013 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
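A short sketch of how the validation helpers above behave; label names are checked against a strict character set, while label values only need to be valid UTF-8 (the example strings are illustrative):

package main

import (
	"fmt"

	"github.com/prometheus/common/model"
)

func main() {
	fmt.Println(model.LabelName("http_status").IsValid()) // true
	fmt.Println(model.LabelName("0status").IsValid())     // false: must not start with a digit
	fmt.Println(model.LabelName("http:status").IsValid()) // false: colons are not allowed in label names
	fmt.Println(model.LabelValue("any UTF-8 \u2713").IsValid()) // true: values only need valid UTF-8
}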
+ +package model + +import ( + "sort" + "testing" +) + +func testLabelNames(t testing.TB) { + var scenarios = []struct { + in LabelNames + out LabelNames + }{ + { + in: LabelNames{"ZZZ", "zzz"}, + out: LabelNames{"ZZZ", "zzz"}, + }, + { + in: LabelNames{"aaa", "AAA"}, + out: LabelNames{"AAA", "aaa"}, + }, + } + + for i, scenario := range scenarios { + sort.Sort(scenario.in) + + for j, expected := range scenario.out { + if expected != scenario.in[j] { + t.Errorf("%d.%d expected %s, got %s", i, j, expected, scenario.in[j]) + } + } + } +} + +func TestLabelNames(t *testing.T) { + testLabelNames(t) +} + +func BenchmarkLabelNames(b *testing.B) { + for i := 0; i < b.N; i++ { + testLabelNames(b) + } +} + +func testLabelValues(t testing.TB) { + var scenarios = []struct { + in LabelValues + out LabelValues + }{ + { + in: LabelValues{"ZZZ", "zzz"}, + out: LabelValues{"ZZZ", "zzz"}, + }, + { + in: LabelValues{"aaa", "AAA"}, + out: LabelValues{"AAA", "aaa"}, + }, + } + + for i, scenario := range scenarios { + sort.Sort(scenario.in) + + for j, expected := range scenario.out { + if expected != scenario.in[j] { + t.Errorf("%d.%d expected %s, got %s", i, j, expected, scenario.in[j]) + } + } + } +} + +func TestLabelValues(t *testing.T) { + testLabelValues(t) +} + +func BenchmarkLabelValues(b *testing.B) { + for i := 0; i < b.N; i++ { + testLabelValues(b) + } +} + +func TestLabelNameIsValid(t *testing.T) { + var scenarios = []struct { + ln LabelName + valid bool + }{ + { + ln: "Avalid_23name", + valid: true, + }, + { + ln: "_Avalid_23name", + valid: true, + }, + { + ln: "1valid_23name", + valid: false, + }, + { + ln: "avalid_23name", + valid: true, + }, + { + ln: "Ava:lid_23name", + valid: false, + }, + { + ln: "a lid_23name", + valid: false, + }, + { + ln: ":leading_colon", + valid: false, + }, + { + ln: "colon:in:the:middle", + valid: false, + }, + } + + for _, s := range scenarios { + if s.ln.IsValid() != s.valid { + t.Errorf("Expected %v for %q using IsValid method", s.valid, s.ln) + } + if LabelNameRE.MatchString(string(s.ln)) != s.valid { + t.Errorf("Expected %v for %q using regexp match", s.valid, s.ln) + } + } +} diff --git a/vendor/github.com/prometheus/common/model/labelset.go b/vendor/github.com/prometheus/common/model/labelset.go new file mode 100644 index 0000000..6eda08a --- /dev/null +++ b/vendor/github.com/prometheus/common/model/labelset.go @@ -0,0 +1,169 @@ +// Copyright 2013 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package model + +import ( + "encoding/json" + "fmt" + "sort" + "strings" +) + +// A LabelSet is a collection of LabelName and LabelValue pairs. The LabelSet +// may be fully-qualified down to the point where it may resolve to a single +// Metric in the data store or not. All operations that occur within the realm +// of a LabelSet can emit a vector of Metric entities to which the LabelSet may +// match. +type LabelSet map[LabelName]LabelValue + +// Validate checks whether all names and values in the label set +// are valid. 
+func (ls LabelSet) Validate() error { + for ln, lv := range ls { + if !ln.IsValid() { + return fmt.Errorf("invalid name %q", ln) + } + if !lv.IsValid() { + return fmt.Errorf("invalid value %q", lv) + } + } + return nil +} + +// Equal returns true iff both label sets have exactly the same key/value pairs. +func (ls LabelSet) Equal(o LabelSet) bool { + if len(ls) != len(o) { + return false + } + for ln, lv := range ls { + olv, ok := o[ln] + if !ok { + return false + } + if olv != lv { + return false + } + } + return true +} + +// Before compares the metrics, using the following criteria: +// +// If m has fewer labels than o, it is before o. If it has more, it is not. +// +// If the number of labels is the same, the superset of all label names is +// sorted alphanumerically. The first differing label pair found in that order +// determines the outcome: If the label does not exist at all in m, then m is +// before o, and vice versa. Otherwise the label value is compared +// alphanumerically. +// +// If m and o are equal, the method returns false. +func (ls LabelSet) Before(o LabelSet) bool { + if len(ls) < len(o) { + return true + } + if len(ls) > len(o) { + return false + } + + lns := make(LabelNames, 0, len(ls)+len(o)) + for ln := range ls { + lns = append(lns, ln) + } + for ln := range o { + lns = append(lns, ln) + } + // It's probably not worth it to de-dup lns. + sort.Sort(lns) + for _, ln := range lns { + mlv, ok := ls[ln] + if !ok { + return true + } + olv, ok := o[ln] + if !ok { + return false + } + if mlv < olv { + return true + } + if mlv > olv { + return false + } + } + return false +} + +// Clone returns a copy of the label set. +func (ls LabelSet) Clone() LabelSet { + lsn := make(LabelSet, len(ls)) + for ln, lv := range ls { + lsn[ln] = lv + } + return lsn +} + +// Merge is a helper function to non-destructively merge two label sets. +func (l LabelSet) Merge(other LabelSet) LabelSet { + result := make(LabelSet, len(l)) + + for k, v := range l { + result[k] = v + } + + for k, v := range other { + result[k] = v + } + + return result +} + +func (l LabelSet) String() string { + lstrs := make([]string, 0, len(l)) + for l, v := range l { + lstrs = append(lstrs, fmt.Sprintf("%s=%q", l, v)) + } + + sort.Strings(lstrs) + return fmt.Sprintf("{%s}", strings.Join(lstrs, ", ")) +} + +// Fingerprint returns the LabelSet's fingerprint. +func (ls LabelSet) Fingerprint() Fingerprint { + return labelSetToFingerprint(ls) +} + +// FastFingerprint returns the LabelSet's Fingerprint calculated by a faster hashing +// algorithm, which is, however, more susceptible to hash collisions. +func (ls LabelSet) FastFingerprint() Fingerprint { + return labelSetToFastFingerprint(ls) +} + +// UnmarshalJSON implements the json.Unmarshaler interface. +func (l *LabelSet) UnmarshalJSON(b []byte) error { + var m map[LabelName]LabelValue + if err := json.Unmarshal(b, &m); err != nil { + return err + } + // encoding/json only unmarshals maps of the form map[string]T. It treats + // LabelName as a string and does not call its UnmarshalJSON method. + // Thus, we have to replicate the behavior here. 
+ for ln := range m { + if !ln.IsValid() { + return fmt.Errorf("%q is not a valid label name", ln) + } + } + *l = LabelSet(m) + return nil +} diff --git a/vendor/github.com/prometheus/common/model/metric.go b/vendor/github.com/prometheus/common/model/metric.go new file mode 100644 index 0000000..f725090 --- /dev/null +++ b/vendor/github.com/prometheus/common/model/metric.go @@ -0,0 +1,103 @@ +// Copyright 2013 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package model + +import ( + "fmt" + "regexp" + "sort" + "strings" +) + +var ( + separator = []byte{0} + // MetricNameRE is a regular expression matching valid metric + // names. Note that the IsValidMetricName function performs the same + // check but faster than a match with this regular expression. + MetricNameRE = regexp.MustCompile(`^[a-zA-Z_:][a-zA-Z0-9_:]*$`) +) + +// A Metric is similar to a LabelSet, but the key difference is that a Metric is +// a singleton and refers to one and only one stream of samples. +type Metric LabelSet + +// Equal compares the metrics. +func (m Metric) Equal(o Metric) bool { + return LabelSet(m).Equal(LabelSet(o)) +} + +// Before compares the metrics' underlying label sets. +func (m Metric) Before(o Metric) bool { + return LabelSet(m).Before(LabelSet(o)) +} + +// Clone returns a copy of the Metric. +func (m Metric) Clone() Metric { + clone := make(Metric, len(m)) + for k, v := range m { + clone[k] = v + } + return clone +} + +func (m Metric) String() string { + metricName, hasName := m[MetricNameLabel] + numLabels := len(m) - 1 + if !hasName { + numLabels = len(m) + } + labelStrings := make([]string, 0, numLabels) + for label, value := range m { + if label != MetricNameLabel { + labelStrings = append(labelStrings, fmt.Sprintf("%s=%q", label, value)) + } + } + + switch numLabels { + case 0: + if hasName { + return string(metricName) + } + return "{}" + default: + sort.Strings(labelStrings) + return fmt.Sprintf("%s{%s}", metricName, strings.Join(labelStrings, ", ")) + } +} + +// Fingerprint returns a Metric's Fingerprint. +func (m Metric) Fingerprint() Fingerprint { + return LabelSet(m).Fingerprint() +} + +// FastFingerprint returns a Metric's Fingerprint calculated by a faster hashing +// algorithm, which is, however, more susceptible to hash collisions. +func (m Metric) FastFingerprint() Fingerprint { + return LabelSet(m).FastFingerprint() +} + +// IsValidMetricName returns true iff name matches the pattern of MetricNameRE. +// This function, however, does not use MetricNameRE for the check but a much +// faster hardcoded implementation. 
+func IsValidMetricName(n LabelValue) bool { + if len(n) == 0 { + return false + } + for i, b := range n { + if !((b >= 'a' && b <= 'z') || (b >= 'A' && b <= 'Z') || b == '_' || b == ':' || (b >= '0' && b <= '9' && i > 0)) { + return false + } + } + return true +} diff --git a/vendor/github.com/prometheus/common/model/metric_test.go b/vendor/github.com/prometheus/common/model/metric_test.go new file mode 100644 index 0000000..06f9de5 --- /dev/null +++ b/vendor/github.com/prometheus/common/model/metric_test.go @@ -0,0 +1,132 @@ +// Copyright 2013 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package model + +import "testing" + +func testMetric(t testing.TB) { + var scenarios = []struct { + input LabelSet + fingerprint Fingerprint + fastFingerprint Fingerprint + }{ + { + input: LabelSet{}, + fingerprint: 14695981039346656037, + fastFingerprint: 14695981039346656037, + }, + { + input: LabelSet{ + "first_name": "electro", + "occupation": "robot", + "manufacturer": "westinghouse", + }, + fingerprint: 5911716720268894962, + fastFingerprint: 11310079640881077873, + }, + { + input: LabelSet{ + "x": "y", + }, + fingerprint: 8241431561484471700, + fastFingerprint: 13948396922932177635, + }, + { + input: LabelSet{ + "a": "bb", + "b": "c", + }, + fingerprint: 3016285359649981711, + fastFingerprint: 3198632812309449502, + }, + { + input: LabelSet{ + "a": "b", + "bb": "c", + }, + fingerprint: 7122421792099404749, + fastFingerprint: 5774953389407657638, + }, + } + + for i, scenario := range scenarios { + input := Metric(scenario.input) + + if scenario.fingerprint != input.Fingerprint() { + t.Errorf("%d. expected %d, got %d", i, scenario.fingerprint, input.Fingerprint()) + } + if scenario.fastFingerprint != input.FastFingerprint() { + t.Errorf("%d. 
expected %d, got %d", i, scenario.fastFingerprint, input.FastFingerprint()) + } + } +} + +func TestMetric(t *testing.T) { + testMetric(t) +} + +func BenchmarkMetric(b *testing.B) { + for i := 0; i < b.N; i++ { + testMetric(b) + } +} + +func TestMetricNameIsValid(t *testing.T) { + var scenarios = []struct { + mn LabelValue + valid bool + }{ + { + mn: "Avalid_23name", + valid: true, + }, + { + mn: "_Avalid_23name", + valid: true, + }, + { + mn: "1valid_23name", + valid: false, + }, + { + mn: "avalid_23name", + valid: true, + }, + { + mn: "Ava:lid_23name", + valid: true, + }, + { + mn: "a lid_23name", + valid: false, + }, + { + mn: ":leading_colon", + valid: true, + }, + { + mn: "colon:in:the:middle", + valid: true, + }, + } + + for _, s := range scenarios { + if IsValidMetricName(s.mn) != s.valid { + t.Errorf("Expected %v for %q using IsValidMetricName function", s.valid, s.mn) + } + if MetricNameRE.MatchString(string(s.mn)) != s.valid { + t.Errorf("Expected %v for %q using regexp matching", s.valid, s.mn) + } + } +} diff --git a/vendor/github.com/prometheus/common/model/model.go b/vendor/github.com/prometheus/common/model/model.go new file mode 100644 index 0000000..a7b9691 --- /dev/null +++ b/vendor/github.com/prometheus/common/model/model.go @@ -0,0 +1,16 @@ +// Copyright 2013 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package model contains common data structures that are shared across +// Prometheus components and libraries. +package model diff --git a/vendor/github.com/prometheus/common/model/signature.go b/vendor/github.com/prometheus/common/model/signature.go new file mode 100644 index 0000000..8762b13 --- /dev/null +++ b/vendor/github.com/prometheus/common/model/signature.go @@ -0,0 +1,144 @@ +// Copyright 2014 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package model + +import ( + "sort" +) + +// SeparatorByte is a byte that cannot occur in valid UTF-8 sequences and is +// used to separate label names, label values, and other strings from each other +// when calculating their combined hash value (aka signature aka fingerprint). +const SeparatorByte byte = 255 + +var ( + // cache the signature of an empty label set. + emptyLabelSignature = hashNew() +) + +// LabelsToSignature returns a quasi-unique signature (i.e., fingerprint) for a +// given label set. (Collisions are possible but unlikely if the number of label +// sets the function is applied to is small.) 
+func LabelsToSignature(labels map[string]string) uint64 { + if len(labels) == 0 { + return emptyLabelSignature + } + + labelNames := make([]string, 0, len(labels)) + for labelName := range labels { + labelNames = append(labelNames, labelName) + } + sort.Strings(labelNames) + + sum := hashNew() + for _, labelName := range labelNames { + sum = hashAdd(sum, labelName) + sum = hashAddByte(sum, SeparatorByte) + sum = hashAdd(sum, labels[labelName]) + sum = hashAddByte(sum, SeparatorByte) + } + return sum +} + +// labelSetToFingerprint works exactly as LabelsToSignature but takes a LabelSet as +// parameter (rather than a label map) and returns a Fingerprint. +func labelSetToFingerprint(ls LabelSet) Fingerprint { + if len(ls) == 0 { + return Fingerprint(emptyLabelSignature) + } + + labelNames := make(LabelNames, 0, len(ls)) + for labelName := range ls { + labelNames = append(labelNames, labelName) + } + sort.Sort(labelNames) + + sum := hashNew() + for _, labelName := range labelNames { + sum = hashAdd(sum, string(labelName)) + sum = hashAddByte(sum, SeparatorByte) + sum = hashAdd(sum, string(ls[labelName])) + sum = hashAddByte(sum, SeparatorByte) + } + return Fingerprint(sum) +} + +// labelSetToFastFingerprint works similar to labelSetToFingerprint but uses a +// faster and less allocation-heavy hash function, which is more susceptible to +// create hash collisions. Therefore, collision detection should be applied. +func labelSetToFastFingerprint(ls LabelSet) Fingerprint { + if len(ls) == 0 { + return Fingerprint(emptyLabelSignature) + } + + var result uint64 + for labelName, labelValue := range ls { + sum := hashNew() + sum = hashAdd(sum, string(labelName)) + sum = hashAddByte(sum, SeparatorByte) + sum = hashAdd(sum, string(labelValue)) + result ^= sum + } + return Fingerprint(result) +} + +// SignatureForLabels works like LabelsToSignature but takes a Metric as +// parameter (rather than a label map) and only includes the labels with the +// specified LabelNames into the signature calculation. The labels passed in +// will be sorted by this function. +func SignatureForLabels(m Metric, labels ...LabelName) uint64 { + if len(labels) == 0 { + return emptyLabelSignature + } + + sort.Sort(LabelNames(labels)) + + sum := hashNew() + for _, label := range labels { + sum = hashAdd(sum, string(label)) + sum = hashAddByte(sum, SeparatorByte) + sum = hashAdd(sum, string(m[label])) + sum = hashAddByte(sum, SeparatorByte) + } + return sum +} + +// SignatureWithoutLabels works like LabelsToSignature but takes a Metric as +// parameter (rather than a label map) and excludes the labels with any of the +// specified LabelNames from the signature calculation. 
+func SignatureWithoutLabels(m Metric, labels map[LabelName]struct{}) uint64 { + if len(m) == 0 { + return emptyLabelSignature + } + + labelNames := make(LabelNames, 0, len(m)) + for labelName := range m { + if _, exclude := labels[labelName]; !exclude { + labelNames = append(labelNames, labelName) + } + } + if len(labelNames) == 0 { + return emptyLabelSignature + } + sort.Sort(labelNames) + + sum := hashNew() + for _, labelName := range labelNames { + sum = hashAdd(sum, string(labelName)) + sum = hashAddByte(sum, SeparatorByte) + sum = hashAdd(sum, string(m[labelName])) + sum = hashAddByte(sum, SeparatorByte) + } + return sum +} diff --git a/vendor/github.com/prometheus/common/model/signature_test.go b/vendor/github.com/prometheus/common/model/signature_test.go new file mode 100644 index 0000000..d59c8a8 --- /dev/null +++ b/vendor/github.com/prometheus/common/model/signature_test.go @@ -0,0 +1,314 @@ +// Copyright 2014 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package model + +import ( + "runtime" + "sync" + "testing" +) + +func TestLabelsToSignature(t *testing.T) { + var scenarios = []struct { + in map[string]string + out uint64 + }{ + { + in: map[string]string{}, + out: 14695981039346656037, + }, + { + in: map[string]string{"name": "garland, briggs", "fear": "love is not enough"}, + out: 5799056148416392346, + }, + } + + for i, scenario := range scenarios { + actual := LabelsToSignature(scenario.in) + + if actual != scenario.out { + t.Errorf("%d. expected %d, got %d", i, scenario.out, actual) + } + } +} + +func TestMetricToFingerprint(t *testing.T) { + var scenarios = []struct { + in LabelSet + out Fingerprint + }{ + { + in: LabelSet{}, + out: 14695981039346656037, + }, + { + in: LabelSet{"name": "garland, briggs", "fear": "love is not enough"}, + out: 5799056148416392346, + }, + } + + for i, scenario := range scenarios { + actual := labelSetToFingerprint(scenario.in) + + if actual != scenario.out { + t.Errorf("%d. expected %d, got %d", i, scenario.out, actual) + } + } +} + +func TestMetricToFastFingerprint(t *testing.T) { + var scenarios = []struct { + in LabelSet + out Fingerprint + }{ + { + in: LabelSet{}, + out: 14695981039346656037, + }, + { + in: LabelSet{"name": "garland, briggs", "fear": "love is not enough"}, + out: 12952432476264840823, + }, + } + + for i, scenario := range scenarios { + actual := labelSetToFastFingerprint(scenario.in) + + if actual != scenario.out { + t.Errorf("%d. 
expected %d, got %d", i, scenario.out, actual) + } + } +} + +func TestSignatureForLabels(t *testing.T) { + var scenarios = []struct { + in Metric + labels LabelNames + out uint64 + }{ + { + in: Metric{}, + labels: nil, + out: 14695981039346656037, + }, + { + in: Metric{}, + labels: LabelNames{"empty"}, + out: 7187873163539638612, + }, + { + in: Metric{"name": "garland, briggs", "fear": "love is not enough"}, + labels: LabelNames{"empty"}, + out: 7187873163539638612, + }, + { + in: Metric{"name": "garland, briggs", "fear": "love is not enough"}, + labels: LabelNames{"fear", "name"}, + out: 5799056148416392346, + }, + { + in: Metric{"name": "garland, briggs", "fear": "love is not enough", "foo": "bar"}, + labels: LabelNames{"fear", "name"}, + out: 5799056148416392346, + }, + { + in: Metric{"name": "garland, briggs", "fear": "love is not enough"}, + labels: LabelNames{}, + out: 14695981039346656037, + }, + { + in: Metric{"name": "garland, briggs", "fear": "love is not enough"}, + labels: nil, + out: 14695981039346656037, + }, + } + + for i, scenario := range scenarios { + actual := SignatureForLabels(scenario.in, scenario.labels...) + + if actual != scenario.out { + t.Errorf("%d. expected %d, got %d", i, scenario.out, actual) + } + } +} + +func TestSignatureWithoutLabels(t *testing.T) { + var scenarios = []struct { + in Metric + labels map[LabelName]struct{} + out uint64 + }{ + { + in: Metric{}, + labels: nil, + out: 14695981039346656037, + }, + { + in: Metric{"name": "garland, briggs", "fear": "love is not enough"}, + labels: map[LabelName]struct{}{"fear": struct{}{}, "name": struct{}{}}, + out: 14695981039346656037, + }, + { + in: Metric{"name": "garland, briggs", "fear": "love is not enough", "foo": "bar"}, + labels: map[LabelName]struct{}{"foo": struct{}{}}, + out: 5799056148416392346, + }, + { + in: Metric{"name": "garland, briggs", "fear": "love is not enough"}, + labels: map[LabelName]struct{}{}, + out: 5799056148416392346, + }, + { + in: Metric{"name": "garland, briggs", "fear": "love is not enough"}, + labels: nil, + out: 5799056148416392346, + }, + } + + for i, scenario := range scenarios { + actual := SignatureWithoutLabels(scenario.in, scenario.labels) + + if actual != scenario.out { + t.Errorf("%d. 
expected %d, got %d", i, scenario.out, actual) + } + } +} + +func benchmarkLabelToSignature(b *testing.B, l map[string]string, e uint64) { + for i := 0; i < b.N; i++ { + if a := LabelsToSignature(l); a != e { + b.Fatalf("expected signature of %d for %s, got %d", e, l, a) + } + } +} + +func BenchmarkLabelToSignatureScalar(b *testing.B) { + benchmarkLabelToSignature(b, nil, 14695981039346656037) +} + +func BenchmarkLabelToSignatureSingle(b *testing.B) { + benchmarkLabelToSignature(b, map[string]string{"first-label": "first-label-value"}, 5146282821936882169) +} + +func BenchmarkLabelToSignatureDouble(b *testing.B) { + benchmarkLabelToSignature(b, map[string]string{"first-label": "first-label-value", "second-label": "second-label-value"}, 3195800080984914717) +} + +func BenchmarkLabelToSignatureTriple(b *testing.B) { + benchmarkLabelToSignature(b, map[string]string{"first-label": "first-label-value", "second-label": "second-label-value", "third-label": "third-label-value"}, 13843036195897128121) +} + +func benchmarkMetricToFingerprint(b *testing.B, ls LabelSet, e Fingerprint) { + for i := 0; i < b.N; i++ { + if a := labelSetToFingerprint(ls); a != e { + b.Fatalf("expected signature of %d for %s, got %d", e, ls, a) + } + } +} + +func BenchmarkMetricToFingerprintScalar(b *testing.B) { + benchmarkMetricToFingerprint(b, nil, 14695981039346656037) +} + +func BenchmarkMetricToFingerprintSingle(b *testing.B) { + benchmarkMetricToFingerprint(b, LabelSet{"first-label": "first-label-value"}, 5146282821936882169) +} + +func BenchmarkMetricToFingerprintDouble(b *testing.B) { + benchmarkMetricToFingerprint(b, LabelSet{"first-label": "first-label-value", "second-label": "second-label-value"}, 3195800080984914717) +} + +func BenchmarkMetricToFingerprintTriple(b *testing.B) { + benchmarkMetricToFingerprint(b, LabelSet{"first-label": "first-label-value", "second-label": "second-label-value", "third-label": "third-label-value"}, 13843036195897128121) +} + +func benchmarkMetricToFastFingerprint(b *testing.B, ls LabelSet, e Fingerprint) { + for i := 0; i < b.N; i++ { + if a := labelSetToFastFingerprint(ls); a != e { + b.Fatalf("expected signature of %d for %s, got %d", e, ls, a) + } + } +} + +func BenchmarkMetricToFastFingerprintScalar(b *testing.B) { + benchmarkMetricToFastFingerprint(b, nil, 14695981039346656037) +} + +func BenchmarkMetricToFastFingerprintSingle(b *testing.B) { + benchmarkMetricToFastFingerprint(b, LabelSet{"first-label": "first-label-value"}, 5147259542624943964) +} + +func BenchmarkMetricToFastFingerprintDouble(b *testing.B) { + benchmarkMetricToFastFingerprint(b, LabelSet{"first-label": "first-label-value", "second-label": "second-label-value"}, 18269973311206963528) +} + +func BenchmarkMetricToFastFingerprintTriple(b *testing.B) { + benchmarkMetricToFastFingerprint(b, LabelSet{"first-label": "first-label-value", "second-label": "second-label-value", "third-label": "third-label-value"}, 15738406913934009676) +} + +func BenchmarkEmptyLabelSignature(b *testing.B) { + input := []map[string]string{nil, {}} + + var ms runtime.MemStats + runtime.ReadMemStats(&ms) + + alloc := ms.Alloc + + for _, labels := range input { + LabelsToSignature(labels) + } + + runtime.ReadMemStats(&ms) + + if got := ms.Alloc; alloc != got { + b.Fatal("expected LabelsToSignature with empty labels not to perform allocations") + } +} + +func benchmarkMetricToFastFingerprintConc(b *testing.B, ls LabelSet, e Fingerprint, concLevel int) { + var start, end sync.WaitGroup + start.Add(1) + end.Add(concLevel) + + for i := 0; i < 
concLevel; i++ { + go func() { + start.Wait() + for j := b.N / concLevel; j >= 0; j-- { + if a := labelSetToFastFingerprint(ls); a != e { + b.Fatalf("expected signature of %d for %s, got %d", e, ls, a) + } + } + end.Done() + }() + } + b.ResetTimer() + start.Done() + end.Wait() +} + +func BenchmarkMetricToFastFingerprintTripleConc1(b *testing.B) { + benchmarkMetricToFastFingerprintConc(b, LabelSet{"first-label": "first-label-value", "second-label": "second-label-value", "third-label": "third-label-value"}, 15738406913934009676, 1) +} + +func BenchmarkMetricToFastFingerprintTripleConc2(b *testing.B) { + benchmarkMetricToFastFingerprintConc(b, LabelSet{"first-label": "first-label-value", "second-label": "second-label-value", "third-label": "third-label-value"}, 15738406913934009676, 2) +} + +func BenchmarkMetricToFastFingerprintTripleConc4(b *testing.B) { + benchmarkMetricToFastFingerprintConc(b, LabelSet{"first-label": "first-label-value", "second-label": "second-label-value", "third-label": "third-label-value"}, 15738406913934009676, 4) +} + +func BenchmarkMetricToFastFingerprintTripleConc8(b *testing.B) { + benchmarkMetricToFastFingerprintConc(b, LabelSet{"first-label": "first-label-value", "second-label": "second-label-value", "third-label": "third-label-value"}, 15738406913934009676, 8) +} diff --git a/vendor/github.com/prometheus/common/model/silence.go b/vendor/github.com/prometheus/common/model/silence.go new file mode 100644 index 0000000..7538e29 --- /dev/null +++ b/vendor/github.com/prometheus/common/model/silence.go @@ -0,0 +1,106 @@ +// Copyright 2015 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package model + +import ( + "encoding/json" + "fmt" + "regexp" + "time" +) + +// Matcher describes a matches the value of a given label. +type Matcher struct { + Name LabelName `json:"name"` + Value string `json:"value"` + IsRegex bool `json:"isRegex"` +} + +func (m *Matcher) UnmarshalJSON(b []byte) error { + type plain Matcher + if err := json.Unmarshal(b, (*plain)(m)); err != nil { + return err + } + + if len(m.Name) == 0 { + return fmt.Errorf("label name in matcher must not be empty") + } + if m.IsRegex { + if _, err := regexp.Compile(m.Value); err != nil { + return err + } + } + return nil +} + +// Validate returns true iff all fields of the matcher have valid values. +func (m *Matcher) Validate() error { + if !m.Name.IsValid() { + return fmt.Errorf("invalid name %q", m.Name) + } + if m.IsRegex { + if _, err := regexp.Compile(m.Value); err != nil { + return fmt.Errorf("invalid regular expression %q", m.Value) + } + } else if !LabelValue(m.Value).IsValid() || len(m.Value) == 0 { + return fmt.Errorf("invalid value %q", m.Value) + } + return nil +} + +// Silence defines the representation of a silence definiton +// in the Prometheus eco-system. 
+type Silence struct { + ID uint64 `json:"id,omitempty"` + + Matchers []*Matcher `json:"matchers"` + + StartsAt time.Time `json:"startsAt"` + EndsAt time.Time `json:"endsAt"` + + CreatedAt time.Time `json:"createdAt,omitempty"` + CreatedBy string `json:"createdBy"` + Comment string `json:"comment,omitempty"` +} + +// Validate returns true iff all fields of the silence have valid values. +func (s *Silence) Validate() error { + if len(s.Matchers) == 0 { + return fmt.Errorf("at least one matcher required") + } + for _, m := range s.Matchers { + if err := m.Validate(); err != nil { + return fmt.Errorf("invalid matcher: %s", err) + } + } + if s.StartsAt.IsZero() { + return fmt.Errorf("start time missing") + } + if s.EndsAt.IsZero() { + return fmt.Errorf("end time missing") + } + if s.EndsAt.Before(s.StartsAt) { + return fmt.Errorf("start time must be before end time") + } + if s.CreatedBy == "" { + return fmt.Errorf("creator information missing") + } + if s.Comment == "" { + return fmt.Errorf("comment missing") + } + if s.CreatedAt.IsZero() { + return fmt.Errorf("creation timestamp missing") + } + return nil +} diff --git a/vendor/github.com/prometheus/common/model/silence_test.go b/vendor/github.com/prometheus/common/model/silence_test.go new file mode 100644 index 0000000..8eaaf07 --- /dev/null +++ b/vendor/github.com/prometheus/common/model/silence_test.go @@ -0,0 +1,228 @@ +// Copyright 2015 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package model + +import ( + "strings" + "testing" + "time" +) + +func TestMatcherValidate(t *testing.T) { + var cases = []struct { + matcher *Matcher + err string + }{ + { + matcher: &Matcher{ + Name: "name", + Value: "value", + }, + }, + { + matcher: &Matcher{ + Name: "name", + Value: "value", + IsRegex: true, + }, + }, + { + matcher: &Matcher{ + Name: "name!", + Value: "value", + }, + err: "invalid name", + }, + { + matcher: &Matcher{ + Name: "", + Value: "value", + }, + err: "invalid name", + }, + { + matcher: &Matcher{ + Name: "name", + Value: "value\xff", + }, + err: "invalid value", + }, + { + matcher: &Matcher{ + Name: "name", + Value: "", + }, + err: "invalid value", + }, + } + + for i, c := range cases { + err := c.matcher.Validate() + if err == nil { + if c.err == "" { + continue + } + t.Errorf("%d. Expected error %q but got none", i, c.err) + continue + } + if c.err == "" && err != nil { + t.Errorf("%d. Expected no error but got %q", i, err) + continue + } + if !strings.Contains(err.Error(), c.err) { + t.Errorf("%d. 
Expected error to contain %q but got %q", i, c.err, err) + } + } +} + +func TestSilenceValidate(t *testing.T) { + ts := time.Now() + + var cases = []struct { + sil *Silence + err string + }{ + { + sil: &Silence{ + Matchers: []*Matcher{ + {Name: "name", Value: "value"}, + }, + StartsAt: ts, + EndsAt: ts, + CreatedAt: ts, + CreatedBy: "name", + Comment: "comment", + }, + }, + { + sil: &Silence{ + Matchers: []*Matcher{ + {Name: "name", Value: "value"}, + {Name: "name", Value: "value"}, + {Name: "name", Value: "value"}, + {Name: "name", Value: "value", IsRegex: true}, + }, + StartsAt: ts, + EndsAt: ts, + CreatedAt: ts, + CreatedBy: "name", + Comment: "comment", + }, + }, + { + sil: &Silence{ + Matchers: []*Matcher{ + {Name: "name", Value: "value"}, + }, + StartsAt: ts, + EndsAt: ts.Add(-1 * time.Minute), + CreatedAt: ts, + CreatedBy: "name", + Comment: "comment", + }, + err: "start time must be before end time", + }, + { + sil: &Silence{ + Matchers: []*Matcher{ + {Name: "name", Value: "value"}, + }, + StartsAt: ts, + CreatedAt: ts, + CreatedBy: "name", + Comment: "comment", + }, + err: "end time missing", + }, + { + sil: &Silence{ + Matchers: []*Matcher{ + {Name: "name", Value: "value"}, + }, + EndsAt: ts, + CreatedAt: ts, + CreatedBy: "name", + Comment: "comment", + }, + err: "start time missing", + }, + { + sil: &Silence{ + Matchers: []*Matcher{ + {Name: "!name", Value: "value"}, + }, + StartsAt: ts, + EndsAt: ts, + CreatedAt: ts, + CreatedBy: "name", + Comment: "comment", + }, + err: "invalid matcher", + }, + { + sil: &Silence{ + Matchers: []*Matcher{ + {Name: "name", Value: "value"}, + }, + StartsAt: ts, + EndsAt: ts, + CreatedAt: ts, + CreatedBy: "name", + }, + err: "comment missing", + }, + { + sil: &Silence{ + Matchers: []*Matcher{ + {Name: "name", Value: "value"}, + }, + StartsAt: ts, + EndsAt: ts, + CreatedBy: "name", + Comment: "comment", + }, + err: "creation timestamp missing", + }, + { + sil: &Silence{ + Matchers: []*Matcher{ + {Name: "name", Value: "value"}, + }, + StartsAt: ts, + EndsAt: ts, + CreatedAt: ts, + Comment: "comment", + }, + err: "creator information missing", + }, + } + + for i, c := range cases { + err := c.sil.Validate() + if err == nil { + if c.err == "" { + continue + } + t.Errorf("%d. Expected error %q but got none", i, c.err) + continue + } + if c.err == "" && err != nil { + t.Errorf("%d. Expected no error but got %q", i, err) + continue + } + if !strings.Contains(err.Error(), c.err) { + t.Errorf("%d. Expected error to contain %q but got %q", i, c.err, err) + } + } +} diff --git a/vendor/github.com/prometheus/common/model/time.go b/vendor/github.com/prometheus/common/model/time.go new file mode 100644 index 0000000..548968a --- /dev/null +++ b/vendor/github.com/prometheus/common/model/time.go @@ -0,0 +1,249 @@ +// Copyright 2013 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
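A minimal usage sketch of the Silence and Matcher types vendored above (not part of the patch hunks themselves): it shows how a caller might build and validate a Silence, assuming the vendored import path; the matcher, timestamps, and creator values below are invented for illustration.

package main

import (
	"fmt"
	"time"

	"github.com/prometheus/common/model"
)

func main() {
	now := time.Now()

	// A silence matching all series whose "job" label matches the regex "node.*".
	sil := model.Silence{
		Matchers: []*model.Matcher{
			{Name: "job", Value: "node.*", IsRegex: true},
		},
		StartsAt:  now,
		EndsAt:    now.Add(2 * time.Hour),
		CreatedAt: now,
		CreatedBy: "ops@example.com",
		Comment:   "planned maintenance",
	}

	// Validate checks matchers, time bounds, creator and comment as implemented above.
	if err := sil.Validate(); err != nil {
		fmt.Println("invalid silence:", err)
		return
	}
	fmt.Println("silence is valid")
}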
+ +package model + +import ( + "fmt" + "math" + "regexp" + "strconv" + "strings" + "time" +) + +const ( + // MinimumTick is the minimum supported time resolution. This has to be + // at least time.Second in order for the code below to work. + minimumTick = time.Millisecond + // second is the Time duration equivalent to one second. + second = int64(time.Second / minimumTick) + // The number of nanoseconds per minimum tick. + nanosPerTick = int64(minimumTick / time.Nanosecond) + + // Earliest is the earliest Time representable. Handy for + // initializing a high watermark. + Earliest = Time(math.MinInt64) + // Latest is the latest Time representable. Handy for initializing + // a low watermark. + Latest = Time(math.MaxInt64) +) + +// Time is the number of milliseconds since the epoch +// (1970-01-01 00:00 UTC) excluding leap seconds. +type Time int64 + +// Interval describes and interval between two timestamps. +type Interval struct { + Start, End Time +} + +// Now returns the current time as a Time. +func Now() Time { + return TimeFromUnixNano(time.Now().UnixNano()) +} + +// TimeFromUnix returns the Time equivalent to the Unix Time t +// provided in seconds. +func TimeFromUnix(t int64) Time { + return Time(t * second) +} + +// TimeFromUnixNano returns the Time equivalent to the Unix Time +// t provided in nanoseconds. +func TimeFromUnixNano(t int64) Time { + return Time(t / nanosPerTick) +} + +// Equal reports whether two Times represent the same instant. +func (t Time) Equal(o Time) bool { + return t == o +} + +// Before reports whether the Time t is before o. +func (t Time) Before(o Time) bool { + return t < o +} + +// After reports whether the Time t is after o. +func (t Time) After(o Time) bool { + return t > o +} + +// Add returns the Time t + d. +func (t Time) Add(d time.Duration) Time { + return t + Time(d/minimumTick) +} + +// Sub returns the Duration t - o. +func (t Time) Sub(o Time) time.Duration { + return time.Duration(t-o) * minimumTick +} + +// Time returns the time.Time representation of t. +func (t Time) Time() time.Time { + return time.Unix(int64(t)/second, (int64(t)%second)*nanosPerTick) +} + +// Unix returns t as a Unix time, the number of seconds elapsed +// since January 1, 1970 UTC. +func (t Time) Unix() int64 { + return int64(t) / second +} + +// UnixNano returns t as a Unix time, the number of nanoseconds elapsed +// since January 1, 1970 UTC. +func (t Time) UnixNano() int64 { + return int64(t) * nanosPerTick +} + +// The number of digits after the dot. +var dotPrecision = int(math.Log10(float64(second))) + +// String returns a string representation of the Time. +func (t Time) String() string { + return strconv.FormatFloat(float64(t)/float64(second), 'f', -1, 64) +} + +// MarshalJSON implements the json.Marshaler interface. +func (t Time) MarshalJSON() ([]byte, error) { + return []byte(t.String()), nil +} + +// UnmarshalJSON implements the json.Unmarshaler interface. 
+func (t *Time) UnmarshalJSON(b []byte) error { + p := strings.Split(string(b), ".") + switch len(p) { + case 1: + v, err := strconv.ParseInt(string(p[0]), 10, 64) + if err != nil { + return err + } + *t = Time(v * second) + + case 2: + v, err := strconv.ParseInt(string(p[0]), 10, 64) + if err != nil { + return err + } + v *= second + + prec := dotPrecision - len(p[1]) + if prec < 0 { + p[1] = p[1][:dotPrecision] + } else if prec > 0 { + p[1] = p[1] + strings.Repeat("0", prec) + } + + va, err := strconv.ParseInt(p[1], 10, 32) + if err != nil { + return err + } + + *t = Time(v + va) + + default: + return fmt.Errorf("invalid time %q", string(b)) + } + return nil +} + +// Duration wraps time.Duration. It is used to parse the custom duration format +// from YAML. +// This type should not propagate beyond the scope of input/output processing. +type Duration time.Duration + +var durationRE = regexp.MustCompile("^([0-9]+)(y|w|d|h|m|s|ms)$") + +// StringToDuration parses a string into a time.Duration, assuming that a year +// always has 365d, a week always has 7d, and a day always has 24h. +func ParseDuration(durationStr string) (Duration, error) { + matches := durationRE.FindStringSubmatch(durationStr) + if len(matches) != 3 { + return 0, fmt.Errorf("not a valid duration string: %q", durationStr) + } + var ( + n, _ = strconv.Atoi(matches[1]) + dur = time.Duration(n) * time.Millisecond + ) + switch unit := matches[2]; unit { + case "y": + dur *= 1000 * 60 * 60 * 24 * 365 + case "w": + dur *= 1000 * 60 * 60 * 24 * 7 + case "d": + dur *= 1000 * 60 * 60 * 24 + case "h": + dur *= 1000 * 60 * 60 + case "m": + dur *= 1000 * 60 + case "s": + dur *= 1000 + case "ms": + // Value already correct + default: + return 0, fmt.Errorf("invalid time unit in duration string: %q", unit) + } + return Duration(dur), nil +} + +func (d Duration) String() string { + var ( + ms = int64(time.Duration(d) / time.Millisecond) + unit = "ms" + ) + factors := map[string]int64{ + "y": 1000 * 60 * 60 * 24 * 365, + "w": 1000 * 60 * 60 * 24 * 7, + "d": 1000 * 60 * 60 * 24, + "h": 1000 * 60 * 60, + "m": 1000 * 60, + "s": 1000, + "ms": 1, + } + + switch int64(0) { + case ms % factors["y"]: + unit = "y" + case ms % factors["w"]: + unit = "w" + case ms % factors["d"]: + unit = "d" + case ms % factors["h"]: + unit = "h" + case ms % factors["m"]: + unit = "m" + case ms % factors["s"]: + unit = "s" + } + return fmt.Sprintf("%v%v", ms/factors[unit], unit) +} + +// MarshalYAML implements the yaml.Marshaler interface. +func (d Duration) MarshalYAML() (interface{}, error) { + return d.String(), nil +} + +// UnmarshalYAML implements the yaml.Unmarshaler interface. +func (d *Duration) UnmarshalYAML(unmarshal func(interface{}) error) error { + var s string + if err := unmarshal(&s); err != nil { + return err + } + dur, err := ParseDuration(s) + if err != nil { + return err + } + *d = dur + return nil +} diff --git a/vendor/github.com/prometheus/common/model/time_test.go b/vendor/github.com/prometheus/common/model/time_test.go new file mode 100644 index 0000000..45ffd87 --- /dev/null +++ b/vendor/github.com/prometheus/common/model/time_test.go @@ -0,0 +1,129 @@ +// Copyright 2013 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package model + +import ( + "testing" + "time" +) + +func TestComparators(t *testing.T) { + t1a := TimeFromUnix(0) + t1b := TimeFromUnix(0) + t2 := TimeFromUnix(2*second - 1) + + if !t1a.Equal(t1b) { + t.Fatalf("Expected %s to be equal to %s", t1a, t1b) + } + if t1a.Equal(t2) { + t.Fatalf("Expected %s to not be equal to %s", t1a, t2) + } + + if !t1a.Before(t2) { + t.Fatalf("Expected %s to be before %s", t1a, t2) + } + if t1a.Before(t1b) { + t.Fatalf("Expected %s to not be before %s", t1a, t1b) + } + + if !t2.After(t1a) { + t.Fatalf("Expected %s to be after %s", t2, t1a) + } + if t1b.After(t1a) { + t.Fatalf("Expected %s to not be after %s", t1b, t1a) + } +} + +func TestTimeConversions(t *testing.T) { + unixSecs := int64(1136239445) + unixNsecs := int64(123456789) + unixNano := unixSecs*1e9 + unixNsecs + + t1 := time.Unix(unixSecs, unixNsecs-unixNsecs%nanosPerTick) + t2 := time.Unix(unixSecs, unixNsecs) + + ts := TimeFromUnixNano(unixNano) + if !ts.Time().Equal(t1) { + t.Fatalf("Expected %s, got %s", t1, ts.Time()) + } + + // Test available precision. + ts = TimeFromUnixNano(t2.UnixNano()) + if !ts.Time().Equal(t1) { + t.Fatalf("Expected %s, got %s", t1, ts.Time()) + } + + if ts.UnixNano() != unixNano-unixNano%nanosPerTick { + t.Fatalf("Expected %d, got %d", unixNano, ts.UnixNano()) + } +} + +func TestDuration(t *testing.T) { + duration := time.Second + time.Minute + time.Hour + goTime := time.Unix(1136239445, 0) + + ts := TimeFromUnix(goTime.Unix()) + if !goTime.Add(duration).Equal(ts.Add(duration).Time()) { + t.Fatalf("Expected %s to be equal to %s", goTime.Add(duration), ts.Add(duration)) + } + + earlier := ts.Add(-duration) + delta := ts.Sub(earlier) + if delta != duration { + t.Fatalf("Expected %s to be equal to %s", delta, duration) + } +} + +func TestParseDuration(t *testing.T) { + var cases = []struct { + in string + out time.Duration + }{ + { + in: "324ms", + out: 324 * time.Millisecond, + }, { + in: "3s", + out: 3 * time.Second, + }, { + in: "5m", + out: 5 * time.Minute, + }, { + in: "1h", + out: time.Hour, + }, { + in: "4d", + out: 4 * 24 * time.Hour, + }, { + in: "3w", + out: 3 * 7 * 24 * time.Hour, + }, { + in: "10y", + out: 10 * 365 * 24 * time.Hour, + }, + } + + for _, c := range cases { + d, err := ParseDuration(c.in) + if err != nil { + t.Errorf("Unexpected error on input %q", c.in) + } + if time.Duration(d) != c.out { + t.Errorf("Expected %v but got %v", c.out, d) + } + if d.String() != c.in { + t.Errorf("Expected duration string %q but got %q", c.in, d.String()) + } + } +} diff --git a/vendor/github.com/prometheus/common/model/value.go b/vendor/github.com/prometheus/common/model/value.go new file mode 100644 index 0000000..c9ed3ff --- /dev/null +++ b/vendor/github.com/prometheus/common/model/value.go @@ -0,0 +1,416 @@ +// Copyright 2013 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package model + +import ( + "encoding/json" + "fmt" + "math" + "sort" + "strconv" + "strings" +) + +var ( + // ZeroSamplePair is the pseudo zero-value of SamplePair used to signal a + // non-existing sample pair. It is a SamplePair with timestamp Earliest and + // value 0.0. Note that the natural zero value of SamplePair has a timestamp + // of 0, which is possible to appear in a real SamplePair and thus not + // suitable to signal a non-existing SamplePair. + ZeroSamplePair = SamplePair{Timestamp: Earliest} + + // ZeroSample is the pseudo zero-value of Sample used to signal a + // non-existing sample. It is a Sample with timestamp Earliest, value 0.0, + // and metric nil. Note that the natural zero value of Sample has a timestamp + // of 0, which is possible to appear in a real Sample and thus not suitable + // to signal a non-existing Sample. + ZeroSample = Sample{Timestamp: Earliest} +) + +// A SampleValue is a representation of a value for a given sample at a given +// time. +type SampleValue float64 + +// MarshalJSON implements json.Marshaler. +func (v SampleValue) MarshalJSON() ([]byte, error) { + return json.Marshal(v.String()) +} + +// UnmarshalJSON implements json.Unmarshaler. +func (v *SampleValue) UnmarshalJSON(b []byte) error { + if len(b) < 2 || b[0] != '"' || b[len(b)-1] != '"' { + return fmt.Errorf("sample value must be a quoted string") + } + f, err := strconv.ParseFloat(string(b[1:len(b)-1]), 64) + if err != nil { + return err + } + *v = SampleValue(f) + return nil +} + +// Equal returns true if the value of v and o is equal or if both are NaN. Note +// that v==o is false if both are NaN. If you want the conventional float +// behavior, use == to compare two SampleValues. +func (v SampleValue) Equal(o SampleValue) bool { + if v == o { + return true + } + return math.IsNaN(float64(v)) && math.IsNaN(float64(o)) +} + +func (v SampleValue) String() string { + return strconv.FormatFloat(float64(v), 'f', -1, 64) +} + +// SamplePair pairs a SampleValue with a Timestamp. +type SamplePair struct { + Timestamp Time + Value SampleValue +} + +// MarshalJSON implements json.Marshaler. +func (s SamplePair) MarshalJSON() ([]byte, error) { + t, err := json.Marshal(s.Timestamp) + if err != nil { + return nil, err + } + v, err := json.Marshal(s.Value) + if err != nil { + return nil, err + } + return []byte(fmt.Sprintf("[%s,%s]", t, v)), nil +} + +// UnmarshalJSON implements json.Unmarshaler. +func (s *SamplePair) UnmarshalJSON(b []byte) error { + v := [...]json.Unmarshaler{&s.Timestamp, &s.Value} + return json.Unmarshal(b, &v) +} + +// Equal returns true if this SamplePair and o have equal Values and equal +// Timestamps. The sematics of Value equality is defined by SampleValue.Equal. +func (s *SamplePair) Equal(o *SamplePair) bool { + return s == o || (s.Value.Equal(o.Value) && s.Timestamp.Equal(o.Timestamp)) +} + +func (s SamplePair) String() string { + return fmt.Sprintf("%s @[%s]", s.Value, s.Timestamp) +} + +// Sample is a sample pair associated with a metric. 
+type Sample struct { + Metric Metric `json:"metric"` + Value SampleValue `json:"value"` + Timestamp Time `json:"timestamp"` +} + +// Equal compares first the metrics, then the timestamp, then the value. The +// sematics of value equality is defined by SampleValue.Equal. +func (s *Sample) Equal(o *Sample) bool { + if s == o { + return true + } + + if !s.Metric.Equal(o.Metric) { + return false + } + if !s.Timestamp.Equal(o.Timestamp) { + return false + } + + return s.Value.Equal(o.Value) +} + +func (s Sample) String() string { + return fmt.Sprintf("%s => %s", s.Metric, SamplePair{ + Timestamp: s.Timestamp, + Value: s.Value, + }) +} + +// MarshalJSON implements json.Marshaler. +func (s Sample) MarshalJSON() ([]byte, error) { + v := struct { + Metric Metric `json:"metric"` + Value SamplePair `json:"value"` + }{ + Metric: s.Metric, + Value: SamplePair{ + Timestamp: s.Timestamp, + Value: s.Value, + }, + } + + return json.Marshal(&v) +} + +// UnmarshalJSON implements json.Unmarshaler. +func (s *Sample) UnmarshalJSON(b []byte) error { + v := struct { + Metric Metric `json:"metric"` + Value SamplePair `json:"value"` + }{ + Metric: s.Metric, + Value: SamplePair{ + Timestamp: s.Timestamp, + Value: s.Value, + }, + } + + if err := json.Unmarshal(b, &v); err != nil { + return err + } + + s.Metric = v.Metric + s.Timestamp = v.Value.Timestamp + s.Value = v.Value.Value + + return nil +} + +// Samples is a sortable Sample slice. It implements sort.Interface. +type Samples []*Sample + +func (s Samples) Len() int { + return len(s) +} + +// Less compares first the metrics, then the timestamp. +func (s Samples) Less(i, j int) bool { + switch { + case s[i].Metric.Before(s[j].Metric): + return true + case s[j].Metric.Before(s[i].Metric): + return false + case s[i].Timestamp.Before(s[j].Timestamp): + return true + default: + return false + } +} + +func (s Samples) Swap(i, j int) { + s[i], s[j] = s[j], s[i] +} + +// Equal compares two sets of samples and returns true if they are equal. +func (s Samples) Equal(o Samples) bool { + if len(s) != len(o) { + return false + } + + for i, sample := range s { + if !sample.Equal(o[i]) { + return false + } + } + return true +} + +// SampleStream is a stream of Values belonging to an attached COWMetric. +type SampleStream struct { + Metric Metric `json:"metric"` + Values []SamplePair `json:"values"` +} + +func (ss SampleStream) String() string { + vals := make([]string, len(ss.Values)) + for i, v := range ss.Values { + vals[i] = v.String() + } + return fmt.Sprintf("%s =>\n%s", ss.Metric, strings.Join(vals, "\n")) +} + +// Value is a generic interface for values resulting from a query evaluation. +type Value interface { + Type() ValueType + String() string +} + +func (Matrix) Type() ValueType { return ValMatrix } +func (Vector) Type() ValueType { return ValVector } +func (*Scalar) Type() ValueType { return ValScalar } +func (*String) Type() ValueType { return ValString } + +type ValueType int + +const ( + ValNone ValueType = iota + ValScalar + ValVector + ValMatrix + ValString +) + +// MarshalJSON implements json.Marshaler. 
+func (et ValueType) MarshalJSON() ([]byte, error) { + return json.Marshal(et.String()) +} + +func (et *ValueType) UnmarshalJSON(b []byte) error { + var s string + if err := json.Unmarshal(b, &s); err != nil { + return err + } + switch s { + case "": + *et = ValNone + case "scalar": + *et = ValScalar + case "vector": + *et = ValVector + case "matrix": + *et = ValMatrix + case "string": + *et = ValString + default: + return fmt.Errorf("unknown value type %q", s) + } + return nil +} + +func (e ValueType) String() string { + switch e { + case ValNone: + return "" + case ValScalar: + return "scalar" + case ValVector: + return "vector" + case ValMatrix: + return "matrix" + case ValString: + return "string" + } + panic("ValueType.String: unhandled value type") +} + +// Scalar is a scalar value evaluated at the set timestamp. +type Scalar struct { + Value SampleValue `json:"value"` + Timestamp Time `json:"timestamp"` +} + +func (s Scalar) String() string { + return fmt.Sprintf("scalar: %v @[%v]", s.Value, s.Timestamp) +} + +// MarshalJSON implements json.Marshaler. +func (s Scalar) MarshalJSON() ([]byte, error) { + v := strconv.FormatFloat(float64(s.Value), 'f', -1, 64) + return json.Marshal([...]interface{}{s.Timestamp, string(v)}) +} + +// UnmarshalJSON implements json.Unmarshaler. +func (s *Scalar) UnmarshalJSON(b []byte) error { + var f string + v := [...]interface{}{&s.Timestamp, &f} + + if err := json.Unmarshal(b, &v); err != nil { + return err + } + + value, err := strconv.ParseFloat(f, 64) + if err != nil { + return fmt.Errorf("error parsing sample value: %s", err) + } + s.Value = SampleValue(value) + return nil +} + +// String is a string value evaluated at the set timestamp. +type String struct { + Value string `json:"value"` + Timestamp Time `json:"timestamp"` +} + +func (s *String) String() string { + return s.Value +} + +// MarshalJSON implements json.Marshaler. +func (s String) MarshalJSON() ([]byte, error) { + return json.Marshal([]interface{}{s.Timestamp, s.Value}) +} + +// UnmarshalJSON implements json.Unmarshaler. +func (s *String) UnmarshalJSON(b []byte) error { + v := [...]interface{}{&s.Timestamp, &s.Value} + return json.Unmarshal(b, &v) +} + +// Vector is basically only an alias for Samples, but the +// contract is that in a Vector, all Samples have the same timestamp. +type Vector []*Sample + +func (vec Vector) String() string { + entries := make([]string, len(vec)) + for i, s := range vec { + entries[i] = s.String() + } + return strings.Join(entries, "\n") +} + +func (vec Vector) Len() int { return len(vec) } +func (vec Vector) Swap(i, j int) { vec[i], vec[j] = vec[j], vec[i] } + +// Less compares first the metrics, then the timestamp. +func (vec Vector) Less(i, j int) bool { + switch { + case vec[i].Metric.Before(vec[j].Metric): + return true + case vec[j].Metric.Before(vec[i].Metric): + return false + case vec[i].Timestamp.Before(vec[j].Timestamp): + return true + default: + return false + } +} + +// Equal compares two sets of samples and returns true if they are equal. +func (vec Vector) Equal(o Vector) bool { + if len(vec) != len(o) { + return false + } + + for i, sample := range vec { + if !sample.Equal(o[i]) { + return false + } + } + return true +} + +// Matrix is a list of time series. 
+type Matrix []*SampleStream + +func (m Matrix) Len() int { return len(m) } +func (m Matrix) Less(i, j int) bool { return m[i].Metric.Before(m[j].Metric) } +func (m Matrix) Swap(i, j int) { m[i], m[j] = m[j], m[i] } + +func (mat Matrix) String() string { + matCp := make(Matrix, len(mat)) + copy(matCp, mat) + sort.Sort(matCp) + + strs := make([]string, len(matCp)) + + for i, ss := range matCp { + strs[i] = ss.String() + } + + return strings.Join(strs, "\n") +} diff --git a/vendor/github.com/prometheus/common/model/value_test.go b/vendor/github.com/prometheus/common/model/value_test.go new file mode 100644 index 0000000..b97dcf8 --- /dev/null +++ b/vendor/github.com/prometheus/common/model/value_test.go @@ -0,0 +1,468 @@ +// Copyright 2013 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package model + +import ( + "encoding/json" + "math" + "reflect" + "sort" + "testing" +) + +func TestEqualValues(t *testing.T) { + tests := map[string]struct { + in1, in2 SampleValue + want bool + }{ + "equal floats": { + in1: 3.14, + in2: 3.14, + want: true, + }, + "unequal floats": { + in1: 3.14, + in2: 3.1415, + want: false, + }, + "positive inifinities": { + in1: SampleValue(math.Inf(+1)), + in2: SampleValue(math.Inf(+1)), + want: true, + }, + "negative inifinities": { + in1: SampleValue(math.Inf(-1)), + in2: SampleValue(math.Inf(-1)), + want: true, + }, + "different inifinities": { + in1: SampleValue(math.Inf(+1)), + in2: SampleValue(math.Inf(-1)), + want: false, + }, + "number and infinity": { + in1: 42, + in2: SampleValue(math.Inf(+1)), + want: false, + }, + "number and NaN": { + in1: 42, + in2: SampleValue(math.NaN()), + want: false, + }, + "NaNs": { + in1: SampleValue(math.NaN()), + in2: SampleValue(math.NaN()), + want: true, // !!! 
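+ // Unlike the == operator, SampleValue.Equal (defined in value.go above) reports two NaNs as equal, hence want: true.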
+ }, + } + + for name, test := range tests { + got := test.in1.Equal(test.in2) + if got != test.want { + t.Errorf("Comparing %s, %f and %f: got %t, want %t", name, test.in1, test.in2, got, test.want) + } + } +} + +func TestEqualSamples(t *testing.T) { + testSample := &Sample{} + + tests := map[string]struct { + in1, in2 *Sample + want bool + }{ + "equal pointers": { + in1: testSample, + in2: testSample, + want: true, + }, + "different metrics": { + in1: &Sample{Metric: Metric{"foo": "bar"}}, + in2: &Sample{Metric: Metric{"foo": "biz"}}, + want: false, + }, + "different timestamp": { + in1: &Sample{Timestamp: 0}, + in2: &Sample{Timestamp: 1}, + want: false, + }, + "different value": { + in1: &Sample{Value: 0}, + in2: &Sample{Value: 1}, + want: false, + }, + "equal samples": { + in1: &Sample{ + Metric: Metric{"foo": "bar"}, + Timestamp: 0, + Value: 1, + }, + in2: &Sample{ + Metric: Metric{"foo": "bar"}, + Timestamp: 0, + Value: 1, + }, + want: true, + }, + } + + for name, test := range tests { + got := test.in1.Equal(test.in2) + if got != test.want { + t.Errorf("Comparing %s, %v and %v: got %t, want %t", name, test.in1, test.in2, got, test.want) + } + } + +} + +func TestSamplePairJSON(t *testing.T) { + input := []struct { + plain string + value SamplePair + }{ + { + plain: `[1234.567,"123.1"]`, + value: SamplePair{ + Value: 123.1, + Timestamp: 1234567, + }, + }, + } + + for _, test := range input { + b, err := json.Marshal(test.value) + if err != nil { + t.Error(err) + continue + } + + if string(b) != test.plain { + t.Errorf("encoding error: expected %q, got %q", test.plain, b) + continue + } + + var sp SamplePair + err = json.Unmarshal(b, &sp) + if err != nil { + t.Error(err) + continue + } + + if sp != test.value { + t.Errorf("decoding error: expected %v, got %v", test.value, sp) + } + } +} + +func TestSampleJSON(t *testing.T) { + input := []struct { + plain string + value Sample + }{ + { + plain: `{"metric":{"__name__":"test_metric"},"value":[1234.567,"123.1"]}`, + value: Sample{ + Metric: Metric{ + MetricNameLabel: "test_metric", + }, + Value: 123.1, + Timestamp: 1234567, + }, + }, + } + + for _, test := range input { + b, err := json.Marshal(test.value) + if err != nil { + t.Error(err) + continue + } + + if string(b) != test.plain { + t.Errorf("encoding error: expected %q, got %q", test.plain, b) + continue + } + + var sv Sample + err = json.Unmarshal(b, &sv) + if err != nil { + t.Error(err) + continue + } + + if !reflect.DeepEqual(sv, test.value) { + t.Errorf("decoding error: expected %v, got %v", test.value, sv) + } + } +} + +func TestVectorJSON(t *testing.T) { + input := []struct { + plain string + value Vector + }{ + { + plain: `[]`, + value: Vector{}, + }, + { + plain: `[{"metric":{"__name__":"test_metric"},"value":[1234.567,"123.1"]}]`, + value: Vector{&Sample{ + Metric: Metric{ + MetricNameLabel: "test_metric", + }, + Value: 123.1, + Timestamp: 1234567, + }}, + }, + { + plain: `[{"metric":{"__name__":"test_metric"},"value":[1234.567,"123.1"]},{"metric":{"foo":"bar"},"value":[1.234,"+Inf"]}]`, + value: Vector{ + &Sample{ + Metric: Metric{ + MetricNameLabel: "test_metric", + }, + Value: 123.1, + Timestamp: 1234567, + }, + &Sample{ + Metric: Metric{ + "foo": "bar", + }, + Value: SampleValue(math.Inf(1)), + Timestamp: 1234, + }, + }, + }, + } + + for _, test := range input { + b, err := json.Marshal(test.value) + if err != nil { + t.Error(err) + continue + } + + if string(b) != test.plain { + t.Errorf("encoding error: expected %q, got %q", test.plain, b) + continue + } + + var vec 
Vector + err = json.Unmarshal(b, &vec) + if err != nil { + t.Error(err) + continue + } + + if !reflect.DeepEqual(vec, test.value) { + t.Errorf("decoding error: expected %v, got %v", test.value, vec) + } + } +} + +func TestScalarJSON(t *testing.T) { + input := []struct { + plain string + value Scalar + }{ + { + plain: `[123.456,"456"]`, + value: Scalar{ + Timestamp: 123456, + Value: 456, + }, + }, + { + plain: `[123123.456,"+Inf"]`, + value: Scalar{ + Timestamp: 123123456, + Value: SampleValue(math.Inf(1)), + }, + }, + { + plain: `[123123.456,"-Inf"]`, + value: Scalar{ + Timestamp: 123123456, + Value: SampleValue(math.Inf(-1)), + }, + }, + } + + for _, test := range input { + b, err := json.Marshal(test.value) + if err != nil { + t.Error(err) + continue + } + + if string(b) != test.plain { + t.Errorf("encoding error: expected %q, got %q", test.plain, b) + continue + } + + var sv Scalar + err = json.Unmarshal(b, &sv) + if err != nil { + t.Error(err) + continue + } + + if sv != test.value { + t.Errorf("decoding error: expected %v, got %v", test.value, sv) + } + } +} + +func TestStringJSON(t *testing.T) { + input := []struct { + plain string + value String + }{ + { + plain: `[123.456,"test"]`, + value: String{ + Timestamp: 123456, + Value: "test", + }, + }, + { + plain: `[123123.456,"台北"]`, + value: String{ + Timestamp: 123123456, + Value: "台北", + }, + }, + } + + for _, test := range input { + b, err := json.Marshal(test.value) + if err != nil { + t.Error(err) + continue + } + + if string(b) != test.plain { + t.Errorf("encoding error: expected %q, got %q", test.plain, b) + continue + } + + var sv String + err = json.Unmarshal(b, &sv) + if err != nil { + t.Error(err) + continue + } + + if sv != test.value { + t.Errorf("decoding error: expected %v, got %v", test.value, sv) + } + } +} + +func TestVectorSort(t *testing.T) { + input := Vector{ + &Sample{ + Metric: Metric{ + MetricNameLabel: "A", + }, + Timestamp: 1, + }, + &Sample{ + Metric: Metric{ + MetricNameLabel: "A", + }, + Timestamp: 2, + }, + &Sample{ + Metric: Metric{ + MetricNameLabel: "C", + }, + Timestamp: 1, + }, + &Sample{ + Metric: Metric{ + MetricNameLabel: "C", + }, + Timestamp: 2, + }, + &Sample{ + Metric: Metric{ + MetricNameLabel: "B", + }, + Timestamp: 1, + }, + &Sample{ + Metric: Metric{ + MetricNameLabel: "B", + }, + Timestamp: 2, + }, + } + + expected := Vector{ + &Sample{ + Metric: Metric{ + MetricNameLabel: "A", + }, + Timestamp: 1, + }, + &Sample{ + Metric: Metric{ + MetricNameLabel: "A", + }, + Timestamp: 2, + }, + &Sample{ + Metric: Metric{ + MetricNameLabel: "B", + }, + Timestamp: 1, + }, + &Sample{ + Metric: Metric{ + MetricNameLabel: "B", + }, + Timestamp: 2, + }, + &Sample{ + Metric: Metric{ + MetricNameLabel: "C", + }, + Timestamp: 1, + }, + &Sample{ + Metric: Metric{ + MetricNameLabel: "C", + }, + Timestamp: 2, + }, + } + + sort.Sort(input) + + for i, actual := range input { + actualFp := actual.Metric.Fingerprint() + expectedFp := expected[i].Metric.Fingerprint() + + if actualFp != expectedFp { + t.Fatalf("%d. Incorrect fingerprint. Got %s; want %s", i, actualFp.String(), expectedFp.String()) + } + + if actual.Timestamp != expected[i].Timestamp { + t.Fatalf("%d. Incorrect timestamp. 
Got %s; want %s", i, actual.Timestamp, expected[i].Timestamp) + } + } +} diff --git a/vendor/github.com/prometheus/common/route/route.go b/vendor/github.com/prometheus/common/route/route.go new file mode 100644 index 0000000..bb46881 --- /dev/null +++ b/vendor/github.com/prometheus/common/route/route.go @@ -0,0 +1,100 @@ +package route + +import ( + "net/http" + + "github.com/julienschmidt/httprouter" + "golang.org/x/net/context" +) + +type param string + +// Param returns param p for the context. +func Param(ctx context.Context, p string) string { + return ctx.Value(param(p)).(string) +} + +// WithParam returns a new context with param p set to v. +func WithParam(ctx context.Context, p, v string) context.Context { + return context.WithValue(ctx, param(p), v) +} + +// Router wraps httprouter.Router and adds support for prefixed sub-routers +// and per-request context injections. +type Router struct { + rtr *httprouter.Router + prefix string +} + +// New returns a new Router. +func New() *Router { + return &Router{ + rtr: httprouter.New(), + } +} + +// WithPrefix returns a router that prefixes all registered routes with prefix. +func (r *Router) WithPrefix(prefix string) *Router { + return &Router{rtr: r.rtr, prefix: r.prefix + prefix} +} + +// handle turns a HandlerFunc into an httprouter.Handle. +func (r *Router) handle(h http.HandlerFunc) httprouter.Handle { + return func(w http.ResponseWriter, req *http.Request, params httprouter.Params) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + + for _, p := range params { + ctx = context.WithValue(ctx, param(p.Key), p.Value) + } + h(w, req.WithContext(ctx)) + } +} + +// Get registers a new GET route. +func (r *Router) Get(path string, h http.HandlerFunc) { + r.rtr.GET(r.prefix+path, r.handle(h)) +} + +// Options registers a new OPTIONS route. +func (r *Router) Options(path string, h http.HandlerFunc) { + r.rtr.OPTIONS(r.prefix+path, r.handle(h)) +} + +// Del registers a new DELETE route. +func (r *Router) Del(path string, h http.HandlerFunc) { + r.rtr.DELETE(r.prefix+path, r.handle(h)) +} + +// Put registers a new PUT route. +func (r *Router) Put(path string, h http.HandlerFunc) { + r.rtr.PUT(r.prefix+path, r.handle(h)) +} + +// Post registers a new POST route. +func (r *Router) Post(path string, h http.HandlerFunc) { + r.rtr.POST(r.prefix+path, r.handle(h)) +} + +// Redirect takes an absolute path and sends an internal HTTP redirect for it, +// prefixed by the router's path prefix. Note that this method does not include +// functionality for handling relative paths or full URL redirects. +func (r *Router) Redirect(w http.ResponseWriter, req *http.Request, path string, code int) { + http.Redirect(w, req, r.prefix+path, code) +} + +// ServeHTTP implements http.Handler. +func (r *Router) ServeHTTP(w http.ResponseWriter, req *http.Request) { + r.rtr.ServeHTTP(w, req) +} + +// FileServe returns a new http.HandlerFunc that serves files from dir. +// Using routes must provide the *filepath parameter. 
+func FileServe(dir string) http.HandlerFunc { + fs := http.FileServer(http.Dir(dir)) + + return func(w http.ResponseWriter, r *http.Request) { + r.URL.Path = Param(r.Context(), "filepath") + fs.ServeHTTP(w, r) + } +} diff --git a/vendor/github.com/prometheus/common/route/route_test.go b/vendor/github.com/prometheus/common/route/route_test.go new file mode 100644 index 0000000..a9bb209 --- /dev/null +++ b/vendor/github.com/prometheus/common/route/route_test.go @@ -0,0 +1,44 @@ +package route + +import ( + "net/http" + "net/http/httptest" + "testing" +) + +func TestRedirect(t *testing.T) { + router := New().WithPrefix("/test/prefix") + w := httptest.NewRecorder() + r, err := http.NewRequest("GET", "http://localhost:9090/foo", nil) + if err != nil { + t.Fatalf("Error building test request: %s", err) + } + + router.Redirect(w, r, "/some/endpoint", http.StatusFound) + if w.Code != http.StatusFound { + t.Fatalf("Unexpected redirect status code: got %d, want %d", w.Code, http.StatusFound) + } + + want := "/test/prefix/some/endpoint" + got := w.Header()["Location"][0] + if want != got { + t.Fatalf("Unexpected redirect location: got %s, want %s", got, want) + } +} + +func TestContext(t *testing.T) { + router := New() + router.Get("/test/:foo/", func(w http.ResponseWriter, r *http.Request) { + want := "bar" + got := Param(r.Context(), "foo") + if want != got { + t.Fatalf("Unexpected context value: want %q, got %q", want, got) + } + }) + + r, err := http.NewRequest("GET", "http://localhost:9090/test/bar/", nil) + if err != nil { + t.Fatalf("Error building test request: %s", err) + } + router.ServeHTTP(nil, r) +} diff --git a/vendor/github.com/prometheus/common/version/info.go b/vendor/github.com/prometheus/common/version/info.go new file mode 100644 index 0000000..84489a5 --- /dev/null +++ b/vendor/github.com/prometheus/common/version/info.go @@ -0,0 +1,89 @@ +// Copyright 2016 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package version + +import ( + "bytes" + "fmt" + "runtime" + "strings" + "text/template" + + "github.com/prometheus/client_golang/prometheus" +) + +// Build information. Populated at build-time. +var ( + Version string + Revision string + Branch string + BuildUser string + BuildDate string + GoVersion = runtime.Version() +) + +// NewCollector returns a collector which exports metrics about current version information. +func NewCollector(program string) *prometheus.GaugeVec { + buildInfo := prometheus.NewGaugeVec( + prometheus.GaugeOpts{ + Namespace: program, + Name: "build_info", + Help: fmt.Sprintf( + "A metric with a constant '1' value labeled by version, revision, branch, and goversion from which %s was built.", + program, + ), + }, + []string{"version", "revision", "branch", "goversion"}, + ) + buildInfo.WithLabelValues(Version, Revision, Branch, GoVersion).Set(1) + return buildInfo +} + +// versionInfoTmpl contains the template used by Info. 
+var versionInfoTmpl = ` +{{.program}}, version {{.version}} (branch: {{.branch}}, revision: {{.revision}}) + build user: {{.buildUser}} + build date: {{.buildDate}} + go version: {{.goVersion}} +` + +// Print returns version information. +func Print(program string) string { + m := map[string]string{ + "program": program, + "version": Version, + "revision": Revision, + "branch": Branch, + "buildUser": BuildUser, + "buildDate": BuildDate, + "goVersion": GoVersion, + } + t := template.Must(template.New("version").Parse(versionInfoTmpl)) + + var buf bytes.Buffer + if err := t.ExecuteTemplate(&buf, "version", m); err != nil { + panic(err) + } + return strings.TrimSpace(buf.String()) +} + +// Info returns version, branch and revision information. +func Info() string { + return fmt.Sprintf("(version=%s, branch=%s, revision=%s)", Version, Branch, Revision) +} + +// BuildContext returns goVersion, buildUser and buildDate information. +func BuildContext() string { + return fmt.Sprintf("(go=%s, user=%s, date=%s)", GoVersion, BuildUser, BuildDate) +}
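A minimal sketch of wiring the version package above, together with the previously vendored client_golang promhttp handler, into an application; the program name "example_app", the listen address, and the ldflags line are assumptions made for illustration, not part of the vendored code.

package main

import (
	"fmt"
	"log"
	"net/http"

	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/client_golang/prometheus/promhttp"
	"github.com/prometheus/common/version"
)

func main() {
	// Version, Revision, Branch, BuildUser and BuildDate are normally injected at
	// build time, e.g. via
	//   go build -ldflags "-X github.com/prometheus/common/version.Version=0.1.0"
	// (assumed here); GoVersion is filled in automatically.
	prometheus.MustRegister(version.NewCollector("example_app"))

	// Print renders the versionInfoTmpl template; Info and BuildContext give the
	// short one-line forms.
	fmt.Println(version.Print("example_app"))
	fmt.Println("Build context", version.BuildContext())

	// Expose the example_app_build_info gauge alongside the other metrics.
	http.Handle("/metrics", promhttp.Handler())
	log.Fatal(http.ListenAndServe(":8080", nil))
}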