diff --git a/.circleci/config.yml b/.circleci/config.yml index 8210552866..d2d1da6b10 100644 --- a/.circleci/config.yml +++ b/.circleci/config.yml @@ -11,7 +11,6 @@ executors: jobs: test: executor: golang - resource_class: large steps: - checkout diff --git a/CHANGELOG.md b/CHANGELOG.md index 2cdd525189..63f299cea2 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,6 +1,5 @@ -## 2.6.0-rc.0 / 2018-12-05 +## 2.6.0 / 2018-12-17 -* [CHANGE] Include default flags to the container's entrypoint. #4796 * [CHANGE] Promtool: Remove the `update` command. #3839 * [FEATURE] Add JSON log format via the `--log.format` flag. #4876 * [FEATURE] API: Add /api/v1/labels endpoint to get all label names. #4835 @@ -8,8 +7,8 @@ * [ENHANCEMENT] Add `prometheus_tsdb_lowest_timestamp_seconds`, `prometheus_tsdb_head_min_time_seconds` and `prometheus_tsdb_head_max_time_seconds` metrics. #4888 * [ENHANCEMENT] Add `rule_group_last_evaluation_timestamp_seconds` metric. #4852 * [ENHANCEMENT] Add `prometheus_template_text_expansion_failures_total` and `prometheus_template_text_expansions_total` metrics. #4747 +* [ENHANCEMENT] Remove default flags from the container's entrypoint. #4976 * [ENHANCEMENT] Set consistent User-Agent header in outgoing requests. #4891 -* [ENHANCEMENT] Azure SD: Add the machine's power state to the discovery metadata. #4908 * [ENHANCEMENT] Azure SD: Error out at load time when authentication parameters are missing. #4907 * [ENHANCEMENT] EC2 SD: Add the machine's private DNS name to the discovery metadata. #4693 * [ENHANCEMENT] EC2 SD: Add the operating system's platform to the discovery metadata. #4663 @@ -42,6 +41,7 @@ * [BUGFIX] Scrape: Scrape targets at fixed intervals even after Prometheus restarts. #4926 * [BUGFIX] TSDB: Support restored snapshots including the head properly. #4953 * [BUGFIX] TSDB: Repair WAL when the last record in a segment is torn. #4953 +* [BUGFIX] TSDB: Fix unclosed file readers on Windows systems. #4997 * [BUGFIX] Web: Avoid proxy to connect to the local gRPC server. 
#4572 ## 2.5.0 / 2018-11-06 diff --git a/Dockerfile b/Dockerfile index b04e660932..c3ff6454e0 100644 --- a/Dockerfile +++ b/Dockerfile @@ -7,16 +7,13 @@ COPY documentation/examples/prometheus.yml /etc/prometheus/prometheus.yml COPY console_libraries/ /usr/share/prometheus/console_libraries/ COPY consoles/ /usr/share/prometheus/consoles/ -RUN ln -s /usr/share/prometheus/console_libraries /usr/share/prometheus/consoles/ /etc/prometheus/ -RUN mkdir -p /prometheus && \ - chown -R nobody:nogroup etc/prometheus /prometheus +RUN ln -s /usr/share/prometheus/console_libraries /usr/share/prometheus/consoles/ /etc/prometheus/ && \ + mkdir -p /prometheus && \ + chown -R nobody:nogroup etc/prometheus /prometheus && \ + ln -s /prometheus /etc/prometheus/data USER nobody EXPOSE 9090 VOLUME [ "/prometheus" ] -WORKDIR /prometheus -ENTRYPOINT [ "/bin/prometheus", \ - "--storage.tsdb.path=/prometheus", \ - "--web.console.libraries=/etc/prometheus/console_libraries", \ - "--web.console.templates=/etc/prometheus/consoles", \ - "--config.file=/etc/prometheus/prometheus.yml" ] +WORKDIR /etc/prometheus +ENTRYPOINT [ "/bin/prometheus" ] diff --git a/Makefile.common b/Makefile.common index ca4e01ddf5..a0a808a3e7 100644 --- a/Makefile.common +++ b/Makefile.common @@ -29,6 +29,8 @@ GO ?= go GOFMT ?= $(GO)fmt FIRST_GOPATH := $(firstword $(subst :, ,$(shell $(GO) env GOPATH))) GOOPTS ?= +GOHOSTOS ?= $(shell $(GO) env GOHOSTOS) +GOHOSTARCH ?= $(shell $(GO) env GOHOSTARCH) GO_VERSION ?= $(shell $(GO) version) GO_VERSION_NUMBER ?= $(word 3, $(GO_VERSION)) @@ -62,8 +64,12 @@ PROMU := $(FIRST_GOPATH)/bin/promu STATICCHECK := $(FIRST_GOPATH)/bin/staticcheck pkgs = ./... -GO_VERSION ?= $(shell $(GO) version) -GO_BUILD_PLATFORM ?= $(subst /,-,$(lastword $(GO_VERSION))) +ifeq (arm, $(GOHOSTARCH)) + GOHOSTARM ?= $(shell GOARM= $(GO) env GOARM) + GO_BUILD_PLATFORM ?= $(GOHOSTOS)-$(GOHOSTARCH)v$(GOHOSTARM) +else + GO_BUILD_PLATFORM ?= $(GOHOSTOS)-$(GOHOSTARCH) +endif PROMU_VERSION ?= 0.2.0 PROMU_URL := https://github.com/prometheus/promu/releases/download/v$(PROMU_VERSION)/promu-$(PROMU_VERSION).$(GO_BUILD_PLATFORM).tar.gz @@ -73,6 +79,13 @@ BIN_DIR ?= $(shell pwd) DOCKER_IMAGE_TAG ?= $(subst /,-,$(shell git rev-parse --abbrev-ref HEAD)) DOCKER_REPO ?= prom +ifeq ($(GOHOSTARCH),amd64) + ifeq ($(GOHOSTOS),$(filter $(GOHOSTOS),linux freebsd darwin windows)) + # Only supported on amd64 + test-flags := -race + endif +endif + .PHONY: all all: precheck style staticcheck unused build test @@ -110,7 +123,7 @@ common-test-short: .PHONY: common-test common-test: @echo ">> running all tests" - GO111MODULE=$(GO111MODULE) $(GO) test -race $(GOOPTS) $(pkgs) + GO111MODULE=$(GO111MODULE) $(GO) test $(test-flags) $(GOOPTS) $(pkgs) .PHONY: common-format common-format: diff --git a/RELEASE.md b/RELEASE.md index 0ff5131a78..690a663a75 100644 --- a/RELEASE.md +++ b/RELEASE.md @@ -1,12 +1,12 @@ # Releases -This page describes the release process and the currently planned schedule for upcoming releases as well as the respective release schepherds. Release shepards are chosen on a voluntary basis. +This page describes the release process and the currently planned schedule for upcoming releases as well as the respective release shepherd. Release shepherds are chosen on a voluntary basis. ## Release schedule Release cadence of first pre-releases being cut is 6 weeks.
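An aside on the Makefile.common hunk above, not part of the diff itself: GO_BUILD_PLATFORM is now derived from the host's `go env` values (with a GOARM suffix on ARM) so the downloaded promu tarball matches the build host. Below is a minimal Go sketch of the same derivation; the function name `goBuildPlatform` and the shell-out to `go env GOARM` mirror the Makefile logic and are illustrative only.

```go
package main

import (
	"fmt"
	"os/exec"
	"runtime"
	"strings"
)

// goBuildPlatform mimics the Makefile: "<GOHOSTOS>-<GOHOSTARCH>", with a
// "v<GOARM>" suffix on ARM hosts, e.g. "linux-amd64" or "linux-armv7".
func goBuildPlatform() string {
	if runtime.GOARCH == "arm" {
		out, err := exec.Command("go", "env", "GOARM").Output()
		if err == nil {
			return fmt.Sprintf("%s-%sv%s", runtime.GOOS, runtime.GOARCH, strings.TrimSpace(string(out)))
		}
	}
	return fmt.Sprintf("%s-%s", runtime.GOOS, runtime.GOARCH)
}

func main() {
	fmt.Println(goBuildPlatform())
}
```

The related test-flags block enables `-race` only on amd64 hosts running linux, freebsd, darwin or windows, since the race detector is unsupported elsewhere.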
-| release series | date of first pre-release (year-month-day) | release shepard | +| release series | date of first pre-release (year-month-day) | release shepherd | |----------------|--------------------------------------------|---------------------------------------------| | v2.4 | 2018-09-06 | Goutham Veeramachaneni (GitHub: @gouthamve) | | v2.5 | 2018-10-24 | Frederic Branczyk (GitHub: @brancz) | @@ -15,12 +15,12 @@ Release cadence of first pre-releases being cut is 6 weeks. If you are interested in volunteering please create a pull request against the [prometheus/prometheus](https://github.com/prometheus/prometheus) repository and propose yourself for the release series of your choice. -## Release shepard responsibilities +## Release shepherd responsibilities -The release shepard is responsible for the entire release series of a minor release, meaning all pre- and patch releases of a minor release. The process starts with the initial pre-release. +The release shepherd is responsible for the entire release series of a minor release, meaning all pre- and patch releases of a minor release. The process starts with the initial pre-release. * The first pre-release is scheduled according to the above schedule. -* With the pre-release the release shepard is responsible for running and monitoring a benchmark run of the pre-release for 3 days, after which, if successful, the pre-release is promoted to a stable release. +* With the pre-release the release shepherd is responsible for running and monitoring a benchmark run of the pre-release for 3 days, after which, if successful, the pre-release is promoted to a stable release. * Once a pre-release has been released, the `master` branch of the repository is frozen for any feature work, only critical bug fix work concerning the minor release is merged. * Pre-releases are done from `master`, after pre-releases are promoted to the stable release a `release-major.minor` branch is created. @@ -36,7 +36,7 @@ We use [Semantic Versioning](http://semver.org/). We maintain a separate branch for each minor release, named `release-.`, e.g. `release-1.1`, `release-2.0`. -The usual flow is to merge new features and changes into the master branch and to merge bug fixes into the latest release branch. Bug fixes are then merged into master from the latest release branch. The master branch should always contain all commits from the latest release branch. Whether merging master back into a release branch makes more sense is left up to the shepard's judgement. +The usual flow is to merge new features and changes into the master branch and to merge bug fixes into the latest release branch. Bug fixes are then merged into master from the latest release branch. The master branch should always contain all commits from the latest release branch. Whether merging master back into a release branch makes more sense is left up to the shepherd's judgement. If a bug fix got accidentally merged into master, cherry-pick commits have to be created in the latest release branch, which then have to be merged back into master. Try to avoid that situation. 
diff --git a/VERSION b/VERSION index 27a7c935f5..e70b4523ae 100644 --- a/VERSION +++ b/VERSION @@ -1 +1 @@ -2.6.0-rc.0 +2.6.0 diff --git a/cmd/prometheus/main.go b/cmd/prometheus/main.go index a10744db90..30cf95088f 100644 --- a/cmd/prometheus/main.go +++ b/cmd/prometheus/main.go @@ -256,7 +256,7 @@ func main() { ctxWeb, cancelWeb = context.WithCancel(context.Background()) ctxRule = context.Background() - notifier = notifier.NewManager(&cfg.notifier, log.With(logger, "component", "notifier")) + notifierManager = notifier.NewManager(&cfg.notifier, log.With(logger, "component", "notifier")) ctxScrape, cancelScrape = context.WithCancel(context.Background()) discoveryManagerScrape = discovery.NewManager(ctxScrape, log.With(logger, "component", "discovery manager scrape"), discovery.Name("scrape")) @@ -279,7 +279,7 @@ func main() { Appendable: fanoutStorage, TSDB: localStorage, QueryFunc: rules.EngineQueryFunc(queryEngine, fanoutStorage), - NotifyFunc: sendAlerts(notifier, cfg.web.ExternalURL.String()), + NotifyFunc: sendAlerts(notifierManager, cfg.web.ExternalURL.String()), Context: ctxRule, ExternalURL: cfg.web.ExternalURL, Registerer: prometheus.DefaultRegisterer, @@ -296,7 +296,7 @@ func main() { cfg.web.QueryEngine = queryEngine cfg.web.ScrapeManager = scrapeManager cfg.web.RuleManager = ruleManager - cfg.web.Notifier = notifier + cfg.web.Notifier = notifierManager cfg.web.Version = &web.PrometheusVersion{ Version: version.Version, @@ -332,7 +332,7 @@ func main() { webHandler.ApplyConfig, // The Scrape and notifier managers need to reload before the Discovery manager as // they need to read the most updated config when receiving the new targets list. - notifier.ApplyConfig, + notifierManager.ApplyConfig, scrapeManager.ApplyConfig, func(cfg *config.Config) error { c := make(map[string]sd_config.ServiceDiscoveryConfig) @@ -611,12 +611,12 @@ func main() { // so we wait until the config is fully loaded. <-reloadReady.C - notifier.Run(discoveryManagerNotify.SyncCh()) + notifierManager.Run(discoveryManagerNotify.SyncCh()) level.Info(logger).Log("msg", "Notifier manager stopped") return nil }, func(err error) { - notifier.Stop() + notifierManager.Stop() }, ) } @@ -696,8 +696,12 @@ func computeExternalURL(u, listenAddr string) (*url.URL, error) { return eu, nil } +type sender interface { + Send(alerts ...*notifier.Alert) +} + // sendAlerts implements the rules.NotifyFunc for a Notifier. -func sendAlerts(n *notifier.Manager, externalURL string) rules.NotifyFunc { +func sendAlerts(s sender, externalURL string) rules.NotifyFunc { return func(ctx context.Context, expr string, alerts ...*rules.Alert) { var res []*notifier.Alert @@ -717,7 +721,7 @@ func sendAlerts(n *notifier.Manager, externalURL string) rules.NotifyFunc { } if len(alerts) > 0 { - n.Send(res...) + s.Send(res...) 
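The body of `sendAlerts` is largely elided by the hunk above. The following is a hedged sketch of the `rules.Alert` to `notifier.Alert` conversion it performs, with the field mapping inferred from the `TestSendAlerts` expectations added below (StartsAt from FiredAt; EndsAt from ResolvedAt when set, otherwise ValidUntil). The helper name `toNotifierAlerts` is illustrative and does not exist in the codebase.

```go
package sketch

import (
	"github.com/prometheus/prometheus/notifier"
	"github.com/prometheus/prometheus/rules"
	"github.com/prometheus/prometheus/util/strutil"
)

// toNotifierAlerts sketches the conversion sendAlerts performs before calling
// s.Send(res...); it is not the verbatim main.go implementation.
func toNotifierAlerts(externalURL, expr string, alerts ...*rules.Alert) []*notifier.Alert {
	var res []*notifier.Alert
	for _, alert := range alerts {
		a := &notifier.Alert{
			StartsAt:     alert.FiredAt,
			Labels:       alert.Labels,
			Annotations:  alert.Annotations,
			GeneratorURL: externalURL + strutil.TableLinkForExpression(expr),
		}
		// Resolved alerts keep their resolution time; firing ones use the
		// rule manager's ValidUntil as a provisional end time.
		if !alert.ResolvedAt.IsZero() {
			a.EndsAt = alert.ResolvedAt
		} else {
			a.EndsAt = alert.ValidUntil
		}
		res = append(res, a)
	}
	return res
}
```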
} } } diff --git a/cmd/prometheus/main_test.go b/cmd/prometheus/main_test.go index 605ba816eb..c6697a101f 100644 --- a/cmd/prometheus/main_test.go +++ b/cmd/prometheus/main_test.go @@ -14,6 +14,7 @@ package main import ( + "context" "flag" "fmt" "net/http" @@ -24,6 +25,9 @@ import ( "testing" "time" + "github.com/prometheus/prometheus/notifier" + "github.com/prometheus/prometheus/pkg/labels" + "github.com/prometheus/prometheus/rules" "github.com/prometheus/prometheus/util/testutil" ) @@ -173,3 +177,73 @@ func TestFailedStartupExitCode(t *testing.T) { t.Errorf("unable to retrieve the exit status for prometheus: %v", err) } } + +type senderFunc func(alerts ...*notifier.Alert) + +func (s senderFunc) Send(alerts ...*notifier.Alert) { + s(alerts...) +} + +func TestSendAlerts(t *testing.T) { + testCases := []struct { + in []*rules.Alert + exp []*notifier.Alert + }{ + { + in: []*rules.Alert{ + &rules.Alert{ + Labels: []labels.Label{{Name: "l1", Value: "v1"}}, + Annotations: []labels.Label{{Name: "a2", Value: "v2"}}, + ActiveAt: time.Unix(1, 0), + FiredAt: time.Unix(2, 0), + ValidUntil: time.Unix(3, 0), + }, + }, + exp: []*notifier.Alert{ + ¬ifier.Alert{ + Labels: []labels.Label{{Name: "l1", Value: "v1"}}, + Annotations: []labels.Label{{Name: "a2", Value: "v2"}}, + StartsAt: time.Unix(2, 0), + EndsAt: time.Unix(3, 0), + GeneratorURL: "http://localhost:9090/graph?g0.expr=up&g0.tab=1", + }, + }, + }, + { + in: []*rules.Alert{ + &rules.Alert{ + Labels: []labels.Label{{Name: "l1", Value: "v1"}}, + Annotations: []labels.Label{{Name: "a2", Value: "v2"}}, + ActiveAt: time.Unix(1, 0), + FiredAt: time.Unix(2, 0), + ResolvedAt: time.Unix(4, 0), + }, + }, + exp: []*notifier.Alert{ + ¬ifier.Alert{ + Labels: []labels.Label{{Name: "l1", Value: "v1"}}, + Annotations: []labels.Label{{Name: "a2", Value: "v2"}}, + StartsAt: time.Unix(2, 0), + EndsAt: time.Unix(4, 0), + GeneratorURL: "http://localhost:9090/graph?g0.expr=up&g0.tab=1", + }, + }, + }, + { + in: []*rules.Alert{}, + }, + } + + for i, tc := range testCases { + tc := tc + t.Run(fmt.Sprintf("%d", i), func(t *testing.T) { + senderFunc := senderFunc(func(alerts ...*notifier.Alert) { + if len(tc.in) == 0 { + t.Fatalf("sender called with 0 alert") + } + testutil.Equals(t, tc.exp, alerts) + }) + sendAlerts(senderFunc, "http://localhost:9090")(context.TODO(), "up", tc.in...) + }) + } +} diff --git a/cmd/promtool/unittest.go b/cmd/promtool/unittest.go index b143d32d6c..5796c5f5ad 100644 --- a/cmd/promtool/unittest.go +++ b/cmd/promtool/unittest.go @@ -160,14 +160,14 @@ func (tg *testGroup) test(mint, maxt time.Time, evalInterval time.Duration, grou // All this preparation is so that we can test alerts as we evaluate the rules. // This avoids storing them in memory, as the number of evals might be high. - // All the `eval_time` for which we have unit tests. - var alertEvalTimes []time.Duration + // All the `eval_time` for which we have unit tests for alerts. + alertEvalTimesMap := map[time.Duration]struct{}{} // Map of all the eval_time+alertname combination present in the unit tests. alertsInTest := make(map[time.Duration]map[string]struct{}) // Map of all the unit tests for given eval_time. 
alertTests := make(map[time.Duration][]alertTestCase) for _, alert := range tg.AlertRuleTests { - alertEvalTimes = append(alertEvalTimes, alert.EvalTime) + alertEvalTimesMap[alert.EvalTime] = struct{}{} if _, ok := alertsInTest[alert.EvalTime]; !ok { alertsInTest[alert.EvalTime] = make(map[string]struct{}) @@ -176,6 +176,10 @@ func (tg *testGroup) test(mint, maxt time.Time, evalInterval time.Duration, grou alertTests[alert.EvalTime] = append(alertTests[alert.EvalTime], alert) } + alertEvalTimes := make([]time.Duration, 0, len(alertEvalTimesMap)) + for k := range alertEvalTimesMap { + alertEvalTimes = append(alertEvalTimes, k) + } sort.Slice(alertEvalTimes, func(i, j int) bool { return alertEvalTimes[i] < alertEvalTimes[j] }) diff --git a/config/config.go b/config/config.go index 20950a0436..d12c8d651d 100644 --- a/config/config.go +++ b/config/config.go @@ -22,6 +22,8 @@ import ( "strings" "time" + "github.com/prometheus/prometheus/pkg/relabel" + config_util "github.com/prometheus/common/config" "github.com/prometheus/common/model" sd_config "github.com/prometheus/prometheus/discovery/config" @@ -29,8 +31,7 @@ import ( ) var ( - patRulePath = regexp.MustCompile(`^[^*]*(\*[^/]*)?$`) - relabelTarget = regexp.MustCompile(`^(?:(?:[a-zA-Z_]|\$(?:\{\w+\}|\w+))+\w*)+$`) + patRulePath = regexp.MustCompile(`^[^*]*(\*[^/]*)?$`) ) // Load parses the YAML input s into a Config. @@ -92,14 +93,6 @@ var ( Timeout: model.Duration(10 * time.Second), } - // DefaultRelabelConfig is the default Relabel configuration. - DefaultRelabelConfig = RelabelConfig{ - Action: RelabelReplace, - Separator: ";", - Regex: MustNewRegexp("(.*)"), - Replacement: "$1", - } - // DefaultRemoteWriteConfig is the default remote write configuration. DefaultRemoteWriteConfig = RemoteWriteConfig{ RemoteTimeout: model.Duration(30 * time.Second), @@ -350,9 +343,9 @@ type ScrapeConfig struct { HTTPClientConfig config_util.HTTPClientConfig `yaml:",inline"` // List of target relabel configurations. - RelabelConfigs []*RelabelConfig `yaml:"relabel_configs,omitempty"` + RelabelConfigs []*relabel.Config `yaml:"relabel_configs,omitempty"` // List of metric relabel configurations. - MetricRelabelConfigs []*RelabelConfig `yaml:"metric_relabel_configs,omitempty"` + MetricRelabelConfigs []*relabel.Config `yaml:"metric_relabel_configs,omitempty"` } // UnmarshalYAML implements the yaml.Unmarshaler interface. @@ -414,7 +407,7 @@ func (c *ScrapeConfig) UnmarshalYAML(unmarshal func(interface{}) error) error { // AlertingConfig configures alerting and alertmanager related configs. type AlertingConfig struct { - AlertRelabelConfigs []*RelabelConfig `yaml:"alert_relabel_configs,omitempty"` + AlertRelabelConfigs []*relabel.Config `yaml:"alert_relabel_configs,omitempty"` AlertmanagerConfigs []*AlertmanagerConfig `yaml:"alertmanagers,omitempty"` } @@ -452,7 +445,7 @@ type AlertmanagerConfig struct { Timeout model.Duration `yaml:"timeout,omitempty"` // List of Alertmanager relabel configurations. - RelabelConfigs []*RelabelConfig `yaml:"relabel_configs,omitempty"` + RelabelConfigs []*relabel.Config `yaml:"relabel_configs,omitempty"` } // UnmarshalYAML implements the yaml.Unmarshaler interface. @@ -524,151 +517,11 @@ type FileSDConfig struct { RefreshInterval model.Duration `yaml:"refresh_interval,omitempty"` } -// RelabelAction is the action to be performed on relabeling. -type RelabelAction string - -const ( - // RelabelReplace performs a regex replacement. 
- RelabelReplace RelabelAction = "replace" - // RelabelKeep drops targets for which the input does not match the regex. - RelabelKeep RelabelAction = "keep" - // RelabelDrop drops targets for which the input does match the regex. - RelabelDrop RelabelAction = "drop" - // RelabelHashMod sets a label to the modulus of a hash of labels. - RelabelHashMod RelabelAction = "hashmod" - // RelabelLabelMap copies labels to other labelnames based on a regex. - RelabelLabelMap RelabelAction = "labelmap" - // RelabelLabelDrop drops any label matching the regex. - RelabelLabelDrop RelabelAction = "labeldrop" - // RelabelLabelKeep drops any label not matching the regex. - RelabelLabelKeep RelabelAction = "labelkeep" -) - -// UnmarshalYAML implements the yaml.Unmarshaler interface. -func (a *RelabelAction) UnmarshalYAML(unmarshal func(interface{}) error) error { - var s string - if err := unmarshal(&s); err != nil { - return err - } - switch act := RelabelAction(strings.ToLower(s)); act { - case RelabelReplace, RelabelKeep, RelabelDrop, RelabelHashMod, RelabelLabelMap, RelabelLabelDrop, RelabelLabelKeep: - *a = act - return nil - } - return fmt.Errorf("unknown relabel action %q", s) -} - -// RelabelConfig is the configuration for relabeling of target label sets. -type RelabelConfig struct { - // A list of labels from which values are taken and concatenated - // with the configured separator in order. - SourceLabels model.LabelNames `yaml:"source_labels,flow,omitempty"` - // Separator is the string between concatenated values from the source labels. - Separator string `yaml:"separator,omitempty"` - // Regex against which the concatenation is matched. - Regex Regexp `yaml:"regex,omitempty"` - // Modulus to take of the hash of concatenated values from the source labels. - Modulus uint64 `yaml:"modulus,omitempty"` - // TargetLabel is the label to which the resulting string is written in a replacement. - // Regexp interpolation is allowed for the replace action. - TargetLabel string `yaml:"target_label,omitempty"` - // Replacement is the regex replacement pattern to be used. - Replacement string `yaml:"replacement,omitempty"` - // Action is the action to be performed for the relabeling. - Action RelabelAction `yaml:"action,omitempty"` -} - -// UnmarshalYAML implements the yaml.Unmarshaler interface. 
-func (c *RelabelConfig) UnmarshalYAML(unmarshal func(interface{}) error) error { - *c = DefaultRelabelConfig - type plain RelabelConfig - if err := unmarshal((*plain)(c)); err != nil { - return err - } - if c.Regex.Regexp == nil { - c.Regex = MustNewRegexp("") - } - if c.Modulus == 0 && c.Action == RelabelHashMod { - return fmt.Errorf("relabel configuration for hashmod requires non-zero modulus") - } - if (c.Action == RelabelReplace || c.Action == RelabelHashMod) && c.TargetLabel == "" { - return fmt.Errorf("relabel configuration for %s action requires 'target_label' value", c.Action) - } - if c.Action == RelabelReplace && !relabelTarget.MatchString(c.TargetLabel) { - return fmt.Errorf("%q is invalid 'target_label' for %s action", c.TargetLabel, c.Action) - } - if c.Action == RelabelLabelMap && !relabelTarget.MatchString(c.Replacement) { - return fmt.Errorf("%q is invalid 'replacement' for %s action", c.Replacement, c.Action) - } - if c.Action == RelabelHashMod && !model.LabelName(c.TargetLabel).IsValid() { - return fmt.Errorf("%q is invalid 'target_label' for %s action", c.TargetLabel, c.Action) - } - - if c.Action == RelabelLabelDrop || c.Action == RelabelLabelKeep { - if c.SourceLabels != nil || - c.TargetLabel != DefaultRelabelConfig.TargetLabel || - c.Modulus != DefaultRelabelConfig.Modulus || - c.Separator != DefaultRelabelConfig.Separator || - c.Replacement != DefaultRelabelConfig.Replacement { - return fmt.Errorf("%s action requires only 'regex', and no other fields", c.Action) - } - } - - return nil -} - -// Regexp encapsulates a regexp.Regexp and makes it YAML marshalable. -type Regexp struct { - *regexp.Regexp - original string -} - -// NewRegexp creates a new anchored Regexp and returns an error if the -// passed-in regular expression does not compile. -func NewRegexp(s string) (Regexp, error) { - regex, err := regexp.Compile("^(?:" + s + ")$") - return Regexp{ - Regexp: regex, - original: s, - }, err -} - -// MustNewRegexp works like NewRegexp, but panics if the regular expression does not compile. -func MustNewRegexp(s string) Regexp { - re, err := NewRegexp(s) - if err != nil { - panic(err) - } - return re -} - -// UnmarshalYAML implements the yaml.Unmarshaler interface. -func (re *Regexp) UnmarshalYAML(unmarshal func(interface{}) error) error { - var s string - if err := unmarshal(&s); err != nil { - return err - } - r, err := NewRegexp(s) - if err != nil { - return err - } - *re = r - return nil -} - -// MarshalYAML implements the yaml.Marshaler interface. -func (re Regexp) MarshalYAML() (interface{}, error) { - if re.original != "" { - return re.original, nil - } - return nil, nil -} - // RemoteWriteConfig is the configuration for writing to remote storage. type RemoteWriteConfig struct { - URL *config_util.URL `yaml:"url"` - RemoteTimeout model.Duration `yaml:"remote_timeout,omitempty"` - WriteRelabelConfigs []*RelabelConfig `yaml:"write_relabel_configs,omitempty"` + URL *config_util.URL `yaml:"url"` + RemoteTimeout model.Duration `yaml:"remote_timeout,omitempty"` + WriteRelabelConfigs []*relabel.Config `yaml:"write_relabel_configs,omitempty"` // We cannot do proper Go type embedding below as the parser will then parse // values arbitrarily into the overflow maps of further-down types. 
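Worth keeping in mind while reading the config_test.go changes that follow: the `Regexp` type that moved to `pkg/relabel` keeps the same anchoring behavior as the removed `NewRegexp` above, wrapping every pattern in `^(?: ... )$`. A standalone, stdlib-only sketch of that behavior (the sample values are illustrative):

```go
package main

import (
	"fmt"
	"regexp"
)

func main() {
	// Same wrapping as NewRegexp: the user-supplied pattern must match the
	// whole input, not just a substring.
	pattern := "expensive.*"
	re := regexp.MustCompile("^(?:" + pattern + ")$")

	fmt.Println(re.MatchString("expensive_metric_total")) // true
	fmt.Println(re.MatchString("not_expensive_metric"))   // false: anchored at the start
	fmt.Println(regexp.MustCompile(pattern).MatchString("not_expensive_metric")) // true without anchoring
}
```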
diff --git a/config/config_test.go b/config/config_test.go index e04fd56d25..650802d931 100644 --- a/config/config_test.go +++ b/config/config_test.go @@ -23,6 +23,8 @@ import ( "testing" "time" + "github.com/prometheus/prometheus/pkg/relabel" + "github.com/prometheus/prometheus/discovery/azure" "github.com/prometheus/prometheus/discovery/consul" "github.com/prometheus/prometheus/discovery/dns" @@ -71,13 +73,13 @@ var expectedConf = &Config{ { URL: mustParseURL("http://remote1/push"), RemoteTimeout: model.Duration(30 * time.Second), - WriteRelabelConfigs: []*RelabelConfig{ + WriteRelabelConfigs: []*relabel.Config{ { SourceLabels: model.LabelNames{"__name__"}, Separator: ";", - Regex: MustNewRegexp("expensive.*"), + Regex: relabel.MustNewRegexp("expensive.*"), Replacement: "$1", - Action: RelabelDrop, + Action: relabel.Drop, }, }, QueueConfig: DefaultQueueConfig, @@ -145,33 +147,33 @@ var expectedConf = &Config{ }, }, - RelabelConfigs: []*RelabelConfig{ + RelabelConfigs: []*relabel.Config{ { SourceLabels: model.LabelNames{"job", "__meta_dns_name"}, TargetLabel: "job", Separator: ";", - Regex: MustNewRegexp("(.*)some-[regex]"), + Regex: relabel.MustNewRegexp("(.*)some-[regex]"), Replacement: "foo-${1}", - Action: RelabelReplace, + Action: relabel.Replace, }, { SourceLabels: model.LabelNames{"abc"}, TargetLabel: "cde", Separator: ";", - Regex: DefaultRelabelConfig.Regex, - Replacement: DefaultRelabelConfig.Replacement, - Action: RelabelReplace, + Regex: relabel.DefaultRelabelConfig.Regex, + Replacement: relabel.DefaultRelabelConfig.Replacement, + Action: relabel.Replace, }, { TargetLabel: "abc", Separator: ";", - Regex: DefaultRelabelConfig.Regex, + Regex: relabel.DefaultRelabelConfig.Regex, Replacement: "static", - Action: RelabelReplace, + Action: relabel.Replace, }, { TargetLabel: "abc", Separator: ";", - Regex: MustNewRegexp(""), + Regex: relabel.MustNewRegexp(""), Replacement: "static", - Action: RelabelReplace, + Action: relabel.Replace, }, }, }, @@ -212,56 +214,56 @@ var expectedConf = &Config{ }, }, - RelabelConfigs: []*RelabelConfig{ + RelabelConfigs: []*relabel.Config{ { SourceLabels: model.LabelNames{"job"}, - Regex: MustNewRegexp("(.*)some-[regex]"), + Regex: relabel.MustNewRegexp("(.*)some-[regex]"), Separator: ";", - Replacement: DefaultRelabelConfig.Replacement, - Action: RelabelDrop, + Replacement: relabel.DefaultRelabelConfig.Replacement, + Action: relabel.Drop, }, { SourceLabels: model.LabelNames{"__address__"}, TargetLabel: "__tmp_hash", - Regex: DefaultRelabelConfig.Regex, - Replacement: DefaultRelabelConfig.Replacement, + Regex: relabel.DefaultRelabelConfig.Regex, + Replacement: relabel.DefaultRelabelConfig.Replacement, Modulus: 8, Separator: ";", - Action: RelabelHashMod, + Action: relabel.HashMod, }, { SourceLabels: model.LabelNames{"__tmp_hash"}, - Regex: MustNewRegexp("1"), + Regex: relabel.MustNewRegexp("1"), Separator: ";", - Replacement: DefaultRelabelConfig.Replacement, - Action: RelabelKeep, + Replacement: relabel.DefaultRelabelConfig.Replacement, + Action: relabel.Keep, }, { - Regex: MustNewRegexp("1"), + Regex: relabel.MustNewRegexp("1"), Separator: ";", - Replacement: DefaultRelabelConfig.Replacement, - Action: RelabelLabelMap, + Replacement: relabel.DefaultRelabelConfig.Replacement, + Action: relabel.LabelMap, }, { - Regex: MustNewRegexp("d"), + Regex: relabel.MustNewRegexp("d"), Separator: ";", - Replacement: DefaultRelabelConfig.Replacement, - Action: RelabelLabelDrop, + Replacement: relabel.DefaultRelabelConfig.Replacement, + Action: relabel.LabelDrop, }, 
{ - Regex: MustNewRegexp("k"), + Regex: relabel.MustNewRegexp("k"), Separator: ";", - Replacement: DefaultRelabelConfig.Replacement, - Action: RelabelLabelKeep, + Replacement: relabel.DefaultRelabelConfig.Replacement, + Action: relabel.LabelKeep, }, }, - MetricRelabelConfigs: []*RelabelConfig{ + MetricRelabelConfigs: []*relabel.Config{ { SourceLabels: model.LabelNames{"__name__"}, - Regex: MustNewRegexp("expensive_metric.*"), + Regex: relabel.MustNewRegexp("expensive_metric.*"), Separator: ";", - Replacement: DefaultRelabelConfig.Replacement, - Action: RelabelDrop, + Replacement: relabel.DefaultRelabelConfig.Replacement, + Action: relabel.Drop, }, }, }, @@ -296,14 +298,14 @@ var expectedConf = &Config{ }, }, - RelabelConfigs: []*RelabelConfig{ + RelabelConfigs: []*relabel.Config{ { SourceLabels: model.LabelNames{"__meta_sd_consul_tags"}, - Regex: MustNewRegexp("label:([^=]+)=([^,]+)"), + Regex: relabel.MustNewRegexp("label:([^=]+)=([^,]+)"), Separator: ",", TargetLabel: "${1}", Replacement: "${2}", - Action: RelabelReplace, + Action: relabel.Replace, }, }, }, @@ -442,13 +444,14 @@ var expectedConf = &Config{ ServiceDiscoveryConfig: sd_config.ServiceDiscoveryConfig{ AzureSDConfigs: []*azure.SDConfig{ { - Environment: "AzurePublicCloud", - SubscriptionID: "11AAAA11-A11A-111A-A111-1111A1111A11", - TenantID: "BBBB222B-B2B2-2B22-B222-2BB2222BB2B2", - ClientID: "333333CC-3C33-3333-CCC3-33C3CCCCC33C", - ClientSecret: "mysecret", - RefreshInterval: model.Duration(5 * time.Minute), - Port: 9100, + Environment: "AzurePublicCloud", + SubscriptionID: "11AAAA11-A11A-111A-A111-1111A1111A11", + TenantID: "BBBB222B-B2B2-2B22-B222-2BB2222BB2B2", + ClientID: "333333CC-3C33-3333-CCC3-33C3CCCCC33C", + ClientSecret: "mysecret", + AuthenticationMethod: "OAuth", + RefreshInterval: model.Duration(5 * time.Minute), + Port: 9100, }, }, }, @@ -767,6 +770,10 @@ var expectedErrors = []struct { filename: "azure_tenant_id_missing.bad.yml", errMsg: "Azure SD configuration requires a tenant_id", }, + { + filename: "azure_authentication_method.bad.yml", + errMsg: "Unknown authentication_type \"invalid\". 
Supported types are \"OAuth\" or \"ManagedIdentity\"", + }, { filename: "empty_scrape_config.bad.yml", errMsg: "empty or null scrape config section", @@ -845,33 +852,6 @@ func TestEmptyGlobalBlock(t *testing.T) { testutil.Equals(t, exp, *c) } -func TestTargetLabelValidity(t *testing.T) { - tests := []struct { - str string - valid bool - }{ - {"-label", false}, - {"label", true}, - {"label${1}", true}, - {"${1}label", true}, - {"${1}", true}, - {"${1}label", true}, - {"${", false}, - {"$", false}, - {"${}", false}, - {"foo${", false}, - {"$1", true}, - {"asd$2asd", true}, - {"-foo${1}bar-", false}, - {"_${1}_", true}, - {"foo${bar}foo", true}, - } - for _, test := range tests { - testutil.Assert(t, relabelTarget.Match([]byte(test.str)) == test.valid, - "Expected %q to be %v", test.str, test.valid) - } -} - func kubernetesSDHostURL() config_util.URL { tURL, _ := url.Parse("https://localhost:1234") return config_util.URL{URL: tURL} diff --git a/config/testdata/azure_authentication_method.bad.yml b/config/testdata/azure_authentication_method.bad.yml new file mode 100644 index 0000000000..b05fc474a6 --- /dev/null +++ b/config/testdata/azure_authentication_method.bad.yml @@ -0,0 +1,4 @@ +scrape_configs: +- azure_sd_configs: + - authentication_method: invalid + subscription_id: 11AAAA11-A11A-111A-A111-1111A1111A11 diff --git a/config/testdata/conf.good.yml b/config/testdata/conf.good.yml index 5abadc212f..2db750d10b 100644 --- a/config/testdata/conf.good.yml +++ b/config/testdata/conf.good.yml @@ -196,6 +196,7 @@ scrape_configs: - job_name: service-azure azure_sd_configs: - environment: AzurePublicCloud + authentication_method: OAuth subscription_id: 11AAAA11-A11A-111A-A111-1111A1111A11 tenant_id: BBBB222B-B2B2-2B22-B222-2BB2222BB2B2 client_id: 333333CC-3C33-3333-CCC3-33C3CCCCC33C diff --git a/discovery/azure/azure.go b/discovery/azure/azure.go index 58a7ca8e00..4164325d62 100644 --- a/discovery/azure/azure.go +++ b/discovery/azure/azure.go @@ -26,13 +26,11 @@ import ( "github.com/Azure/go-autorest/autorest" "github.com/Azure/go-autorest/autorest/adal" "github.com/Azure/go-autorest/autorest/azure" - "github.com/go-kit/kit/log" "github.com/go-kit/kit/log/level" "github.com/prometheus/client_golang/prometheus" config_util "github.com/prometheus/common/config" "github.com/prometheus/common/model" - "github.com/prometheus/prometheus/discovery/targetgroup" "github.com/prometheus/prometheus/util/strutil" ) @@ -47,7 +45,9 @@ const ( azureLabelMachinePrivateIP = azureLabel + "machine_private_ip" azureLabelMachineTag = azureLabel + "machine_tag_" azureLabelMachineScaleSet = azureLabel + "machine_scale_set" - azureLabelPowerState = azureLabel + "machine_power_state" + + authMethodOAuth = "OAuth" + authMethodManagedIdentity = "ManagedIdentity" ) var ( @@ -64,21 +64,23 @@ var ( // DefaultSDConfig is the default Azure SD configuration. DefaultSDConfig = SDConfig{ - Port: 80, - RefreshInterval: model.Duration(5 * time.Minute), - Environment: azure.PublicCloud.Name, + Port: 80, + RefreshInterval: model.Duration(5 * time.Minute), + Environment: azure.PublicCloud.Name, + AuthenticationMethod: authMethodOAuth, } ) // SDConfig is the configuration for Azure based service discovery. 
type SDConfig struct { - Environment string `yaml:"environment,omitempty"` - Port int `yaml:"port"` - SubscriptionID string `yaml:"subscription_id"` - TenantID string `yaml:"tenant_id,omitempty"` - ClientID string `yaml:"client_id,omitempty"` - ClientSecret config_util.Secret `yaml:"client_secret,omitempty"` - RefreshInterval model.Duration `yaml:"refresh_interval,omitempty"` + Environment string `yaml:"environment,omitempty"` + Port int `yaml:"port"` + SubscriptionID string `yaml:"subscription_id"` + TenantID string `yaml:"tenant_id,omitempty"` + ClientID string `yaml:"client_id,omitempty"` + ClientSecret config_util.Secret `yaml:"client_secret,omitempty"` + RefreshInterval model.Duration `yaml:"refresh_interval,omitempty"` + AuthenticationMethod string `yaml:"authentication_method,omitempty"` } func validateAuthParam(param, name string) error { @@ -96,18 +98,27 @@ func (c *SDConfig) UnmarshalYAML(unmarshal func(interface{}) error) error { if err != nil { return err } + if err = validateAuthParam(c.SubscriptionID, "subscription_id"); err != nil { return err } - if err = validateAuthParam(c.TenantID, "tenant_id"); err != nil { - return err + + if c.AuthenticationMethod == authMethodOAuth { + if err = validateAuthParam(c.TenantID, "tenant_id"); err != nil { + return err + } + if err = validateAuthParam(c.ClientID, "client_id"); err != nil { + return err + } + if err = validateAuthParam(string(c.ClientSecret), "client_secret"); err != nil { + return err + } } - if err = validateAuthParam(c.ClientID, "client_id"); err != nil { - return err - } - if err = validateAuthParam(string(c.ClientSecret), "client_secret"); err != nil { - return err + + if c.AuthenticationMethod != authMethodOAuth && c.AuthenticationMethod != authMethodManagedIdentity { + return fmt.Errorf("Unknown authentication_type %q. Supported types are %q or %q", c.AuthenticationMethod, authMethodOAuth, authMethodManagedIdentity) } + return nil } @@ -187,13 +198,30 @@ func createAzureClient(cfg SDConfig) (azureClient, error) { resourceManagerEndpoint := env.ResourceManagerEndpoint var c azureClient - oauthConfig, err := adal.NewOAuthConfig(activeDirectoryEndpoint, cfg.TenantID) - if err != nil { - return azureClient{}, err - } - spt, err := adal.NewServicePrincipalToken(*oauthConfig, cfg.ClientID, string(cfg.ClientSecret), resourceManagerEndpoint) - if err != nil { - return azureClient{}, err + + var spt *adal.ServicePrincipalToken + + switch cfg.AuthenticationMethod { + case authMethodManagedIdentity: + msiEndpoint, err := adal.GetMSIVMEndpoint() + if err != nil { + return azureClient{}, err + } + + spt, err = adal.NewServicePrincipalTokenFromMSI(msiEndpoint, resourceManagerEndpoint) + if err != nil { + return azureClient{}, err + } + case authMethodOAuth: + oauthConfig, err := adal.NewOAuthConfig(activeDirectoryEndpoint, cfg.TenantID) + if err != nil { + return azureClient{}, err + } + + spt, err = adal.NewServicePrincipalToken(*oauthConfig, cfg.ClientID, string(cfg.ClientSecret), resourceManagerEndpoint) + if err != nil { + return azureClient{}, err + } } bearerAuthorizer := autorest.NewBearerAuthorizer(spt) @@ -229,7 +257,6 @@ type virtualMachine struct { ScaleSet string Tags map[string]*string NetworkProfile compute.NetworkProfile - PowerStateCode string } // Create a new azureResource object from an ID string. @@ -304,21 +331,12 @@ func (d *Discovery) refresh() (tg *targetgroup.Group, err error) { return } - // We check if the virtual machine has been deallocated. - // If so, we skip them in service discovery. 
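A hedged usage sketch for the new `authentication_method` field (the program scaffolding and the `gopkg.in/yaml.v2` round-trip are illustrative, not part of the change): with `ManagedIdentity`, the UnmarshalYAML validation above only insists on `subscription_id`, so the OAuth-only fields can be omitted entirely.

```go
package main

import (
	"fmt"
	"log"

	"github.com/prometheus/prometheus/discovery/azure"
	yaml "gopkg.in/yaml.v2"
)

func main() {
	// Minimal config relying on the VM's managed identity; tenant_id,
	// client_id and client_secret are no longer required in this mode.
	src := `
authentication_method: ManagedIdentity
subscription_id: 11AAAA11-A11A-111A-A111-1111A1111A11
`
	var cfg azure.SDConfig
	if err := yaml.Unmarshal([]byte(src), &cfg); err != nil {
		log.Fatalf("config rejected: %v", err)
	}
	fmt.Println(cfg.AuthenticationMethod, cfg.SubscriptionID)
}
```

With `authentication_method: OAuth` (the default), the same validation still rejects configs that omit tenant_id, client_id or client_secret.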
- if strings.EqualFold(vm.PowerStateCode, "PowerState/deallocated") { - level.Debug(d.logger).Log("msg", "Skipping virtual machine", "machine", vm.Name, "power_state", vm.PowerStateCode) - ch <- target{} - return - } - labels := model.LabelSet{ azureLabelMachineID: model.LabelValue(vm.ID), azureLabelMachineName: model.LabelValue(vm.Name), azureLabelMachineOSType: model.LabelValue(vm.OsType), azureLabelMachineLocation: model.LabelValue(vm.Location), azureLabelMachineResourceGroup: model.LabelValue(r.ResourceGroup), - azureLabelPowerState: model.LabelValue(vm.PowerStateCode), } if vm.ScaleSet != "" { @@ -346,6 +364,16 @@ func (d *Discovery) refresh() (tg *targetgroup.Group, err error) { continue } + // Unfortunately Azure does not return information on whether a VM is deallocated. + // This information is available via another API call however the Go SDK does not + // yet support this. On deallocated machines, this value happens to be nil so it + // is a cheap and easy way to determine if a machine is allocated or not. + if networkInterface.Properties.Primary == nil { + level.Debug(d.logger).Log("msg", "Skipping deallocated virtual machine", "machine", vm.Name) + ch <- target{} + return + } + if *networkInterface.Properties.Primary { for _, ip := range *networkInterface.Properties.IPConfigurations { if ip.Properties.PrivateIPAddress != nil { @@ -473,7 +501,6 @@ func mapFromVM(vm compute.VirtualMachine) virtualMachine { ScaleSet: "", Tags: tags, NetworkProfile: *(vm.Properties.NetworkProfile), - PowerStateCode: getPowerStateFromVMInstanceView(vm.Properties.InstanceView), } } @@ -494,7 +521,6 @@ func mapFromVMScaleSetVM(vm compute.VirtualMachineScaleSetVM, scaleSetName strin ScaleSet: scaleSetName, Tags: tags, NetworkProfile: *(vm.Properties.NetworkProfile), - PowerStateCode: getPowerStateFromVMInstanceView(vm.Properties.InstanceView), } } @@ -527,16 +553,3 @@ func (client *azureClient) getNetworkInterfaceByID(networkInterfaceID string) (n return result, nil } - -func getPowerStateFromVMInstanceView(instanceView *compute.VirtualMachineInstanceView) (powerState string) { - if instanceView.Statuses == nil { - return - } - for _, ivs := range *instanceView.Statuses { - code := *(ivs.Code) - if strings.HasPrefix(code, "PowerState") { - powerState = code - } - } - return -} diff --git a/discovery/azure/azure_test.go b/discovery/azure/azure_test.go index 7dab352430..daed79ea6f 100644 --- a/discovery/azure/azure_test.go +++ b/discovery/azure/azure_test.go @@ -26,10 +26,6 @@ func TestMapFromVMWithEmptyTags(t *testing.T) { vmType := "type" location := "westeurope" networkProfile := compute.NetworkProfile{} - provisioningStatusCode := "ProvisioningState/succeeded" - provisionDisplayStatus := "Provisioning succeeded" - powerStatusCode := "PowerState/running" - powerDisplayStatus := "VM running" properties := &compute.VirtualMachineProperties{ StorageProfile: &compute.StorageProfile{ OsDisk: &compute.OSDisk{ @@ -37,20 +33,6 @@ func TestMapFromVMWithEmptyTags(t *testing.T) { }, }, NetworkProfile: &networkProfile, - InstanceView: &compute.VirtualMachineInstanceView{ - Statuses: &[]compute.InstanceViewStatus{ - { - Code: &provisioningStatusCode, - Level: "Info", - DisplayStatus: &provisionDisplayStatus, - }, - { - Code: &powerStatusCode, - Level: "Info", - DisplayStatus: &powerDisplayStatus, - }, - }, - }, } testVM := compute.VirtualMachine{ @@ -70,7 +52,6 @@ func TestMapFromVMWithEmptyTags(t *testing.T) { OsType: "Linux", Tags: map[string]*string{}, NetworkProfile: networkProfile, - PowerStateCode: 
"PowerState/running", } actualVM := mapFromVM(testVM) @@ -88,10 +69,6 @@ func TestMapFromVMWithTags(t *testing.T) { tags := map[string]*string{ "prometheus": new(string), } - provisioningStatusCode := "ProvisioningState/succeeded" - provisionDisplayStatus := "Provisioning succeeded" - powerStatusCode := "PowerState/running" - powerDisplayStatus := "VM running" networkProfile := compute.NetworkProfile{} properties := &compute.VirtualMachineProperties{ StorageProfile: &compute.StorageProfile{ @@ -100,20 +77,6 @@ func TestMapFromVMWithTags(t *testing.T) { }, }, NetworkProfile: &networkProfile, - InstanceView: &compute.VirtualMachineInstanceView{ - Statuses: &[]compute.InstanceViewStatus{ - { - Code: &provisioningStatusCode, - Level: "Info", - DisplayStatus: &provisionDisplayStatus, - }, - { - Code: &powerStatusCode, - Level: "Info", - DisplayStatus: &powerDisplayStatus, - }, - }, - }, } testVM := compute.VirtualMachine{ @@ -133,7 +96,6 @@ func TestMapFromVMWithTags(t *testing.T) { OsType: "Linux", Tags: tags, NetworkProfile: networkProfile, - PowerStateCode: "PowerState/running", } actualVM := mapFromVM(testVM) @@ -149,10 +111,6 @@ func TestMapFromVMScaleSetVMWithEmptyTags(t *testing.T) { vmType := "type" location := "westeurope" networkProfile := compute.NetworkProfile{} - provisioningStatusCode := "ProvisioningState/succeeded" - provisionDisplayStatus := "Provisioning succeeded" - powerStatusCode := "PowerState/running" - powerDisplayStatus := "VM running" properties := &compute.VirtualMachineScaleSetVMProperties{ StorageProfile: &compute.StorageProfile{ OsDisk: &compute.OSDisk{ @@ -160,20 +118,6 @@ func TestMapFromVMScaleSetVMWithEmptyTags(t *testing.T) { }, }, NetworkProfile: &networkProfile, - InstanceView: &compute.VirtualMachineInstanceView{ - Statuses: &[]compute.InstanceViewStatus{ - { - Code: &provisioningStatusCode, - Level: "Info", - DisplayStatus: &provisionDisplayStatus, - }, - { - Code: &powerStatusCode, - Level: "Info", - DisplayStatus: &powerDisplayStatus, - }, - }, - }, } testVM := compute.VirtualMachineScaleSetVM{ @@ -195,7 +139,6 @@ func TestMapFromVMScaleSetVMWithEmptyTags(t *testing.T) { Tags: map[string]*string{}, NetworkProfile: networkProfile, ScaleSet: scaleSet, - PowerStateCode: "PowerState/running", } actualVM := mapFromVMScaleSetVM(testVM, scaleSet) @@ -214,10 +157,6 @@ func TestMapFromVMScaleSetVMWithTags(t *testing.T) { "prometheus": new(string), } networkProfile := compute.NetworkProfile{} - provisioningStatusCode := "ProvisioningState/succeeded" - provisionDisplayStatus := "Provisioning succeeded" - powerStatusCode := "PowerState/running" - powerDisplayStatus := "VM running" properties := &compute.VirtualMachineScaleSetVMProperties{ StorageProfile: &compute.StorageProfile{ OsDisk: &compute.OSDisk{ @@ -225,20 +164,6 @@ func TestMapFromVMScaleSetVMWithTags(t *testing.T) { }, }, NetworkProfile: &networkProfile, - InstanceView: &compute.VirtualMachineInstanceView{ - Statuses: &[]compute.InstanceViewStatus{ - { - Code: &provisioningStatusCode, - Level: "Info", - DisplayStatus: &provisionDisplayStatus, - }, - { - Code: &powerStatusCode, - Level: "Info", - DisplayStatus: &powerDisplayStatus, - }, - }, - }, } testVM := compute.VirtualMachineScaleSetVM{ @@ -260,7 +185,6 @@ func TestMapFromVMScaleSetVMWithTags(t *testing.T) { Tags: tags, NetworkProfile: networkProfile, ScaleSet: scaleSet, - PowerStateCode: "PowerState/running", } actualVM := mapFromVMScaleSetVM(testVM, scaleSet) @@ -269,52 +193,3 @@ func TestMapFromVMScaleSetVMWithTags(t *testing.T) { t.Errorf("Expected 
%v got %v", expectedVM, actualVM) } } - -func TestGetPowerStatusFromVM(t *testing.T) { - provisioningStatusCode := "ProvisioningState/succeeded" - provisionDisplayStatus := "Provisioning succeeded" - powerStatusCode := "PowerState/running" - powerDisplayStatus := "VM running" - properties := &compute.VirtualMachineScaleSetVMProperties{ - StorageProfile: &compute.StorageProfile{ - OsDisk: &compute.OSDisk{ - OsType: "Linux", - }, - }, - InstanceView: &compute.VirtualMachineInstanceView{ - Statuses: &[]compute.InstanceViewStatus{ - { - Code: &provisioningStatusCode, - Level: "Info", - DisplayStatus: &provisionDisplayStatus, - }, - { - Code: &powerStatusCode, - Level: "Info", - DisplayStatus: &powerDisplayStatus, - }, - }, - }, - } - - testVM := compute.VirtualMachineScaleSetVM{ - Properties: properties, - } - - actual := getPowerStateFromVMInstanceView(testVM.Properties.InstanceView) - - expected := "PowerState/running" - - if actual != expected { - t.Errorf("expected powerStatus %s, but got %s instead", expected, actual) - } - - // Noq we test a virtualMachine with an empty InstanceView struct. - testVM.Properties.InstanceView = &compute.VirtualMachineInstanceView{} - - actual = getPowerStateFromVMInstanceView(testVM.Properties.InstanceView) - - if actual != "" { - t.Errorf("expected powerStatus %s, but got %s instead", expected, actual) - } -} diff --git a/discovery/consul/consul.go b/discovery/consul/consul.go index 7063ae88c2..7ba8ad721b 100644 --- a/discovery/consul/consul.go +++ b/discovery/consul/consul.go @@ -55,6 +55,8 @@ const ( servicePortLabel = model.MetaLabelPrefix + "consul_service_port" // datacenterLabel is the name of the label containing the datacenter ID. datacenterLabel = model.MetaLabelPrefix + "consul_dc" + // taggedAddressesLabel is the prefix for the labels mapping to a target's tagged addresses. + taggedAddressesLabel = model.MetaLabelPrefix + "consul_tagged_address_" // serviceIDLabel is the name of the label containing the service ID. serviceIDLabel = model.MetaLabelPrefix + "consul_service_id" @@ -487,7 +489,7 @@ func (srv *consulService) watch(ctx context.Context, ch chan<- []*targetgroup.Gr var tags = srv.tagSeparator + strings.Join(node.ServiceTags, srv.tagSeparator) + srv.tagSeparator // If the service address is not empty it should be used instead of the node address - // since the service may be registered remotely through a different node + // since the service may be registered remotely through a different node. var addr string if node.ServiceAddress != "" { addr = net.JoinHostPort(node.ServiceAddress, fmt.Sprintf("%d", node.ServicePort)) @@ -505,18 +507,24 @@ func (srv *consulService) watch(ctx context.Context, ch chan<- []*targetgroup.Gr serviceIDLabel: model.LabelValue(node.ServiceID), } - // Add all key/value pairs from the node's metadata as their own labels + // Add all key/value pairs from the node's metadata as their own labels. for k, v := range node.NodeMeta { name := strutil.SanitizeLabelName(k) labels[metaDataLabel+model.LabelName(name)] = model.LabelValue(v) } - // Add all key/value pairs from the service's metadata as their own labels + // Add all key/value pairs from the service's metadata as their own labels. for k, v := range node.ServiceMeta { name := strutil.SanitizeLabelName(k) labels[serviceMetaDataLabel+model.LabelName(name)] = model.LabelValue(v) } + // Add all key/value pairs from the service's tagged addresses as their own labels. 
+ for k, v := range node.TaggedAddresses { + name := strutil.SanitizeLabelName(k) + labels[taggedAddressesLabel+model.LabelName(name)] = model.LabelValue(v) + } + tgroup.Targets = append(tgroup.Targets, labels) } diff --git a/discovery/consul/consul_test.go b/discovery/consul/consul_test.go index 70d8be64a8..da734de7c8 100644 --- a/discovery/consul/consul_test.go +++ b/discovery/consul/consul_test.go @@ -87,6 +87,7 @@ const ( "Node": "node1", "Address": "1.1.1.1", "Datacenter": "test-dc", +"TaggedAddresses": {"lan":"192.168.10.10","wan":"10.0.10.10"}, "NodeMeta": {"rack_name": "2304"}, "ServiceID": "test", "ServiceName": "test", diff --git a/discovery/gce/gce.go b/discovery/gce/gce.go index d0d75de062..5a468b9d03 100644 --- a/discovery/gce/gce.go +++ b/discovery/gce/gce.go @@ -25,7 +25,6 @@ import ( "github.com/go-kit/kit/log/level" "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/common/model" - "golang.org/x/oauth2" "golang.org/x/oauth2/google" compute "google.golang.org/api/compute/v1" @@ -140,7 +139,7 @@ func NewDiscovery(conf SDConfig, logger log.Logger) (*Discovery, error) { logger: logger, } var err error - gd.client, err = google.DefaultClient(oauth2.NoContext, compute.ComputeReadonlyScope) + gd.client, err = google.DefaultClient(context.Background(), compute.ComputeReadonlyScope) if err != nil { return nil, fmt.Errorf("error setting up communication with GCE service: %s", err) } diff --git a/discovery/kubernetes/kubernetes_test.go b/discovery/kubernetes/kubernetes_test.go index de07eed6cc..be1b2fd1f1 100644 --- a/discovery/kubernetes/kubernetes_test.go +++ b/discovery/kubernetes/kubernetes_test.go @@ -156,7 +156,7 @@ Loop: case <-time.After(timeout): // Because we use queue, an object that is created then // deleted or updated may be processed only once. - // So possibliy we may skip events, timed out here. + // So possibly we may skip events, timed out here. 
t.Logf("timed out, got %d (max: %d) items, some events are skipped", len(allTgs), max) break Loop } diff --git a/discovery/kubernetes/service.go b/discovery/kubernetes/service.go index 5fe4c04904..c369328d2a 100644 --- a/discovery/kubernetes/service.go +++ b/discovery/kubernetes/service.go @@ -140,6 +140,8 @@ const ( serviceAnnotationPrefix = metaLabelPrefix + "service_annotation_" servicePortNameLabel = metaLabelPrefix + "service_port_name" servicePortProtocolLabel = metaLabelPrefix + "service_port_protocol" + serviceClusterIPLabel = metaLabelPrefix + "service_cluster_ip" + serviceExternalNameLabel = metaLabelPrefix + "service_external_name" ) func serviceLabels(svc *apiv1.Service) model.LabelSet { @@ -169,11 +171,19 @@ func (s *Service) buildService(svc *apiv1.Service) *targetgroup.Group { for _, port := range svc.Spec.Ports { addr := net.JoinHostPort(svc.Name+"."+svc.Namespace+".svc", strconv.FormatInt(int64(port.Port), 10)) - tg.Targets = append(tg.Targets, model.LabelSet{ + labelSet := model.LabelSet{ model.AddressLabel: lv(addr), servicePortNameLabel: lv(port.Name), servicePortProtocolLabel: lv(string(port.Protocol)), - }) + } + + if svc.Spec.Type == apiv1.ServiceTypeExternalName { + labelSet[serviceExternalNameLabel] = lv(svc.Spec.ExternalName) + } else { + labelSet[serviceClusterIPLabel] = lv(svc.Spec.ClusterIP) + } + + tg.Targets = append(tg.Targets, labelSet) } return tg diff --git a/discovery/kubernetes/service_test.go b/discovery/kubernetes/service_test.go index e75cc4a538..f193d26672 100644 --- a/discovery/kubernetes/service_test.go +++ b/discovery/kubernetes/service_test.go @@ -44,6 +44,8 @@ func makeMultiPortService() *v1.Service { Port: int32(30901), }, }, + Type: v1.ServiceTypeClusterIP, + ClusterIP: "10.0.0.1", }, } } @@ -62,6 +64,8 @@ func makeSuffixedService(suffix string) *v1.Service { Port: int32(30900), }, }, + Type: v1.ServiceTypeClusterIP, + ClusterIP: "10.0.0.1", }, } } @@ -70,6 +74,26 @@ func makeService() *v1.Service { return makeSuffixedService("") } +func makeExternalService() *v1.Service { + return &v1.Service{ + ObjectMeta: metav1.ObjectMeta{ + Name: "testservice-external", + Namespace: "default", + }, + Spec: v1.ServiceSpec{ + Ports: []v1.ServicePort{ + { + Name: "testport", + Protocol: v1.ProtocolTCP, + Port: int32(31900), + }, + }, + Type: v1.ServiceTypeExternalName, + ExternalName: "FooExternalName", + }, + } +} + func TestServiceDiscoveryAdd(t *testing.T) { n, c, w := makeDiscovery(RoleService, NamespaceDiscovery{}) @@ -79,15 +103,19 @@ func TestServiceDiscoveryAdd(t *testing.T) { obj := makeService() c.CoreV1().Services(obj.Namespace).Create(obj) w.Services().Add(obj) + obj = makeExternalService() + c.CoreV1().Services(obj.Namespace).Create(obj) + w.Services().Add(obj) }, - expectedMaxItems: 1, + expectedMaxItems: 2, expectedRes: map[string]*targetgroup.Group{ "svc/default/testservice": { Targets: []model.LabelSet{ { "__meta_kubernetes_service_port_protocol": "TCP", - "__address__": "testservice.default.svc:30900", - "__meta_kubernetes_service_port_name": "testport", + "__address__": "testservice.default.svc:30900", + "__meta_kubernetes_service_cluster_ip": "10.0.0.1", + "__meta_kubernetes_service_port_name": "testport", }, }, Labels: model.LabelSet{ @@ -96,6 +124,21 @@ func TestServiceDiscoveryAdd(t *testing.T) { }, Source: "svc/default/testservice", }, + "svc/default/testservice-external": { + Targets: []model.LabelSet{ + { + "__meta_kubernetes_service_port_protocol": "TCP", + "__address__": "testservice-external.default.svc:31900", + 
"__meta_kubernetes_service_port_name": "testport", + "__meta_kubernetes_service_external_name": "FooExternalName", + }, + }, + Labels: model.LabelSet{ + "__meta_kubernetes_service_name": "testservice-external", + "__meta_kubernetes_namespace": "default", + }, + Source: "svc/default/testservice-external", + }, }, }.Run(t) } @@ -135,13 +178,15 @@ func TestServiceDiscoveryUpdate(t *testing.T) { Targets: []model.LabelSet{ { "__meta_kubernetes_service_port_protocol": "TCP", - "__address__": "testservice.default.svc:30900", - "__meta_kubernetes_service_port_name": "testport0", + "__address__": "testservice.default.svc:30900", + "__meta_kubernetes_service_cluster_ip": "10.0.0.1", + "__meta_kubernetes_service_port_name": "testport0", }, { "__meta_kubernetes_service_port_protocol": "UDP", - "__address__": "testservice.default.svc:30901", - "__meta_kubernetes_service_port_name": "testport1", + "__address__": "testservice.default.svc:30901", + "__meta_kubernetes_service_cluster_ip": "10.0.0.1", + "__meta_kubernetes_service_port_name": "testport1", }, }, Labels: model.LabelSet{ @@ -175,8 +220,9 @@ func TestServiceDiscoveryNamespaces(t *testing.T) { Targets: []model.LabelSet{ { "__meta_kubernetes_service_port_protocol": "TCP", - "__address__": "testservice.ns1.svc:30900", - "__meta_kubernetes_service_port_name": "testport", + "__address__": "testservice.ns1.svc:30900", + "__meta_kubernetes_service_cluster_ip": "10.0.0.1", + "__meta_kubernetes_service_port_name": "testport", }, }, Labels: model.LabelSet{ @@ -189,8 +235,9 @@ func TestServiceDiscoveryNamespaces(t *testing.T) { Targets: []model.LabelSet{ { "__meta_kubernetes_service_port_protocol": "TCP", - "__address__": "testservice.ns2.svc:30900", - "__meta_kubernetes_service_port_name": "testport", + "__address__": "testservice.ns2.svc:30900", + "__meta_kubernetes_service_cluster_ip": "10.0.0.1", + "__meta_kubernetes_service_port_name": "testport", }, }, Labels: model.LabelSet{ diff --git a/discovery/manager.go b/discovery/manager.go index 00a6258282..450b92b5eb 100644 --- a/discovery/manager.go +++ b/discovery/manager.go @@ -41,11 +41,12 @@ import ( ) var ( - failedConfigs = prometheus.NewCounter( + failedConfigs = prometheus.NewCounterVec( prometheus.CounterOpts{ Name: "prometheus_sd_configs_failed_total", Help: "Total number of service discovery configurations that failed to load.", }, + []string{"name"}, ) discoveredTargets = prometheus.NewGaugeVec( prometheus.GaugeOpts{ @@ -54,23 +55,26 @@ var ( }, []string{"name", "config"}, ) - receivedUpdates = prometheus.NewCounter( + receivedUpdates = prometheus.NewCounterVec( prometheus.CounterOpts{ Name: "prometheus_sd_received_updates_total", Help: "Total number of update events received from the SD providers.", }, + []string{"name"}, ) - delayedUpdates = prometheus.NewCounter( + delayedUpdates = prometheus.NewCounterVec( prometheus.CounterOpts{ Name: "prometheus_sd_updates_delayed_total", Help: "Total number of update events that couldn't be sent immediately.", }, + []string{"name"}, ) - sentUpdates = prometheus.NewCounter( + sentUpdates = prometheus.NewCounterVec( prometheus.CounterOpts{ Name: "prometheus_sd_updates_total", Help: "Total number of update events sent to the SD consumers.", }, + []string{"name"}, ) ) @@ -226,7 +230,7 @@ func (m *Manager) updater(ctx context.Context, p *provider, updates chan []*targ case <-ctx.Done(): return case tgs, ok := <-updates: - receivedUpdates.Inc() + receivedUpdates.WithLabelValues(m.name).Inc() if !ok { level.Debug(m.logger).Log("msg", "discoverer channel 
closed", "provider", p.name) return @@ -255,11 +259,11 @@ func (m *Manager) sender() { case <-ticker.C: // Some discoverers send updates too often so we throttle these with the ticker. select { case <-m.triggerSend: - sentUpdates.Inc() + sentUpdates.WithLabelValues(m.name).Inc() select { case m.syncCh <- m.allGroups(): default: - delayedUpdates.Inc() + delayedUpdates.WithLabelValues(m.name).Inc() level.Debug(m.logger).Log("msg", "discovery receiver's channel was full so will retry the next cycle") select { case m.triggerSend <- struct{}{}: @@ -328,7 +332,7 @@ func (m *Manager) registerProviders(cfg sd_config.ServiceDiscoveryConfig, setNam d, err := newDiscoverer() if err != nil { level.Error(m.logger).Log("msg", "Cannot create service discovery", "err", err, "type", t) - failedConfigs.Inc() + failedConfigs.WithLabelValues(m.name).Inc() return } diff --git a/docs/configuration/configuration.md b/docs/configuration/configuration.md index 9461b7e3b8..da8a6accf8 100644 --- a/docs/configuration/configuration.md +++ b/docs/configuration/configuration.md @@ -263,11 +263,10 @@ The following meta labels are available on targets during relabeling: * `__meta_azure_machine_location`: the location the machine runs in * `__meta_azure_machine_name`: the machine name * `__meta_azure_machine_os_type`: the machine operating system -* `__meta_azure_machine_power_state`: the current power state of the machine * `__meta_azure_machine_private_ip`: the machine's private IP * `__meta_azure_machine_resource_group`: the machine's resource group -* `__meta_azure_machine_scale_set`: the name of the scale set which the vm is part of (this value is only set if you are using a [scale set](https://docs.microsoft.com/en-us/azure/virtual-machine-scale-sets/)) * `__meta_azure_machine_tag_`: each tag value of the machine +* `__meta_azure_machine_scale_set`: the name of the scale set which the vm is part of (this value is only set if you are using a [scale set](https://docs.microsoft.com/en-us/azure/virtual-machine-scale-sets/)) See below for the configuration options for Azure discovery: @@ -275,14 +274,18 @@ See below for the configuration options for Azure discovery: # The information to access the Azure API. # The Azure environment. [ environment: | default = AzurePublicCloud ] -# The subscription ID. + +# The authentication method, either OAuth or ManagedIdentity. +# See https://docs.microsoft.com/en-us/azure/active-directory/managed-identities-azure-resources/overview +[ authentication_method: | default = OAuth] +# The subscription ID. Always required. subscription_id: -# The tenant ID. -tenant_id: -# The client ID. -client_id: -# The client secret. -client_secret: +# Optional tenant ID. Only required with authentication_method OAuth. +[ tenant_id: ] +# Optional client ID. Only required with authentication_method OAuth. +[ client_id: ] +# Optional client secret. Only required with authentication_method OAuth. +[ client_secret: ] # Refresh interval to re-read the instance list. 
[ refresh_interval: <duration> | default = 300s ] @@ -301,6 +304,7 @@ The following meta labels are available on targets during [relabeling](#relabel_ * `__meta_consul_address`: the address of the target * `__meta_consul_dc`: the datacenter name for the target +* `__meta_consul_tagged_address_<key>`: each node tagged address key value of the target * `__meta_consul_metadata_<key>`: each node metadata key value of the target * `__meta_consul_node`: the node name defined for the target * `__meta_consul_service_address`: the service address of the target @@ -694,9 +698,11 @@ service port. Available meta labels: * `__meta_kubernetes_namespace`: The namespace of the service object. -* `__meta_kubernetes_service_name`: The name of the service object. -* `__meta_kubernetes_service_label_<labelname>`: The label of the service object. * `__meta_kubernetes_service_annotation_<annotationname>`: The annotation of the service object. +* `__meta_kubernetes_service_cluster_ip`: The cluster IP address of the service. (Does not apply to services of type ExternalName) +* `__meta_kubernetes_service_external_name`: The DNS name of the service. (Applies to services of type ExternalName) +* `__meta_kubernetes_service_label_<labelname>`: The label of the service object. +* `__meta_kubernetes_service_name`: The name of the service object. * `__meta_kubernetes_service_port_name`: Name of the service port for the target. * `__meta_kubernetes_service_port_number`: Number of the service port for the target. * `__meta_kubernetes_service_port_protocol`: Protocol of the service port for the target. diff --git a/docs/querying/basics.md b/docs/querying/basics.md index 7043bca59c..74804f298d 100644 --- a/docs/querying/basics.md +++ b/docs/querying/basics.md @@ -6,10 +6,11 @@ sort_rank: 1 # Querying Prometheus -Prometheus provides a functional expression language that lets the user select -and aggregate time series data in real time. The result of an expression can -either be shown as a graph, viewed as tabular data in Prometheus's expression -browser, or consumed by external systems via the [HTTP API](api.md). +Prometheus provides a functional query language called PromQL (Prometheus Query +Language) that lets the user select and aggregate time series data in real +time. The result of an expression can either be shown as a graph, viewed as +tabular data in Prometheus's expression browser, or consumed by external +systems via the [HTTP API](api.md). ## Examples diff --git a/documentation/examples/kubernetes-rabbitmq/README.md b/documentation/examples/kubernetes-rabbitmq/README.md index 3c1fd6a37d..83f4f9fe7a 100644 --- a/documentation/examples/kubernetes-rabbitmq/README.md +++ b/documentation/examples/kubernetes-rabbitmq/README.md @@ -13,7 +13,7 @@ yet found the exporter and is not scraping data from it. For more details on how to use Kubernetes service discovery take a look at the [documentation](http://prometheus.io/docs/operating/configuration/#kubernetes-sd-configurations-kubernetes_sd_config) -and at the [available examples](./../). After you got Kubernetes service discovery up and running you just need to advertise that RabbitMQ is exposing metrics.
To do that you need to define a service that: diff --git a/go.mod b/go.mod index faa095b507..1eccc9636b 100644 --- a/go.mod +++ b/go.mod @@ -88,7 +88,7 @@ require ( github.com/prometheus/client_golang v0.9.1 github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910 github.com/prometheus/common v0.0.0-20181119215939-b36ad289a3ea - github.com/prometheus/tsdb v0.3.0 + github.com/prometheus/tsdb v0.3.1 github.com/rcrowley/go-metrics v0.0.0-20181016184325-3113b8401b8a // indirect github.com/rlmcpherson/s3gof3r v0.5.0 // indirect github.com/rubyist/circuitbreaker v2.2.1+incompatible // indirect diff --git a/go.sum b/go.sum index ab8060cc16..c92ca4c0ca 100644 --- a/go.sum +++ b/go.sum @@ -36,7 +36,7 @@ github.com/cockroachdb/cockroach-go v0.0.0-20181001143604-e0a95dfd547c h1:2zRrJW github.com/cockroachdb/cockroach-go v0.0.0-20181001143604-e0a95dfd547c/go.mod h1:XGLbWH/ujMcbPbhZq52Nv6UrCghb1yGn//133kEsvDk= github.com/codahale/hdrhistogram v0.0.0-20161010025455-3a0bb77429bd h1:qMd81Ts1T2OTKmB4acZcyKaMtRnY5Y44NuXGX2GFJ1w= github.com/codahale/hdrhistogram v0.0.0-20161010025455-3a0bb77429bd/go.mod h1:sE/e/2PUdi/liOCUjSTXgM1o87ZssimdTWN964YiIeI= -github.com/coreos/etcd v3.3.10+incompatible h1:KjVWqrZ5U0wa3CxY2AxlH6/UcB+PK2td1DcsYhA+HRs= +github.com/coreos/etcd v3.3.10+incompatible h1:jFneRYjIvLMLhDLCzuTuU4rSJUjRplcJQ7pD7MnhC04= github.com/coreos/etcd v3.3.10+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE= github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= @@ -107,7 +107,7 @@ github.com/hashicorp/go-msgpack v0.0.0-20150518234257-fa3f63826f7c h1:BTAbnbegUI github.com/hashicorp/go-msgpack v0.0.0-20150518234257-fa3f63826f7c/go.mod h1:ahLV/dePpqEmjfWmKiqvPkv/twdG7iPBM1vqhUKIvfM= github.com/hashicorp/go-multierror v1.0.0 h1:iVjPR7a6H0tWELX5NxNe7bYopibicUzc7uPribsnS6o= github.com/hashicorp/go-multierror v1.0.0/go.mod h1:dHtQlpGsu+cZNNAkkCN/P3hoUDHhCYQXV3UM06sGGrk= -github.com/hashicorp/go-rootcerts v0.0.0-20160503143440-6bb64b370b90 h1:9HVkPxOpo+yO93Ah4yrO67d/qh0fbLLWbKqhYjyHq9A= +github.com/hashicorp/go-rootcerts v0.0.0-20160503143440-6bb64b370b90 h1:VBj0QYQ0u2MCJzBfeYXGexnAl17GsH1yidnoxCqqD9E= github.com/hashicorp/go-rootcerts v0.0.0-20160503143440-6bb64b370b90/go.mod h1:o4zcYY1e0GEZI6eSEr+43QDYmuGglw1qSO6qdHUHCgg= github.com/hashicorp/go-sockaddr v0.0.0-20180320115054-6d291a969b86 h1:7YOlAIO2YWnJZkQp7B5eFykaIY7C9JndqAFQyVV5BhM= github.com/hashicorp/go-sockaddr v0.0.0-20180320115054-6d291a969b86/go.mod h1:7Xibr9yA9JjQq1JpNB2Vw7kxv8xerXegt+ozgdvDeDU= @@ -209,8 +209,8 @@ github.com/prometheus/common v0.0.0-20181119215939-b36ad289a3ea h1:4RkbEb5XX0Wvu github.com/prometheus/common v0.0.0-20181119215939-b36ad289a3ea/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro= github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d h1:GoAlyOgbOEIFdaDqxJVlbOQ1DtGmZWs/Qau0hIlk+WQ= github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= -github.com/prometheus/tsdb v0.3.0 h1:NQIaA1zfXQWPOWkpfaVBwURsm7nViKLtI3uwYpe8LKs= -github.com/prometheus/tsdb v0.3.0/go.mod h1:qhTCs0VvXwvX/y3TZrWD7rabWM+ijKTux40TwIPHuXU= +github.com/prometheus/tsdb v0.3.1 h1:uGgfubT2MesNpx3T46c5R32RcUoKAPGyWX+4x1orJLE= +github.com/prometheus/tsdb v0.3.1/go.mod h1:qhTCs0VvXwvX/y3TZrWD7rabWM+ijKTux40TwIPHuXU= github.com/rcrowley/go-metrics v0.0.0-20181016184325-3113b8401b8a h1:9ZKAASQSHhDYGoxY8uLVpewe1GDZ2vu2Tr/vTdVAkFQ= 
github.com/rcrowley/go-metrics v0.0.0-20181016184325-3113b8401b8a/go.mod h1:bCqnVzQkZxMG4s8nGwiZ5l3QUCyqpo9Y+/ZMZ9VjZe4= github.com/rlmcpherson/s3gof3r v0.5.0 h1:1izOJpTiohSibfOHuNyEA/yQnAirh05enzEdmhez43k= diff --git a/notifier/notifier.go b/notifier/notifier.go index d1c7814ab7..dbf5996cf9 100644 --- a/notifier/notifier.go +++ b/notifier/notifier.go @@ -123,7 +123,7 @@ type Manager struct { type Options struct { QueueCapacity int ExternalLabels model.LabelSet - RelabelConfigs []*config.RelabelConfig + RelabelConfigs []*relabel.Config // Used for sending HTTP requests to the Alertmanager. Do func(ctx context.Context, client *http.Client, req *http.Request) (*http.Response, error) diff --git a/notifier/notifier_test.go b/notifier/notifier_test.go index a98d7d7141..c5646e8eb8 100644 --- a/notifier/notifier_test.go +++ b/notifier/notifier_test.go @@ -25,6 +25,8 @@ import ( "testing" "time" + "github.com/prometheus/prometheus/pkg/relabel" + yaml "gopkg.in/yaml.v2" config_util "github.com/prometheus/common/config" @@ -236,12 +238,12 @@ func TestExternalLabels(t *testing.T) { h := NewManager(&Options{ QueueCapacity: 3 * maxBatchSize, ExternalLabels: model.LabelSet{"a": "b"}, - RelabelConfigs: []*config.RelabelConfig{ + RelabelConfigs: []*relabel.Config{ { SourceLabels: model.LabelNames{"alertname"}, TargetLabel: "a", Action: "replace", - Regex: config.MustNewRegexp("externalrelabelthis"), + Regex: relabel.MustNewRegexp("externalrelabelthis"), Replacement: "c", }, }, @@ -269,17 +271,17 @@ func TestExternalLabels(t *testing.T) { func TestHandlerRelabel(t *testing.T) { h := NewManager(&Options{ QueueCapacity: 3 * maxBatchSize, - RelabelConfigs: []*config.RelabelConfig{ + RelabelConfigs: []*relabel.Config{ { SourceLabels: model.LabelNames{"alertname"}, Action: "drop", - Regex: config.MustNewRegexp("drop"), + Regex: relabel.MustNewRegexp("drop"), }, { SourceLabels: model.LabelNames{"alertname"}, TargetLabel: "alertname", Action: "replace", - Regex: config.MustNewRegexp("rename"), + Regex: relabel.MustNewRegexp("rename"), Replacement: "renamed", }, }, diff --git a/pkg/relabel/relabel.go b/pkg/relabel/relabel.go index 4f215ca7fc..4335201e75 100644 --- a/pkg/relabel/relabel.go +++ b/pkg/relabel/relabel.go @@ -16,19 +16,170 @@ package relabel import ( "crypto/md5" "fmt" + "regexp" "strings" "github.com/prometheus/common/model" - "github.com/prometheus/prometheus/config" "github.com/prometheus/prometheus/pkg/labels" ) +var ( + relabelTarget = regexp.MustCompile(`^(?:(?:[a-zA-Z_]|\$(?:\{\w+\}|\w+))+\w*)+$`) + + DefaultRelabelConfig = Config{ + Action: Replace, + Separator: ";", + Regex: MustNewRegexp("(.*)"), + Replacement: "$1", + } +) + +// Action is the action to be performed on relabeling. +type Action string + +const ( + // Replace performs a regex replacement. + Replace Action = "replace" + // Keep drops targets for which the input does not match the regex. + Keep Action = "keep" + // Drop drops targets for which the input does match the regex. + Drop Action = "drop" + // HashMod sets a label to the modulus of a hash of labels. + HashMod Action = "hashmod" + // LabelMap copies labels to other labelnames based on a regex. + LabelMap Action = "labelmap" + // LabelDrop drops any label matching the regex. + LabelDrop Action = "labeldrop" + // LabelKeep drops any label not matching the regex. + LabelKeep Action = "labelkeep" +) + +// UnmarshalYAML implements the yaml.Unmarshaler interface. 
+func (a *Action) UnmarshalYAML(unmarshal func(interface{}) error) error { + var s string + if err := unmarshal(&s); err != nil { + return err + } + switch act := Action(strings.ToLower(s)); act { + case Replace, Keep, Drop, HashMod, LabelMap, LabelDrop, LabelKeep: + *a = act + return nil + } + return fmt.Errorf("unknown relabel action %q", s) +} + +// Config is the configuration for relabeling of target label sets. +type Config struct { + // A list of labels from which values are taken and concatenated + // with the configured separator in order. + SourceLabels model.LabelNames `yaml:"source_labels,flow,omitempty"` + // Separator is the string between concatenated values from the source labels. + Separator string `yaml:"separator,omitempty"` + // Regex against which the concatenation is matched. + Regex Regexp `yaml:"regex,omitempty"` + // Modulus to take of the hash of concatenated values from the source labels. + Modulus uint64 `yaml:"modulus,omitempty"` + // TargetLabel is the label to which the resulting string is written in a replacement. + // Regexp interpolation is allowed for the replace action. + TargetLabel string `yaml:"target_label,omitempty"` + // Replacement is the regex replacement pattern to be used. + Replacement string `yaml:"replacement,omitempty"` + // Action is the action to be performed for the relabeling. + Action Action `yaml:"action,omitempty"` +} + +// UnmarshalYAML implements the yaml.Unmarshaler interface. +func (c *Config) UnmarshalYAML(unmarshal func(interface{}) error) error { + *c = DefaultRelabelConfig + type plain Config + if err := unmarshal((*plain)(c)); err != nil { + return err + } + if c.Regex.Regexp == nil { + c.Regex = MustNewRegexp("") + } + if c.Modulus == 0 && c.Action == HashMod { + return fmt.Errorf("relabel configuration for hashmod requires non-zero modulus") + } + if (c.Action == Replace || c.Action == HashMod) && c.TargetLabel == "" { + return fmt.Errorf("relabel configuration for %s action requires 'target_label' value", c.Action) + } + if c.Action == Replace && !relabelTarget.MatchString(c.TargetLabel) { + return fmt.Errorf("%q is invalid 'target_label' for %s action", c.TargetLabel, c.Action) + } + if c.Action == LabelMap && !relabelTarget.MatchString(c.Replacement) { + return fmt.Errorf("%q is invalid 'replacement' for %s action", c.Replacement, c.Action) + } + if c.Action == HashMod && !model.LabelName(c.TargetLabel).IsValid() { + return fmt.Errorf("%q is invalid 'target_label' for %s action", c.TargetLabel, c.Action) + } + + if c.Action == LabelDrop || c.Action == LabelKeep { + if c.SourceLabels != nil || + c.TargetLabel != DefaultRelabelConfig.TargetLabel || + c.Modulus != DefaultRelabelConfig.Modulus || + c.Separator != DefaultRelabelConfig.Separator || + c.Replacement != DefaultRelabelConfig.Replacement { + return fmt.Errorf("%s action requires only 'regex', and no other fields", c.Action) + } + } + + return nil +} + +// Regexp encapsulates a regexp.Regexp and makes it YAML marshalable. +type Regexp struct { + *regexp.Regexp + original string +} + +// NewRegexp creates a new anchored Regexp and returns an error if the +// passed-in regular expression does not compile. +func NewRegexp(s string) (Regexp, error) { + regex, err := regexp.Compile("^(?:" + s + ")$") + return Regexp{ + Regexp: regex, + original: s, + }, err +} + +// MustNewRegexp works like NewRegexp, but panics if the regular expression does not compile. 
+func MustNewRegexp(s string) Regexp { + re, err := NewRegexp(s) + if err != nil { + panic(err) + } + return re +} + +// UnmarshalYAML implements the yaml.Unmarshaler interface. +func (re *Regexp) UnmarshalYAML(unmarshal func(interface{}) error) error { + var s string + if err := unmarshal(&s); err != nil { + return err + } + r, err := NewRegexp(s) + if err != nil { + return err + } + *re = r + return nil +} + +// MarshalYAML implements the yaml.Marshaler interface. +func (re Regexp) MarshalYAML() (interface{}, error) { + if re.original != "" { + return re.original, nil + } + return nil, nil +} + // Process returns a relabeled copy of the given label set. The relabel configurations // are applied in order of input. // If a label set is dropped, nil is returned. // May return the input labelSet modified. -func Process(labels labels.Labels, cfgs ...*config.RelabelConfig) labels.Labels { +func Process(labels labels.Labels, cfgs ...*Config) labels.Labels { for _, cfg := range cfgs { labels = relabel(labels, cfg) if labels == nil { @@ -38,7 +189,7 @@ func Process(labels labels.Labels, cfgs ...*config.RelabelConfig) labels.Labels return labels } -func relabel(lset labels.Labels, cfg *config.RelabelConfig) labels.Labels { +func relabel(lset labels.Labels, cfg *Config) labels.Labels { values := make([]string, 0, len(cfg.SourceLabels)) for _, ln := range cfg.SourceLabels { values = append(values, lset.Get(string(ln))) @@ -48,15 +199,15 @@ func relabel(lset labels.Labels, cfg *config.RelabelConfig) labels.Labels { lb := labels.NewBuilder(lset) switch cfg.Action { - case config.RelabelDrop: + case Drop: if cfg.Regex.MatchString(val) { return nil } - case config.RelabelKeep: + case Keep: if !cfg.Regex.MatchString(val) { return nil } - case config.RelabelReplace: + case Replace: indexes := cfg.Regex.FindStringSubmatchIndex(val) // If there is no match no replacement must take place. 
if indexes == nil { @@ -73,23 +224,23 @@ func relabel(lset labels.Labels, cfg *config.RelabelConfig) labels.Labels { break } lb.Set(string(target), string(res)) - case config.RelabelHashMod: + case HashMod: mod := sum64(md5.Sum([]byte(val))) % cfg.Modulus lb.Set(cfg.TargetLabel, fmt.Sprintf("%d", mod)) - case config.RelabelLabelMap: + case LabelMap: for _, l := range lset { if cfg.Regex.MatchString(l.Name) { res := cfg.Regex.ReplaceAllString(l.Name, cfg.Replacement) lb.Set(res, l.Value) } } - case config.RelabelLabelDrop: + case LabelDrop: for _, l := range lset { if cfg.Regex.MatchString(l.Name) { lb.Del(l.Name) } } - case config.RelabelLabelKeep: + case LabelKeep: for _, l := range lset { if !cfg.Regex.MatchString(l.Name) { lb.Del(l.Name) diff --git a/pkg/relabel/relabel_test.go b/pkg/relabel/relabel_test.go index dd3efa1204..538ac6593d 100644 --- a/pkg/relabel/relabel_test.go +++ b/pkg/relabel/relabel_test.go @@ -18,7 +18,6 @@ import ( "github.com/prometheus/common/model" - "github.com/prometheus/prometheus/config" "github.com/prometheus/prometheus/pkg/labels" "github.com/prometheus/prometheus/util/testutil" ) @@ -26,7 +25,7 @@ import ( func TestRelabel(t *testing.T) { tests := []struct { input labels.Labels - relabel []*config.RelabelConfig + relabel []*Config output labels.Labels }{ { @@ -35,14 +34,14 @@ func TestRelabel(t *testing.T) { "b": "bar", "c": "baz", }), - relabel: []*config.RelabelConfig{ + relabel: []*Config{ { SourceLabels: model.LabelNames{"a"}, - Regex: config.MustNewRegexp("f(.*)"), + Regex: MustNewRegexp("f(.*)"), TargetLabel: "d", Separator: ";", Replacement: "ch${1}-ch${1}", - Action: config.RelabelReplace, + Action: Replace, }, }, output: labels.FromMap(map[string]string{ @@ -58,22 +57,22 @@ func TestRelabel(t *testing.T) { "b": "bar", "c": "baz", }), - relabel: []*config.RelabelConfig{ + relabel: []*Config{ { SourceLabels: model.LabelNames{"a", "b"}, - Regex: config.MustNewRegexp("f(.*);(.*)r"), + Regex: MustNewRegexp("f(.*);(.*)r"), TargetLabel: "a", Separator: ";", Replacement: "b${1}${2}m", // boobam - Action: config.RelabelReplace, + Action: Replace, }, { SourceLabels: model.LabelNames{"c", "a"}, - Regex: config.MustNewRegexp("(b).*b(.*)ba(.*)"), + Regex: MustNewRegexp("(b).*b(.*)ba(.*)"), TargetLabel: "d", Separator: ";", Replacement: "$1$2$2$3", - Action: config.RelabelReplace, + Action: Replace, }, }, output: labels.FromMap(map[string]string{ @@ -87,18 +86,18 @@ func TestRelabel(t *testing.T) { input: labels.FromMap(map[string]string{ "a": "foo", }), - relabel: []*config.RelabelConfig{ + relabel: []*Config{ { SourceLabels: model.LabelNames{"a"}, - Regex: config.MustNewRegexp(".*o.*"), - Action: config.RelabelDrop, + Regex: MustNewRegexp(".*o.*"), + Action: Drop, }, { SourceLabels: model.LabelNames{"a"}, - Regex: config.MustNewRegexp("f(.*)"), + Regex: MustNewRegexp("f(.*)"), TargetLabel: "d", Separator: ";", Replacement: "ch$1-ch$1", - Action: config.RelabelReplace, + Action: Replace, }, }, output: nil, @@ -108,11 +107,11 @@ func TestRelabel(t *testing.T) { "a": "foo", "b": "bar", }), - relabel: []*config.RelabelConfig{ + relabel: []*Config{ { SourceLabels: model.LabelNames{"a"}, - Regex: config.MustNewRegexp(".*o.*"), - Action: config.RelabelDrop, + Regex: MustNewRegexp(".*o.*"), + Action: Drop, }, }, output: nil, @@ -121,14 +120,14 @@ func TestRelabel(t *testing.T) { input: labels.FromMap(map[string]string{ "a": "abc", }), - relabel: []*config.RelabelConfig{ + relabel: []*Config{ { SourceLabels: model.LabelNames{"a"}, - Regex: 
config.MustNewRegexp(".*(b).*"), + Regex: MustNewRegexp(".*(b).*"), TargetLabel: "d", Separator: ";", Replacement: "$1", - Action: config.RelabelReplace, + Action: Replace, }, }, output: labels.FromMap(map[string]string{ @@ -140,11 +139,11 @@ func TestRelabel(t *testing.T) { input: labels.FromMap(map[string]string{ "a": "foo", }), - relabel: []*config.RelabelConfig{ + relabel: []*Config{ { SourceLabels: model.LabelNames{"a"}, - Regex: config.MustNewRegexp("no-match"), - Action: config.RelabelDrop, + Regex: MustNewRegexp("no-match"), + Action: Drop, }, }, output: labels.FromMap(map[string]string{ @@ -155,11 +154,11 @@ func TestRelabel(t *testing.T) { input: labels.FromMap(map[string]string{ "a": "foo", }), - relabel: []*config.RelabelConfig{ + relabel: []*Config{ { SourceLabels: model.LabelNames{"a"}, - Regex: config.MustNewRegexp("f|o"), - Action: config.RelabelDrop, + Regex: MustNewRegexp("f|o"), + Action: Drop, }, }, output: labels.FromMap(map[string]string{ @@ -170,11 +169,11 @@ func TestRelabel(t *testing.T) { input: labels.FromMap(map[string]string{ "a": "foo", }), - relabel: []*config.RelabelConfig{ + relabel: []*Config{ { SourceLabels: model.LabelNames{"a"}, - Regex: config.MustNewRegexp("no-match"), - Action: config.RelabelKeep, + Regex: MustNewRegexp("no-match"), + Action: Keep, }, }, output: nil, @@ -183,11 +182,11 @@ func TestRelabel(t *testing.T) { input: labels.FromMap(map[string]string{ "a": "foo", }), - relabel: []*config.RelabelConfig{ + relabel: []*Config{ { SourceLabels: model.LabelNames{"a"}, - Regex: config.MustNewRegexp("f.*"), - Action: config.RelabelKeep, + Regex: MustNewRegexp("f.*"), + Action: Keep, }, }, output: labels.FromMap(map[string]string{ @@ -199,13 +198,13 @@ func TestRelabel(t *testing.T) { input: labels.FromMap(map[string]string{ "a": "boo", }), - relabel: []*config.RelabelConfig{ + relabel: []*Config{ { SourceLabels: model.LabelNames{"a"}, - Regex: config.MustNewRegexp("f"), + Regex: MustNewRegexp("f"), TargetLabel: "b", Replacement: "bar", - Action: config.RelabelReplace, + Action: Replace, }, }, output: labels.FromMap(map[string]string{ @@ -218,12 +217,12 @@ func TestRelabel(t *testing.T) { "b": "bar", "c": "baz", }), - relabel: []*config.RelabelConfig{ + relabel: []*Config{ { SourceLabels: model.LabelNames{"c"}, TargetLabel: "d", Separator: ";", - Action: config.RelabelHashMod, + Action: HashMod, Modulus: 1000, }, }, @@ -240,11 +239,11 @@ func TestRelabel(t *testing.T) { "b1": "bar", "b2": "baz", }), - relabel: []*config.RelabelConfig{ + relabel: []*Config{ { - Regex: config.MustNewRegexp("(b.*)"), + Regex: MustNewRegexp("(b.*)"), Replacement: "bar_${1}", - Action: config.RelabelLabelMap, + Action: LabelMap, }, }, output: labels.FromMap(map[string]string{ @@ -262,11 +261,11 @@ func TestRelabel(t *testing.T) { "__meta_my_baz": "bbb", "__meta_other": "ccc", }), - relabel: []*config.RelabelConfig{ + relabel: []*Config{ { - Regex: config.MustNewRegexp("__meta_(my.*)"), + Regex: MustNewRegexp("__meta_(my.*)"), Replacement: "${1}", - Action: config.RelabelLabelMap, + Action: LabelMap, }, }, output: labels.FromMap(map[string]string{ @@ -282,11 +281,11 @@ func TestRelabel(t *testing.T) { input: labels.FromMap(map[string]string{ "a": "some-name-value", }), - relabel: []*config.RelabelConfig{ + relabel: []*Config{ { SourceLabels: model.LabelNames{"a"}, - Regex: config.MustNewRegexp("some-([^-]+)-([^,]+)"), - Action: config.RelabelReplace, + Regex: MustNewRegexp("some-([^-]+)-([^,]+)"), + Action: Replace, Replacement: "${2}", TargetLabel: "${1}", }, @@ -300,11 
+299,11 @@ func TestRelabel(t *testing.T) { input: labels.FromMap(map[string]string{ "a": "some-name-value", }), - relabel: []*config.RelabelConfig{ + relabel: []*Config{ { SourceLabels: model.LabelNames{"a"}, - Regex: config.MustNewRegexp("some-([^-]+)-([^,]+)"), - Action: config.RelabelReplace, + Regex: MustNewRegexp("some-([^-]+)-([^,]+)"), + Action: Replace, Replacement: "${3}", TargetLabel: "${1}", }, @@ -317,25 +316,25 @@ func TestRelabel(t *testing.T) { input: labels.FromMap(map[string]string{ "a": "some-name-value", }), - relabel: []*config.RelabelConfig{ + relabel: []*Config{ { SourceLabels: model.LabelNames{"a"}, - Regex: config.MustNewRegexp("some-([^-]+)-([^,]+)"), - Action: config.RelabelReplace, + Regex: MustNewRegexp("some-([^-]+)-([^,]+)"), + Action: Replace, Replacement: "${1}", TargetLabel: "${3}", }, { SourceLabels: model.LabelNames{"a"}, - Regex: config.MustNewRegexp("some-([^-]+)-([^,]+)"), - Action: config.RelabelReplace, + Regex: MustNewRegexp("some-([^-]+)-([^,]+)"), + Action: Replace, Replacement: "${1}", TargetLabel: "0${3}", }, { SourceLabels: model.LabelNames{"a"}, - Regex: config.MustNewRegexp("some-([^-]+)-([^,]+)"), - Action: config.RelabelReplace, + Regex: MustNewRegexp("some-([^-]+)-([^,]+)"), + Action: Replace, Replacement: "${1}", TargetLabel: "-${3}", }, @@ -348,25 +347,25 @@ func TestRelabel(t *testing.T) { input: labels.FromMap(map[string]string{ "__meta_sd_tags": "path:/secret,job:some-job,label:foo=bar", }), - relabel: []*config.RelabelConfig{ + relabel: []*Config{ { SourceLabels: model.LabelNames{"__meta_sd_tags"}, - Regex: config.MustNewRegexp("(?:.+,|^)path:(/[^,]+).*"), - Action: config.RelabelReplace, + Regex: MustNewRegexp("(?:.+,|^)path:(/[^,]+).*"), + Action: Replace, Replacement: "${1}", TargetLabel: "__metrics_path__", }, { SourceLabels: model.LabelNames{"__meta_sd_tags"}, - Regex: config.MustNewRegexp("(?:.+,|^)job:([^,]+).*"), - Action: config.RelabelReplace, + Regex: MustNewRegexp("(?:.+,|^)job:([^,]+).*"), + Action: Replace, Replacement: "${1}", TargetLabel: "job", }, { SourceLabels: model.LabelNames{"__meta_sd_tags"}, - Regex: config.MustNewRegexp("(?:.+,|^)label:([^=]+)=([^,]+).*"), - Action: config.RelabelReplace, + Regex: MustNewRegexp("(?:.+,|^)label:([^=]+)=([^,]+).*"), + Action: Replace, Replacement: "${2}", TargetLabel: "${1}", }, @@ -384,10 +383,10 @@ func TestRelabel(t *testing.T) { "b1": "bar", "b2": "baz", }), - relabel: []*config.RelabelConfig{ + relabel: []*Config{ { - Regex: config.MustNewRegexp("(b.*)"), - Action: config.RelabelLabelKeep, + Regex: MustNewRegexp("(b.*)"), + Action: LabelKeep, }, }, output: labels.FromMap(map[string]string{ @@ -401,10 +400,10 @@ func TestRelabel(t *testing.T) { "b1": "bar", "b2": "baz", }), - relabel: []*config.RelabelConfig{ + relabel: []*Config{ { - Regex: config.MustNewRegexp("(b.*)"), - Action: config.RelabelLabelDrop, + Regex: MustNewRegexp("(b.*)"), + Action: LabelDrop, }, }, output: labels.FromMap(map[string]string{ @@ -418,3 +417,30 @@ func TestRelabel(t *testing.T) { testutil.Equals(t, test.output, res) } } + +func TestTargetLabelValidity(t *testing.T) { + tests := []struct { + str string + valid bool + }{ + {"-label", false}, + {"label", true}, + {"label${1}", true}, + {"${1}label", true}, + {"${1}", true}, + {"${1}label", true}, + {"${", false}, + {"$", false}, + {"${}", false}, + {"foo${", false}, + {"$1", true}, + {"asd$2asd", true}, + {"-foo${1}bar-", false}, + {"_${1}_", true}, + {"foo${bar}foo", true}, + } + for _, test := range tests { + testutil.Assert(t, 
relabelTarget.Match([]byte(test.str)) == test.valid, + "Expected %q to be %v", test.str, test.valid) + } +} diff --git a/pkg/rulefmt/rulefmt.go b/pkg/rulefmt/rulefmt.go index 8acb655128..cfb2fb9856 100644 --- a/pkg/rulefmt/rulefmt.go +++ b/pkg/rulefmt/rulefmt.go @@ -24,7 +24,7 @@ import ( "github.com/prometheus/prometheus/pkg/timestamp" "github.com/prometheus/prometheus/promql" "github.com/prometheus/prometheus/template" - yaml "gopkg.in/yaml.v2" + "gopkg.in/yaml.v2" ) // Error represents semantical errors on parsing rule groups. diff --git a/relabel/relabel.go b/relabel/relabel.go index d506620f1d..1b6bd7fb20 100644 --- a/relabel/relabel.go +++ b/relabel/relabel.go @@ -20,14 +20,16 @@ import ( "github.com/prometheus/common/model" - "github.com/prometheus/prometheus/config" + pkgrelabel "github.com/prometheus/prometheus/pkg/relabel" ) // Process returns a relabeled copy of the given label set. The relabel configurations // are applied in order of input. // If a label set is dropped, nil is returned. // May return the input labelSet modified. -func Process(labels model.LabelSet, cfgs ...*config.RelabelConfig) model.LabelSet { +// TODO(https://github.com/prometheus/prometheus/issues/3647): Get rid of this package in favor of pkg/relabel +// once usage of `model.LabelSet` is removed. +func Process(labels model.LabelSet, cfgs ...*pkgrelabel.Config) model.LabelSet { for _, cfg := range cfgs { labels = relabel(labels, cfg) if labels == nil { @@ -37,7 +39,7 @@ func Process(labels model.LabelSet, cfgs ...*config.RelabelConfig) model.LabelSe return labels } -func relabel(labels model.LabelSet, cfg *config.RelabelConfig) model.LabelSet { +func relabel(labels model.LabelSet, cfg *pkgrelabel.Config) model.LabelSet { values := make([]string, 0, len(cfg.SourceLabels)) for _, ln := range cfg.SourceLabels { values = append(values, string(labels[ln])) @@ -45,15 +47,15 @@ func relabel(labels model.LabelSet, cfg *config.RelabelConfig) model.LabelSet { val := strings.Join(values, cfg.Separator) switch cfg.Action { - case config.RelabelDrop: + case pkgrelabel.Drop: if cfg.Regex.MatchString(val) { return nil } - case config.RelabelKeep: + case pkgrelabel.Keep: if !cfg.Regex.MatchString(val) { return nil } - case config.RelabelReplace: + case pkgrelabel.Replace: indexes := cfg.Regex.FindStringSubmatchIndex(val) // If there is no match no replacement must take place. if indexes == nil { @@ -70,10 +72,10 @@ func relabel(labels model.LabelSet, cfg *config.RelabelConfig) model.LabelSet { break } labels[target] = model.LabelValue(res) - case config.RelabelHashMod: + case pkgrelabel.HashMod: mod := sum64(md5.Sum([]byte(val))) % cfg.Modulus labels[model.LabelName(cfg.TargetLabel)] = model.LabelValue(fmt.Sprintf("%d", mod)) - case config.RelabelLabelMap: + case pkgrelabel.LabelMap: out := make(model.LabelSet, len(labels)) // Take a copy to avoid infinite loops. 
for ln, lv := range labels { @@ -86,13 +88,13 @@ func relabel(labels model.LabelSet, cfg *config.RelabelConfig) model.LabelSet { } } labels = out - case config.RelabelLabelDrop: + case pkgrelabel.LabelDrop: for ln := range labels { if cfg.Regex.MatchString(string(ln)) { delete(labels, ln) } } - case config.RelabelLabelKeep: + case pkgrelabel.LabelKeep: for ln := range labels { if !cfg.Regex.MatchString(string(ln)) { delete(labels, ln) diff --git a/relabel/relabel_test.go b/relabel/relabel_test.go index 52ab2d10a6..ae66d0880e 100644 --- a/relabel/relabel_test.go +++ b/relabel/relabel_test.go @@ -18,14 +18,14 @@ import ( "github.com/prometheus/common/model" - "github.com/prometheus/prometheus/config" + pkgrelabel "github.com/prometheus/prometheus/pkg/relabel" "github.com/prometheus/prometheus/util/testutil" ) func TestRelabel(t *testing.T) { tests := []struct { input model.LabelSet - relabel []*config.RelabelConfig + relabel []*pkgrelabel.Config output model.LabelSet }{ { @@ -34,14 +34,14 @@ func TestRelabel(t *testing.T) { "b": "bar", "c": "baz", }, - relabel: []*config.RelabelConfig{ + relabel: []*pkgrelabel.Config{ { SourceLabels: model.LabelNames{"a"}, - Regex: config.MustNewRegexp("f(.*)"), + Regex: pkgrelabel.MustNewRegexp("f(.*)"), TargetLabel: "d", Separator: ";", Replacement: "ch${1}-ch${1}", - Action: config.RelabelReplace, + Action: pkgrelabel.Replace, }, }, output: model.LabelSet{ @@ -57,22 +57,22 @@ func TestRelabel(t *testing.T) { "b": "bar", "c": "baz", }, - relabel: []*config.RelabelConfig{ + relabel: []*pkgrelabel.Config{ { SourceLabels: model.LabelNames{"a", "b"}, - Regex: config.MustNewRegexp("f(.*);(.*)r"), + Regex: pkgrelabel.MustNewRegexp("f(.*);(.*)r"), TargetLabel: "a", Separator: ";", Replacement: "b${1}${2}m", // boobam - Action: config.RelabelReplace, + Action: pkgrelabel.Replace, }, { SourceLabels: model.LabelNames{"c", "a"}, - Regex: config.MustNewRegexp("(b).*b(.*)ba(.*)"), + Regex: pkgrelabel.MustNewRegexp("(b).*b(.*)ba(.*)"), TargetLabel: "d", Separator: ";", Replacement: "$1$2$2$3", - Action: config.RelabelReplace, + Action: pkgrelabel.Replace, }, }, output: model.LabelSet{ @@ -86,18 +86,18 @@ func TestRelabel(t *testing.T) { input: model.LabelSet{ "a": "foo", }, - relabel: []*config.RelabelConfig{ + relabel: []*pkgrelabel.Config{ { SourceLabels: model.LabelNames{"a"}, - Regex: config.MustNewRegexp(".*o.*"), - Action: config.RelabelDrop, + Regex: pkgrelabel.MustNewRegexp(".*o.*"), + Action: pkgrelabel.Drop, }, { SourceLabels: model.LabelNames{"a"}, - Regex: config.MustNewRegexp("f(.*)"), + Regex: pkgrelabel.MustNewRegexp("f(.*)"), TargetLabel: "d", Separator: ";", Replacement: "ch$1-ch$1", - Action: config.RelabelReplace, + Action: pkgrelabel.Replace, }, }, output: nil, @@ -107,11 +107,11 @@ func TestRelabel(t *testing.T) { "a": "foo", "b": "bar", }, - relabel: []*config.RelabelConfig{ + relabel: []*pkgrelabel.Config{ { SourceLabels: model.LabelNames{"a"}, - Regex: config.MustNewRegexp(".*o.*"), - Action: config.RelabelDrop, + Regex: pkgrelabel.MustNewRegexp(".*o.*"), + Action: pkgrelabel.Drop, }, }, output: nil, @@ -120,14 +120,14 @@ func TestRelabel(t *testing.T) { input: model.LabelSet{ "a": "abc", }, - relabel: []*config.RelabelConfig{ + relabel: []*pkgrelabel.Config{ { SourceLabels: model.LabelNames{"a"}, - Regex: config.MustNewRegexp(".*(b).*"), + Regex: pkgrelabel.MustNewRegexp(".*(b).*"), TargetLabel: "d", Separator: ";", Replacement: "$1", - Action: config.RelabelReplace, + Action: pkgrelabel.Replace, }, }, output: model.LabelSet{ @@ -139,11 +139,11 
@@ func TestRelabel(t *testing.T) { input: model.LabelSet{ "a": "foo", }, - relabel: []*config.RelabelConfig{ + relabel: []*pkgrelabel.Config{ { SourceLabels: model.LabelNames{"a"}, - Regex: config.MustNewRegexp("no-match"), - Action: config.RelabelDrop, + Regex: pkgrelabel.MustNewRegexp("no-match"), + Action: pkgrelabel.Drop, }, }, output: model.LabelSet{ @@ -154,11 +154,11 @@ func TestRelabel(t *testing.T) { input: model.LabelSet{ "a": "foo", }, - relabel: []*config.RelabelConfig{ + relabel: []*pkgrelabel.Config{ { SourceLabels: model.LabelNames{"a"}, - Regex: config.MustNewRegexp("f|o"), - Action: config.RelabelDrop, + Regex: pkgrelabel.MustNewRegexp("f|o"), + Action: pkgrelabel.Drop, }, }, output: model.LabelSet{ @@ -169,11 +169,11 @@ func TestRelabel(t *testing.T) { input: model.LabelSet{ "a": "foo", }, - relabel: []*config.RelabelConfig{ + relabel: []*pkgrelabel.Config{ { SourceLabels: model.LabelNames{"a"}, - Regex: config.MustNewRegexp("no-match"), - Action: config.RelabelKeep, + Regex: pkgrelabel.MustNewRegexp("no-match"), + Action: pkgrelabel.Keep, }, }, output: nil, @@ -182,11 +182,11 @@ func TestRelabel(t *testing.T) { input: model.LabelSet{ "a": "foo", }, - relabel: []*config.RelabelConfig{ + relabel: []*pkgrelabel.Config{ { SourceLabels: model.LabelNames{"a"}, - Regex: config.MustNewRegexp("f.*"), - Action: config.RelabelKeep, + Regex: pkgrelabel.MustNewRegexp("f.*"), + Action: pkgrelabel.Keep, }, }, output: model.LabelSet{ @@ -198,13 +198,13 @@ func TestRelabel(t *testing.T) { input: model.LabelSet{ "a": "boo", }, - relabel: []*config.RelabelConfig{ + relabel: []*pkgrelabel.Config{ { SourceLabels: model.LabelNames{"a"}, - Regex: config.MustNewRegexp("f"), + Regex: pkgrelabel.MustNewRegexp("f"), TargetLabel: "b", Replacement: "bar", - Action: config.RelabelReplace, + Action: pkgrelabel.Replace, }, }, output: model.LabelSet{ @@ -217,12 +217,12 @@ func TestRelabel(t *testing.T) { "b": "bar", "c": "baz", }, - relabel: []*config.RelabelConfig{ + relabel: []*pkgrelabel.Config{ { SourceLabels: model.LabelNames{"c"}, TargetLabel: "d", Separator: ";", - Action: config.RelabelHashMod, + Action: pkgrelabel.HashMod, Modulus: 1000, }, }, @@ -239,11 +239,11 @@ func TestRelabel(t *testing.T) { "b1": "bar", "b2": "baz", }, - relabel: []*config.RelabelConfig{ + relabel: []*pkgrelabel.Config{ { - Regex: config.MustNewRegexp("(b.*)"), + Regex: pkgrelabel.MustNewRegexp("(b.*)"), Replacement: "bar_${1}", - Action: config.RelabelLabelMap, + Action: pkgrelabel.LabelMap, }, }, output: model.LabelSet{ @@ -261,11 +261,11 @@ func TestRelabel(t *testing.T) { "__meta_my_baz": "bbb", "__meta_other": "ccc", }, - relabel: []*config.RelabelConfig{ + relabel: []*pkgrelabel.Config{ { - Regex: config.MustNewRegexp("__meta_(my.*)"), + Regex: pkgrelabel.MustNewRegexp("__meta_(my.*)"), Replacement: "${1}", - Action: config.RelabelLabelMap, + Action: pkgrelabel.LabelMap, }, }, output: model.LabelSet{ @@ -281,11 +281,11 @@ func TestRelabel(t *testing.T) { input: model.LabelSet{ "a": "some-name-value", }, - relabel: []*config.RelabelConfig{ + relabel: []*pkgrelabel.Config{ { SourceLabels: model.LabelNames{"a"}, - Regex: config.MustNewRegexp("some-([^-]+)-([^,]+)"), - Action: config.RelabelReplace, + Regex: pkgrelabel.MustNewRegexp("some-([^-]+)-([^,]+)"), + Action: pkgrelabel.Replace, Replacement: "${2}", TargetLabel: "${1}", }, @@ -299,11 +299,11 @@ func TestRelabel(t *testing.T) { input: model.LabelSet{ "a": "some-name-value", }, - relabel: []*config.RelabelConfig{ + relabel: []*pkgrelabel.Config{ { SourceLabels: 
model.LabelNames{"a"}, - Regex: config.MustNewRegexp("some-([^-]+)-([^,]+)"), - Action: config.RelabelReplace, + Regex: pkgrelabel.MustNewRegexp("some-([^-]+)-([^,]+)"), + Action: pkgrelabel.Replace, Replacement: "${3}", TargetLabel: "${1}", }, @@ -316,25 +316,25 @@ func TestRelabel(t *testing.T) { input: model.LabelSet{ "a": "some-name-value", }, - relabel: []*config.RelabelConfig{ + relabel: []*pkgrelabel.Config{ { SourceLabels: model.LabelNames{"a"}, - Regex: config.MustNewRegexp("some-([^-]+)-([^,]+)"), - Action: config.RelabelReplace, + Regex: pkgrelabel.MustNewRegexp("some-([^-]+)-([^,]+)"), + Action: pkgrelabel.Replace, Replacement: "${1}", TargetLabel: "${3}", }, { SourceLabels: model.LabelNames{"a"}, - Regex: config.MustNewRegexp("some-([^-]+)-([^,]+)"), - Action: config.RelabelReplace, + Regex: pkgrelabel.MustNewRegexp("some-([^-]+)-([^,]+)"), + Action: pkgrelabel.Replace, Replacement: "${1}", TargetLabel: "0${3}", }, { SourceLabels: model.LabelNames{"a"}, - Regex: config.MustNewRegexp("some-([^-]+)-([^,]+)"), - Action: config.RelabelReplace, + Regex: pkgrelabel.MustNewRegexp("some-([^-]+)-([^,]+)"), + Action: pkgrelabel.Replace, Replacement: "${1}", TargetLabel: "-${3}", }, @@ -347,25 +347,25 @@ func TestRelabel(t *testing.T) { input: model.LabelSet{ "__meta_sd_tags": "path:/secret,job:some-job,label:foo=bar", }, - relabel: []*config.RelabelConfig{ + relabel: []*pkgrelabel.Config{ { SourceLabels: model.LabelNames{"__meta_sd_tags"}, - Regex: config.MustNewRegexp("(?:.+,|^)path:(/[^,]+).*"), - Action: config.RelabelReplace, + Regex: pkgrelabel.MustNewRegexp("(?:.+,|^)path:(/[^,]+).*"), + Action: pkgrelabel.Replace, Replacement: "${1}", TargetLabel: "__metrics_path__", }, { SourceLabels: model.LabelNames{"__meta_sd_tags"}, - Regex: config.MustNewRegexp("(?:.+,|^)job:([^,]+).*"), - Action: config.RelabelReplace, + Regex: pkgrelabel.MustNewRegexp("(?:.+,|^)job:([^,]+).*"), + Action: pkgrelabel.Replace, Replacement: "${1}", TargetLabel: "job", }, { SourceLabels: model.LabelNames{"__meta_sd_tags"}, - Regex: config.MustNewRegexp("(?:.+,|^)label:([^=]+)=([^,]+).*"), - Action: config.RelabelReplace, + Regex: pkgrelabel.MustNewRegexp("(?:.+,|^)label:([^=]+)=([^,]+).*"), + Action: pkgrelabel.Replace, Replacement: "${2}", TargetLabel: "${1}", }, @@ -383,10 +383,10 @@ func TestRelabel(t *testing.T) { "b1": "bar", "b2": "baz", }, - relabel: []*config.RelabelConfig{ + relabel: []*pkgrelabel.Config{ { - Regex: config.MustNewRegexp("(b.*)"), - Action: config.RelabelLabelKeep, + Regex: pkgrelabel.MustNewRegexp("(b.*)"), + Action: pkgrelabel.LabelKeep, }, }, output: model.LabelSet{ @@ -400,10 +400,10 @@ func TestRelabel(t *testing.T) { "b1": "bar", "b2": "baz", }, - relabel: []*config.RelabelConfig{ + relabel: []*pkgrelabel.Config{ { - Regex: config.MustNewRegexp("(b.*)"), - Action: config.RelabelLabelDrop, + Regex: pkgrelabel.MustNewRegexp("(b.*)"), + Action: pkgrelabel.LabelDrop, }, }, output: model.LabelSet{ diff --git a/rules/alerting.go b/rules/alerting.go index 8a16001f84..e7cec6666e 100644 --- a/rules/alerting.go +++ b/rules/alerting.go @@ -495,22 +495,22 @@ func (r *AlertingRule) HTMLSnippet(pathPrefix string) html_template.HTML { alertNameLabel: model.LabelValue(r.name), } - labels := make(map[string]string, len(r.labels)) + labelsMap := make(map[string]string, len(r.labels)) for _, l := range r.labels { - labels[l.Name] = html_template.HTMLEscapeString(l.Value) + labelsMap[l.Name] = html_template.HTMLEscapeString(l.Value) } - annotations := make(map[string]string, len(r.annotations)) + 
annotationsMap := make(map[string]string, len(r.annotations)) for _, l := range r.annotations { - annotations[l.Name] = html_template.HTMLEscapeString(l.Value) + annotationsMap[l.Name] = html_template.HTMLEscapeString(l.Value) } ar := rulefmt.Rule{ Alert: fmt.Sprintf("%s", pathPrefix+strutil.TableLinkForExpression(alertMetric.String()), r.name), Expr: fmt.Sprintf("%s", pathPrefix+strutil.TableLinkForExpression(r.vector.String()), html_template.HTMLEscapeString(r.vector.String())), For: model.Duration(r.holdDuration), - Labels: labels, - Annotations: annotations, + Labels: labelsMap, + Annotations: annotationsMap, } byt, err := yaml.Marshal(ar) diff --git a/scrape/manager_test.go b/scrape/manager_test.go index 8a2cd1d9e9..fa0765785a 100644 --- a/scrape/manager_test.go +++ b/scrape/manager_test.go @@ -19,6 +19,8 @@ import ( "testing" "time" + "github.com/prometheus/prometheus/pkg/relabel" + "github.com/prometheus/common/model" "github.com/prometheus/prometheus/config" "github.com/prometheus/prometheus/discovery/targetgroup" @@ -28,14 +30,6 @@ import ( yaml "gopkg.in/yaml.v2" ) -func mustNewRegexp(s string) config.Regexp { - re, err := config.NewRegexp(s) - if err != nil { - panic(err) - } - return re -} - func TestPopulateLabels(t *testing.T) { cases := []struct { in labels.Labels @@ -144,10 +138,10 @@ func TestPopulateLabels(t *testing.T) { Scheme: "https", MetricsPath: "/metrics", JobName: "job", - RelabelConfigs: []*config.RelabelConfig{ + RelabelConfigs: []*relabel.Config{ { - Action: config.RelabelReplace, - Regex: mustNewRegexp("(.*)"), + Action: relabel.Replace, + Regex: relabel.MustNewRegexp("(.*)"), SourceLabels: model.LabelNames{"custom"}, Replacement: "${1}", TargetLabel: string(model.AddressLabel), @@ -176,10 +170,10 @@ func TestPopulateLabels(t *testing.T) { Scheme: "https", MetricsPath: "/metrics", JobName: "job", - RelabelConfigs: []*config.RelabelConfig{ + RelabelConfigs: []*relabel.Config{ { - Action: config.RelabelReplace, - Regex: mustNewRegexp("(.*)"), + Action: relabel.Replace, + Regex: relabel.MustNewRegexp("(.*)"), SourceLabels: model.LabelNames{"custom"}, Replacement: "${1}", TargetLabel: string(model.AddressLabel), @@ -249,7 +243,7 @@ scrape_configs: scrapeManager.ApplyConfig(cfg) // As reload never happens, new loop should never be called. - newLoop := func(_ *Target, s scraper, _ int, _ bool, _ []*config.RelabelConfig) loop { + newLoop := func(_ *Target, s scraper, _ int, _ bool, _ []*relabel.Config) loop { t.Fatal("reload happened") return nil } diff --git a/scrape/scrape.go b/scrape/scrape.go index 7fcc41d364..5c8650dcb0 100644 --- a/scrape/scrape.go +++ b/scrape/scrape.go @@ -129,7 +129,7 @@ type scrapePool struct { cancel context.CancelFunc // Constructor for new scrape loops. This is settable for testing convenience. - newLoop func(*Target, scraper, int, bool, []*config.RelabelConfig) loop + newLoop func(*Target, scraper, int, bool, []*relabel.Config) loop } const maxAheadTime = 10 * time.Minute @@ -159,7 +159,7 @@ func newScrapePool(cfg *config.ScrapeConfig, app Appendable, logger log.Logger) loops: map[uint64]loop{}, logger: logger, } - sp.newLoop = func(t *Target, s scraper, limit int, honor bool, mrc []*config.RelabelConfig) loop { + sp.newLoop = func(t *Target, s scraper, limit int, honor bool, mrc []*relabel.Config) loop { // Update the targets retrieval function for metadata to a new scrape cache. 
cache := newScrapeCache() t.setMetadataStore(cache) @@ -366,7 +366,7 @@ func (sp *scrapePool) sync(targets []*Target) { wg.Wait() } -func mutateSampleLabels(lset labels.Labels, target *Target, honor bool, rc []*config.RelabelConfig) labels.Labels { +func mutateSampleLabels(lset labels.Labels, target *Target, honor bool, rc []*relabel.Config) labels.Labels { lb := labels.NewBuilder(lset) if honor { diff --git a/scrape/scrape_test.go b/scrape/scrape_test.go index 29cad05277..35e4eeecd1 100644 --- a/scrape/scrape_test.go +++ b/scrape/scrape_test.go @@ -29,6 +29,8 @@ import ( "testing" "time" + "github.com/prometheus/prometheus/pkg/relabel" + "github.com/prometheus/common/model" "github.com/stretchr/testify/require" @@ -68,10 +70,10 @@ func TestDroppedTargetsList(t *testing.T) { cfg = &config.ScrapeConfig{ JobName: "dropMe", ScrapeInterval: model.Duration(1), - RelabelConfigs: []*config.RelabelConfig{ + RelabelConfigs: []*relabel.Config{ { - Action: config.RelabelDrop, - Regex: mustNewRegexp("dropMe"), + Action: relabel.Drop, + Regex: relabel.MustNewRegexp("dropMe"), SourceLabels: model.LabelNames{"job"}, }, }, @@ -219,7 +221,7 @@ func TestScrapePoolReload(t *testing.T) { } // On starting to run, new loops created on reload check whether their preceding // equivalents have been stopped. - newLoop := func(_ *Target, s scraper, _ int, _ bool, _ []*config.RelabelConfig) loop { + newLoop := func(_ *Target, s scraper, _ int, _ bool, _ []*relabel.Config) loop { l := &testLoop{} l.startFunc = func(interval, timeout time.Duration, errc chan<- error) { if interval != 3*time.Second { diff --git a/storage/buffer.go b/storage/buffer.go index b2b97967c5..1b1d09bf59 100644 --- a/storage/buffer.go +++ b/storage/buffer.go @@ -59,11 +59,7 @@ func (b *BufferedSeriesIterator) Reset(it SeriesIterator) { // ReduceDelta lowers the buffered time delta, for the current SeriesIterator only. func (b *BufferedSeriesIterator) ReduceDelta(delta int64) bool { - if delta > b.buf.delta { - return false - } - b.buf.delta = delta - return true + return b.buf.reduceDelta(delta) } // PeekBack returns the nth previous element of the iterator. If there is none buffered, @@ -222,7 +218,8 @@ func (r *sampleRing) add(t int64, v float64) { r.l++ // Free head of the buffer of samples that just fell out of the range. - for r.buf[r.f].t < t-r.delta { + tmin := t - r.delta + for r.buf[r.f].t < tmin { r.f++ if r.f >= l { r.f -= l @@ -231,6 +228,31 @@ func (r *sampleRing) add(t int64, v float64) { } } +// reduceDelta lowers the buffered time delta, dropping any samples that are +// out of the new delta range. +func (r *sampleRing) reduceDelta(delta int64) bool { + if delta > r.delta { + return false + } + r.delta = delta + + if r.l == 0 { + return true + } + + // Free head of the buffer of samples that just fell out of the range. + l := len(r.buf) + tmin := r.buf[r.i].t - delta + for r.buf[r.f].t < tmin { + r.f++ + if r.f >= l { + r.f -= l + } + r.l-- + } + return true +} + // nthLast returns the nth most recent element added to the ring. 
func (r *sampleRing) nthLast(n int) (int64, float64, bool) { if n > r.l { diff --git a/storage/buffer_test.go b/storage/buffer_test.go index 237756f41c..da4376fa90 100644 --- a/storage/buffer_test.go +++ b/storage/buffer_test.go @@ -163,21 +163,8 @@ func TestBufferedSeriesIteratorNoBadAt(t *testing.T) { } func BenchmarkBufferedSeriesIterator(b *testing.B) { - var ( - samples []sample - lastT int64 - ) - for i := 0; i < b.N; i++ { - lastT += 30 - - samples = append(samples, sample{ - t: lastT, - v: 123, // doesn't matter - }) - } - // Simulate a 5 minute rate. - it := NewBufferIterator(newListSeriesIterator(samples), 5*60) + it := NewBufferIterator(newFakeSeriesIterator(int64(b.N), 30), 5*60) b.SetBytes(int64(b.N * 16)) b.ReportAllocs() @@ -255,3 +242,31 @@ func (it *listSeriesIterator) Seek(t int64) bool { func (it *listSeriesIterator) Err() error { return nil } + +type fakeSeriesIterator struct { + nsamples int64 + step int64 + idx int64 +} + +func newFakeSeriesIterator(nsamples, step int64) *fakeSeriesIterator { + return &fakeSeriesIterator{nsamples: nsamples, step: step, idx: -1} +} + +func (it *fakeSeriesIterator) At() (int64, float64) { + return it.idx * it.step, 123 // value doesn't matter +} + +func (it *fakeSeriesIterator) Next() bool { + it.idx++ + return it.idx < it.nsamples +} + +func (it *fakeSeriesIterator) Seek(t int64) bool { + it.idx = t / it.step + return it.idx < it.nsamples +} + +func (it *fakeSeriesIterator) Err() error { + return nil +} diff --git a/storage/remote/queue_manager.go b/storage/remote/queue_manager.go index 50d1a05635..4c53683826 100644 --- a/storage/remote/queue_manager.go +++ b/storage/remote/queue_manager.go @@ -27,6 +27,7 @@ import ( "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/common/model" "github.com/prometheus/prometheus/config" + pkgrelabel "github.com/prometheus/prometheus/pkg/relabel" "github.com/prometheus/prometheus/prompb" "github.com/prometheus/prometheus/relabel" ) @@ -144,7 +145,7 @@ type QueueManager struct { flushDeadline time.Duration cfg config.QueueConfig externalLabels model.LabelSet - relabelConfigs []*config.RelabelConfig + relabelConfigs []*pkgrelabel.Config client StorageClient queueName string logLimiter *rate.Limiter @@ -161,7 +162,7 @@ type QueueManager struct { } // NewQueueManager builds a new QueueManager. -func NewQueueManager(logger log.Logger, cfg config.QueueConfig, externalLabels model.LabelSet, relabelConfigs []*config.RelabelConfig, client StorageClient, flushDeadline time.Duration) *QueueManager { +func NewQueueManager(logger log.Logger, cfg config.QueueConfig, externalLabels model.LabelSet, relabelConfigs []*pkgrelabel.Config, client StorageClient, flushDeadline time.Duration) *QueueManager { if logger == nil { logger = log.NewNopLogger() } else { diff --git a/vendor/github.com/prometheus/tsdb/.travis.yml b/vendor/github.com/prometheus/tsdb/.travis.yml index 78fe6858fc..c03d14f533 100644 --- a/vendor/github.com/prometheus/tsdb/.travis.yml +++ b/vendor/github.com/prometheus/tsdb/.travis.yml @@ -2,6 +2,10 @@ sudo: required dist: trusty language: go +os: + - windows + - linux + - osx go: - 1.10.x @@ -9,9 +13,12 @@ go: go_import_path: github.com/prometheus/tsdb +before_install: + - if [[ "$TRAVIS_OS_NAME" == "windows" ]]; then choco install make; fi + install: - go get -v -t ./... 
script: # `staticcheck` target is omitted due to linting errors - - make check_license style unused test + - if [[ "$TRAVIS_OS_NAME" == "windows" ]]; then make test; else make check_license style unused test; fi diff --git a/vendor/github.com/prometheus/tsdb/CHANGELOG.md b/vendor/github.com/prometheus/tsdb/CHANGELOG.md index d220de4ce2..1f2abce09e 100644 --- a/vendor/github.com/prometheus/tsdb/CHANGELOG.md +++ b/vendor/github.com/prometheus/tsdb/CHANGELOG.md @@ -1,5 +1,9 @@ ## master / unreleased + +## 0.3.1 +- [BUGFIX] Fixed most windows test and some actual bugs for unclosed file readers. + ## 0.3.0 - [CHANGE] `LastCheckpoint()` used to return just the segment name and now it returns the full relative path. diff --git a/vendor/github.com/prometheus/tsdb/head.go b/vendor/github.com/prometheus/tsdb/head.go index 52a3dfe46e..4d917291a4 100644 --- a/vendor/github.com/prometheus/tsdb/head.go +++ b/vendor/github.com/prometheus/tsdb/head.go @@ -473,14 +473,13 @@ func (h *Head) Init(minValidTime int64) error { if err != nil { return errors.Wrap(err, "open WAL segments") } - defer sr.Close() err = h.loadWAL(wal.NewReader(sr)) + sr.Close() // Close the reader so that if there was an error the repair can remove the corrupted file under Windows. if err == nil { return nil } level.Warn(h.logger).Log("msg", "encountered WAL error, attempting repair", "err", err) - if err := h.wal.Repair(err); err != nil { return errors.Wrap(err, "repair corrupted WAL") } @@ -572,7 +571,7 @@ func (h *Head) Truncate(mint int64) (err error) { } // initTime initializes a head with the first timestamp. This only needs to be called -// for a compltely fresh head with an empty WAL. +// for a completely fresh head with an empty WAL. // Returns true if the initialization took an effect. func (h *Head) initTime(t int64) (initialized bool) { if !atomic.CompareAndSwapInt64(&h.minTime, math.MaxInt64, t) { diff --git a/vendor/github.com/prometheus/tsdb/repair.go b/vendor/github.com/prometheus/tsdb/repair.go index fd40cbb5a9..15f79d5f78 100644 --- a/vendor/github.com/prometheus/tsdb/repair.go +++ b/vendor/github.com/prometheus/tsdb/repair.go @@ -71,7 +71,7 @@ func repairBadIndexVersion(logger log.Logger, dir string) error { if _, err := io.Copy(repl, broken); err != nil { return wrapErr(err, d) } - // Set the 5th byte to 2 to indiciate the correct file format version. + // Set the 5th byte to 2 to indicate the correct file format version. if _, err := repl.WriteAt([]byte{2}, 4); err != nil { return wrapErr(err, d) } diff --git a/vendor/github.com/prometheus/tsdb/wal.go b/vendor/github.com/prometheus/tsdb/wal.go index 28217639d0..684a2fa6a9 100644 --- a/vendor/github.com/prometheus/tsdb/wal.go +++ b/vendor/github.com/prometheus/tsdb/wal.go @@ -322,7 +322,7 @@ func (w *SegmentWAL) putBuffer(b *encbuf) { } // Truncate deletes the values prior to mint and the series which the keep function -// does not indiciate to preserve. +// does not indicate to preserve. func (w *SegmentWAL) Truncate(mint int64, keep func(uint64) bool) error { // The last segment is always active. 
if len(w.files) < 2 { diff --git a/vendor/github.com/prometheus/tsdb/wal/wal.go b/vendor/github.com/prometheus/tsdb/wal/wal.go index 2ed2018c78..5134850fe1 100644 --- a/vendor/github.com/prometheus/tsdb/wal/wal.go +++ b/vendor/github.com/prometheus/tsdb/wal/wal.go @@ -298,9 +298,6 @@ func (w *WAL) Repair(origErr error) error { level.Warn(w.logger).Log("msg", "deleting all segments behind corruption", "segment", cerr.Segment) for _, s := range segs { - if s.index <= cerr.Segment { - continue - } if w.segment.i == s.index { // The active segment needs to be removed, // close it first (Windows!). Can be closed safely @@ -310,6 +307,9 @@ func (w *WAL) Repair(origErr error) error { return errors.Wrap(err, "close active segment") } } + if s.index <= cerr.Segment { + continue + } if err := os.Remove(filepath.Join(w.dir, s.name)); err != nil { return errors.Wrapf(err, "delete segment:%v", s.index) } diff --git a/vendor/modules.txt b/vendor/modules.txt index d89c38845e..948d30fb06 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -187,7 +187,7 @@ github.com/prometheus/procfs github.com/prometheus/procfs/nfs github.com/prometheus/procfs/xfs github.com/prometheus/procfs/internal/util -# github.com/prometheus/tsdb v0.3.0 +# github.com/prometheus/tsdb v0.3.1 github.com/prometheus/tsdb github.com/prometheus/tsdb/labels github.com/prometheus/tsdb/chunkenc @@ -228,8 +228,8 @@ golang.org/x/net/internal/socket golang.org/x/net/idna golang.org/x/net/lex/httplex # golang.org/x/oauth2 v0.0.0-20160608215109-65a8d08c6292 -golang.org/x/oauth2 golang.org/x/oauth2/google +golang.org/x/oauth2 golang.org/x/oauth2/internal golang.org/x/oauth2/jws golang.org/x/oauth2/jwt @@ -251,12 +251,12 @@ google.golang.org/api/gensupport google.golang.org/api/googleapi google.golang.org/api/googleapi/internal/uritemplates # google.golang.org/appengine v0.0.0-20170522224838-a2f4131514e5 -google.golang.org/appengine/urlfetch google.golang.org/appengine +google.golang.org/appengine/urlfetch google.golang.org/appengine/internal -google.golang.org/appengine/internal/urlfetch google.golang.org/appengine/internal/app_identity google.golang.org/appengine/internal/modules +google.golang.org/appengine/internal/urlfetch google.golang.org/appengine/internal/base google.golang.org/appengine/internal/datastore google.golang.org/appengine/internal/log diff --git a/web/api/v2/api.go b/web/api/v2/api.go index 9799875a37..fd284c9695 100644 --- a/web/api/v2/api.go +++ b/web/api/v2/api.go @@ -139,7 +139,7 @@ func (s *AdminDisabled) TSDBCleanTombstones(_ old_ctx.Context, _ *pb.TSDBCleanTo return nil, status.Error(codes.Unavailable, "Admin APIs are disabled") } -// DeleteSeries imeplements pb.AdminServer. +// DeleteSeries implements pb.AdminServer. func (s *AdminDisabled) DeleteSeries(_ old_ctx.Context, r *pb.SeriesDeleteRequest) (*pb.SeriesDeleteResponse, error) { return nil, status.Error(codes.Unavailable, "Admin APIs are disabled") }
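
The discovery/manager.go hunks above turn the manager's flat counters into counter vectors keyed by the owning manager's name, so the scrape and notify discovery managers expose separate series from one process. A minimal sketch of that pattern follows; the metric name and label values here are illustrative, not taken from the patch.

```go
package main

import (
	"fmt"

	"github.com/prometheus/client_golang/prometheus"
	dto "github.com/prometheus/client_model/go"
)

// sdUpdates mirrors the pattern now used for the discovery manager's
// counters: one counter vector, partitioned by the owning manager's name.
// The metric name is made up for this example.
var sdUpdates = prometheus.NewCounterVec(
	prometheus.CounterOpts{
		Name: "example_sd_updates_total",
		Help: "Update events, partitioned by the manager that handled them.",
	},
	[]string{"name"},
)

func main() {
	prometheus.MustRegister(sdUpdates)

	// Each manager increments its own child series via WithLabelValues.
	sdUpdates.WithLabelValues("scrape").Inc()
	sdUpdates.WithLabelValues("notify").Add(2)

	var m dto.Metric
	if err := sdUpdates.WithLabelValues("notify").Write(&m); err != nil {
		panic(err)
	}
	fmt.Println(m.GetCounter().GetValue()) // 2
}
```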
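
Relabeling now lives in pkg/relabel with its own Config, Action and Regexp types, and Process accepts []*relabel.Config directly, which is why the notifier, scrape and remote-write call sites above change type. A short sketch of calling the relocated API; the rule and label values are made up for illustration.

```go
package main

import (
	"fmt"

	"github.com/prometheus/common/model"

	"github.com/prometheus/prometheus/pkg/labels"
	"github.com/prometheus/prometheus/pkg/relabel"
)

func main() {
	// Two rules built directly against the new types.
	cfgs := []*relabel.Config{
		{
			SourceLabels: model.LabelNames{"__meta_kubernetes_service_name"},
			Regex:        relabel.MustNewRegexp("(.+)"),
			Separator:    ";",
			TargetLabel:  "service",
			Replacement:  "${1}",
			Action:       relabel.Replace,
		},
		{
			SourceLabels: model.LabelNames{"job"},
			Regex:        relabel.MustNewRegexp("drop-me"),
			Separator:    ";",
			Action:       relabel.Drop,
		},
	}

	lset := labels.FromMap(map[string]string{
		"__meta_kubernetes_service_name": "testservice",
		"job":                            "keep-me",
	})

	// Process applies the rules in order and returns nil if the set is dropped.
	fmt.Println(relabel.Process(lset, cfgs...))
}
```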
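
NewRegexp and MustNewRegexp wrap the expression in ^(?:...)$ before compiling, so relabel regexes always match the whole concatenated value rather than a substring. A small illustration of that behaviour, with made-up inputs:

```go
package main

import (
	"fmt"

	"github.com/prometheus/prometheus/pkg/relabel"
)

func main() {
	// The expression below is compiled as ^(?:f(.*))$.
	re := relabel.MustNewRegexp("f(.*)")
	fmt.Println(re.MatchString("foo"))     // true
	fmt.Println(re.MatchString("qux-foo")) // false: only full-string matches succeed
}
```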
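
Config.UnmarshalYAML starts from DefaultRelabelConfig and then validates the result, so both the defaults and the configuration errors surface as soon as a rule is decoded. A hedged sketch of that round trip; the label names are chosen only to echo the new service discovery labels documented above.

```go
package main

import (
	"fmt"

	"gopkg.in/yaml.v2"

	"github.com/prometheus/prometheus/pkg/relabel"
)

func main() {
	// Only the fields that differ from DefaultRelabelConfig need to be given;
	// action, separator, regex and replacement are filled in during unmarshaling.
	var cfg relabel.Config
	doc := []byte("source_labels: [__meta_kubernetes_service_cluster_ip]\ntarget_label: cluster_ip\n")
	if err := yaml.Unmarshal(doc, &cfg); err != nil {
		panic(err)
	}
	fmt.Println(cfg.Action, cfg.Regex.String(), cfg.Replacement) // e.g. replace ^(?:(.*))$ $1

	// Validation also runs at unmarshal time: hashmod without a modulus is rejected.
	bad := []byte("source_labels: [instance]\ntarget_label: shard\naction: hashmod\n")
	err := yaml.Unmarshal(bad, &relabel.Config{})
	fmt.Println(err != nil) // true
}
```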
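
In storage/buffer.go the delta shrinking moves into sampleRing.reduceDelta, which now also drops already-buffered samples that fall outside the smaller window. A sketch of the exported behaviour, using a toy iterator in the spirit of the fakeSeriesIterator added for the benchmark; the timestamps and sample counts are arbitrary.

```go
package main

import (
	"fmt"

	"github.com/prometheus/prometheus/storage"
)

// stepIterator emits nsamples samples spaced step apart; values are arbitrary.
type stepIterator struct {
	nsamples, step, idx int64
}

func (it *stepIterator) At() (int64, float64) { return it.idx * it.step, float64(it.idx) }
func (it *stepIterator) Next() bool           { it.idx++; return it.idx < it.nsamples }
func (it *stepIterator) Seek(t int64) bool    { it.idx = t / it.step; return it.idx < it.nsamples }
func (it *stepIterator) Err() error           { return nil }

func main() {
	// Buffer five minutes of lookback (timestamps here are milliseconds).
	it := storage.NewBufferIterator(&stepIterator{nsamples: 1000, step: 1000, idx: -1}, 5*60*1000)
	for it.Next() {
	}

	// Shrinking the window is allowed and trims buffered samples outside it;
	// growing the window again is not.
	fmt.Println(it.ReduceDelta(60 * 1000))      // true
	fmt.Println(it.ReduceDelta(10 * 60 * 1000)) // false
}
```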