diff --git a/.golangci.yml b/.golangci.yml
index dd87e3fedb..37b488f812 100644
--- a/.golangci.yml
+++ b/.golangci.yml
@@ -164,31 +164,9 @@ linters:
       - name: unused-parameter
       - name: var-declaration
       - name: var-naming
-    staticcheck:
-      checks:
-        - all # Enable all checks.
-        # FIXME: We should enable this check once we have fixed all the issues.
-        - -QF1001
-        - -QF1002
-        - -QF1003
-        - -QF1006
-        - -QF1007
-        - -QF1008
-        - -QF1009
-        - -QF1010
-        - -QF1012
-        - -ST1000
-        - -ST1003
-        - -ST1005
-        - -ST1012
-        - -ST1016
-        - -ST1020
     testifylint:
       disable:
-        - empty # FIXME
-        - equal-values # FIXME
         - float-compare
-        - formatter # FIXME
         - go-require
         - len # FIXME
         - useless-assert # FIXME: wait for golangci-lint > v2.0.2
diff --git a/cmd/prometheus/main_test.go b/cmd/prometheus/main_test.go
index 0d0ab56eb4..5bb72dd2c2 100644
--- a/cmd/prometheus/main_test.go
+++ b/cmd/prometheus/main_test.go
@@ -268,7 +268,7 @@ func TestWALSegmentSizeBounds(t *testing.T) {
 		go func() { done <- prom.Wait() }()
 		select {
 		case err := <-done:
-			require.Fail(t, "prometheus should be still running: %v", err)
+			t.Fatalf("prometheus should be still running: %v", err)
 		case <-time.After(startupTime):
 			prom.Process.Kill()
 			<-done
@@ -332,7 +332,7 @@ func TestMaxBlockChunkSegmentSizeBounds(t *testing.T) {
 		go func() { done <- prom.Wait() }()
 		select {
 		case err := <-done:
-			require.Fail(t, "prometheus should be still running: %v", err)
+			t.Fatalf("prometheus should be still running: %v", err)
 		case <-time.After(startupTime):
 			prom.Process.Kill()
 			<-done
diff --git a/cmd/prometheus/query_log_test.go b/cmd/prometheus/query_log_test.go
index 25abf5e965..c63be02d4d 100644
--- a/cmd/prometheus/query_log_test.go
+++ b/cmd/prometheus/query_log_test.go
@@ -88,7 +88,7 @@ func (p *queryLogTest) setQueryLog(t *testing.T, queryLogFile string) {
 	_, err = p.configFile.Seek(0, 0)
 	require.NoError(t, err)
 	if queryLogFile != "" {
-		_, err = p.configFile.Write([]byte(fmt.Sprintf("global:\n query_log_file: %s\n", queryLogFile)))
+		_, err = fmt.Fprintf(p.configFile, "global:\n query_log_file: %s\n", queryLogFile)
 		require.NoError(t, err)
 	}
 	_, err = p.configFile.Write([]byte(p.configuration()))
diff --git a/cmd/promtool/main_test.go b/cmd/promtool/main_test.go
index d1390f0d67..f922d18c4e 100644
--- a/cmd/promtool/main_test.go
+++ b/cmd/promtool/main_test.go
@@ -510,7 +510,7 @@ func TestCheckRules(t *testing.T) {
 		os.Stdin = r
 		exitCode := CheckRules(newRulesLintConfig(lintOptionDuplicateRules, false, false))
-		require.Equal(t, successExitCode, exitCode, "")
+		require.Equal(t, successExitCode, exitCode)
 	})
 	t.Run("rules-bad", func(t *testing.T) {
@@ -532,7 +532,7 @@ func TestCheckRules(t *testing.T) {
 		os.Stdin = r
 		exitCode := CheckRules(newRulesLintConfig(lintOptionDuplicateRules, false, false))
-		require.Equal(t, failureExitCode, exitCode, "")
+		require.Equal(t, failureExitCode, exitCode)
 	})
 	t.Run("rules-lint-fatal", func(t *testing.T) {
@@ -554,7 +554,7 @@ func TestCheckRules(t *testing.T) {
 		os.Stdin = r
 		exitCode := CheckRules(newRulesLintConfig(lintOptionDuplicateRules, true, false))
-		require.Equal(t, lintErrExitCode, exitCode, "")
+		require.Equal(t, lintErrExitCode, exitCode)
 	})
 }
@@ -572,19 +572,19 @@ func TestCheckRulesWithRuleFiles(t *testing.T) {
 	t.Run("rules-good", func(t *testing.T) {
 		t.Parallel()
 		exitCode := CheckRules(newRulesLintConfig(lintOptionDuplicateRules, false, false), "./testdata/rules.yml")
-		require.Equal(t, successExitCode, exitCode, "")
+		require.Equal(t, successExitCode, exitCode)
 	})
 	t.Run("rules-bad", func(t *testing.T) {
 		t.Parallel()
 		exitCode := CheckRules(newRulesLintConfig(lintOptionDuplicateRules, false, false), "./testdata/rules-bad.yml")
-		require.Equal(t, failureExitCode, exitCode, "")
+		require.Equal(t, failureExitCode, exitCode)
 	})
 	t.Run("rules-lint-fatal", func(t *testing.T) {
 		t.Parallel()
 		exitCode := CheckRules(newRulesLintConfig(lintOptionDuplicateRules, true, false), "./testdata/prometheus-rules.lint.yml")
-		require.Equal(t, lintErrExitCode, exitCode, "")
+		require.Equal(t, lintErrExitCode, exitCode)
 	})
 }
diff --git a/cmd/promtool/unittest.go b/cmd/promtool/unittest.go
index 9bc1af1f61..4910a0b1a6 100644
--- a/cmd/promtool/unittest.go
+++ b/cmd/promtool/unittest.go
@@ -321,12 +321,8 @@ func (tg *testGroup) test(testname string, evalInterval time.Duration, groupOrde
 			return errs
 		}
-		for {
-			if !(curr < len(alertEvalTimes) && ts.Sub(mint) <= time.Duration(alertEvalTimes[curr]) &&
-				time.Duration(alertEvalTimes[curr]) < ts.Add(evalInterval).Sub(mint)) {
-				break
-			}
-
+		for curr < len(alertEvalTimes) && ts.Sub(mint) <= time.Duration(alertEvalTimes[curr]) &&
+			time.Duration(alertEvalTimes[curr]) < ts.Add(evalInterval).Sub(mint) {
 			// We need to check alerts for this time.
 			// If 'ts <= `eval_time=alertEvalTimes[curr]` < ts+evalInterval'
 			// then we compare alerts with the Eval at `ts`.
diff --git a/discovery/aws/lightsail.go b/discovery/aws/lightsail.go
index fb249b8256..ff1059ede0 100644
--- a/discovery/aws/lightsail.go
+++ b/discovery/aws/lightsail.go
@@ -115,6 +115,7 @@ func (c *LightsailSDConfig) UnmarshalYAML(unmarshal func(interface{}) error) err
 		region, err := metadata.Region()
 		if err != nil {
+			//nolint:staticcheck // Capitalized first word.
 			return errors.New("Lightsail SD configuration requires a region")
 		}
 		c.Region = region
diff --git a/discovery/consul/consul_test.go b/discovery/consul/consul_test.go
index ba3f63ccb5..ea896ce31b 100644
--- a/discovery/consul/consul_test.go
+++ b/discovery/consul/consul_test.go
@@ -425,14 +425,14 @@ func TestGetDatacenterShouldReturnError(t *testing.T) {
 		d := newDiscovery(t, config)
 		// Should be empty if not initialized.
-		require.Equal(t, "", d.clientDatacenter)
+		require.Empty(t, d.clientDatacenter)
 		err = d.getDatacenter()
 		// An error should be returned.
 		require.EqualError(t, err, tc.errMessage)
 		// Should still be empty.
-		require.Equal(t, "", d.clientDatacenter)
+		require.Empty(t, d.clientDatacenter)
 	}
 }
diff --git a/discovery/kubernetes/pod.go b/discovery/kubernetes/pod.go
index 8704a66239..169c6a78a1 100644
--- a/discovery/kubernetes/pod.go
+++ b/discovery/kubernetes/pod.go
@@ -219,7 +219,7 @@ func podLabels(pod *apiv1.Pod) model.LabelSet {
 		podPhaseLabel:    lv(string(pod.Status.Phase)),
 		podNodeNameLabel: lv(pod.Spec.NodeName),
 		podHostIPLabel:   lv(pod.Status.HostIP),
-		podUID:           lv(string(pod.ObjectMeta.UID)),
+		podUID:           lv(string(pod.UID)),
 	}
 	addObjectMetaLabels(ls, pod.ObjectMeta, RolePod)
diff --git a/discovery/linode/linode.go b/discovery/linode/linode.go
index 453901bc05..033025f840 100644
--- a/discovery/linode/linode.go
+++ b/discovery/linode/linode.go
@@ -194,7 +194,7 @@ func (d *Discovery) refresh(ctx context.Context) ([]*targetgroup.Group, error) {
 	events, err := d.client.ListEvents(ctx, &eventsOpts)
 	if err != nil {
 		var e *linodego.Error
-		if !(errors.As(err, &e) && e.Code == http.StatusUnauthorized) {
+		if !errors.As(err, &e) || e.Code != http.StatusUnauthorized {
 			return nil, err
 		}
 		// If we get a 401, the token doesn't have `events:read_only` scope.
diff --git a/discovery/manager_test.go b/discovery/manager_test.go
index 0ff82d5415..1dd10baf47 100644
--- a/discovery/manager_test.go
+++ b/discovery/manager_test.go
@@ -695,7 +695,7 @@ func TestTargetUpdatesOrder(t *testing.T) {
 			for x := 0; x < totalUpdatesCount; x++ {
 				select {
 				case <-ctx.Done():
-					require.FailNow(t, "%d: no update arrived within the timeout limit", x)
+					t.Fatalf("%d: no update arrived within the timeout limit", x)
 				case tgs := <-provUpdates:
 					discoveryManager.updateGroup(poolKey{setName: strconv.Itoa(i), provider: tc.title}, tgs)
 					for _, got := range discoveryManager.allGroups() {
@@ -769,12 +769,10 @@ func verifyPresence(t *testing.T, tSets map[poolKey]map[string]*targetgroup.Grou
 			}
 		}
 	}
-	if match != present {
-		msg := ""
-		if !present {
-			msg = "not"
-		}
-		require.FailNow(t, "%q should %s be present in Targets labels: %q", label, msg, mergedTargets)
+	if present {
+		require.Truef(t, match, "%q must be present in Targets labels: %q", label, mergedTargets)
+	} else {
+		require.Falsef(t, match, "%q must be absent in Targets labels: %q", label, mergedTargets)
 	}
 }
@@ -1091,9 +1089,9 @@ func TestTargetSetRecreatesEmptyStaticConfigs(t *testing.T) {
 	targetGroups, ok := discoveryManager.targets[p]
 	require.True(t, ok, "'%v' should be present in targets", p)
 	// Otherwise the targetGroups will leak, see https://github.com/prometheus/prometheus/issues/12436.
-	require.Empty(t, targetGroups, 0, "'%v' should no longer have any associated target groups", p)
+	require.Empty(t, targetGroups, "'%v' should no longer have any associated target groups", p)
 	require.Len(t, syncedTargets, 1, "an update with no targetGroups should still be sent.")
-	require.Empty(t, syncedTargets["prometheus"], 0)
+	require.Empty(t, syncedTargets["prometheus"])
 }
 func TestIdenticalConfigurationsAreCoalesced(t *testing.T) {
@@ -1373,10 +1371,10 @@ func TestCoordinationWithReceiver(t *testing.T) {
 			time.Sleep(expected.delay)
 			select {
 			case <-ctx.Done():
-				require.FailNow(t, "step %d: no update received in the expected timeframe", i)
+				t.Fatalf("step %d: no update received in the expected timeframe", i)
 			case tgs, ok := <-mgr.SyncCh():
 				require.True(t, ok, "step %d: discovery manager channel is closed", i)
-				require.Equal(t, len(expected.tgs), len(tgs), "step %d: targets mismatch", i)
+				require.Len(t, tgs, len(expected.tgs), "step %d: targets mismatch", i)
 				for k := range expected.tgs {
 					_, ok := tgs[k]
diff --git a/discovery/marathon/marathon_test.go b/discovery/marathon/marathon_test.go
index 61d8ef900d..18ec7bdf19 100644
--- a/discovery/marathon/marathon_test.go
+++ b/discovery/marathon/marathon_test.go
@@ -202,7 +202,7 @@ func TestMarathonSDSendGroupWithMultiplePort(t *testing.T) {
 	tgt = tg.Targets[1]
 	require.Equal(t, "mesos-slave1:32000", string(tgt[model.AddressLabel]), "Wrong target address.")
-	require.Equal(t, "", string(tgt[model.LabelName(portMappingLabelPrefix+"prometheus")]),
+	require.Empty(t, string(tgt[model.LabelName(portMappingLabelPrefix+"prometheus")]),
 		"Wrong portMappings label from the second port: %s", tgt[model.AddressLabel])
 }
@@ -300,9 +300,9 @@ func TestMarathonSDSendGroupWithPortDefinitions(t *testing.T) {
 	tgt := tg.Targets[0]
 	require.Equal(t, "mesos-slave1:1234", string(tgt[model.AddressLabel]), "Wrong target address.")
-	require.Equal(t, "", string(tgt[model.LabelName(portMappingLabelPrefix+"prometheus")]),
+	require.Empty(t, string(tgt[model.LabelName(portMappingLabelPrefix+"prometheus")]),
 		"Wrong portMappings label from the first port.")
-	require.Equal(t, "", string(tgt[model.LabelName(portDefinitionLabelPrefix+"prometheus")]),
+	require.Empty(t, string(tgt[model.LabelName(portDefinitionLabelPrefix+"prometheus")]),
 		"Wrong portDefinitions label from the first port.")
 	tgt = tg.Targets[1]
@@ -354,12 +354,12 @@ func TestMarathonSDSendGroupWithPortDefinitionsRequirePorts(t *testing.T) {
 	tgt := tg.Targets[0]
 	require.Equal(t, "mesos-slave1:31000", string(tgt[model.AddressLabel]), "Wrong target address.")
-	require.Equal(t, "", string(tgt[model.LabelName(portMappingLabelPrefix+"prometheus")]), "Wrong portMappings label from the first port.")
-	require.Equal(t, "", string(tgt[model.LabelName(portDefinitionLabelPrefix+"prometheus")]), "Wrong portDefinitions label from the first port.")
+	require.Empty(t, string(tgt[model.LabelName(portMappingLabelPrefix+"prometheus")]), "Wrong portMappings label from the first port.")
+	require.Empty(t, string(tgt[model.LabelName(portDefinitionLabelPrefix+"prometheus")]), "Wrong portDefinitions label from the first port.")
 	tgt = tg.Targets[1]
 	require.Equal(t, "mesos-slave1:32000", string(tgt[model.AddressLabel]), "Wrong target address.")
-	require.Equal(t, "", string(tgt[model.LabelName(portMappingLabelPrefix+"prometheus")]), "Wrong portMappings label from the second port.")
+	require.Empty(t, string(tgt[model.LabelName(portMappingLabelPrefix+"prometheus")]), "Wrong portMappings label from the second port.")
 	require.Equal(t, "yes", string(tgt[model.LabelName(portDefinitionLabelPrefix+"prometheus")]), "Wrong portDefinitions label from the second port.")
 }
@@ -401,13 +401,13 @@ func TestMarathonSDSendGroupWithPorts(t *testing.T) {
 	tgt := tg.Targets[0]
 	require.Equal(t, "mesos-slave1:31000", string(tgt[model.AddressLabel]), "Wrong target address.")
-	require.Equal(t, "", string(tgt[model.LabelName(portMappingLabelPrefix+"prometheus")]), "Wrong portMappings label from the first port.")
-	require.Equal(t, "", string(tgt[model.LabelName(portDefinitionLabelPrefix+"prometheus")]), "Wrong portDefinitions label from the first port.")
+	require.Empty(t, string(tgt[model.LabelName(portMappingLabelPrefix+"prometheus")]), "Wrong portMappings label from the first port.")
+	require.Empty(t, string(tgt[model.LabelName(portDefinitionLabelPrefix+"prometheus")]), "Wrong portDefinitions label from the first port.")
 	tgt = tg.Targets[1]
 	require.Equal(t, "mesos-slave1:32000", string(tgt[model.AddressLabel]), "Wrong target address.")
-	require.Equal(t, "", string(tgt[model.LabelName(portMappingLabelPrefix+"prometheus")]), "Wrong portMappings label from the second port.")
-	require.Equal(t, "", string(tgt[model.LabelName(portDefinitionLabelPrefix+"prometheus")]), "Wrong portDefinitions label from the second port.")
+	require.Empty(t, string(tgt[model.LabelName(portMappingLabelPrefix+"prometheus")]), "Wrong portMappings label from the second port.")
+	require.Empty(t, string(tgt[model.LabelName(portDefinitionLabelPrefix+"prometheus")]), "Wrong portDefinitions label from the second port.")
 }
 func marathonTestAppListWithContainerPortMappings(labels map[string]string, runningTasks int) *appList {
@@ -458,12 +458,12 @@ func TestMarathonSDSendGroupWithContainerPortMappings(t *testing.T) {
 	tgt := tg.Targets[0]
 	require.Equal(t, "mesos-slave1:12345", string(tgt[model.AddressLabel]), "Wrong target address.")
 	require.Equal(t, "yes", string(tgt[model.LabelName(portMappingLabelPrefix+"prometheus")]), "Wrong portMappings label from the first port.")
-	require.Equal(t, "", string(tgt[model.LabelName(portDefinitionLabelPrefix+"prometheus")]), "Wrong portDefinitions label from the first port.")
+	require.Empty(t, string(tgt[model.LabelName(portDefinitionLabelPrefix+"prometheus")]), "Wrong portDefinitions label from the first port.")
 	tgt = tg.Targets[1]
 	require.Equal(t, "mesos-slave1:32000", string(tgt[model.AddressLabel]), "Wrong target address.")
-	require.Equal(t, "", string(tgt[model.LabelName(portMappingLabelPrefix+"prometheus")]), "Wrong portMappings label from the second port.")
-	require.Equal(t, "", string(tgt[model.LabelName(portDefinitionLabelPrefix+"prometheus")]), "Wrong portDefinitions label from the second port.")
+	require.Empty(t, string(tgt[model.LabelName(portMappingLabelPrefix+"prometheus")]), "Wrong portMappings label from the second port.")
+	require.Empty(t, string(tgt[model.LabelName(portDefinitionLabelPrefix+"prometheus")]), "Wrong portDefinitions label from the second port.")
 }
 func marathonTestAppListWithDockerContainerPortMappings(labels map[string]string, runningTasks int) *appList {
@@ -514,12 +514,12 @@ func TestMarathonSDSendGroupWithDockerContainerPortMappings(t *testing.T) {
 	tgt := tg.Targets[0]
 	require.Equal(t, "mesos-slave1:31000", string(tgt[model.AddressLabel]), "Wrong target address.")
 	require.Equal(t, "yes", string(tgt[model.LabelName(portMappingLabelPrefix+"prometheus")]), "Wrong portMappings label from the first port.")
-	require.Equal(t, "", string(tgt[model.LabelName(portDefinitionLabelPrefix+"prometheus")]), "Wrong portDefinitions label from the first port.")
+	require.Empty(t, string(tgt[model.LabelName(portDefinitionLabelPrefix+"prometheus")]), "Wrong portDefinitions label from the first port.")
 	tgt = tg.Targets[1]
 	require.Equal(t, "mesos-slave1:12345", string(tgt[model.AddressLabel]), "Wrong target address.")
-	require.Equal(t, "", string(tgt[model.LabelName(portMappingLabelPrefix+"prometheus")]), "Wrong portMappings label from the second port.")
-	require.Equal(t, "", string(tgt[model.LabelName(portDefinitionLabelPrefix+"prometheus")]), "Wrong portDefinitions label from the second port.")
+	require.Empty(t, string(tgt[model.LabelName(portMappingLabelPrefix+"prometheus")]), "Wrong portMappings label from the second port.")
+	require.Empty(t, string(tgt[model.LabelName(portDefinitionLabelPrefix+"prometheus")]), "Wrong portDefinitions label from the second port.")
 }
 func marathonTestAppListWithContainerNetworkAndPortMappings(labels map[string]string, runningTasks int) *appList {
@@ -574,10 +574,10 @@ func TestMarathonSDSendGroupWithContainerNetworkAndPortMapping(t *testing.T) {
 	tgt := tg.Targets[0]
 	require.Equal(t, "1.2.3.4:8080", string(tgt[model.AddressLabel]), "Wrong target address.")
 	require.Equal(t, "yes", string(tgt[model.LabelName(portMappingLabelPrefix+"prometheus")]), "Wrong portMappings label from the first port.")
-	require.Equal(t, "", string(tgt[model.LabelName(portDefinitionLabelPrefix+"prometheus")]), "Wrong portDefinitions label from the first port.")
+	require.Empty(t, string(tgt[model.LabelName(portDefinitionLabelPrefix+"prometheus")]), "Wrong portDefinitions label from the first port.")
 	tgt = tg.Targets[1]
 	require.Equal(t, "1.2.3.4:1234", string(tgt[model.AddressLabel]), "Wrong target address.")
-	require.Equal(t, "", string(tgt[model.LabelName(portMappingLabelPrefix+"prometheus")]), "Wrong portMappings label from the second port.")
-	require.Equal(t, "", string(tgt[model.LabelName(portDefinitionLabelPrefix+"prometheus")]), "Wrong portDefinitions label from the second port.")
+	require.Empty(t, string(tgt[model.LabelName(portMappingLabelPrefix+"prometheus")]), "Wrong portMappings label from the second port.")
+	require.Empty(t, string(tgt[model.LabelName(portDefinitionLabelPrefix+"prometheus")]), "Wrong portDefinitions label from the second port.")
 }
diff --git a/discovery/moby/docker.go b/discovery/moby/docker.go
index 53a8b2e135..2b640dea82 100644
--- a/discovery/moby/docker.go
+++ b/discovery/moby/docker.go
@@ -235,10 +235,7 @@ func (d *DockerDiscovery) refresh(ctx context.Context) ([]*targetgroup.Group, er
 		containerNetworkMode := container.NetworkMode(c.HostConfig.NetworkMode)
 		if len(networks) == 0 {
 			// Try to lookup shared networks
-			for {
-				if !containerNetworkMode.IsContainer() {
-					break
-				}
+			for containerNetworkMode.IsContainer() {
 				tmpContainer, exists := allContainers[containerNetworkMode.ConnectedContainer()]
 				if !exists {
 					break
diff --git a/discovery/scaleway/instance.go b/discovery/scaleway/instance.go
index 291809be68..162a75e407 100644
--- a/discovery/scaleway/instance.go
+++ b/discovery/scaleway/instance.go
@@ -182,9 +182,10 @@ func (d *instanceDiscovery) refresh(ctx context.Context) ([]*targetgroup.Group,
 		var ipv6Addresses []string
 		for _, ip := range server.PublicIPs {
-			if ip.Family == instance.ServerIPIPFamilyInet {
+			switch ip.Family {
+			case instance.ServerIPIPFamilyInet:
 				ipv4Addresses = append(ipv4Addresses, ip.Address.String())
-			} else if ip.Family == instance.ServerIPIPFamilyInet6 {
+			case instance.ServerIPIPFamilyInet6:
 				ipv6Addresses = append(ipv6Addresses, ip.Address.String())
 			}
 		}
diff --git a/discovery/uyuni/uyuni.go b/discovery/uyuni/uyuni.go
index 11b1888db4..a7745eed46 100644
--- a/discovery/uyuni/uyuni.go
+++ b/discovery/uyuni/uyuni.go
@@ -141,18 +141,22 @@ func (c *SDConfig) UnmarshalYAML(unmarshal func(interface{}) error) error {
 		return err
 	}
 	if c.Server == "" {
+		//nolint:staticcheck // Capitalized first word.
 		return errors.New("Uyuni SD configuration requires server host")
 	}
 	_, err = url.Parse(c.Server)
 	if err != nil {
+		//nolint:staticcheck // Capitalized first word.
 		return fmt.Errorf("Uyuni Server URL is not valid: %w", err)
 	}
 	if c.Username == "" {
+		//nolint:staticcheck // Capitalized first word.
 		return errors.New("Uyuni SD configuration requires a username")
 	}
 	if c.Password == "" {
+		//nolint:staticcheck // Capitalized first word.
return errors.New("Uyuni SD configuration requires a password") } return c.HTTPClientConfig.Validate() diff --git a/documentation/examples/remote_storage/remote_storage_adapter/main.go b/documentation/examples/remote_storage/remote_storage_adapter/main.go index 9ea9b8e5f9..ffcbb5385a 100644 --- a/documentation/examples/remote_storage/remote_storage_adapter/main.go +++ b/documentation/examples/remote_storage/remote_storage_adapter/main.go @@ -145,7 +145,7 @@ func parseFlags() *config { _, err := a.Parse(os.Args[1:]) if err != nil { - fmt.Fprintln(os.Stderr, fmt.Errorf("Error parsing commandline arguments: %w", err)) + fmt.Fprintf(os.Stderr, "Error parsing commandline arguments: %s", err) a.Usage(os.Args[1:]) os.Exit(2) } diff --git a/documentation/examples/remote_storage/remote_storage_adapter/opentsdb/tagvalue.go b/documentation/examples/remote_storage/remote_storage_adapter/opentsdb/tagvalue.go index 99cef0b242..6a691778af 100644 --- a/documentation/examples/remote_storage/remote_storage_adapter/opentsdb/tagvalue.go +++ b/documentation/examples/remote_storage/remote_storage_adapter/opentsdb/tagvalue.go @@ -78,7 +78,7 @@ func (tv TagValue) MarshalJSON() ([]byte, error) { case b == ':': result.WriteString("_.") default: - result.WriteString(fmt.Sprintf("_%X", b)) + fmt.Fprintf(result, "_%X", b) } } result.WriteByte('"') diff --git a/model/histogram/float_histogram.go b/model/histogram/float_histogram.go index e5519a56d6..0a2b43951e 100644 --- a/model/histogram/float_histogram.go +++ b/model/histogram/float_histogram.go @@ -1016,7 +1016,7 @@ type floatBucketIterator struct { func (i *floatBucketIterator) At() Bucket[float64] { // Need to use i.targetSchema rather than i.baseBucketIterator.schema. - return i.baseBucketIterator.at(i.targetSchema) + return i.at(i.targetSchema) } func (i *floatBucketIterator) Next() bool { diff --git a/model/labels/labels_test.go b/model/labels/labels_test.go index b7ba71b553..2abc322699 100644 --- a/model/labels/labels_test.go +++ b/model/labels/labels_test.go @@ -513,7 +513,7 @@ func TestLabels_Has(t *testing.T) { } func TestLabels_Get(t *testing.T) { - require.Equal(t, "", FromStrings("aaa", "111", "bbb", "222").Get("foo")) + require.Empty(t, FromStrings("aaa", "111", "bbb", "222").Get("foo")) require.Equal(t, "111", FromStrings("aaaa", "111", "bbb", "222").Get("aaaa")) require.Equal(t, "222", FromStrings("aaaa", "111", "bbb", "222").Get("bbb")) } diff --git a/prompb/io/prometheus/client/decoder.go b/prompb/io/prometheus/client/decoder.go index 0d62f1f7cf..2f11d278ce 100644 --- a/prompb/io/prometheus/client/decoder.go +++ b/prompb/io/prometheus/client/decoder.go @@ -81,7 +81,7 @@ func (m *MetricStreamingDecoder) NextMetricFamily() error { m.mfData = b[varIntLength:totalLength] m.inPos += totalLength - return m.MetricFamily.unmarshalWithoutMetrics(m, m.mfData) + return m.unmarshalWithoutMetrics(m, m.mfData) } // resetMetricFamily resets all the fields in m to equal the zero value, but re-using slice memory. @@ -98,7 +98,7 @@ func (m *MetricStreamingDecoder) NextMetric() error { m.resetMetric() m.mData = m.mfData[m.metrics[m.metricIndex].start:m.metrics[m.metricIndex].end] - if err := m.Metric.unmarshalWithoutLabels(m, m.mData); err != nil { + if err := m.unmarshalWithoutLabels(m, m.mData); err != nil { return err } m.metricIndex++ @@ -111,37 +111,37 @@ func (m *MetricStreamingDecoder) resetMetric() { m.TimestampMs = 0 // TODO(bwplotka): Autogenerate reset functions. 
-	if m.Metric.Counter != nil {
-		m.Metric.Counter.Value = 0
-		m.Metric.Counter.CreatedTimestamp = nil
-		m.Metric.Counter.Exemplar = nil
+	if m.Counter != nil {
+		m.Counter.Value = 0
+		m.Counter.CreatedTimestamp = nil
+		m.Counter.Exemplar = nil
 	}
-	if m.Metric.Gauge != nil {
-		m.Metric.Gauge.Value = 0
+	if m.Gauge != nil {
+		m.Gauge.Value = 0
 	}
-	if m.Metric.Histogram != nil {
-		m.Metric.Histogram.SampleCount = 0
-		m.Metric.Histogram.SampleCountFloat = 0
-		m.Metric.Histogram.SampleSum = 0
-		m.Metric.Histogram.Bucket = m.Metric.Histogram.Bucket[:0]
-		m.Metric.Histogram.CreatedTimestamp = nil
-		m.Metric.Histogram.Schema = 0
-		m.Metric.Histogram.ZeroThreshold = 0
-		m.Metric.Histogram.ZeroCount = 0
-		m.Metric.Histogram.ZeroCountFloat = 0
-		m.Metric.Histogram.NegativeSpan = m.Metric.Histogram.NegativeSpan[:0]
-		m.Metric.Histogram.NegativeDelta = m.Metric.Histogram.NegativeDelta[:0]
-		m.Metric.Histogram.NegativeCount = m.Metric.Histogram.NegativeCount[:0]
-		m.Metric.Histogram.PositiveSpan = m.Metric.Histogram.PositiveSpan[:0]
-		m.Metric.Histogram.PositiveDelta = m.Metric.Histogram.PositiveDelta[:0]
-		m.Metric.Histogram.PositiveCount = m.Metric.Histogram.PositiveCount[:0]
-		m.Metric.Histogram.Exemplars = m.Metric.Histogram.Exemplars[:0]
+	if m.Histogram != nil {
+		m.Histogram.SampleCount = 0
+		m.Histogram.SampleCountFloat = 0
+		m.Histogram.SampleSum = 0
+		m.Histogram.Bucket = m.Histogram.Bucket[:0]
+		m.Histogram.CreatedTimestamp = nil
+		m.Histogram.Schema = 0
+		m.Histogram.ZeroThreshold = 0
+		m.Histogram.ZeroCount = 0
+		m.Histogram.ZeroCountFloat = 0
+		m.Histogram.NegativeSpan = m.Histogram.NegativeSpan[:0]
+		m.Histogram.NegativeDelta = m.Histogram.NegativeDelta[:0]
+		m.Histogram.NegativeCount = m.Histogram.NegativeCount[:0]
+		m.Histogram.PositiveSpan = m.Histogram.PositiveSpan[:0]
+		m.Histogram.PositiveDelta = m.Histogram.PositiveDelta[:0]
+		m.Histogram.PositiveCount = m.Histogram.PositiveCount[:0]
+		m.Histogram.Exemplars = m.Histogram.Exemplars[:0]
 	}
-	if m.Metric.Summary != nil {
-		m.Metric.Summary.SampleCount = 0
-		m.Metric.Summary.SampleSum = 0
-		m.Metric.Summary.Quantile = m.Metric.Summary.Quantile[:0]
-		m.Metric.Summary.CreatedTimestamp = nil
+	if m.Summary != nil {
+		m.Summary.SampleCount = 0
+		m.Summary.SampleSum = 0
+		m.Summary.Quantile = m.Summary.Quantile[:0]
+		m.Summary.CreatedTimestamp = nil
 	}
 }
diff --git a/prompb/rwcommon/codec_test.go b/prompb/rwcommon/codec_test.go
index 2ab95e0d19..b91355c51c 100644
--- a/prompb/rwcommon/codec_test.go
+++ b/prompb/rwcommon/codec_test.go
@@ -135,12 +135,12 @@ func TestToMetadata(t *testing.T) {
 func TestToHistogram_Empty(t *testing.T) {
 	t.Run("v1", func(t *testing.T) {
-		require.NotNilf(t, prompb.Histogram{}.ToIntHistogram(), "")
-		require.NotNilf(t, prompb.Histogram{}.ToFloatHistogram(), "")
+		require.NotNil(t, prompb.Histogram{}.ToIntHistogram())
+		require.NotNil(t, prompb.Histogram{}.ToFloatHistogram())
 	})
 	t.Run("v2", func(t *testing.T) {
-		require.NotNilf(t, writev2.Histogram{}.ToIntHistogram(), "")
-		require.NotNilf(t, writev2.Histogram{}.ToFloatHistogram(), "")
+		require.NotNil(t, writev2.Histogram{}.ToIntHistogram())
+		require.NotNil(t, writev2.Histogram{}.ToFloatHistogram())
 	})
 }
diff --git a/promql/bench_test.go b/promql/bench_test.go
index 943baceecb..9741a02102 100644
--- a/promql/bench_test.go
+++ b/promql/bench_test.go
@@ -89,8 +89,8 @@ func setupRangeQueryTestData(stor *teststorage.TestStorage, _ *promql.Engine, in
 		}
 	}
-	stor.DB.ForceHeadMMap() // Ensure we have at most one head chunk for every series.
-	stor.DB.Compact(ctx)
+	stor.ForceHeadMMap() // Ensure we have at most one head chunk for every series.
+	stor.Compact(ctx)
 	return nil
 }
@@ -269,7 +269,7 @@ func rangeQueryCases() []benchCase {
 func BenchmarkRangeQuery(b *testing.B) {
 	stor := teststorage.New(b)
-	stor.DB.DisableCompactions() // Don't want auto-compaction disrupting timings.
+	stor.DisableCompactions() // Don't want auto-compaction disrupting timings.
 	defer stor.Close()
 	opts := promql.EngineOpts{
 		Logger: nil,
@@ -498,8 +498,8 @@ func generateInfoFunctionTestSeries(tb testing.TB, stor *teststorage.TestStorage
 		require.NoError(tb, a.Commit())
 	}
-	stor.DB.ForceHeadMMap() // Ensure we have at most one head chunk for every series.
-	stor.DB.Compact(ctx)
+	stor.ForceHeadMMap() // Ensure we have at most one head chunk for every series.
+	stor.Compact(ctx)
 }
 func generateNativeHistogramSeries(app storage.Appender, numSeries int) error {
diff --git a/promql/engine.go b/promql/engine.go
index f1829efdd8..d5a192f8ba 100644
--- a/promql/engine.go
+++ b/promql/engine.go
@@ -731,7 +731,7 @@ func (ng *Engine) execEvalStmt(ctx context.Context, query *query, s *parser.Eval
 	setOffsetForAtModifier(timeMilliseconds(s.Start), s.Expr)
 	evalSpanTimer, ctxInnerEval := query.stats.GetSpanTimer(ctx, stats.InnerEvalTime, ng.metrics.queryInnerEval)
 	// Instant evaluation. This is executed as a range evaluation with one step.
-	if s.Start == s.End && s.Interval == 0 {
+	if s.Start.Equal(s.End) && s.Interval == 0 {
 		start := timeMilliseconds(s.Start)
 		evaluator := &evaluator{
 			startTimestamp: start,
diff --git a/promql/parser/lex.go b/promql/parser/lex.go
index 66522f59da..3bb74b4ad2 100644
--- a/promql/parser/lex.go
+++ b/promql/parser/lex.go
@@ -674,10 +674,10 @@ func lexInsideBraces(l *Lexer) stateFn {
 		l.backup()
 		l.emit(EQL)
 	case r == '!':
-		switch nr := l.next(); {
-		case nr == '~':
+		switch nr := l.next(); nr {
+		case '~':
 			l.emit(NEQ_REGEX)
-		case nr == '=':
+		case '=':
 			l.emit(NEQ)
 		default:
 			return l.errorf("unexpected character after '!' inside braces: %q", nr)
diff --git a/rules/manager_test.go b/rules/manager_test.go
index 46a87787ce..efd7a8b23c 100644
--- a/rules/manager_test.go
+++ b/rules/manager_test.go
@@ -180,7 +180,7 @@ func TestAlertingRule(t *testing.T) {
 		for i := range test.result {
 			test.result[i].T = timestamp.FromTime(evalTime)
 		}
-		require.Equal(t, len(test.result), len(filteredRes), "%d. Number of samples in expected and actual output don't match (%d vs. %d)", i, len(test.result), len(res))
+		require.Len(t, filteredRes, len(test.result), "%d. Number of samples in expected and actual output don't match (%d vs. %d)", i, len(test.result), len(res))
 		sort.Slice(filteredRes, func(i, j int) bool {
 			return labels.Compare(filteredRes[i].Metric, filteredRes[j].Metric) < 0
@@ -188,7 +188,7 @@ func TestAlertingRule(t *testing.T) {
 		prom_testutil.RequireEqual(t, test.result, filteredRes)
 		for _, aa := range rule.ActiveAlerts() {
-			require.Zero(t, aa.Labels.Get(model.MetricNameLabel), "%s label set on active alert: %s", model.MetricNameLabel, aa.Labels)
+			require.Empty(t, aa.Labels.Get(model.MetricNameLabel), "%s label set on active alert: %s", model.MetricNameLabel, aa.Labels)
 		}
 	}
 }
@@ -333,7 +333,7 @@ func TestForStateAddSamples(t *testing.T) {
 				test.result[i].F = forState
 			}
 		}
-		require.Equal(t, len(test.result), len(filteredRes), "%d. Number of samples in expected and actual output don't match (%d vs. %d)", i, len(test.result), len(res))
+		require.Len(t, filteredRes, len(test.result), "%d. Number of samples in expected and actual output don't match (%d vs. %d)", i, len(test.result), len(res))
 		sort.Slice(filteredRes, func(i, j int) bool {
 			return labels.Compare(filteredRes[i].Metric, filteredRes[j].Metric) < 0
@@ -341,7 +341,7 @@ func TestForStateAddSamples(t *testing.T) {
 		prom_testutil.RequireEqual(t, test.result, filteredRes)
 		for _, aa := range rule.ActiveAlerts() {
-			require.Zero(t, aa.Labels.Get(model.MetricNameLabel), "%s label set on active alert: %s", model.MetricNameLabel, aa.Labels)
+			require.Empty(t, aa.Labels.Get(model.MetricNameLabel), "%s label set on active alert: %s", model.MetricNameLabel, aa.Labels)
 		}
 	}
 })
@@ -489,7 +489,7 @@ func TestForStateRestore(t *testing.T) {
 			got := newRule.ActiveAlerts()
 			for _, aa := range got {
-				require.Zero(t, aa.Labels.Get(model.MetricNameLabel), "%s label set on active alert: %s", model.MetricNameLabel, aa.Labels)
+				require.Empty(t, aa.Labels.Get(model.MetricNameLabel), "%s label set on active alert: %s", model.MetricNameLabel, aa.Labels)
 			}
 			sort.Slice(got, func(i, j int) bool {
 				return labels.Compare(got[i].Labels, got[j].Labels) < 0
@@ -513,7 +513,7 @@ func TestForStateRestore(t *testing.T) {
 				}
 			default:
 				exp := tt.expectedAlerts
-				require.Equal(t, len(exp), len(got))
+				require.Len(t, got, len(exp))
 				sortAlerts(exp)
 				sortAlerts(got)
 				for i, e := range exp {
@@ -2442,7 +2442,7 @@ func TestBoundedRuleEvalConcurrency(t *testing.T) {
 	wg.Wait()
 	// Synchronous queries also count towards inflight, so at most we can have maxConcurrency+$groupCount inflight evaluations.
-	require.EqualValues(t, maxInflight.Load(), int32(maxConcurrency)+int32(groupCount))
+	require.Equal(t, maxInflight.Load(), int32(maxConcurrency)+int32(groupCount))
 }
 func TestUpdateWhenStopped(t *testing.T) {
diff --git a/scrape/scrape_test.go b/scrape/scrape_test.go
index 3ddb767356..699a3864b2 100644
--- a/scrape/scrape_test.go
+++ b/scrape/scrape_test.go
@@ -1157,7 +1157,7 @@ func TestScrapeLoopRun(t *testing.T) {
 	case <-time.After(5 * time.Second):
 		require.FailNow(t, "Cancellation during initial offset failed.")
 	case err := <-errc:
-		require.FailNow(t, "Unexpected error: %s", err)
+		require.FailNow(t, "Unexpected error", "err: %s", err)
 	}
 	// The provided timeout must cause cancellation of the context passed down to the
@@ -1200,7 +1200,7 @@ func TestScrapeLoopRun(t *testing.T) {
 	case <-signal:
 		// Loop terminated as expected.
 	case err := <-errc:
-		require.FailNow(t, "Unexpected error: %s", err)
+		require.FailNow(t, "Unexpected error", "err: %s", err)
 	case <-time.After(3 * time.Second):
 		require.FailNow(t, "Loop did not terminate on context cancellation")
 	}
@@ -1309,14 +1309,14 @@ test_metric_total 1
 	md, ok = cache.GetMetadata("test_metric_no_help")
 	require.True(t, ok, "expected metadata to be present")
 	require.Equal(t, model.MetricTypeGauge, md.Type, "unexpected metric type")
-	require.Equal(t, "", md.Help)
-	require.Equal(t, "", md.Unit)
+	require.Empty(t, md.Help)
+	require.Empty(t, md.Unit)
 	md, ok = cache.GetMetadata("test_metric_no_type")
 	require.True(t, ok, "expected metadata to be present")
 	require.Equal(t, model.MetricTypeUnknown, md.Type, "unexpected metric type")
 	require.Equal(t, "other help text", md.Help)
-	require.Equal(t, "", md.Unit)
+	require.Empty(t, md.Unit)
 }
 func simpleTestScrapeLoop(t testing.TB) (context.Context, *scrapeLoop) {
@@ -1567,7 +1567,7 @@ func TestSetOptionsHandlingStaleness(t *testing.T) {
 		if numScrapes == cue {
 			action(sl)
 		}
-		w.Write([]byte(fmt.Sprintf("metric_a{a=\"1\",b=\"1\"} %d\n", 42+numScrapes)))
+		fmt.Fprintf(w, "metric_a{a=\"1\",b=\"1\"} %d\n", 42+numScrapes)
 		return nil
 	}
 	sl.run(nil)
@@ -4259,7 +4259,7 @@ test_summary_count 199
 		foundLeValues[v] = true
 	}
-	require.Equal(t, len(expectedValues), len(foundLeValues), "number of label values not as expected")
+	require.Len(t, foundLeValues, len(expectedValues), "number of label values not as expected")
 	for _, v := range expectedValues {
 		require.Contains(t, foundLeValues, v, "label value not found")
 	}
@@ -4568,7 +4568,7 @@ metric: <
 		foundLeValues[v] = true
 	}
-	require.Equal(t, len(expectedValues), len(foundLeValues), "unexpected number of label values, expected %v but found %v", expectedValues, foundLeValues)
+	require.Len(t, foundLeValues, len(expectedValues), "unexpected number of label values, expected %v but found %v", expectedValues, foundLeValues)
 	for _, v := range expectedValues {
 		require.Contains(t, foundLeValues, v, "label value not found")
 	}
@@ -4817,7 +4817,7 @@ func TestScrapeLoopRunCreatesStaleMarkersOnFailedScrapeForTimestampedMetrics(t *
 		switch numScrapes {
 		case 1:
-			w.Write([]byte(fmt.Sprintf("metric_a 42 %d\n", time.Now().UnixNano()/int64(time.Millisecond))))
+			fmt.Fprintf(w, "metric_a 42 %d\n", time.Now().UnixNano()/int64(time.Millisecond))
 			return nil
 		case 5:
 			cancel()
@@ -4867,7 +4867,7 @@ func TestScrapeLoopCompression(t *testing.T) {
 	ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
 		require.Equal(t, tc.acceptEncoding, r.Header.Get("Accept-Encoding"), "invalid value of the Accept-Encoding header")
-		fmt.Fprint(w, metricsText)
+		fmt.Fprint(w, string(metricsText))
 		close(scraped)
 	}))
 	defer ts.Close()
@@ -5164,7 +5164,7 @@ scrape_configs:
 	s := teststorage.New(t)
 	defer s.Close()
-	s.DB.EnableNativeHistograms()
+	s.EnableNativeHistograms()
 	reg := prometheus.NewRegistry()
 	mng, err := NewManager(&Options{DiscoveryReloadInterval: model.Duration(10 * time.Millisecond), EnableNativeHistogramsIngestion: true}, nil, nil, s, reg)
diff --git a/scrape/target.go b/scrape/target.go
index 4f576504f0..30b47976a3 100644
--- a/scrape/target.go
+++ b/scrape/target.go
@@ -144,7 +144,7 @@ func (t *Target) SetMetadataStore(s MetricMetadataStore) {
 func (t *Target) hash() uint64 {
 	h := fnv.New64a()
-	h.Write([]byte(fmt.Sprintf("%016d", t.labels.Hash())))
+	fmt.Fprintf(h, "%016d", t.labels.Hash())
 	h.Write([]byte(t.URL().String()))
 	return h.Sum64()
diff --git a/storage/merge.go b/storage/merge.go
index bc70ceea55..9b3bcee580 100644
--- a/storage/merge.go
+++ b/storage/merge.go
@@ -64,10 +64,8 @@ func NewMergeQuerier(primaries, secondaries []Querier, mergeFn VerticalSeriesMer
 		queriers = append(queriers, newSecondaryQuerierFrom(q))
 	}
-	concurrentSelect := false
-	if len(secondaries) > 0 {
-		concurrentSelect = true
-	}
+	concurrentSelect := len(secondaries) > 0
+
 	return &querierAdapter{&mergeGenericQuerier{
 		mergeFn:  (&seriesMergerAdapter{VerticalSeriesMergeFunc: mergeFn}).Merge,
 		queriers: queriers,
@@ -111,10 +109,8 @@ func NewMergeChunkQuerier(primaries, secondaries []ChunkQuerier, mergeFn Vertica
 		queriers = append(queriers, newSecondaryQuerierFromChunk(q))
 	}
-	concurrentSelect := false
-	if len(secondaries) > 0 {
-		concurrentSelect = true
-	}
+	concurrentSelect := len(secondaries) > 0
+
 	return &chunkQuerierAdapter{&mergeGenericQuerier{
 		mergeFn:  (&chunkSeriesMergerAdapter{VerticalChunkSeriesMergeFunc: mergeFn}).Merge,
 		queriers: queriers,
diff --git a/storage/remote/otlptranslator/prometheusremotewrite/helper.go b/storage/remote/otlptranslator/prometheusremotewrite/helper.go
index 09be335a8b..527a0c879f 100644
--- a/storage/remote/otlptranslator/prometheusremotewrite/helper.go
+++ b/storage/remote/otlptranslator/prometheusremotewrite/helper.go
@@ -210,7 +210,7 @@ func createAttributes(resource pcommon.Resource, attributes pcommon.Map, setting
 			log.Println("label " + name + " is overwritten. Check if Prometheus reserved labels are used.")
 		}
 		// internal labels should be maintained
-		if !settings.AllowUTF8 && !(len(name) > 4 && name[:2] == "__" && name[len(name)-2:] == "__") {
+		if !settings.AllowUTF8 && (len(name) <= 4 || name[:2] != "__" || name[len(name)-2:] != "__") {
 			name = otlptranslator.NormalizeLabel(name)
 		}
 		l[name] = extras[i+1]
diff --git a/storage/remote/otlptranslator/prometheusremotewrite/metrics_to_prw.go b/storage/remote/otlptranslator/prometheusremotewrite/metrics_to_prw.go
index 79d127bb80..3d0285a185 100644
--- a/storage/remote/otlptranslator/prometheusremotewrite/metrics_to_prw.go
+++ b/storage/remote/otlptranslator/prometheusremotewrite/metrics_to_prw.go
@@ -102,8 +102,8 @@ func (c *PrometheusConverter) FromMetrics(ctx context.Context, md pmetric.Metric
 				// Cumulative temporality is always valid.
 				// Delta temporality is also valid if AllowDeltaTemporality is true.
 				// All other temporality values are invalid.
-				!(temporality == pmetric.AggregationTemporalityCumulative ||
-					(settings.AllowDeltaTemporality && temporality == pmetric.AggregationTemporalityDelta)) {
+				(temporality != pmetric.AggregationTemporalityCumulative &&
+					(!settings.AllowDeltaTemporality || temporality != pmetric.AggregationTemporalityDelta)) {
 				errs = multierr.Append(errs, fmt.Errorf("invalid temporality and type combination for metric %q", metric.Name()))
 				continue
 			}
diff --git a/storage/remote/queue_manager.go b/storage/remote/queue_manager.go
index 87567fb9c6..db602b8dc3 100644
--- a/storage/remote/queue_manager.go
+++ b/storage/remote/queue_manager.go
@@ -515,10 +515,8 @@ func NewQueueManager(
 		compr: compression.Snappy, // Hardcoded for now, but scaffolding exists for likely future use.
 	}
-	walMetadata := false
-	if t.protoMsg != config.RemoteWriteProtoMsgV1 {
-		walMetadata = true
-	}
+	walMetadata := t.protoMsg != config.RemoteWriteProtoMsgV1
+
 	t.watcher = wlog.NewWatcher(watcherMetrics, readerMetrics, logger, client.Name(), t, dir, enableExemplarRemoteWrite, enableNativeHistogramRemoteWrite, walMetadata)
 	// The current MetadataWatcher implementation is mutually exclusive
diff --git a/storage/remote/read.go b/storage/remote/read.go
index 2ec48784dc..881b5c28d1 100644
--- a/storage/remote/read.go
+++ b/storage/remote/read.go
@@ -93,7 +93,7 @@ func (c *sampleAndChunkQueryableClient) ChunkQuerier(mint, maxt int64) (storage.
 		noop bool
 		err  error
 	)
-	cq.querier.maxt, noop, err = c.preferLocalStorage(mint, maxt)
+	cq.maxt, noop, err = c.preferLocalStorage(mint, maxt)
 	if err != nil {
 		return nil, err
 	}
diff --git a/storage/remote/read_handler_test.go b/storage/remote/read_handler_test.go
index fd7f3ad48d..6b6275edac 100644
--- a/storage/remote/read_handler_test.go
+++ b/storage/remote/read_handler_test.go
@@ -277,7 +277,7 @@ func TestStreamReadEndpoint(t *testing.T) {
 	require.Equal(t, 2, recorder.Code/100)
 	require.Equal(t, "application/x-streamed-protobuf; proto=prometheus.ChunkedReadResponse", recorder.Result().Header.Get("Content-Type"))
-	require.Equal(t, "", recorder.Result().Header.Get("Content-Encoding"))
+	require.Empty(t, recorder.Result().Header.Get("Content-Encoding"))
 	var results []*prompb.ChunkedReadResponse
 	stream := NewChunkedReader(recorder.Result().Body, config.DefaultChunkedReadLimit, nil)
diff --git a/storage/series_test.go b/storage/series_test.go
index 5309494069..1ade558648 100644
--- a/storage/series_test.go
+++ b/storage/series_test.go
@@ -112,7 +112,7 @@ func TestChunkSeriesSetToSeriesSet(t *testing.T) {
 	require.Len(t, ssSlice, 2)
 	var iter chunkenc.Iterator
 	for i, s := range ssSlice {
-		require.EqualValues(t, series[i].lbs, s.Labels())
+		require.Equal(t, series[i].lbs, s.Labels())
 		iter = s.Iterator(iter)
 		j := 0
 		for iter.Next() == chunkenc.ValFloat {
@@ -597,15 +597,15 @@ func testHistogramsSeriesToChunks(t *testing.T, test histogramTest) {
 	}
 	series := NewListSeries(lbs, copiedSamples)
 	encoder := NewSeriesToChunkEncoder(series)
-	require.EqualValues(t, lbs, encoder.Labels())
+	require.Equal(t, lbs, encoder.Labels())
 	chks, err := ExpandChunks(encoder.Iterator(nil))
 	require.NoError(t, err)
-	require.Equal(t, len(test.expectedCounterResetHeaders), len(chks))
+	require.Len(t, chks, len(test.expectedCounterResetHeaders))
 	// Decode all encoded samples and assert they are equal to the original ones.
 	encodedSamples := chunks.ChunkMetasToSamples(chks)
-	require.Equal(t, len(test.expectedSamples), len(encodedSamples))
+	require.Len(t, encodedSamples, len(test.expectedSamples))
 	for i, s := range test.expectedSamples {
 		encodedSample := encodedSamples[i]
diff --git a/tsdb/agent/db_test.go b/tsdb/agent/db_test.go
index db98e87408..0cd780677e 100644
--- a/tsdb/agent/db_test.go
+++ b/tsdb/agent/db_test.go
@@ -1305,7 +1305,7 @@ func TestDBCreatedTimestampSamplesIngestion(t *testing.T) {
 			outputSamples := readWALSamples(t, s.wal.Dir())
-			require.Equal(t, len(tc.expectedSamples), len(outputSamples), "Expected %d samples", len(tc.expectedSamples))
+			require.Len(t, outputSamples, len(tc.expectedSamples), "Expected %d samples", len(tc.expectedSamples))
 			for i, expectedSample := range tc.expectedSamples {
 				for _, sample := range outputSamples {
diff --git a/tsdb/chunks/head_chunks_test.go b/tsdb/chunks/head_chunks_test.go
index 4c0b27e82f..68742471e6 100644
--- a/tsdb/chunks/head_chunks_test.go
+++ b/tsdb/chunks/head_chunks_test.go
@@ -129,7 +129,7 @@ func TestChunkDiskMapper_WriteChunk_Chunk_IterateChunks(t *testing.T) {
 	// Checking on-disk bytes for the first file.
 	require.Len(t, hrw.mmappedChunkFiles, 3, "expected 3 mmapped files, got %d", len(hrw.mmappedChunkFiles))
-	require.Equal(t, len(hrw.mmappedChunkFiles), len(hrw.closers))
+	require.Len(t, hrw.closers, len(hrw.mmappedChunkFiles))
 	actualBytes, err := os.ReadFile(firstFileName)
 	require.NoError(t, err)
@@ -208,9 +208,9 @@ func TestChunkDiskMapper_Truncate(t *testing.T) {
 		files, err := os.ReadDir(hrw.dir.Name())
 		require.NoError(t, err)
-		require.Equal(t, len(remainingFiles), len(files), "files on disk")
-		require.Equal(t, len(remainingFiles), len(hrw.mmappedChunkFiles), "hrw.mmappedChunkFiles")
-		require.Equal(t, len(remainingFiles), len(hrw.closers), "closers")
+		require.Len(t, files, len(remainingFiles), "files on disk")
+		require.Len(t, hrw.mmappedChunkFiles, len(remainingFiles), "hrw.mmappedChunkFiles")
+		require.Len(t, hrw.closers, len(remainingFiles), "closers")
 		for _, i := range remainingFiles {
 			_, ok := hrw.mmappedChunkFiles[i]
@@ -325,9 +325,9 @@ func TestChunkDiskMapper_Truncate_PreservesFileSequence(t *testing.T) {
 		files, err := os.ReadDir(hrw.dir.Name())
 		require.NoError(t, err)
-		require.Equal(t, len(remainingFiles), len(files), "files on disk")
-		require.Equal(t, len(remainingFiles), len(hrw.mmappedChunkFiles), "hrw.mmappedChunkFiles")
-		require.Equal(t, len(remainingFiles), len(hrw.closers), "closers")
+		require.Len(t, files, len(remainingFiles), "files on disk")
+		require.Len(t, hrw.mmappedChunkFiles, len(remainingFiles), "hrw.mmappedChunkFiles")
+		require.Len(t, hrw.closers, len(remainingFiles), "closers")
 		for _, i := range remainingFiles {
 			_, ok := hrw.mmappedChunkFiles[i]
diff --git a/tsdb/compact_test.go b/tsdb/compact_test.go
index 4b10a42ef7..655cfa408b 100644
--- a/tsdb/compact_test.go
+++ b/tsdb/compact_test.go
@@ -1399,7 +1399,7 @@ func TestDeleteCompactionBlockAfterFailedReload(t *testing.T) {
 				createBlock(t, db.Dir(), genSeries(1, 1, m.MinTime, m.MaxTime))
 			}
 			require.NoError(t, db.reload())
-			require.Equal(t, len(blocks), len(db.Blocks()), "unexpected block count after a reloadBlocks")
+			require.Len(t, db.Blocks(), len(blocks), "unexpected block count after a reloadBlocks")
 			return len(blocks)
 		},
diff --git a/tsdb/db_test.go b/tsdb/db_test.go
index 6270220be4..b5ce5f20fd 100644
--- a/tsdb/db_test.go
+++ b/tsdb/db_test.go
@@ -1347,7 +1347,7 @@ func TestTombstoneCleanFail(t *testing.T) {
 	actualBlockDirs, err := blockDirs(db.dir)
 	require.NoError(t, err)
 	// Only one block should have been replaced by a new block.
-	require.Equal(t, len(oldBlockDirs), len(actualBlockDirs))
+	require.Len(t, actualBlockDirs, len(oldBlockDirs))
 	require.Len(t, intersection(oldBlockDirs, actualBlockDirs), len(actualBlockDirs)-1)
 }
@@ -1535,7 +1535,7 @@ func TestSizeRetention(t *testing.T) {
 	// Test that registered size matches the actual disk size.
 	require.NoError(t, db.reloadBlocks()) // Reload the db to register the new db size.
-	require.Equal(t, len(blocks), len(db.Blocks())) // Ensure all blocks are registered.
+	require.Len(t, db.Blocks(), len(blocks)) // Ensure all blocks are registered.
 	blockSize := int64(prom_testutil.ToFloat64(db.metrics.blocksBytes)) // Use the actual internal metrics.
 	walSize, err := db.Head().wal.Size()
 	require.NoError(t, err)
@@ -2052,7 +2052,7 @@ func TestNoEmptyBlocks(t *testing.T) {
 		require.NoError(t, db.Compact(ctx))
 		actBlocks, err := blockDirs(db.Dir())
 		require.NoError(t, err)
-		require.Equal(t, len(db.Blocks()), len(actBlocks))
+		require.Len(t, actBlocks, len(db.Blocks()))
 		require.Empty(t, actBlocks)
 		require.Equal(t, 0, int(prom_testutil.ToFloat64(db.compactor.(*LeveledCompactor).metrics.Ran)), "no compaction should be triggered here")
 	})
@@ -2072,7 +2072,7 @@ func TestNoEmptyBlocks(t *testing.T) {
 		actBlocks, err := blockDirs(db.Dir())
 		require.NoError(t, err)
-		require.Equal(t, len(db.Blocks()), len(actBlocks))
+		require.Len(t, actBlocks, len(db.Blocks()))
 		require.Empty(t, actBlocks)
 		app = db.Appender(ctx)
@@ -2093,7 +2093,7 @@ func TestNoEmptyBlocks(t *testing.T) {
 		require.Equal(t, 2, int(prom_testutil.ToFloat64(db.compactor.(*LeveledCompactor).metrics.Ran)), "compaction should have been triggered here")
 		actBlocks, err = blockDirs(db.Dir())
 		require.NoError(t, err)
-		require.Equal(t, len(db.Blocks()), len(actBlocks))
+		require.Len(t, actBlocks, len(db.Blocks()))
 		require.Len(t, actBlocks, 1, "No blocks created when compacting with >0 samples")
 	})
@@ -2134,7 +2134,7 @@ func TestNoEmptyBlocks(t *testing.T) {
 		actBlocks, err := blockDirs(db.Dir())
 		require.NoError(t, err)
-		require.Equal(t, len(db.Blocks()), len(actBlocks))
+		require.Len(t, actBlocks, len(db.Blocks()))
 		require.Len(t, actBlocks, 1, "All samples are deleted. Only the most recent block should remain after compaction.")
 	})
 }
@@ -2450,7 +2450,7 @@ func TestDBReadOnly(t *testing.T) {
 	t.Run("blocks", func(t *testing.T) {
 		blocks, err := dbReadOnly.Blocks()
 		require.NoError(t, err)
-		require.Equal(t, len(expBlocks), len(blocks))
+		require.Len(t, blocks, len(expBlocks))
 		for i, expBlock := range expBlocks {
 			require.Equal(t, expBlock.Meta(), blocks[i].Meta(), "block meta mismatch")
 		}
@@ -2478,7 +2478,7 @@ func TestDBReadOnly(t *testing.T) {
 		readOnlySeries := query(t, q, matchAll)
 		readOnlyDBHash := testutil.DirHash(t, dbDir)
-		require.Equal(t, len(expSeries), len(readOnlySeries), "total series mismatch")
+		require.Len(t, readOnlySeries, len(expSeries), "total series mismatch")
 		require.Equal(t, expSeries, readOnlySeries, "series mismatch")
 		require.Equal(t, expDBHash, readOnlyDBHash, "after all read operations the db hash should remain the same")
 	})
@@ -2488,7 +2488,7 @@ func TestDBReadOnly(t *testing.T) {
 		readOnlySeries := queryAndExpandChunks(t, cq, matchAll)
 		readOnlyDBHash := testutil.DirHash(t, dbDir)
-		require.Equal(t, len(expChunks), len(readOnlySeries), "total series mismatch")
+		require.Len(t, readOnlySeries, len(expChunks), "total series mismatch")
 		require.Equal(t, expChunks, readOnlySeries, "series chunks mismatch")
 		require.Equal(t, expDBHash, readOnlyDBHash, "after all read operations the db hash should remain the same")
 	})
@@ -8260,7 +8260,7 @@ func testNoGapAfterRestartWithOOO(t *testing.T, scenario sampleTypeScenario) {
 			require.NoError(t, db.Compact(ctx))
 			verifyBlockRanges := func() {
 				blocks := db.Blocks()
-				require.Equal(t, len(c.blockRanges), len(blocks))
+				require.Len(t, blocks, len(c.blockRanges))
 				for j, br := range c.blockRanges {
 					require.Equal(t, br[0]*time.Minute.Milliseconds(), blocks[j].MinTime())
 					require.Equal(t, br[1]*time.Minute.Milliseconds(), blocks[j].MaxTime())
diff --git a/tsdb/head_test.go b/tsdb/head_test.go
index dcf9c9c9aa..561c8c789d 100644
--- a/tsdb/head_test.go
+++ b/tsdb/head_test.go
@@ -4666,7 +4666,7 @@ func testHistogramStaleSampleHelper(t *testing.T, floatHistogram bool) {
 	}
 	// We cannot compare StaleNAN with require.Equal, hence checking each histogram manually.
-	require.Equal(t, len(expHistograms), len(actHistograms))
+	require.Len(t, actHistograms, len(expHistograms))
 	actNumStale := 0
 	for i, eh := range expHistograms {
 		ah := actHistograms[i]
@@ -5304,7 +5304,7 @@ func TestChunkSnapshotTakenAfterIncompleteSnapshot(t *testing.T) {
 	// Verify the snapshot.
 	name, idx, offset, err := LastChunkSnapshot(dir)
 	require.NoError(t, err)
-	require.NotEqual(t, "", name)
+	require.NotEmpty(t, name)
 	require.Equal(t, 0, idx)
 	require.Positive(t, offset)
 }
diff --git a/tsdb/index/index_test.go b/tsdb/index/index_test.go
index ee186c1d95..e3fe5a41fd 100644
--- a/tsdb/index/index_test.go
+++ b/tsdb/index/index_test.go
@@ -424,7 +424,7 @@ func TestPersistence_index_e2e(t *testing.T) {
 		res, err := ir.SortedLabelValues(ctx, k)
 		require.NoError(t, err)
-		require.Equal(t, len(v), len(res))
+		require.Len(t, res, len(v))
 		for i := 0; i < len(v); i++ {
 			require.Equal(t, v[i], res[i])
 		}
diff --git a/tsdb/ooo_head_read_test.go b/tsdb/ooo_head_read_test.go
index d49c7d8fc3..9dcf125b92 100644
--- a/tsdb/ooo_head_read_test.go
+++ b/tsdb/ooo_head_read_test.go
@@ -860,7 +860,7 @@ func testOOOHeadChunkReader_Chunk(t *testing.T, scenario sampleTypeScenario) {
 			var b labels.ScratchBuilder
 			err = ir.Series(s1Ref, &b, &chks)
 			require.NoError(t, err)
-			require.Equal(t, len(tc.expChunksSamples), len(chks))
+			require.Len(t, chks, len(tc.expChunksSamples))
 			cr := NewHeadAndOOOChunkReader(db.head, tc.queryMinT, tc.queryMaxT, nil, nil, 0)
 			defer cr.Close()
@@ -1030,7 +1030,7 @@ func testOOOHeadChunkReader_Chunk_ConsistentQueryResponseDespiteOfHeadExpanding(
 			var b labels.ScratchBuilder
 			err = ir.Series(s1Ref, &b, &chks)
 			require.NoError(t, err)
-			require.Equal(t, len(tc.expChunksSamples), len(chks))
+			require.Len(t, chks, len(tc.expChunksSamples))
 			// Now we keep receiving ooo samples
 			// OOO few samples for s1.
diff --git a/tsdb/ooo_head_test.go b/tsdb/ooo_head_test.go
index 07467479dc..2d5901a13b 100644
--- a/tsdb/ooo_head_test.go
+++ b/tsdb/ooo_head_test.go
@@ -246,7 +246,7 @@ func TestOOOChunks_ToEncodedChunks(t *testing.T) {
 	for name, tc := range testCases {
 		t.Run(name, func(t *testing.T) {
 			// Sanity check.
-			require.Equal(t, len(tc.samples), len(tc.expectedCounterResets), "number of samples and counter resets")
+			require.Len(t, tc.expectedCounterResets, len(tc.samples), "number of samples and counter resets")
 			oooChunk := OOOChunk{}
 			for _, s := range tc.samples {
@@ -264,7 +264,7 @@ func TestOOOChunks_ToEncodedChunks(t *testing.T) {
 			chunks, err := oooChunk.ToEncodedChunks(math.MinInt64, math.MaxInt64)
 			require.NoError(t, err)
-			require.Equal(t, len(tc.expectedChunks), len(chunks), "number of chunks")
+			require.Len(t, chunks, len(tc.expectedChunks), "number of chunks")
 			sampleIndex := 0
 			for i, c := range chunks {
 				require.Equal(t, tc.expectedChunks[i].encoding, c.chunk.Encoding(), "chunk %d encoding", i)
diff --git a/tsdb/querier.go b/tsdb/querier.go
index 5d9801f2b8..f7d564a2dd 100644
--- a/tsdb/querier.go
+++ b/tsdb/querier.go
@@ -525,7 +525,7 @@ func (b *blockBaseSeriesSet) Next() bool {
 		// Count those in range to size allocation (roughly - ignoring tombstones).
 		nChks := 0
 		for _, chk := range b.bufChks {
-			if !(chk.MaxTime < b.mint || chk.MinTime > b.maxt) {
+			if chk.MaxTime >= b.mint && chk.MinTime <= b.maxt {
 				nChks++
 			}
 		}
diff --git a/tsdb/querier_test.go b/tsdb/querier_test.go
index f55d57cc79..cb96fa3716 100644
--- a/tsdb/querier_test.go
+++ b/tsdb/querier_test.go
@@ -263,7 +263,7 @@ func testBlockQuerier(t *testing.T, c blockQuerierTestCase, ir IndexReader, cr C
 			rmChunkRefs(chksRes)
 			require.Equal(t, errExp, errRes)
-			require.Equal(t, len(chksExp), len(chksRes))
+			require.Len(t, chksRes, len(chksExp))
 			var exp, act [][]chunks.Sample
 			for i := range chksExp {
 				samples, err := storage.ExpandSamples(chksExp[i].Chunk.Iterator(nil), nil)
diff --git a/tsdb/testutil.go b/tsdb/testutil.go
index 4dac8c29ff..4d413322c8 100644
--- a/tsdb/testutil.go
+++ b/tsdb/testutil.go
@@ -174,7 +174,7 @@ func requireEqualSamples(t *testing.T, name string, expected, actual []chunks.Sa
 		}
 	}
-	require.Equal(t, len(expected), len(actual), "Length not equal to expected for %s", name)
+	require.Len(t, actual, len(expected), "Length not equal to expected for %s", name)
 	for i, s := range expected {
 		expectedSample := s
 		actualSample := actual[i]
diff --git a/util/annotations/annotations.go b/util/annotations/annotations.go
index 95783957a7..a8e1d9f900 100644
--- a/util/annotations/annotations.go
+++ b/util/annotations/annotations.go
@@ -125,12 +125,13 @@ func (a Annotations) CountWarningsAndInfo() (countWarnings, countInfo int) {
 	return
 }
-//nolint:revive // error-naming.
+//nolint:staticcheck,revive // error-naming.
 var (
 	// Currently there are only 2 types, warnings and info.
 	// For now, info are visually identical with warnings as we have not updated
 	// the API spec or the frontend to show a different kind of warning. But we
 	// make the distinction here to prepare for adding them in future.
+	PromQLInfo    = errors.New("PromQL info")
 	PromQLWarning = errors.New("PromQL warning")
diff --git a/util/documentcli/documentcli.go b/util/documentcli/documentcli.go
index 6964952af4..f324afa3d6 100644
--- a/util/documentcli/documentcli.go
+++ b/util/documentcli/documentcli.go
@@ -50,7 +50,7 @@ func GenerateMarkdown(model *kingpin.ApplicationModel, writer io.Writer) error {
 		return err
 	}
-	return writeSubcommands(writer, 1, model.Name, model.CmdGroupModel.Commands)
+	return writeSubcommands(writer, 1, model.Name, model.Commands)
 }
 func header(title, help string) []byte {
@@ -172,13 +172,13 @@ func writeTable(writer io.Writer, data [][]string, header string) error {
 	buf := bytes.NewBuffer(nil)
-	buf.WriteString(fmt.Sprintf("\n\n%s\n\n", header))
+	fmt.Fprintf(buf, "\n\n%s\n\n", header)
 	columnsToRender := determineColumnsToRender(data)
 	headers := data[0]
 	buf.WriteString("|")
 	for _, j := range columnsToRender {
-		buf.WriteString(fmt.Sprintf(" %s |", headers[j]))
+		fmt.Fprintf(buf, " %s |", headers[j])
 	}
 	buf.WriteString("\n")
@@ -192,7 +192,7 @@ func writeTable(writer io.Writer, data [][]string, header string) error {
 		row := data[i]
 		buf.WriteString("|")
 		for _, j := range columnsToRender {
-			buf.WriteString(fmt.Sprintf(" %s |", row[j]))
+			fmt.Fprintf(buf, " %s |", row[j])
 		}
 		buf.WriteString("\n")
 	}
@@ -243,7 +243,7 @@ func writeSubcommands(writer io.Writer, level int, modelName string, commands []
 			help = cmd.HelpLong
 		}
 		help = formatHyphenatedWords(help)
-		if _, err := writer.Write([]byte(fmt.Sprintf("\n\n%s `%s %s`\n\n%s\n\n", strings.Repeat("#", level+1), modelName, cmd.FullCommand, help))); err != nil {
+		if _, err := fmt.Fprintf(writer, "\n\n%s `%s %s`\n\n%s\n\n", strings.Repeat("#", level+1), modelName, cmd.FullCommand, help); err != nil {
 			return err
 		}
@@ -255,8 +255,8 @@ func writeSubcommands(writer io.Writer, level int, modelName string, commands []
 			return err
 		}
-		if cmd.CmdGroupModel != nil && len(cmd.CmdGroupModel.Commands) > 0 {
-			if err := writeSubcommands(writer, level+1, modelName, cmd.CmdGroupModel.Commands); err != nil {
+		if cmd.CmdGroupModel != nil && len(cmd.Commands) > 0 {
+			if err := writeSubcommands(writer, level+1, modelName, cmd.Commands); err != nil {
 				return err
 			}
 		}
diff --git a/util/stats/query_stats.go b/util/stats/query_stats.go
index e83a6015c7..b1c91a69fd 100644
--- a/util/stats/query_stats.go
+++ b/util/stats/query_stats.go
@@ -134,7 +134,7 @@ func NewQueryStats(s *Statistics) QueryStats {
 		sp = s.Samples
 	)
-	for s, timer := range tg.TimerGroup.timers {
+	for s, timer := range tg.timers {
 		switch s {
 		case EvalTotalTime:
 			qt.EvalTotalTime = timer.Duration()
@@ -328,5 +328,5 @@ func (qs *QuerySamples) NewChild() *QuerySamples {
 }
 func (qs *QueryTimers) GetSpanTimer(ctx context.Context, qt QueryTiming, observers ...prometheus.Observer) (*SpanTimer, context.Context) {
-	return NewSpanTimer(ctx, qt.SpanOperation(), qs.TimerGroup.GetTimer(qt), observers...)
+	return NewSpanTimer(ctx, qt.SpanOperation(), qs.GetTimer(qt), observers...)
 }
diff --git a/util/strutil/strconv.go b/util/strutil/strconv.go
index 8cdd7d4830..88d2a3b610 100644
--- a/util/strutil/strconv.go
+++ b/util/strutil/strconv.go
@@ -54,10 +54,10 @@ func SanitizeFullLabelName(name string) string {
 	}
 	var validSb strings.Builder
 	for i, b := range name {
-		if !((b >= 'a' && b <= 'z') || (b >= 'A' && b <= 'Z') || b == '_' || (b >= '0' && b <= '9' && i > 0)) {
-			validSb.WriteRune('_')
-		} else {
+		if (b >= 'a' && b <= 'z') || (b >= 'A' && b <= 'Z') || b == '_' || (b >= '0' && b <= '9' && i > 0) {
 			validSb.WriteRune(b)
+		} else {
+			validSb.WriteRune('_')
 		}
 	}
 	return validSb.String()
diff --git a/web/web.go b/web/web.go
index 84c4a2a529..601d42cbea 100644
--- a/web/web.go
+++ b/web/web.go
@@ -812,7 +812,7 @@ func (h *Handler) runtimeInfo() (api_v1.RuntimeInfo, error) {
 	hostname, err := os.Hostname()
 	if err != nil {
-		return status, fmt.Errorf("Error getting hostname: %w", err)
+		return status, fmt.Errorf("error getting hostname: %w", err)
 	}
 	status.Hostname = hostname
 	status.ServerTime = time.Now().UTC()
diff --git a/web/web_test.go b/web/web_test.go
index 696ba80d1d..ea7e099041 100644
--- a/web/web_test.go
+++ b/web/web_test.go
@@ -624,7 +624,7 @@ func cleanupSnapshot(t *testing.T, dbDir string, resp *http.Response) {
 	b, err := io.ReadAll(resp.Body)
 	require.NoError(t, err)
 	require.NoError(t, json.Unmarshal(b, snapshot))
-	require.NotZero(t, snapshot.Data.Name, "snapshot directory not returned")
+	require.NotEmpty(t, snapshot.Data.Name, "snapshot directory not returned")
 	require.NoError(t, os.Remove(filepath.Join(dbDir, "snapshots", snapshot.Data.Name)))
 	require.NoError(t, os.Remove(filepath.Join(dbDir, "snapshots")))
 }