Update modernize (#17471)

Apply additional improvements suggested by the Go modernize tool.

Signed-off-by: SuperQ <superq@gmail.com>
Ben Kochie authored on 2025-11-04 06:13:49 +01:00; committed by GitHub
parent 784ec0a792
commit 48956f60d7
53 changed files with 147 additions and 193 deletions


@@ -221,8 +221,7 @@ type flagConfig struct {
// setFeatureListOptions sets the corresponding options from the featureList.
func (c *flagConfig) setFeatureListOptions(logger *slog.Logger) error {
	for _, f := range c.featureList {
-		opts := strings.Split(f, ",")
-		for _, o := range opts {
+		for o := range strings.SplitSeq(f, ",") {
			switch o {
			case "exemplar-storage":
				c.tsdb.EnableExemplarStorage = true
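
Note: strings.SplitSeq, added in Go 1.24, returns a lazy iterator (an iter.Seq[string]) over the substrings instead of allocating the whole []string up front, which is why the rewrite above collapses two lines into one. A minimal, self-contained sketch of the two forms (the feature string is made up for illustration):

package main

import (
	"fmt"
	"strings"
)

func main() {
	const features = "exemplar-storage,promql-experimental-functions"

	// Old form: Split allocates a slice of all substrings first.
	for _, o := range strings.Split(features, ",") {
		fmt.Println("split:", o)
	}

	// New form (Go 1.24): SplitSeq yields substrings one at a time,
	// with no intermediate slice.
	for o := range strings.SplitSeq(features, ",") {
		fmt.Println("splitseq:", o)
	}
}

The trade-off: an iterator can only be ranged over once and cannot be indexed, so the rewrite is only applied where the result was consumed by a single loop.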


@@ -173,7 +173,7 @@ func verifyConfigReloadMetric(t *testing.T, baseURL string, expectedValue float6
	var actualValue float64
	found := false
-	for _, line := range strings.Split(lines, "\n") {
+	for line := range strings.SplitSeq(lines, "\n") {
		if strings.HasPrefix(line, configReloadMetric) {
			parts := strings.Fields(line)
			if len(parts) >= 2 {


@@ -340,8 +340,7 @@ func main() {
	}
	for _, f := range *featureList {
-		opts := strings.Split(f, ",")
-		for _, o := range opts {
+		for o := range strings.SplitSeq(f, ",") {
			switch o {
			case "promql-experimental-functions":
				parser.EnableExperimentalFunctions = true
@@ -485,7 +484,7 @@ func newRulesLintConfig(stringVal string, fatal, ignoreUnknownFields bool, nameV
	if stringVal == "" {
		return ls
	}
-	for _, setting := range strings.Split(stringVal, ",") {
+	for setting := range strings.SplitSeq(stringVal, ",") {
		switch setting {
		case lintOptionAll:
			ls.all = true
@@ -518,7 +517,7 @@ func newConfigLintConfig(optionsStr string, fatal, ignoreUnknownFields bool, nam
	lintNone := false
	var rulesOptions []string
-	for _, option := range strings.Split(optionsStr, ",") {
+	for option := range strings.SplitSeq(optionsStr, ",") {
		switch option {
		case lintOptionAll, lintOptionTooLongScrapeInterval:
			c.lookbackDelta = lookbackDelta


@@ -197,9 +197,8 @@ func TestCheckDuplicates(t *testing.T) {
func BenchmarkCheckDuplicates(b *testing.B) {
	rgs, err := rulefmt.ParseFile("./testdata/rules_large.yml", false, model.UTF8Validation)
	require.Empty(b, err)
-	b.ResetTimer()
-	for i := 0; i < b.N; i++ {
+	for b.Loop() {
		checkDuplicates(rgs.Groups)
	}
}
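
Note: testing.B.Loop, added in Go 1.24, replaces the classic b.N counting loop. As the hunk above shows, the explicit b.ResetTimer can go too: work done before the first call to b.Loop is not measured, and the results of calls made inside the body are kept alive so the compiler cannot optimize the benchmarked work away. A minimal sketch (the package name and checkDuplicates stand-in are hypothetical):

package rules

import "testing"

// checkDuplicates stands in for the real function under benchmark.
func checkDuplicates(groups []string) int { return len(groups) }

func BenchmarkCheckDuplicates(b *testing.B) {
	groups := []string{"a", "b", "a"} // setup, excluded from timing

	// b.Loop() returns true for as long as the benchmark should keep
	// iterating; no b.ResetTimer and no manual b.N bookkeeping needed.
	for b.Loop() {
		checkDuplicates(groups)
	}
}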


@@ -1421,9 +1421,8 @@ func BenchmarkResolvePodRef(b *testing.B) {
	}
	b.ReportAllocs()
-	b.ResetTimer()
-	for i := 0; i < b.N; i++ {
+	for b.Loop() {
		p := e.resolvePodRef(&v1.ObjectReference{
			Kind: "Pod",
			Name: "testpod",


@@ -783,8 +783,7 @@ func pk(provider, setName string, n int) poolKey {
}

func TestTargetSetTargetGroupsPresentOnConfigReload(t *testing.T) {
-	ctx, cancel := context.WithCancel(context.Background())
-	defer cancel()
+	ctx := t.Context()

	reg := prometheus.NewRegistry()
	_, sdMetrics := NewTestMetrics(t, reg)
@@ -820,8 +819,7 @@ func TestTargetSetTargetGroupsPresentOnConfigReload(t *testing.T) {
}

func TestTargetSetTargetGroupsPresentOnConfigRename(t *testing.T) {
-	ctx, cancel := context.WithCancel(context.Background())
-	defer cancel()
+	ctx := t.Context()

	reg := prometheus.NewRegistry()
	_, sdMetrics := NewTestMetrics(t, reg)
@@ -860,8 +858,7 @@ func TestTargetSetTargetGroupsPresentOnConfigRename(t *testing.T) {
}

func TestTargetSetTargetGroupsPresentOnConfigDuplicateAndDeleteOriginal(t *testing.T) {
-	ctx, cancel := context.WithCancel(context.Background())
-	defer cancel()
+	ctx := t.Context()

	reg := prometheus.NewRegistry()
	_, sdMetrics := NewTestMetrics(t, reg)
@@ -903,8 +900,7 @@ func TestTargetSetTargetGroupsPresentOnConfigDuplicateAndDeleteOriginal(t *testi
}

func TestTargetSetTargetGroupsPresentOnConfigChange(t *testing.T) {
-	ctx, cancel := context.WithCancel(context.Background())
-	defer cancel()
+	ctx := t.Context()

	reg := prometheus.NewRegistry()
	_, sdMetrics := NewTestMetrics(t, reg)
@@ -971,8 +967,7 @@ func TestTargetSetTargetGroupsPresentOnConfigChange(t *testing.T) {
}

func TestTargetSetRecreatesTargetGroupsOnConfigChange(t *testing.T) {
-	ctx, cancel := context.WithCancel(context.Background())
-	defer cancel()
+	ctx := t.Context()

	reg := prometheus.NewRegistry()
	_, sdMetrics := NewTestMetrics(t, reg)
@@ -1015,8 +1010,7 @@ func TestTargetSetRecreatesTargetGroupsOnConfigChange(t *testing.T) {
}

func TestDiscovererConfigs(t *testing.T) {
-	ctx, cancel := context.WithCancel(context.Background())
-	defer cancel()
+	ctx := t.Context()

	reg := prometheus.NewRegistry()
	_, sdMetrics := NewTestMetrics(t, reg)
@@ -1052,8 +1046,7 @@ func TestDiscovererConfigs(t *testing.T) {
// removing all targets from the static_configs cleans the corresponding targetGroups entries to avoid leaks and sends an empty update.
// The update is required to signal the consumers that the previous targets should be dropped.
func TestTargetSetRecreatesEmptyStaticConfigs(t *testing.T) {
-	ctx, cancel := context.WithCancel(context.Background())
-	defer cancel()
+	ctx := t.Context()

	reg := prometheus.NewRegistry()
	_, sdMetrics := NewTestMetrics(t, reg)
@@ -1094,8 +1087,7 @@ func TestTargetSetRecreatesEmptyStaticConfigs(t *testing.T) {
}

func TestIdenticalConfigurationsAreCoalesced(t *testing.T) {
-	ctx, cancel := context.WithCancel(context.Background())
-	defer cancel()
+	ctx := t.Context()

	reg := prometheus.NewRegistry()
	_, sdMetrics := NewTestMetrics(t, reg)
@@ -1133,8 +1125,7 @@ func TestApplyConfigDoesNotModifyStaticTargets(t *testing.T) {
	processedConfig := Configs{
		staticConfig("foo:9090", "bar:9090", "baz:9090"),
	}
-	ctx, cancel := context.WithCancel(context.Background())
-	defer cancel()
+	ctx := t.Context()

	reg := prometheus.NewRegistry()
	_, sdMetrics := NewTestMetrics(t, reg)
@@ -1194,8 +1185,7 @@ func (s lockStaticDiscoverer) Run(ctx context.Context, up chan<- []*targetgroup.
}

func TestGaugeFailedConfigs(t *testing.T) {
-	ctx, cancel := context.WithCancel(context.Background())
-	defer cancel()
+	ctx := t.Context()

	reg := prometheus.NewRegistry()
	_, sdMetrics := NewTestMetrics(t, reg)
@@ -1445,8 +1435,7 @@ func (o onceProvider) Run(_ context.Context, ch chan<- []*targetgroup.Group) {
// TestTargetSetTargetGroupsUpdateDuringApplyConfig is used to detect races when
// ApplyConfig happens at the same time as targets update.
func TestTargetSetTargetGroupsUpdateDuringApplyConfig(t *testing.T) {
-	ctx, cancel := context.WithCancel(context.Background())
-	defer cancel()
+	ctx := t.Context()

	reg := prometheus.NewRegistry()
	_, sdMetrics := NewTestMetrics(t, reg)
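
Note: t.Context, added in Go 1.24, returns a per-test context that the testing package cancels automatically just before Cleanup-registered functions run, which is what lets each hunk above drop the context.WithCancel and defer cancel pair. A minimal sketch of the before and after (package name is illustrative):

package discovery

import (
	"context"
	"testing"
)

func TestOldStyle(t *testing.T) {
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel() // boilerplate repeated in every test
	_ = ctx
}

func TestNewStyle(t *testing.T) {
	// Canceled automatically when the test finishes, just before
	// t.Cleanup callbacks run.
	ctx := t.Context()
	_ = ctx
}

One behavioral nuance: cancellation now happens right before Cleanup callbacks rather than at defer time, so anything that must observe the cancellation should be shut down via t.Cleanup.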


@@ -83,8 +83,7 @@ func TestRefresh(t *testing.T) {
	)

	ch := make(chan []*targetgroup.Group)
-	ctx, cancel := context.WithCancel(context.Background())
-	defer cancel()
+	ctx := t.Context()
	go d.Run(ctx, ch)

	tg := <-ch


@@ -3854,9 +3854,8 @@ func BenchmarkFloatHistogramAllBucketIterator(b *testing.B) {
	fh := createRandomFloatHistogram(rng, 50)

	b.ReportAllocs() // the current implementation reports 1 alloc
-	b.ResetTimer()
-	for n := 0; n < b.N; n++ {
+	for b.Loop() {
		for it := fh.AllBucketIterator(); it.Next(); {
		}
	}
@@ -3868,9 +3867,8 @@ func BenchmarkFloatHistogramDetectReset(b *testing.B) {
	fh := createRandomFloatHistogram(rng, 50)

	b.ReportAllocs() // the current implementation reports 0 allocs
-	b.ResetTimer()
-	for n := 0; n < b.N; n++ {
+	for b.Loop() {
		// Detect against itself (no resets is the worst case input).
		fh.DetectReset(fh)
	}


@@ -58,7 +58,7 @@ func TestLabels_String(t *testing.T) {
func BenchmarkString(b *testing.B) {
	ls := New(benchmarkLabels...)
-	for i := 0; i < b.N; i++ {
+	for b.Loop() {
		_ = ls.String()
	}
}
@@ -86,7 +86,7 @@ var GlobalTotal uint64 // Encourage the compiler not to elide the benchmark comp
func BenchmarkSize(b *testing.B) {
	lb := New(benchmarkLabels...)
	b.Run("SizeOfLabels", func(b *testing.B) {
-		for i := 0; i < b.N; i++ {
+		for b.Loop() {
			var total uint64
			lb.Range(func(l Label) {
				total += SizeOfLabels(l.Name, l.Value, 1)
@@ -95,7 +95,7 @@ func BenchmarkSize(b *testing.B) {
		}
	})
	b.Run("ByteSize", func(b *testing.B) {
-		for i := 0; i < b.N; i++ {
+		for b.Loop() {
			GlobalTotal = lb.ByteSize()
		}
	})
@@ -641,12 +641,12 @@ func BenchmarkLabels_Get(b *testing.B) {
	} {
		b.Run(scenario.desc, func(b *testing.B) {
			b.Run("get", func(b *testing.B) {
-				for i := 0; i < b.N; i++ {
+				for b.Loop() {
					_ = labels.Get(scenario.label)
				}
			})
			b.Run("has", func(b *testing.B) {
-				for i := 0; i < b.N; i++ {
+				for b.Loop() {
					_ = labels.Has(scenario.label)
				}
			})
@@ -696,7 +696,7 @@ func BenchmarkLabels_Equals(b *testing.B) {
	for _, scenario := range comparisonBenchmarkScenarios {
		b.Run(scenario.desc, func(b *testing.B) {
			b.ResetTimer()
-			for i := 0; i < b.N; i++ {
+			for b.Loop() {
				_ = Equal(scenario.base, scenario.other)
			}
		})
@@ -707,7 +707,7 @@ func BenchmarkLabels_Compare(b *testing.B) {
	for _, scenario := range comparisonBenchmarkScenarios {
		b.Run(scenario.desc, func(b *testing.B) {
			b.ResetTimer()
-			for i := 0; i < b.N; i++ {
+			for b.Loop() {
				_ = Compare(scenario.base, scenario.other)
			}
		})
@@ -942,7 +942,7 @@ func BenchmarkLabels_Hash(b *testing.B) {
			b.ReportAllocs()
			b.ResetTimer()
-			for i := 0; i < b.N; i++ {
+			for b.Loop() {
				h = tcase.lbls.Hash()
			}
			benchmarkLabelsResult = h
@@ -965,7 +965,7 @@ var benchmarkLabels = []Label{
func BenchmarkBuilder(b *testing.B) {
	var l Labels
	builder := NewBuilder(EmptyLabels())
-	for i := 0; i < b.N; i++ {
+	for b.Loop() {
		builder.Reset(EmptyLabels())
		for _, l := range benchmarkLabels {
			builder.Set(l.Name, l.Value)
@@ -978,7 +978,7 @@ func BenchmarkBuilder(b *testing.B) {
func BenchmarkLabels_Copy(b *testing.B) {
	l := NewForBenchmark(benchmarkLabels...)
-	for i := 0; i < b.N; i++ {
+	for b.Loop() {
		l = l.Copy()
	}
}


@@ -285,7 +285,7 @@ func BenchmarkFastRegexMatcher(b *testing.B) {
			require.NoError(b, err)
			b.ResetTimer()
-			for i := 0; i < b.N; i++ {
+			for b.Loop() {
				for _, text := range texts {
					_ = m.MatchString(text)
				}
@@ -329,7 +329,7 @@ func BenchmarkToNormalizedLower(b *testing.B) {
		inputs[i] = benchCase(l, uppercase, asciiOnly, i)
	}
	b.ResetTimer()
-	for n := 0; n < b.N; n++ {
+	for n := 0; b.Loop(); n++ {
		var a [256]byte
		toNormalisedLower(inputs[n%len(inputs)], a[:])
	}
@@ -1123,7 +1123,7 @@ func BenchmarkOptimizeEqualOrPrefixStringMatchers(b *testing.B) {
	}

	b.Run("without optimizeEqualOrPrefixStringMatchers()", func(b *testing.B) {
-		for n := 0; n < b.N; n++ {
+		for b.Loop() {
			for _, t := range texts {
				unoptimized.Matches(t)
			}
@@ -1131,7 +1131,7 @@ func BenchmarkOptimizeEqualOrPrefixStringMatchers(b *testing.B) {
	})

	b.Run("with optimizeEqualOrPrefixStringMatchers()", func(b *testing.B) {
-		for n := 0; n < b.N; n++ {
+		for b.Loop() {
			for _, t := range texts {
				optimized.Matches(t)
			}
@@ -1220,9 +1220,8 @@ func BenchmarkZeroOrOneCharacterStringMatcher(b *testing.B) {
	}

	matcher := &zeroOrOneCharacterStringMatcher{matchNL: true}
-	b.ResetTimer()
-	for n := 0; n < b.N; n++ {
+	for n := 0; b.Loop(); n++ {
		c := cases[n%len(cases)]
		got := matcher.Matches(c.str)
		if got != c.matches {
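
Note: b.Loop is only the loop condition, so benchmarks that need a running index to cycle through pre-generated inputs keep their three-clause form, as in the two hunks above. A minimal sketch (package and function names are illustrative):

package regexputil

import "testing"

func BenchmarkCycleInputs(b *testing.B) {
	inputs := []string{"alpha", "BETA", "gamma"} // pre-generated cases, not timed

	// Init and post statements still work; only the condition changes.
	for n := 0; b.Loop(); n++ {
		in := inputs[n%len(inputs)] // pick the next case deterministically
		_ = len(in)
	}
}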


@@ -62,8 +62,7 @@ func ReadLabels(fn string, n int) ([]Labels, error) {
		r := strings.NewReplacer("\"", "", "{", "", "}", "")
		s := r.Replace(scanner.Text())

-		labelChunks := strings.Split(s, ",")
-		for _, labelChunk := range labelChunks {
+		for labelChunk := range strings.SplitSeq(s, ",") {
			split := strings.Split(labelChunk, ":")
			b.Add(split[0], split[1])
		}


@@ -1065,7 +1065,7 @@ func BenchmarkRelabel(b *testing.B) {
	}
	for _, tt := range tests {
		b.Run(tt.name, func(b *testing.B) {
-			for i := 0; i < b.N; i++ {
+			for b.Loop() {
				_, _ = Process(tt.lbls, tt.cfgs...)
			}
		})


@@ -172,10 +172,9 @@ func benchParse(b *testing.B, data []byte, parser string) {
	b.SetBytes(int64(len(data)))
	b.ReportAllocs()
-	b.ResetTimer()

	st := labels.NewSymbolTable()
-	for i := 0; i < b.N; i++ {
+	for b.Loop() {
		p := newParserFn(data, st)

	Inner:
@@ -229,9 +228,8 @@ func benchExpFmt(b *testing.B, data []byte, expFormatTypeStr string) {
	b.SetBytes(int64(len(data)))
	b.ReportAllocs()
-	b.ResetTimer()
-	for i := 0; i < b.N; i++ {
+	for b.Loop() {
		decSamples := make(model.Vector, 0, 50)
		sdec := expfmt.SampleDecoder{
			Dec: expfmt.NewDecoder(bytes.NewReader(data), expfmt.NewFormat(expfmtFormatType)),
@@ -302,7 +300,7 @@ Inner:
	b.Run("case=no-ct", func(b *testing.B) {
		b.ReportAllocs()
		b.ResetTimer()
-		for i := 0; i < b.N; i++ {
+		for b.Loop() {
			if p.CreatedTimestamp() != 0 {
				b.Fatal("should be nil")
			}
@@ -332,7 +330,7 @@ Inner2:
	b.Run("case=ct", func(b *testing.B) {
		b.ReportAllocs()
		b.ResetTimer()
-		for i := 0; i < b.N; i++ {
+		for b.Loop() {
			if p.CreatedTimestamp() == 0 {
				b.Fatal("should be not nil")
			}


@@ -5766,13 +5766,13 @@ func FuzzProtobufParser_Labels(f *testing.F) {
	) {
		var (
			r              = rand.New(rand.NewSource(randSeed))
-			buffers        = pool.New(1+r.Intn(128), 128+r.Intn(1024), 2, func(sz int) interface{} { return make([]byte, 0, sz) })
+			buffers        = pool.New(1+r.Intn(128), 128+r.Intn(1024), 2, func(sz int) any { return make([]byte, 0, sz) })
			lastScrapeSize = 0
			observedLabels []labels.Labels
			st             = labels.NewSymbolTable()
		)

-		for i := 0; i < 20; i++ { // run multiple iterations to encounter memory corruptions
+		for range 20 { // run multiple iterations to encounter memory corruptions
			// Get buffer from pool like in scrape.go
			b := buffers.Get(lastScrapeSize).([]byte)
			buf := bytes.NewBuffer(b)
@@ -5848,7 +5848,7 @@ func generateFuzzMetricFamily(
		Unit: unit,
	}
	metricsCount := r.Intn(20)
-	for i := 0; i < metricsCount; i++ {
+	for range metricsCount {
		metric := dto.Metric{
			Label: generateFuzzLabels(r),
		}
@@ -5870,7 +5870,7 @@ func generateFuzzMetricFamily(
func generateExemplars(r *rand.Rand) []*dto.Exemplar {
	exemplarsCount := r.Intn(5)
	exemplars := make([]*dto.Exemplar, 0, exemplarsCount)
-	for i := 0; i < exemplarsCount; i++ {
+	for range exemplarsCount {
		exemplars = append(exemplars, &dto.Exemplar{
			Label: generateFuzzLabels(r),
			Value: r.Float64(),
@@ -5886,7 +5886,7 @@ func generateExemplars(r *rand.Rand) []*dto.Exemplar {
func generateFuzzLabels(r *rand.Rand) []dto.LabelPair {
	labelsCount := r.Intn(10)
	ls := make([]dto.LabelPair, 0, labelsCount)
-	for i := 0; i < labelsCount; i++ {
+	for range labelsCount {
		ls = append(ls, dto.LabelPair{
			Name:  generateValidLabelName(r),
			Value: generateValidLabelName(r),
@@ -5897,7 +5897,7 @@ func generateFuzzLabels(r *rand.Rand) []dto.LabelPair {
func generateHelp(r *rand.Rand) string {
	result := make([]string, 1+r.Intn(20))
-	for i := 0; i < len(result); i++ {
+	for i := range result {
		result[i] = generateValidLabelName(r)
	}
	return strings.Join(result, "_")
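
Note: besides the interface{} to any alias swap, this file shows the Go 1.22 range-over-int idioms: for range n when the body just runs n times, for i := range n when the index is needed, and for i := range slice instead of the counted len() loop. A minimal sketch of the range forms:

package main

import "fmt"

func main() {
	// Go 1.22: range over an int executes the body n times.
	for range 3 {
		fmt.Println("tick") // index not needed
	}

	// With an index: i takes the values 0, 1, 2.
	for i := range 3 {
		fmt.Println("i =", i)
	}

	// Ranging over a slice by index.
	result := make([]string, 2)
	for i := range result {
		result[i] = fmt.Sprintf("item_%d", i)
	}
	fmt.Println(result)
}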


@@ -752,8 +752,7 @@ func TestHangingNotifier(t *testing.T) {
		// Initialize the discovery manager
		// This is relevant as the updates aren't sent continually in real life, but only each updatert.
		// The old implementation of TestHangingNotifier didn't take that into account.
-		ctx, cancel := context.WithCancel(context.Background())
-		defer cancel()
+		ctx := t.Context()
		reg := prometheus.NewRegistry()
		sdMetrics, err := discovery.RegisterSDMetrics(reg, discovery.NewRefreshMetrics(reg))
		require.NoError(t, err)


@@ -362,7 +362,7 @@ func BenchmarkRangeQuery(b *testing.B) {
		b.Run(name, func(b *testing.B) {
			ctx := context.Background()
			b.ReportAllocs()
-			for i := 0; i < b.N; i++ {
+			for b.Loop() {
				qry, err := engine.NewRangeQuery(
					ctx, stor, nil, c.expr,
					time.Unix(int64((numIntervals-c.steps)*10), 0),
@@ -426,7 +426,7 @@ func BenchmarkJoinQuery(b *testing.B) {
		b.Run(name, func(b *testing.B) {
			ctx := context.Background()
			b.ReportAllocs()
-			for range b.N {
+			for b.Loop() {
				qry, err := engine.NewRangeQuery(
					ctx, stor, nil, c.expr,
					timestamp.Time(int64((numIntervals-c.steps)*10_000)),
@@ -508,7 +508,7 @@ func BenchmarkNativeHistograms(b *testing.B) {
	for _, tc := range cases {
		b.Run(tc.name, func(b *testing.B) {
			ng := promqltest.NewTestEngineWithOpts(b, opts)
-			for i := 0; i < b.N; i++ {
+			for b.Loop() {
				qry, err := ng.NewRangeQuery(context.Background(), testStorage, nil, tc.query, start, end, step)
				if err != nil {
					b.Fatal(err)
@@ -578,7 +578,7 @@ func BenchmarkNativeHistogramsCustomBuckets(b *testing.B) {
	for _, tc := range cases {
		b.Run(tc.name, func(b *testing.B) {
			ng := promqltest.NewTestEngineWithOpts(b, opts)
-			for i := 0; i < b.N; i++ {
+			for b.Loop() {
				qry, err := ng.NewRangeQuery(context.Background(), testStorage, nil, tc.query, start, end, step)
				if err != nil {
					b.Fatal(err)
@@ -640,7 +640,7 @@ func BenchmarkInfoFunction(b *testing.B) {
	engine := promql.NewEngine(opts)
	b.Run(tc.name, func(b *testing.B) {
		b.ResetTimer()
-		for i := 0; i < b.N; i++ {
+		for b.Loop() {
			b.StopTimer() // Stop the timer to exclude setup time.
			qry, err := engine.NewRangeQuery(context.Background(), testStorage, nil, tc.query, start, end, step)
			require.NoError(b, err)
@@ -795,7 +795,7 @@ func BenchmarkParser(b *testing.B) {
	for _, c := range cases {
		b.Run(c, func(b *testing.B) {
			b.ReportAllocs()
-			for i := 0; i < b.N; i++ {
+			for b.Loop() {
				parser.ParseExpr(c)
			}
		})
@@ -804,7 +804,7 @@ func BenchmarkParser(b *testing.B) {
		b.Run("preprocess "+c, func(b *testing.B) {
			expr, _ := parser.ParseExpr(c)
			start, end := time.Now().Add(-time.Hour), time.Now()
-			for i := 0; i < b.N; i++ {
+			for b.Loop() {
				promql.PreprocessExpr(expr, start, end, 0)
			}
		})
@@ -813,7 +813,7 @@ func BenchmarkParser(b *testing.B) {
		name := fmt.Sprintf("%s (should fail)", c)
		b.Run(name, func(b *testing.B) {
			b.ReportAllocs()
-			for i := 0; i < b.N; i++ {
+			for b.Loop() {
				parser.ParseExpr(c)
			}
		})


@@ -164,8 +164,7 @@ func TestQueryTimeout(t *testing.T) {
		Timeout: 5 * time.Millisecond,
	}
	engine := promqltest.NewTestEngineWithOpts(t, opts)
-	ctx, cancelCtx := context.WithCancel(context.Background())
-	defer cancelCtx()
+	ctx := t.Context()

	query := engine.NewTestQuery(func(ctx context.Context) error {
		time.Sleep(100 * time.Millisecond)
@@ -189,8 +188,7 @@ func TestQueryCancel(t *testing.T) {
		Timeout: 10 * time.Second,
	}
	engine := promqltest.NewTestEngineWithOpts(t, opts)
-	ctx, cancelCtx := context.WithCancel(context.Background())
-	defer cancelCtx()
+	ctx := t.Context()

	// Cancel a running query before it completes.
	block := make(chan struct{})
@@ -267,8 +265,7 @@ func TestQueryError(t *testing.T) {
	queryable := storage.QueryableFunc(func(_, _ int64) (storage.Querier, error) {
		return &errQuerier{err: errStorage}, nil
	})
-	ctx, cancelCtx := context.WithCancel(context.Background())
-	defer cancelCtx()
+	ctx := t.Context()

	vectorQuery, err := engine.NewInstantQuery(ctx, queryable, nil, "foo", time.Unix(1, 0))
	require.NoError(t, err)


@@ -286,7 +286,7 @@ func BenchmarkExprString(b *testing.B) {
		b.Run(readable(test), func(b *testing.B) {
			expr, err := ParseExpr(test)
			require.NoError(b, err)
-			for i := 0; i < b.N; i++ {
+			for b.Loop() {
				_ = expr.String()
			}
		})


@@ -569,7 +569,7 @@ func BenchmarkAlertingRuleAtomicField(b *testing.B) {
	rule := NewAlertingRule("bench", nil, 0, 0, labels.EmptyLabels(), labels.EmptyLabels(), labels.EmptyLabels(), "", true, nil)
	done := make(chan struct{})
	go func() {
-		for i := 0; i < b.N; i++ {
+		for b.Loop() {
			rule.GetEvaluationTimestamp()
		}
		close(done)
@@ -594,8 +594,7 @@ func TestAlertingRuleDuplicate(t *testing.T) {
	}
	engine := promqltest.NewTestEngineWithOpts(t, opts)

-	ctx, cancelCtx := context.WithCancel(context.Background())
-	defer cancelCtx()
+	ctx := t.Context()

	now := time.Now()


@@ -2772,8 +2772,7 @@ func BenchmarkRuleDependencyController_AnalyseRules(b *testing.B) {
	require.Empty(b, errs)
	require.Len(b, groups, 1)

-	b.ResetTimer()
-	for i := 0; i < b.N; i++ {
+	for b.Loop() {
		for _, g := range groups {
			ruleManager.opts.RuleDependencyController.AnalyseRules(g.rules)
		}


@@ -145,7 +145,7 @@ func BenchmarkRuleEval(b *testing.B) {
	b.ResetTimer()
-	for i := 0; i < b.N; i++ {
+	for b.Loop() {
		_, err := rule.Eval(context.TODO(), 0, ruleEvaluationTime, EngineQueryFunc(ng, storage), nil, 0)
		if err != nil {
			require.NoError(b, err)
@@ -168,8 +168,7 @@ func TestRuleEvalDuplicate(t *testing.T) {
	}
	engine := promqltest.NewTestEngineWithOpts(t, opts)

-	ctx, cancelCtx := context.WithCancel(context.Background())
-	defer cancelCtx()
+	ctx := t.Context()

	now := time.Now()


@@ -1081,8 +1081,7 @@ func TestNHCBAndCTZeroIngestion(t *testing.T) {
		expectedHistogramSum = 45.5
	)

-	ctx, cancel := context.WithCancel(context.Background())
-	defer cancel()
+	ctx := t.Context()
	app := &collectResultAppender{}
	discoveryManager, scrapeManager := runManagers(t, ctx, &Options{


@@ -1353,7 +1353,7 @@ func benchmarkDrain(b *testing.B, makeSeriesSet func() SeriesSet) {
	var t int64
	var v float64
	var iter chunkenc.Iterator
-	for n := 0; n < b.N; n++ {
+	for b.Loop() {
		seriesSet := makeSeriesSet()
		for seriesSet.Next() {
			iter = seriesSet.At().Iterator(iter)


@@ -100,7 +100,7 @@ func (m *mockCombinedAppender) Commit() error {
	return nil
}

-func requireEqual(t testing.TB, expected, actual interface{}, msgAndArgs ...interface{}) {
+func requireEqual(t testing.TB, expected, actual any, msgAndArgs ...any) {
	testutil.RequireEqualWithOptions(t, expected, actual, []cmp.Option{cmp.AllowUnexported(combinedSample{}, combinedHistogram{})}, msgAndArgs...)
}
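
Note: any has been a predeclared alias for interface{} since Go 1.18. Because it is an alias rather than a new type, signature rewrites like the one above are purely cosmetic: callers, implementations, and reflection all behave identically. A minimal sketch:

package main

import "fmt"

// Identical signatures: any is an alias, not a distinct type.
func oldStyle(v interface{}) string { return fmt.Sprint(v) }
func newStyle(v any) string         { return fmt.Sprint(v) }

func main() {
	fmt.Println(oldStyle(42) == newStyle(42)) // true
}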


@@ -410,7 +410,7 @@ func BenchmarkConvertBucketLayout(b *testing.B) {
			}
		}
		b.Run(fmt.Sprintf("gap %d", scenario.gap), func(b *testing.B) {
-			for i := 0; i < b.N; i++ {
+			for b.Loop() {
				convertBucketsLayout(buckets.BucketCounts().AsRaw(), buckets.Offset(), 0, true)
			}
		})
@@ -1010,7 +1010,7 @@ func BenchmarkConvertHistogramBucketsToNHCBLayout(b *testing.B) {
			}
		}
		b.Run(fmt.Sprintf("gap %d", scenario.gap), func(b *testing.B) {
-			for i := 0; i < b.N; i++ {
+			for b.Loop() {
				offset := getBucketOffset(buckets)
				convertBucketsLayout(buckets, int32(offset), 0, false)
			}


@@ -1065,7 +1065,7 @@ func BenchmarkPrometheusConverter_FromMetrics(b *testing.B) {
			noOpLogger := promslog.NewNopLogger()
			b.ResetTimer()
-			for range b.N {
+			for b.Loop() {
				app := &noOpAppender{}
				mockAppender := NewCombinedAppender(app, noOpLogger, false, appMetrics)
				converter := NewPrometheusConverter(mockAppender)


@@ -1440,7 +1440,7 @@ func BenchmarkSampleSend(b *testing.B) {
	defer m.Stop()

	b.ResetTimer()
-	for i := 0; i < b.N; i++ {
+	for i := 0; b.Loop(); i++ {
		m.Append(samples)
		m.UpdateSeriesSegment(series, i+1) // simulate what wlog.Watcher.garbageCollectSeries does
		m.SeriesReset(i + 1)
@@ -1488,7 +1488,7 @@ func BenchmarkStoreSeries(b *testing.B) {
	for _, tc := range testCases {
		b.Run(tc.name, func(b *testing.B) {
-			for i := 0; i < b.N; i++ {
+			for b.Loop() {
				c := NewTestWriteClient(remoteapi.WriteV1MessageType)
				dir := b.TempDir()
				cfg := config.DefaultQueueConfig
@@ -1932,7 +1932,7 @@ func BenchmarkBuildWriteRequest(b *testing.B) {
		pBuf := proto.NewBuffer(nil)

		totalSize := 0
-		for i := 0; i < b.N; i++ {
+		for b.Loop() {
			populateTimeSeries(batch, seriesBuff, true, true)
			req, _, _, err := buildWriteRequest(noopLogger, seriesBuff, nil, pBuf, nil, cEnc, compression.Snappy)
			if err != nil {
@@ -1973,7 +1973,7 @@ func BenchmarkBuildV2WriteRequest(b *testing.B) {
		pBuf := []byte{}

		totalSize := 0
-		for i := 0; i < b.N; i++ {
+		for b.Loop() {
			populateV2TimeSeries(&symbolTable, batch, seriesBuff, true, true, false)
			req, _, _, err := buildV2WriteRequest(noopLogger, seriesBuff, symbolTable.Symbols(), &pBuf, nil, cEnc, "snappy")
			if err != nil {
@@ -2365,7 +2365,7 @@ func BenchmarkBuildTimeSeries(b *testing.B) {
	// Send one sample per series, which is the typical remote_write case
	const numSamples = 10000
	filter := func(ts prompb.TimeSeries) bool { return filterTsLimit(99, ts) }
-	for i := 0; i < b.N; i++ {
+	for b.Loop() {
		samples := createProtoTimeseriesWithOld(numSamples, 100, extraLabels...)
		_, _, result, _, _, _ := buildTimeSeries(samples, filter)
		require.NotNil(b, result)


@@ -165,10 +165,9 @@ func BenchmarkStreamReadEndpoint(b *testing.B) {
	data, err := proto.Marshal(req)
	require.NoError(b, err)

-	b.ResetTimer()
	b.ReportAllocs()
-	for i := 0; i < b.N; i++ {
+	for b.Loop() {
		compressed := snappy.Encode(nil, data)
		request, err := http.NewRequest(http.MethodPost, "", bytes.NewBuffer(compressed))
		require.NoError(b, err)


@@ -934,7 +934,7 @@ func BenchmarkRemoteWriteHandler(b *testing.B) {
			appendable := &mockAppendable{}
			handler := NewWriteHandler(promslog.NewNopLogger(), nil, appendable, []remoteapi.WriteMessageType{tc.protoFormat}, false, false)
			b.ResetTimer()
-			for i := 0; i < b.N; i++ {
+			for b.Loop() {
				b.StopTimer()
				buf, err := tc.payloadFunc()
				require.NoError(b, err)


@@ -83,7 +83,7 @@ func BenchmarkOpenBlock(b *testing.B) {
	tmpdir := b.TempDir()
	blockDir := createBlock(b, tmpdir, genSeries(1e6, 20, 0, 10))
	b.Run("benchmark", func(b *testing.B) {
-		for i := 0; i < b.N; i++ {
+		for b.Loop() {
			block, err := OpenBlock(nil, blockDir, nil, nil)
			require.NoError(b, err)
			require.NoError(b, block.Close())
@@ -455,10 +455,9 @@ func BenchmarkLabelValuesWithMatchers(b *testing.B) {
	matchers := []*labels.Matcher{labels.MustNewMatcher(labels.MatchEqual, "c_ninety", "value0")}

-	b.ResetTimer()
	b.ReportAllocs()
-	for benchIdx := 0; benchIdx < b.N; benchIdx++ {
+	for b.Loop() {
		actualValues, err := indexReader.LabelValues(ctx, "b_tens", nil, matchers...)
		require.NoError(b, err)
		require.Len(b, actualValues, 9)


@@ -232,11 +232,10 @@ func benchmarkIterator(b *testing.B, newChunk func() Chunk) {
	}
	b.ReportAllocs()
-	b.ResetTimer()

	var res float64
	var it Iterator
-	for i := 0; i < b.N; {
+	for i := 0; b.Loop(); {
		it := chunk.Iterator(it)
		for it.Next() == ValFloat {
@@ -295,9 +294,8 @@ func benchmarkAppender(b *testing.B, deltas func() (int64, float64), newChunk fu
	}
	b.ReportAllocs()
-	b.ResetTimer()
-	for i := 0; i < b.N; i++ {
+	for b.Loop() {
		c := newChunk()

		a, err := c.Appender()


@@ -1762,7 +1762,7 @@ func BenchmarkAppendable(b *testing.B) {
	hApp := app.(*HistogramAppender)

	isAppendable := true
-	for i := 0; i < b.N; i++ {
+	for b.Loop() {
		_, _, _, _, ok, _ := hApp.appendable(h)
		isAppendable = isAppendable && ok
	}


@@ -28,10 +28,9 @@ func BenchmarkXorRead(b *testing.B) {
	}

	b.ReportAllocs()
-	b.ResetTimer()

	var it Iterator
-	for i := 0; i < b.N; i++ {
+	for b.Loop() {
		var ts int64
		var v float64
		it = c.Iterator(it)


@@ -232,7 +232,7 @@ func BenchmarkChunkWriteQueue_addJob(b *testing.B) {
			start.Add(1)

			jobs := make(chan chunkWriteJob, b.N)
-			for i := 0; i < b.N; i++ {
+			for i := 0; b.Loop(); i++ {
				jobs <- chunkWriteJob{
					seriesRef: HeadSeriesRef(i),
					ref:       ChunkDiskMapperRef(i),


@@ -1174,7 +1174,7 @@ func BenchmarkCompaction(b *testing.B) {
			b.ResetTimer()
			b.ReportAllocs()
-			for i := 0; i < b.N; i++ {
+			for b.Loop() {
				_, err = c.Compact(dir, blockDirs, blocks)
				require.NoError(b, err)
			}
@@ -1204,7 +1204,7 @@ func BenchmarkCompactionFromHead(b *testing.B) {
	b.ResetTimer()
	b.ReportAllocs()
-	for i := 0; i < b.N; i++ {
+	for i := 0; b.Loop(); i++ {
		createBlockFromHead(b, filepath.Join(dir, fmt.Sprintf("%d-%d", i, labelNames)), h)
	}
	h.Close()
@@ -1242,7 +1242,7 @@ func BenchmarkCompactionFromOOOHead(b *testing.B) {
	b.ResetTimer()
	b.ReportAllocs()
-	for i := 0; i < b.N; i++ {
+	for i := 0; b.Loop(); i++ {
		oooHead, err := NewOOOCompactionHead(context.TODO(), h)
		require.NoError(b, err)
		createBlockFromOOOHead(b, filepath.Join(dir, fmt.Sprintf("%d-%d", i, labelNames)), oooHead)


@@ -419,7 +419,7 @@ func BenchmarkAddExemplar(b *testing.B) {
	for _, capacity := range []int{1000, 10000, 100000} {
		for _, n := range []int{10000, 100000, 1000000} {
			b.Run(fmt.Sprintf("%d/%d", n, capacity), func(b *testing.B) {
-				for j := 0; j < b.N; j++ {
+				for b.Loop() {
					b.StopTimer()
					exs, err := NewCircularExemplarStorage(int64(capacity), eMetrics)
					require.NoError(b, err)
@@ -477,7 +477,7 @@ func BenchmarkResizeExemplars(b *testing.B) {
	for _, tc := range testCases {
		b.Run(fmt.Sprintf("%s-%d-to-%d", tc.name, tc.startSize, tc.endSize), func(b *testing.B) {
-			for j := 0; j < b.N; j++ {
+			for b.Loop() {
				b.StopTimer()
				exs, err := NewCircularExemplarStorage(tc.startSize, eMetrics)
				require.NoError(b, err)


@@ -42,7 +42,7 @@ func BenchmarkHeadStripeSeriesCreate(b *testing.B) {
	require.NoError(b, err)
	defer h.Close()

-	for i := 0; i < b.N; i++ {
+	for i := 0; b.Loop(); i++ {
		h.getOrCreate(uint64(i), labels.FromStrings("a", strconv.Itoa(i)), false)
	}
}
@@ -81,7 +81,7 @@ func BenchmarkHeadStripeSeriesCreate_PreCreationFailure(b *testing.B) {
	require.NoError(b, err)
	defer h.Close()

-	for i := 0; i < b.N; i++ {
+	for i := 0; b.Loop(); i++ {
		h.getOrCreate(uint64(i), labels.FromStrings("a", strconv.Itoa(i)), false)
	}
}
@@ -130,7 +130,7 @@ func BenchmarkHead_WalCommit(b *testing.B) {
			b.ReportAllocs()
			b.ResetTimer()
-			for i := 0; i < b.N; i++ {
+			for b.Loop() {
				b.StopTimer()
				h, w := newTestHead(b, 10000, compression.None, false)
				b.Cleanup(func() {


@@ -141,7 +141,7 @@ func BenchmarkHeadAppender_Append_Commit_ExistingSeries(b *testing.B) {
			b.ReportAllocs()
			b.ResetTimer()
-			for i := 0; i < b.N; i++ {
+			for b.Loop() {
				require.NoError(b, appendSamples())
			}
		})
@@ -448,7 +448,7 @@ func BenchmarkLoadWLs(b *testing.B) {
			b.ResetTimer()

			// Load the WAL.
-			for i := 0; i < b.N; i++ {
+			for b.Loop() {
				opts := DefaultHeadOptions()
				opts.ChunkRange = 1000
				opts.ChunkDirRoot = dir
@@ -485,7 +485,7 @@ func BenchmarkLoadRealWLs(b *testing.B) {
	}

	// Load the WAL.
-	for i := 0; i < b.N; i++ {
+	for b.Loop() {
		b.StopTimer()
		dir := b.TempDir()
		require.NoError(b, fileutil.CopyDirs(srcDir, dir))
@@ -1378,7 +1378,7 @@ func BenchmarkHead_Truncate(b *testing.B) {
			h := prepare(b, churn)
			b.ResetTimer()
-			for i := 0; i < b.N; i++ {
+			for i := 0; b.Loop(); i++ {
				require.NoError(b, h.Truncate(1000*int64(i)))
				// Make sure the benchmark is meaningful and it's actually truncating the expected amount of series.
				require.Equal(b, total-churn*i, int(h.NumSeries()))
@@ -3717,10 +3717,9 @@ func BenchmarkHeadLabelValuesWithMatchers(b *testing.B) {
	headIdxReader := head.indexRange(0, 200)
	matchers := []*labels.Matcher{labels.MustNewMatcher(labels.MatchEqual, "c_ninety", "value0")}

-	b.ResetTimer()
	b.ReportAllocs()
-	for benchIdx := 0; benchIdx < b.N; benchIdx++ {
+	for b.Loop() {
		actualValues, err := headIdxReader.LabelValues(ctx, "b_tens", nil, matchers...)
		require.NoError(b, err)
		require.Len(b, actualValues, 9)


@@ -518,9 +518,8 @@ func BenchmarkReader_ShardedPostings(b *testing.B) {
		})
	}
	ir, _, _ := createFileReader(ctx, b, input)
-	b.ResetTimer()

-	for n := 0; n < b.N; n++ {
+	for n := 0; b.Loop(); n++ {
		allPostings, err := ir.Postings(ctx, "const", fmt.Sprintf("%10d", 1))
		require.NoError(b, err)


@@ -115,7 +115,7 @@ func BenchmarkMemPostings_ensureOrder(b *testing.B) {
	b.ResetTimer()

-	for n := 0; n < b.N; n++ {
+	for b.Loop() {
		p.EnsureOrder(0)
		p.ordered = false
	}
@@ -307,7 +307,7 @@ func BenchmarkIntersect(t *testing.B) {
		bench.ResetTimer()
		bench.ReportAllocs()
-		for i := 0; i < bench.N; i++ {
+		for bench.Loop() {
			i1 := newListPostings(a...)
			i2 := newListPostings(b...)
			i3 := newListPostings(c...)
@@ -336,7 +336,7 @@ func BenchmarkIntersect(t *testing.B) {
		bench.ResetTimer()
		bench.ReportAllocs()
-		for i := 0; i < bench.N; i++ {
+		for bench.Loop() {
			i1 := newListPostings(a...)
			i2 := newListPostings(b...)
			i3 := newListPostings(c...)
@@ -365,7 +365,7 @@ func BenchmarkIntersect(t *testing.B) {
		its := make([]Postings, len(refs))
		bench.ResetTimer()
		bench.ReportAllocs()
-		for i := 0; i < bench.N; i++ {
+		for bench.Loop() {
			// Reset the ListPostings to their original values each time round the loop.
			for j := range refs {
				lps[j].list = refs[j]
@@ -396,7 +396,7 @@ func BenchmarkMerge(t *testing.B) {
	for _, nSeries := range []int{1, 10, 10000, 100000} {
		t.Run(strconv.Itoa(nSeries), func(bench *testing.B) {
			ctx := context.Background()
-			for i := 0; i < bench.N; i++ {
+			for bench.Loop() {
				// Reset the ListPostings to their original values each time round the loop.
				for j := range refs[:nSeries] {
					lps[j].list = refs[j]
@@ -936,8 +936,8 @@ func BenchmarkPostings_Stats(b *testing.B) {
		createPostingsLabelValues(fmt.Sprintf("area-%d", i), "new_area_of_work-", 1e3)
		createPostingsLabelValues(fmt.Sprintf("request_id-%d", i), "owner_name_work-", 1e3)
	}
	b.ResetTimer()
-	for n := 0; n < b.N; n++ {
+	for b.Loop() {
		p.Stats("__name__", 10, labels.SizeOfLabels)
	}
}
@@ -1083,7 +1083,7 @@ func BenchmarkMemPostings_Delete(b *testing.B) {
	})

	b.ResetTimer()
-	for n := 0; n < b.N; n++ {
+	for n := 0; b.Loop(); n++ {
		deleted := make(map[storage.SeriesRef]struct{}, refs)
		affected := make(map[labels.Label]struct{}, refs)
		for i := range refs {
@@ -1396,7 +1396,7 @@ func BenchmarkListPostings(b *testing.B) {
	for _, count := range []int{100, 1e3, 10e3, 100e3, maxCount} {
		b.Run(fmt.Sprintf("count=%d", count), func(b *testing.B) {
-			for i := 0; i < b.N; i++ {
+			for b.Loop() {
				p := NewListPostings(input[:count])
				var sum storage.SeriesRef
				for p.Next() {
@@ -1446,7 +1446,7 @@ func BenchmarkMemPostings_PostingsForLabelMatching(b *testing.B) {
	require.NoError(b, err)
	b.Logf("Fast matcher matches %d series", len(fp))
	b.Run("matcher=fast", func(b *testing.B) {
-		for i := 0; i < b.N; i++ {
+		for b.Loop() {
			mp.PostingsForLabelMatching(context.Background(), "label", fast.MatchString).Next()
		}
	})
@@ -1455,13 +1455,13 @@ func BenchmarkMemPostings_PostingsForLabelMatching(b *testing.B) {
	require.NoError(b, err)
	b.Logf("Slow matcher matches %d series", len(sp))
	b.Run("matcher=slow", func(b *testing.B) {
-		for i := 0; i < b.N; i++ {
+		for b.Loop() {
			mp.PostingsForLabelMatching(context.Background(), "label", slow.MatchString).Next()
		}
	})
	b.Run("matcher=all", func(b *testing.B) {
-		for i := 0; i < b.N; i++ {
+		for b.Loop() {
			// Match everything.
			p := mp.PostingsForLabelMatching(context.Background(), "label", func(_ string) bool { return true })
			var sum storage.SeriesRef


@@ -59,8 +59,8 @@ func BenchmarkPostingStatsMaxHep(b *testing.B) {
	stats := &maxHeap{}
	const maxCount = 9000000
	const heapLength = 10
	b.ResetTimer()
-	for n := 0; n < b.N; n++ {
+	for b.Loop() {
		stats.init(heapLength)
		for i := range maxCount {
			item := Stat{


@@ -94,7 +94,7 @@ func BenchmarkIsolation(b *testing.B) {
			defer wg.Done()
			<-start

-			for i := 0; i < b.N; i++ {
+			for b.Loop() {
				appendID, _ := iso.newAppendID(0)

				iso.closeAppend(appendID)
@@ -124,7 +124,7 @@ func BenchmarkIsolationWithState(b *testing.B) {
			defer wg.Done()
			<-start

-			for i := 0; i < b.N; i++ {
+			for b.Loop() {
				appendID, _ := iso.newAppendID(0)

				iso.closeAppend(appendID)
@@ -144,7 +144,7 @@ func BenchmarkIsolationWithState(b *testing.B) {
			defer wg.Done()
			<-start

-			for i := 0; i < b.N; i++ {
+			for b.Loop() {
				s := iso.State(math.MinInt64, math.MaxInt64)
				s.Close()
			}


@@ -179,7 +179,7 @@ func benchmarkPostingsForMatchers(b *testing.B, ir IndexReader) {
		b.Run(c.name, func(b *testing.B) {
			b.ReportAllocs()
			b.ResetTimer()
-			for i := 0; i < b.N; i++ {
+			for b.Loop() {
				p, err := PostingsForMatchers(ctx, ir, c.matchers...)
				require.NoError(b, err)
				// Iterate over the postings
@@ -235,7 +235,7 @@ func benchmarkLabelValuesWithMatchers(b *testing.B, ir IndexReader) {
	for _, c := range cases {
		b.Run(c.name, func(b *testing.B) {
-			for i := 0; i < b.N; i++ {
+			for b.Loop() {
				_, err := labelValuesWithMatchers(ctx, ir, c.labelName, nil, c.matchers...)
				require.NoError(b, err)
			}
@@ -250,7 +250,7 @@ func BenchmarkMergedStringIter(b *testing.B) {
		s[i] = fmt.Sprintf("symbol%v", i)
	}

-	for i := 0; i < b.N; i++ {
+	for b.Loop() {
		it := NewMergedStringIter(index.NewStringListIter(s), index.NewStringListIter(s))
		for range 100 {
			it = NewMergedStringIter(it, index.NewStringListIter(s))
@@ -298,7 +298,7 @@ func benchmarkSelect(b *testing.B, queryable storage.Queryable, numSeries int, s
	require.NoError(b, err)
	b.ResetTimer()
-	for i := 0; i < b.N; i++ {
+	for b.Loop() {
		ss := q.Select(context.Background(), sorted, nil, matcher)
		for ss.Next() {
		}


@@ -2052,7 +2052,7 @@ func BenchmarkMergedSeriesSet(b *testing.B) {
	b.ResetTimer()

-	for i := 0; i < b.N; i++ {
+	for b.Loop() {
		var sets []storage.SeriesSet
		for _, s := range in {
			sets = append(sets, newMockSeriesSet(s))
@@ -2676,7 +2676,7 @@ func BenchmarkSetMatcher(b *testing.B) {
		b.ResetTimer()
		b.ReportAllocs()
-		for n := 0; n < b.N; n++ {
+		for b.Loop() {
			ss := sq.Select(context.Background(), false, nil, labels.MustNewMatcher(labels.MatchRegexp, "test", c.pattern))
			for ss.Next() {
			}
@@ -3279,9 +3279,8 @@ func BenchmarkQueries(b *testing.B) {
}

func benchQuery(b *testing.B, expExpansions int, q storage.Querier, selectors labels.Selector) {
-	b.ResetTimer()
	b.ReportAllocs()
-	for i := 0; i < b.N; i++ {
+	for b.Loop() {
		ss := q.Select(context.Background(), false, nil, selectors...)
		var actualExpansions int
		var it chunkenc.Iterator
@@ -3524,8 +3523,8 @@ func BenchmarkHeadChunkQuerier(b *testing.B) {
		require.NoError(b, q.Close())
	}(querier)

	b.ReportAllocs()
	b.ResetTimer()
-	for i := 0; i < b.N; i++ {
+	for b.Loop() {
		ss := querier.Select(context.Background(), false, nil, labels.MustNewMatcher(labels.MatchRegexp, "foo", "bar.*"))
		total := 0
		for ss.Next() {
@@ -3569,8 +3568,8 @@ func BenchmarkHeadQuerier(b *testing.B) {
		require.NoError(b, q.Close())
	}(querier)

	b.ReportAllocs()
	b.ResetTimer()
-	for i := 0; i < b.N; i++ {
+	for b.Loop() {
		ss := querier.Select(context.Background(), false, nil, labels.MustNewMatcher(labels.MatchRegexp, "foo", "bar.*"))
		total := int64(0)
		for ss.Next() {


@@ -722,7 +722,7 @@ func BenchmarkWAL_HistogramEncoding(b *testing.B) {
		b.Run(fmt.Sprintf("type=%s/labels=%d/histograms=%d/buckets=%d", maker.name, labelCount, histograms, buckets), func(b *testing.B) {
			series, samples, nhcbs := maker.make(labelCount, histograms, buckets)
			enc := Encoder{}
-			for range b.N {
+			for b.Loop() {
				var buf []byte
				enc.Series(series, buf)
				enc.Samples(samples, buf)


@@ -524,7 +524,7 @@ func BenchmarkWAL_LogBatched(b *testing.B) {
	var recs [][]byte
	b.SetBytes(2048)

-	for i := 0; i < b.N; i++ {
+	for b.Loop() {
		recs = append(recs, buf[:])
		if len(recs) < 1000 {
			continue
@@ -553,7 +553,7 @@ func BenchmarkWAL_Log(b *testing.B) {
	var buf [2048]byte
	b.SetBytes(2048)

-	for i := 0; i < b.N; i++ {
+	for b.Loop() {
		err := w.Log(buf[:])
		require.NoError(b, err)
	}


@@ -154,7 +154,7 @@ func BenchmarkEncode(b *testing.B) {
			b.ReportAllocs()
			b.ResetTimer()
-			for i := 0; i < b.N; i++ {
+			for b.Loop() {
				if buf == nil {
					buf = NewSyncEncodeBuffer()
				}
@@ -181,7 +181,7 @@ func BenchmarkDecode(b *testing.B) {
			b.ReportAllocs()
			b.ResetTimer()
-			for i := 0; i < b.N; i++ {
+			for b.Loop() {
				if buf == nil {
					buf = NewSyncDecodeBuffer()
				}


@@ -78,8 +78,8 @@ func BenchmarkNewCompressionHandler_MaliciousAcceptEncoding(b *testing.B) {
	req := httptest.NewRequest(http.MethodGet, "/whatever", nil)
	req.Header.Set("Accept-Encoding", strings.Repeat(",", http.DefaultMaxHeaderBytes))
	b.ReportAllocs()
	b.ResetTimer()
-	for range b.N {
+	for b.Loop() {
		newCompressedResponseWriter(rec, req)
	}
}


@@ -37,7 +37,7 @@ func TestDedupe(t *testing.T) {
	// Trim empty lines
	lines := []string{}
-	for _, line := range strings.Split(buf.String(), "\n") {
+	for line := range strings.SplitSeq(buf.String(), "\n") {
		if line != "" {
			lines = append(lines, line)
		}
@@ -49,7 +49,7 @@ func TestDedupe(t *testing.T) {
	dlog.Info("test", "hello", "world")

	// Trim empty lines
	lines = []string{}
-	for _, line := range strings.Split(buf.String(), "\n") {
+	for line := range strings.SplitSeq(buf.String(), "\n") {
		if line != "" {
			lines = append(lines, line)
		}


@@ -116,8 +116,7 @@ func BenchmarkZeropoolPool(b *testing.B) {
	item := pool.Get()
	pool.Put(item)

-	b.ResetTimer()
-	for i := 0; i < b.N; i++ {
+	for b.Loop() {
		item := pool.Get()
		pool.Put(item)
	}
@@ -133,8 +132,7 @@ func BenchmarkSyncPoolValue(b *testing.B) {
	item := pool.Get().([]byte)
	pool.Put(item) //nolint:staticcheck // This allocates.

-	b.ResetTimer()
-	for i := 0; i < b.N; i++ {
+	for b.Loop() {
		item := pool.Get().([]byte)
		pool.Put(item) //nolint:staticcheck // This allocates.
	}
@@ -151,8 +149,7 @@ func BenchmarkSyncPoolNewPointer(b *testing.B) {
	item := pool.Get().(*[]byte)
	pool.Put(item)

-	b.ResetTimer()
-	for i := 0; i < b.N; i++ {
+	for b.Loop() {
		item := pool.Get().(*[]byte)
		buf := *item
		pool.Put(&buf)
@@ -170,8 +167,7 @@ func BenchmarkSyncPoolPointer(b *testing.B) {
	item := pool.Get().(*[]byte)
	pool.Put(item)

-	b.ResetTimer()
-	for i := 0; i < b.N; i++ {
+	for b.Loop() {
		item := pool.Get().(*[]byte)
		pool.Put(item)
	}


@@ -1539,7 +1539,7 @@ type AlertingRule struct {
type RecordingRule struct {
	Name           string           `json:"name"`
	Query          string           `json:"query"`
-	Labels         labels.Labels    `json:"labels,omitempty"`
+	Labels         labels.Labels    `json:"labels"`
	Health         rules.RuleHealth `json:"health"`
	LastError      string           `json:"lastError,omitempty"`
	EvaluationTime float64          `json:"evaluationTime"`
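
Note: a plausible motivation for the tag change above is that encoding/json's omitempty only omits false, zero numbers, nil pointers and interfaces, and empty strings, maps, slices, and arrays; a struct-typed field (which labels.Labels is in Prometheus's default build) is never considered empty, so the old tag was a no-op and the new one states the actual behavior. A minimal sketch of the pitfall (the types here are made up for illustration):

package main

import (
	"encoding/json"
	"fmt"
)

type inner struct{ A string }

type payload struct {
	S string `json:"s,omitempty"` // omitted when ""
	V inner  `json:"v,omitempty"` // never omitted: structs are not "empty"
}

func main() {
	out, _ := json.Marshal(payload{})
	fmt.Println(string(out)) // {"v":{"A":""}}; omitempty had no effect on V
}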


@@ -4567,7 +4567,7 @@ func BenchmarkRespond(b *testing.B) {
			b.ResetTimer()
			api := API{}
			api.InstallCodec(JSONCodec{})
-			for n := 0; n < b.N; n++ {
+			for b.Loop() {
				api.respond(&testResponseWriter, request, c.response, nil, "")
			}
		})


@@ -110,8 +110,7 @@ func TestReadyAndHealthy(t *testing.T) {
		panic(fmt.Sprintf("Unable to start web listeners: %s", err))
	}

-	ctx, cancel := context.WithCancel(context.Background())
-	defer cancel()
+	ctx := t.Context()
	go func() {
		err := webHandler.Run(ctx, l, "")
		if err != nil {
@@ -228,8 +227,7 @@ func TestRoutePrefix(t *testing.T) {
	if err != nil {
		panic(fmt.Sprintf("Unable to start web listeners: %s", err))
	}

-	ctx, cancel := context.WithCancel(context.Background())
-	defer cancel()
+	ctx := t.Context()
	go func() {
		err := webHandler.Run(ctx, l, "")
		if err != nil {
@@ -549,8 +547,7 @@ func TestAgentAPIEndPoints(t *testing.T) {
		panic(fmt.Sprintf("Unable to start web listeners: %s", err))
	}

-	ctx, cancel := context.WithCancel(context.Background())
-	defer cancel()
+	ctx := t.Context()
	go func() {
		err := webHandler.Run(ctx, l, "")
		if err != nil {
@@ -683,8 +680,7 @@ func TestMultipleListenAddresses(t *testing.T) {
		panic(fmt.Sprintf("Unable to start web listener: %s", err))
	}

-	ctx, cancel := context.WithCancel(context.Background())
-	defer cancel()
+	ctx := t.Context()
	go func() {
		err := webHandler.Run(ctx, l, "")
		if err != nil {