parallelize ./scrape test

Signed-off-by: pipiland2612 <nguyen.t.dang.minh@gmail.com>
pipiland2612 2025-08-06 15:31:47 +03:00 committed by Ayoub Mrini
parent 82a4b12507
commit 1607b5c8cc
2 changed files with 15 additions and 0 deletions
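The change is mechanical: each affected test, and each table-driven subtest, gains a t.Parallel() call as its first statement, which lets the Go test runner schedule it concurrently with the other tests in the package that also opt in. A minimal, self-contained sketch of the pattern follows; the test names, table contents, and package clause are illustrative only, not taken from this diff.

package scrape

import (
	"testing"
	"time"
)

// TestSomethingSlow is a made-up example of the pattern applied in this
// commit: t.Parallel() as the first statement marks the test as safe to run
// concurrently with every other test in the package that also opts in.
func TestSomethingSlow(t *testing.T) {
	t.Parallel()
	time.Sleep(50 * time.Millisecond) // stand-in for real scrape-test work
}

// TestSomethingTableDriven shows the table-driven variant used in this
// commit: the parent test and each subtest both opt in, so the subtests of
// one table can also overlap with each other.
func TestSomethingTableDriven(t *testing.T) {
	t.Parallel()
	for _, tc := range []struct {
		name string
		in   int
		want int
	}{
		{name: "zero", in: 0, want: 0},
		{name: "double", in: 2, want: 4},
	} {
		t.Run(tc.name, func(t *testing.T) {
			t.Parallel()
			if got := tc.in * 2; got != tc.want {
				t.Fatalf("got %d, want %d", got, tc.want)
			}
		})
	}
}

Two scheduler properties are worth keeping in mind: a subtest that calls t.Parallel() is paused and only resumes after its parent test function has returned, and total concurrency is capped by go test's -parallel flag, which defaults to GOMAXPROCS. Capturing tc directly in the closure relies on Go 1.22+ per-iteration loop variables; on older toolchains each iteration would need a tc := tc copy before the t.Run call.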

View File

@@ -745,6 +745,7 @@ func setupTestServer(t *testing.T, typ string, toWrite []byte) *httptest.Server
 // TestManagerCTZeroIngestion tests scrape manager for various CT cases.
 func TestManagerCTZeroIngestion(t *testing.T) {
+	t.Parallel()
 	const (
 		// _total suffix is required, otherwise expfmt with OMText will mark metric as "unknown"
 		expectedMetricName = "expected_metric_total"
@@ -917,6 +918,7 @@ func generateTestHistogram(i int) *dto.Histogram {
 }
 func TestManagerCTZeroIngestionHistogram(t *testing.T) {
+	t.Parallel()
 	const mName = "expected_histogram"
 	for _, tc := range []struct {
@@ -952,6 +954,7 @@ func TestManagerCTZeroIngestionHistogram(t *testing.T) {
 		},
 	} {
 		t.Run(tc.name, func(t *testing.T) {
+			t.Parallel()
 			ctx, cancel := context.WithCancel(context.Background())
 			defer cancel()
@@ -1156,6 +1159,7 @@ func requireTargets(
 // TestTargetDisappearsAfterProviderRemoved makes sure that when a provider is dropped, (only) its targets are dropped.
 func TestTargetDisappearsAfterProviderRemoved(t *testing.T) {
+	t.Parallel()
 	ctx, cancel := context.WithCancel(context.Background())
 	defer cancel()
@@ -1256,6 +1260,7 @@ scrape_configs:
 // TestOnlyProviderStaleTargetsAreDropped makes sure that when a job has only one provider with multiple targets
 // and when the provider can no longer discover some of those targets, only those stale targets are dropped.
 func TestOnlyProviderStaleTargetsAreDropped(t *testing.T) {
+	t.Parallel()
 	ctx, cancel := context.WithCancel(context.Background())
 	defer cancel()

View File

@@ -442,6 +442,7 @@ func (*testLoop) getCache() *scrapeCache {
 }
 func TestScrapePoolStop(t *testing.T) {
+	t.Parallel()
 	sp := &scrapePool{
 		activeTargets: map[uint64]*Target{},
 		loops: map[uint64]loop{},
@@ -501,6 +502,7 @@ func TestScrapePoolStop(t *testing.T) {
 }
 func TestScrapePoolReload(t *testing.T) {
+	t.Parallel()
 	var mtx sync.Mutex
 	numTargets := 20
@@ -866,6 +868,7 @@ func TestScrapePoolAppender(t *testing.T) {
 }
 func TestScrapePoolRaces(t *testing.T) {
+	t.Parallel()
 	interval, _ := model.ParseDuration("1s")
 	timeout, _ := model.ParseDuration("500ms")
 	newConfig := func() *config.ScrapeConfig {
@@ -998,6 +1001,7 @@ func newBasicScrapeLoopWithFallback(t testing.TB, ctx context.Context, scraper s
 }
 func TestScrapeLoopStopBeforeRun(t *testing.T) {
+	t.Parallel()
 	scraper := &testScraper{}
 	sl := newBasicScrapeLoop(t, context.Background(), scraper, nil, 1)
@@ -1102,6 +1106,7 @@ func TestScrapeLoopStop(t *testing.T) {
 }
 func TestScrapeLoopRun(t *testing.T) {
+	t.Parallel()
 	var (
 		signal = make(chan struct{}, 1)
 		errc = make(chan error)
@@ -3922,6 +3927,7 @@ func TestCheckAddError(t *testing.T) {
 }
 func TestScrapeReportSingleAppender(t *testing.T) {
+	t.Parallel()
 	s := teststorage.New(t)
 	defer s.Close()
@@ -4331,6 +4337,7 @@ test_summary_count 199
 // Testing whether we can automatically convert scraped classic histograms into native histograms with custom buckets.
 func TestConvertClassicHistogramsToNHCB(t *testing.T) {
+	t.Parallel()
 	genTestCounterText := func(name string, value int, withMetadata bool) string {
 		if withMetadata {
 			return fmt.Sprintf(`
@@ -4741,6 +4748,7 @@ metric: <
 		}
 		t.Run(fmt.Sprintf("%s with %s", name, metricsTextName), func(t *testing.T) {
+			t.Parallel()
 			simpleStorage := teststorage.New(t)
 			defer simpleStorage.Close()
@@ -5367,6 +5375,7 @@ scrape_configs:
 }
 func TestTargetScrapeConfigWithLabels(t *testing.T) {
+	t.Parallel()
 	const (
 		configTimeout = 1500 * time.Millisecond
 		expectedTimeout = "1.5"
@@ -5521,6 +5530,7 @@ func TestTargetScrapeConfigWithLabels(t *testing.T) {
 	for _, c := range cases {
 		t.Run(c.name, func(t *testing.T) {
+			t.Parallel()
			select {
			case <-run(t, c.cfg, c.targets):
			case <-time.After(10 * time.Second):