Mirror of https://github.com/prometheus/prometheus.git (synced 2025-08-06 22:27:17 +02:00)
[BUGFIX] Scraping: bump cache iteration after error (#16174)
We use the cache iteration number to detect when the same metric has occurred twice in a scrape. We need to bump this number at the end of every scrape, not just successful scrapes.

The `iter` number is also used:
* After a successful scrape, to delete older metrics and metadata.
* To detect when metadata changed in this scrape.

None of those additional cases is broken by incrementing the number on error.

Signed-off-by: Bryan Boreham <bjboreham@gmail.com>
parent da0412a519
commit e32d89af7f
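For background: per the commit message, the cache iteration number is how the scrape loop detects the same metric occurring twice within one scrape. If that number only advances on successful scrapes, series parsed during a failed scrape still look "seen in the current iteration" to the following scrape, which then drops their samples as duplicates (the behaviour reported in issue #16160 and exercised by the new test below). The following toy sketch models that mechanism; the `cache` type and the `markSeen`/`iterDone`/`lastSeen` names are illustrative, not the actual `scrapeCache` API.

package main

import "fmt"

// Toy model of the iteration-based duplicate check (illustrative only; the
// real logic lives in Prometheus' scrapeCache, with different names).
type cache struct {
	iter     uint64
	lastSeen map[string]uint64 // series -> iteration in which it was last seen
}

// markSeen reports whether the series was already seen in the current
// iteration, then records it as seen now.
func (c *cache) markSeen(series string) bool {
	last, ok := c.lastSeen[series]
	already := ok && last == c.iter
	c.lastSeen[series] = c.iter
	return already
}

// iterDone marks the end of a scrape. After this commit, the iter bump runs
// on every scrape, successful or not.
func (c *cache) iterDone() { c.iter++ }

func main() {
	// Old behaviour: the scrape parses metric_a, then fails, and iter is never
	// bumped. The next scrape sees the same iteration number and wrongly flags
	// metric_a as a duplicate, dropping its sample (issue #16160).
	c := &cache{lastSeen: map[string]uint64{}}
	fmt.Println(c.markSeen("metric_a")) // false: first occurrence this iteration
	fmt.Println(c.markSeen("metric_a")) // true: treated as a duplicate

	// Fixed behaviour: iter is bumped at the end of every scrape, so the next
	// scrape starts a fresh iteration and the sample is appended normally.
	c = &cache{lastSeen: map[string]uint64{}}
	c.markSeen("metric_a")
	c.iterDone()
	fmt.Println(c.markSeen("metric_a")) // false: new iteration, not a duplicate
}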
scrape/scrape.go
@@ -1034,8 +1034,6 @@ func (c *scrapeCache) iterDone(flushCache bool) {
 			}
 		}
 		c.metaMtx.Unlock()
-
-		c.iter++
 	}
 
 	// Swap current and previous series.
@@ -1045,6 +1043,8 @@ func (c *scrapeCache) iterDone(flushCache bool) {
 	for k := range c.seriesCur {
 		delete(c.seriesCur, k)
 	}
+
+	c.iter++
 }
 
 func (c *scrapeCache) get(met []byte) (*cacheEntry, bool, bool) {
scrape/scrape_test.go
@@ -5153,3 +5153,40 @@ func TestScrapePoolScrapeAfterReload(t *testing.T) {
 
 	<-time.After(1 * time.Second)
 }
+
+// Regression test against https://github.com/prometheus/prometheus/issues/16160.
+// The first scrape fails with a parsing error, but the second should
+// succeed and cause `metric_a=11` to appear in the appender.
+func TestScrapeAppendWithParseError(t *testing.T) {
+	const (
+		scrape1 = `metric_a 1
+`
+		scrape2 = `metric_a 11
+# EOF`
+	)
+
+	sl := newBasicScrapeLoop(t, context.Background(), nil, nil, 0)
+	sl.cache = newScrapeCache(sl.metrics)
+
+	now := time.Now()
+	capp := &collectResultAppender{next: nopAppender{}}
+	_, _, _, err := sl.append(capp, []byte(scrape1), "application/openmetrics-text", now)
+	require.Error(t, err)
+	_, _, _, err = sl.append(capp, nil, "application/openmetrics-text", now)
+	require.NoError(t, err)
+	require.Empty(t, capp.resultFloats)
+
+	capp = &collectResultAppender{next: nopAppender{}}
+	_, _, _, err = sl.append(capp, []byte(scrape2), "application/openmetrics-text", now.Add(15*time.Second))
+	require.NoError(t, err)
+	require.NoError(t, capp.Commit())
+
+	want := []floatSample{
+		{
+			metric: labels.FromStrings(model.MetricNameLabel, "metric_a"),
+			t:      timestamp.FromTime(now.Add(15 * time.Second)),
+			f:      11,
+		},
+	}
+	requireEqual(t, want, capp.resultFloats, "Appended samples not as expected:\n%s", capp)
+}
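The new test can be run on its own with `go test ./scrape -run TestScrapeAppendWithParseError` (path assuming the upstream repository layout, where the scrape loop and its tests live in the `scrape` package).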