Mirror of https://github.com/prometheus/prometheus.git
Scraping: use clear builtin function
This was added in Go 1.21, and is neater than a loop deleting all elements.

Also move the comment noting why we do this, because it could be read as saying this is the only reason we have two maps.

Signed-off-by: Bryan Boreham <bjboreham@gmail.com>
Parent: 5915a013b7
Commit: 8563ed03e0
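The change relies on the `clear` builtin introduced in Go 1.21. As a minimal standalone sketch (not Prometheus code), the following shows the older delete-every-key loop next to `clear`; both empty the map in place without allocating a new one, so its backing storage can be reused.

package main

import "fmt"

func main() {
	m := map[string]int{"a": 1, "b": 2, "c": 3}

	// Before Go 1.21: delete every key one by one.
	// (The compiler recognises this exact loop shape and clears the map efficiently.)
	for k := range m {
		delete(m, k)
	}

	m["a"] = 1 // the emptied map remains usable

	// Go 1.21 and later: the clear builtin removes all entries in one call.
	clear(m)

	fmt.Println(len(m)) // 0
}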
@@ -981,7 +981,6 @@ type scrapeCache struct {
 	droppedSeries map[string]*uint64
 
 	// Series that were seen in the current and previous scrape, for staleness detection.
-	// We hold two maps and swap them out to save allocations.
 	seriesCur  map[storage.SeriesRef]*cacheEntry
 	seriesPrev map[storage.SeriesRef]*cacheEntry
 
@@ -1059,13 +1058,9 @@ func (c *scrapeCache) iterDone(flushCache bool) {
 		c.metaMtx.Unlock()
 	}
 
-	// Swap current and previous series.
+	// Swap current and previous series then clear the new current, to save allocations.
 	c.seriesPrev, c.seriesCur = c.seriesCur, c.seriesPrev
-
-	// We have to delete every single key in the map.
-	for k := range c.seriesCur {
-		delete(c.seriesCur, k)
-	}
+	clear(c.seriesCur)
 
 	c.iter++
 }
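For context, the seriesCur/seriesPrev pair implements a swap-and-clear pattern: after each scrape the two maps trade places and the new current map is emptied, so the previous scrape's series remain available for staleness detection while both map allocations are reused. Below is a minimal, self-contained sketch of that pattern; it is a simplified stand-in, not the actual scrapeCache, and the cache type with its see, iterDone, and stale methods is invented for illustration.

package main

import "fmt"

// cache tracks which series were seen in the current and previous iteration.
type cache struct {
	cur  map[string]struct{}
	prev map[string]struct{}
}

func newCache() *cache {
	return &cache{
		cur:  map[string]struct{}{},
		prev: map[string]struct{}{},
	}
}

// see records a series as seen in the current iteration.
func (c *cache) see(series string) { c.cur[series] = struct{}{} }

// iterDone swaps current and previous, then clears the new current,
// mirroring the pattern in the commit above.
func (c *cache) iterDone() {
	c.prev, c.cur = c.cur, c.prev
	clear(c.cur)
}

// stale reports series that were present previously but not in this iteration.
func (c *cache) stale() []string {
	var out []string
	for s := range c.prev {
		if _, ok := c.cur[s]; !ok {
			out = append(out, s)
		}
	}
	return out
}

func main() {
	c := newCache()
	c.see("up")
	c.see("http_requests_total")
	c.iterDone()

	c.see("up")            // "http_requests_total" is not seen this iteration
	fmt.Println(c.stale()) // [http_requests_total]
}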