MINOR: pools: compute an estimate of each pool's average needed objects

This adds a sliding estimate of the pools' usage. The goal is to be able
to use this to start to more aggressively free memory instead of keeping
lots of unused objects in pools. The average is calculated as a sliding
average over the last 1024 consecutive measures of ->used during calls to
pool_free(), and 1/4 of its history is replaced with the current value of
->allocated whenever an allocation from the pool fails and results in a
call to malloc().

The result is a floating value between ->used and ->allocated, that tries
to react fast to under-estimates that result in expensive malloc() but
still maintains itself well in case of stable usage, and progressively
goes down if usage shrinks over time.

This new metric is reported as "needed_avg" in "show pools".

Sadly due to yet another include dependency hell, we couldn't reuse the
functions from freq_ctr.h so they were temporarily duplicated into memory.h.
This commit is contained in:
Willy Tarreau 2020-05-08 08:31:56 +02:00
parent 68ad53cb37
commit a1e4f8c27c
2 changed files with 48 additions and 2 deletions

View File

@ -90,6 +90,7 @@ struct pool_head {
__decl_hathreads(HA_SPINLOCK_T lock); /* the spin lock */
#endif
unsigned int used; /* how many chunks are currently in use */
unsigned int needed_avg;/* floating indicator between used and allocated */
unsigned int allocated; /* how many chunks have been allocated */
unsigned int limit; /* hard limit on the number of chunks */
unsigned int minavail; /* how many chunks are expected to be used */
@ -181,6 +182,44 @@ static inline ssize_t pool_get_index(const struct pool_head *pool)
return idx;
}
/* The two functions below were copied from freq_ctr.h's swrate_add, impossible
* to use here due to include dependency hell again!
*/
#define POOL_AVG_SAMPLES 1024
/* Atomically folds the new measure <v> into the sliding sum pointed to by
 * <sum>, aging out roughly one sample (1/POOL_AVG_SAMPLES of the sum,
 * rounded up) in the process. Returns the updated sum. Lock-free: retries
 * the CAS until it wins; _HA_ATOMIC_CAS refreshes <cur> on failure.
 */
static inline unsigned int pool_avg_add(unsigned int *sum, unsigned int v)
{
	unsigned int cur, next;

	cur = *sum;
	do {
		/* retire one averaged sample, then account for the new one */
		next = cur - (cur + POOL_AVG_SAMPLES - 1) / POOL_AVG_SAMPLES + v;
	} while (!_HA_ATOMIC_CAS(sum, &cur, next));
	return next;
}
/* make the new value <v> count for 1/4 of the total sum */
/* Atomically replaces 1/4 of the sliding sum pointed to by <sum> with the
 * new value <v> scaled over the whole POOL_AVG_SAMPLES window, so that <v>
 * immediately counts for 1/4 of the resulting average. Returns the updated
 * sum. Lock-free: retries the CAS until it wins; _HA_ATOMIC_CAS refreshes
 * <cur> on failure.
 */
static inline unsigned int pool_avg_bump(unsigned int *sum, unsigned int v)
{
	unsigned int cur, next;

	cur = *sum;
	do {
		/* drop a quarter of the history (rounded up), inject <v>
		 * weighted to represent a quarter of the window.
		 */
		next = cur - (cur + 3) / 4 + (POOL_AVG_SAMPLES * v + 3) / 4;
	} while (!_HA_ATOMIC_CAS(sum, &cur, next));
	return next;
}
/* Converts a sliding sum maintained by pool_avg_add()/pool_avg_bump() back
 * into an average over the POOL_AVG_SAMPLES window, rounded up so that any
 * non-zero sum reports at least 1.
 */
static inline unsigned int pool_avg(unsigned int sum)
{
	return (sum + POOL_AVG_SAMPLES - 1) / POOL_AVG_SAMPLES;
}
#ifdef CONFIG_HAP_LOCKLESS_POOLS
/* Tries to retrieve an object from the local pool cache corresponding to pool
@ -300,6 +339,7 @@ static inline void __pool_free(struct pool_head *pool, void *ptr)
} while (!_HA_ATOMIC_CAS(&pool->free_list, &free_list, ptr));
__ha_barrier_atomic_store();
_HA_ATOMIC_SUB(&pool->used, 1);
pool_avg_add(&pool->needed_avg, pool->used);
}
/* frees an object to the local cache, possibly pushing oldest objects to the
@ -509,6 +549,7 @@ static inline void pool_free(struct pool_head *pool, void *ptr)
*POOL_LINK(pool, ptr) = (void *)pool->free_list;
pool->free_list = (void *)ptr;
pool->used--;
pool_avg_add(&pool->needed_avg, pool->used);
HA_SPIN_UNLOCK(POOL_LOCK, &pool->lock);
#else /* release the entry for real to detect use after free */
/* ensure we crash on double free or free of a const area*/
@ -517,6 +558,7 @@ static inline void pool_free(struct pool_head *pool, void *ptr)
HA_SPIN_LOCK(POOL_LOCK, &pool->lock);
pool->allocated--;
pool->used--;
pool_avg_add(&pool->needed_avg, pool->used);
HA_SPIN_UNLOCK(POOL_LOCK, &pool->lock);
#endif /* DEBUG_UAF */
}

View File

@ -176,6 +176,8 @@ void *__pool_refill_alloc(struct pool_head *pool, unsigned int avail)
return NULL;
}
pool_avg_bump(&pool->needed_avg, pool->allocated);
ptr = malloc(size + POOL_EXTRA);
if (!ptr) {
_HA_ATOMIC_ADD(&pool->failed, 1);
@ -342,6 +344,7 @@ void *__pool_refill_alloc(struct pool_head *pool, unsigned int avail)
return NULL;
}
pool_avg_bump(&pool->needed_avg, pool->allocated);
HA_SPIN_UNLOCK(POOL_LOCK, &pool->lock);
ptr = pool_alloc_area(pool->size + POOL_EXTRA);
#ifdef DEBUG_MEMORY_POOLS
@ -486,9 +489,10 @@ void dump_pools_to_trash()
#ifndef CONFIG_HAP_LOCKLESS_POOLS
HA_SPIN_LOCK(POOL_LOCK, &entry->lock);
#endif
chunk_appendf(&trash, " - Pool %s (%u bytes) : %u allocated (%u bytes), %u used, %u failures, %u users, @%p=%02d%s\n",
chunk_appendf(&trash, " - Pool %s (%u bytes) : %u allocated (%u bytes), %u used, needed_avg %u, %u failures, %u users, @%p=%02d%s\n",
entry->name, entry->size, entry->allocated,
entry->size * entry->allocated, entry->used, entry->failed,
entry->size * entry->allocated, entry->used,
pool_avg(entry->needed_avg), entry->failed,
entry->users, entry, (int)pool_get_index(entry),
(entry->flags & MEM_F_SHARED) ? " [SHARED]" : "");