MINOR: pools: always use atomic ops to maintain counters

A part of the code cannot be factored out because it still uses
non-atomic inc/dec on pool->used and pool->allocated, as these updates
are performed under the pool's lock. While this can make sense in terms
of bus cycles, it does not in terms of code normalization. Further,
some operations were still performed under a lock that can be removed
entirely thanks to atomic ops.
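
As an illustration of the general pattern applied throughout this commit, here
is a minimal, self-contained sketch (not HAProxy code) contrasting a counter
maintained under a spinlock with one maintained by an atomic operation. The
names (counter_lock, inc_locked, inc_atomic) are made up, and the HAProxy
_HA_ATOMIC_INC()/_HA_ATOMIC_DEC() macros are assumed to boil down to primitives
comparable to the GCC/Clang __atomic builtins used below.

#include <pthread.h>

/* Illustration only: two ways to maintain a shared counter.
 * counter_lock must be initialized with pthread_spin_init() before use. */
static pthread_spinlock_t counter_lock;
static unsigned int counter;

/* Locked variant: correct, but the lock is held just for the counter. */
static void inc_locked(void)
{
        pthread_spin_lock(&counter_lock);
        counter++;
        pthread_spin_unlock(&counter_lock);
}

/* Atomic variant: no lock is needed at all for a simple inc/dec. */
static void inc_atomic(void)
{
        __atomic_add_fetch(&counter, 1, __ATOMIC_RELAXED);
}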

There is still one occurrence in the locked code of
pool_put_to_shared_cache() where pool_free_area() is called under the
lock; this one must absolutely be fixed.
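
For reference, one way such a call could be moved out of the critical section
is to take the decision under the lock but perform the release (and the
counter update) only after unlocking. The sketch below is illustrative only,
with made-up names (fake_pool, fake_is_crowded, put_sketch) standing in for
the real struct pool_head, pool_is_crowded() and pool_free_area(); it is not
the actual HAProxy fix.

#include <pthread.h>
#include <stdlib.h>

struct fake_pool {
        pthread_spinlock_t lock;   /* initialize with pthread_spin_init() */
        unsigned int used;
        unsigned int allocated;
        unsigned int minavail;
        void *free_list;
};

/* Hypothetical stand-in for pool_is_crowded(): too many idle objects. */
static int fake_is_crowded(const struct fake_pool *pool)
{
        return pool->allocated - pool->used > pool->minavail;
}

static void put_sketch(struct fake_pool *pool, void *ptr)
{
        int must_free;

        /* counters no longer need the lock */
        __atomic_sub_fetch(&pool->used, 1, __ATOMIC_RELAXED);

        pthread_spin_lock(&pool->lock);
        must_free = fake_is_crowded(pool);
        if (!must_free) {
                *(void **)ptr = pool->free_list;  /* push back onto the free list */
                pool->free_list = ptr;
        }
        pthread_spin_unlock(&pool->lock);

        if (must_free) {
                free(ptr);                        /* stand-in for pool_free_area() */
                __atomic_sub_fetch(&pool->allocated, 1, __ATOMIC_RELAXED);
        }
}
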
Willy Tarreau 2021-04-17 18:06:57 +02:00
parent 13843641e5
commit 2b5579f6da
2 changed files with 10 additions and 14 deletions

@@ -217,11 +217,11 @@ static inline void *pool_get_from_shared_cache(struct pool_head *pool)
         void *p;
 
         HA_SPIN_LOCK(POOL_LOCK, &pool->lock);
-        if ((p = pool->free_list) != NULL) {
+        if ((p = pool->free_list) != NULL)
                 pool->free_list = *POOL_LINK(pool, p);
-                pool->used++;
-        }
         HA_SPIN_UNLOCK(POOL_LOCK, &pool->lock);
+        if (p)
+                _HA_ATOMIC_INC(&pool->used);
 
 #ifdef DEBUG_MEMORY_POOLS
         if (p) {
@@ -238,11 +238,11 @@ static inline void *pool_get_from_shared_cache(struct pool_head *pool)
 static inline void pool_put_to_shared_cache(struct pool_head *pool, void *ptr)
 {
 #ifndef DEBUG_UAF /* normal pool behaviour */
+        _HA_ATOMIC_DEC(&pool->used);
         HA_SPIN_LOCK(POOL_LOCK, &pool->lock);
-        pool->used--;
         if (pool_is_crowded(pool)) {
                 pool_free_area(ptr, pool->size + POOL_EXTRA);
-                pool->allocated--;
+                _HA_ATOMIC_DEC(&pool->allocated);
         } else {
                 *POOL_LINK(pool, ptr) = (void *)pool->free_list;
                 pool->free_list = (void *)ptr;
@@ -253,11 +253,9 @@ static inline void pool_put_to_shared_cache(struct pool_head *pool, void *ptr)
         /* ensure we crash on double free or free of a const area*/
         *(uint32_t *)ptr = 0xDEADADD4;
         pool_free_area(ptr, pool->size + POOL_EXTRA);
-        HA_SPIN_LOCK(POOL_LOCK, &pool->lock);
-        pool->allocated--;
-        pool->used--;
+        _HA_ATOMIC_DEC(&pool->allocated);
+        _HA_ATOMIC_DEC(&pool->used);
         swrate_add(&pool->needed_avg, POOL_AVG_SAMPLES, pool->used);
-        HA_SPIN_UNLOCK(POOL_LOCK, &pool->lock);
 #endif /* DEBUG_UAF */
 }

@@ -215,7 +215,6 @@ void pool_flush(struct pool_head *pool)
 {
         struct pool_free_list cmp, new;
         void **next, *temp;
-        int removed = 0;
 
         if (!pool)
                 return;
@@ -232,11 +231,10 @@ void pool_flush(struct pool_head *pool)
         while (next) {
                 temp = next;
                 next = *POOL_LINK(pool, temp);
-                removed++;
                 pool_free_area(temp, pool->size + POOL_EXTRA);
+                _HA_ATOMIC_DEC(&pool->allocated);
         }
         pool->free_list = next;
-        _HA_ATOMIC_SUB(&pool->allocated, removed);
         /* here, we should have pool->allocate == pool->used */
 }
@@ -300,9 +298,9 @@ void pool_flush(struct pool_head *pool)
                         break;
                 }
                 pool->free_list = *POOL_LINK(pool, temp);
-                pool->allocated--;
                 HA_SPIN_UNLOCK(POOL_LOCK, &pool->lock);
                 pool_free_area(temp, pool->size + POOL_EXTRA);
+                _HA_ATOMIC_DEC(&pool->allocated);
         }
         /* here, we should have pool->allocated == pool->used */
 }
@@ -327,8 +325,8 @@ void pool_gc(struct pool_head *pool_ctx)
                        (int)(entry->allocated - entry->used) > (int)entry->minavail) {
                         temp = entry->free_list;
                         entry->free_list = *POOL_LINK(entry, temp);
-                        entry->allocated--;
                         pool_free_area(temp, entry->size + POOL_EXTRA);
+                        _HA_ATOMIC_DEC(&entry->allocated);
                 }
         }