From 2b5579f6da2b24bfcc7fefb4ba06d4a4aba2eb05 Mon Sep 17 00:00:00 2001
From: Willy Tarreau
Date: Sat, 17 Apr 2021 18:06:57 +0200
Subject: [PATCH] MINOR: pools: always use atomic ops to maintain counters

A part of the code cannot be factored out because it still uses
non-atomic inc/dec for pool->used and pool->allocated as these are
located under the pool's lock. While it can make sense in terms of
bus cycles, it does not make sense in terms of code normalization.

Further, some operations were still performed under a lock that could
be totally removed via the use of atomic ops.

There is still one occurrence in pool_put_to_shared_cache() in the
locked code where pool_free_area() is called under the lock, which
must absolutely be fixed.
---
 include/haproxy/pool.h | 16 +++++++---------
 src/pool.c             |  8 +++-----
 2 files changed, 10 insertions(+), 14 deletions(-)

diff --git a/include/haproxy/pool.h b/include/haproxy/pool.h
index 19e596552..a44062556 100644
--- a/include/haproxy/pool.h
+++ b/include/haproxy/pool.h
@@ -217,11 +217,11 @@ static inline void *pool_get_from_shared_cache(struct pool_head *pool)
 	void *p;
 
 	HA_SPIN_LOCK(POOL_LOCK, &pool->lock);
-	if ((p = pool->free_list) != NULL) {
+	if ((p = pool->free_list) != NULL)
 		pool->free_list = *POOL_LINK(pool, p);
-		pool->used++;
-	}
 	HA_SPIN_UNLOCK(POOL_LOCK, &pool->lock);
+	if (p)
+		_HA_ATOMIC_INC(&pool->used);
 
 #ifdef DEBUG_MEMORY_POOLS
 	if (p) {
@@ -238,11 +238,11 @@ static inline void *pool_get_from_shared_cache(struct pool_head *pool)
 static inline void pool_put_to_shared_cache(struct pool_head *pool, void *ptr)
 {
 #ifndef DEBUG_UAF /* normal pool behaviour */
+	_HA_ATOMIC_DEC(&pool->used);
 	HA_SPIN_LOCK(POOL_LOCK, &pool->lock);
-	pool->used--;
 	if (pool_is_crowded(pool)) {
 		pool_free_area(ptr, pool->size + POOL_EXTRA);
-		pool->allocated--;
+		_HA_ATOMIC_DEC(&pool->allocated);
 	} else {
 		*POOL_LINK(pool, ptr) = (void *)pool->free_list;
 		pool->free_list = (void *)ptr;
@@ -253,11 +253,9 @@ static inline void pool_put_to_shared_cache(struct pool_head *pool, void *ptr)
 	/* ensure we crash on double free or free of a const area*/
 	*(uint32_t *)ptr = 0xDEADADD4;
 	pool_free_area(ptr, pool->size + POOL_EXTRA);
-	HA_SPIN_LOCK(POOL_LOCK, &pool->lock);
-	pool->allocated--;
-	pool->used--;
+	_HA_ATOMIC_DEC(&pool->allocated);
+	_HA_ATOMIC_DEC(&pool->used);
 	swrate_add(&pool->needed_avg, POOL_AVG_SAMPLES, pool->used);
-	HA_SPIN_UNLOCK(POOL_LOCK, &pool->lock);
 #endif /* DEBUG_UAF */
 }
 
diff --git a/src/pool.c b/src/pool.c
index f309b8487..f98092225 100644
--- a/src/pool.c
+++ b/src/pool.c
@@ -215,7 +215,6 @@ void pool_flush(struct pool_head *pool)
 {
 	struct pool_free_list cmp, new;
 	void **next, *temp;
-	int removed = 0;
 
 	if (!pool)
 		return;
@@ -232,11 +231,10 @@ void pool_flush(struct pool_head *pool)
 	while (next) {
 		temp = next;
 		next = *POOL_LINK(pool, temp);
-		removed++;
 		pool_free_area(temp, pool->size + POOL_EXTRA);
+		_HA_ATOMIC_DEC(&pool->allocated);
 	}
 	pool->free_list = next;
-	_HA_ATOMIC_SUB(&pool->allocated, removed);
 	/* here, we should have pool->allocate == pool->used */
 }
 
@@ -300,9 +298,9 @@ void pool_flush(struct pool_head *pool)
 			break;
 		}
 		pool->free_list = *POOL_LINK(pool, temp);
-		pool->allocated--;
 		HA_SPIN_UNLOCK(POOL_LOCK, &pool->lock);
 		pool_free_area(temp, pool->size + POOL_EXTRA);
+		_HA_ATOMIC_DEC(&pool->allocated);
 	}
 	/* here, we should have pool->allocated == pool->used */
 }
@@ -327,8 +325,8 @@ void pool_gc(struct pool_head *pool_ctx)
 		    (int)(entry->allocated - entry->used) > (int)entry->minavail) {
 			temp = entry->free_list;
 			entry->free_list = *POOL_LINK(entry, temp);
-			entry->allocated--;
 			pool_free_area(temp, entry->size + POOL_EXTRA);
+			_HA_ATOMIC_DEC(&entry->allocated);
 		}
 	}
 
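For readers less familiar with this pattern, below is a minimal
standalone sketch (not HAProxy code) of what the pool.h hunks do: the
free-list manipulation stays inside the critical section, while the
counter update moves outside it as a single atomic read-modify-write.
The names pool_demo, demo_get and demo_put are invented for
illustration, and plain GCC __atomic builtins plus a pthread spinlock
stand in for _HA_ATOMIC_INC/_HA_ATOMIC_DEC and
HA_SPIN_LOCK/HA_SPIN_UNLOCK:

    #include <pthread.h>
    #include <stdlib.h>

    struct pool_demo {
            pthread_spinlock_t lock; /* protects free_list only; set up
                                      * with pthread_spin_init() */
            void *free_list;         /* singly linked list of free objects */
            unsigned int used;       /* maintained atomically, outside the lock */
    };

    static void *demo_get(struct pool_demo *pool)
    {
            void *p;

            pthread_spin_lock(&pool->lock);
            if ((p = pool->free_list) != NULL)
                    pool->free_list = *(void **)p; /* pop the head under the lock */
            pthread_spin_unlock(&pool->lock);

            if (p) /* the counter no longer needs the lock */
                    __atomic_add_fetch(&pool->used, 1, __ATOMIC_SEQ_CST);
            return p;
    }

    static void demo_put(struct pool_demo *pool, void *ptr)
    {
            __atomic_sub_fetch(&pool->used, 1, __ATOMIC_SEQ_CST);
            pthread_spin_lock(&pool->lock);
            *(void **)ptr = pool->free_list;       /* push the head under the lock */
            pool->free_list = ptr;
            pthread_spin_unlock(&pool->lock);
    }

The same normalization explains the src/pool.c hunks: the batched
_HA_ATOMIC_SUB(&pool->allocated, removed) becomes one _HA_ATOMIC_DEC
per freed object, i.e. a few more atomic operations in a cold path, in
exchange for every counter being updated the same way everywhere.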