From 57c5c6db0cc67e5d1673dee108712f5e9b603409 Mon Sep 17 00:00:00 2001
From: Willy Tarreau
Date: Thu, 30 Dec 2021 17:20:27 +0100
Subject: [PATCH] MINOR: pool: rely on pool_free_nocache() in
 pool_put_to_shared_cache()

At the moment pool_put_to_shared_cache() checks if the pool is crowded,
and if so it does the exact same job as pool_free_nocache(), otherwise
it adds the object there. This patch rearranges the code so that the
function is split in two and either uses one path or the other, and
always relies on pool_free_nocache() in case we don't want to store the
object. This way there will be a common path with the variant not using
the shared cache.

The patch is better viewed using git show -b since a whole block got
reindented.

It's worth noting that there is a tiny difference now in the local cache
usage measurement, as the decrement of "used" used to be performed
before checking for pool_is_crowded() instead of being done after. This
used to result in always one less object being kept in the cache than
what was configured in minavail. The rearrangement of the code aligns it
with other call places.
---
 include/haproxy/pool.h | 28 ++++++++++++++--------------
 1 file changed, 14 insertions(+), 14 deletions(-)

diff --git a/include/haproxy/pool.h b/include/haproxy/pool.h
index c899f7766..2a6fa9d42 100644
--- a/include/haproxy/pool.h
+++ b/include/haproxy/pool.h
@@ -142,22 +142,22 @@ static inline void pool_put_to_shared_cache(struct pool_head *pool, void *ptr)
 {
 	void **free_list;
 
-	_HA_ATOMIC_DEC(&pool->used);
-	if (unlikely(pool_is_crowded(pool))) {
-		pool_put_to_os(pool, ptr);
-	} else {
-		free_list = _HA_ATOMIC_LOAD(&pool->free_list);
-		do {
-			while (unlikely(free_list == POOL_BUSY)) {
-				__ha_cpu_relax();
-				free_list = _HA_ATOMIC_LOAD(&pool->free_list);
-			}
-			_HA_ATOMIC_STORE((void **)ptr, (void *)free_list);
-			__ha_barrier_atomic_store();
-		} while (!_HA_ATOMIC_CAS(&pool->free_list, &free_list, ptr));
-		__ha_barrier_atomic_store();
+	if (unlikely(pool_is_crowded(pool))) {
+		pool_free_nocache(pool, ptr);
+		return;
 	}
+
+	_HA_ATOMIC_DEC(&pool->used);
+	free_list = _HA_ATOMIC_LOAD(&pool->free_list);
+	do {
+		while (unlikely(free_list == POOL_BUSY)) {
+			__ha_cpu_relax();
+			free_list = _HA_ATOMIC_LOAD(&pool->free_list);
+		}
+		_HA_ATOMIC_STORE((void **)ptr, (void *)free_list);
+		__ha_barrier_atomic_store();
+	} while (!_HA_ATOMIC_CAS(&pool->free_list, &free_list, ptr));
+	__ha_barrier_atomic_store();
 	swrate_add(&pool->needed_avg, POOL_AVG_SAMPLES, pool->used);
 }