From facfad2b64518e2e43ed20ae0bccce4e77503f1b Mon Sep 17 00:00:00 2001
From: Willy Tarreau
Date: Wed, 17 Aug 2022 09:12:53 +0200
Subject: [PATCH] MINOR: pool/memprof: report pool alloc/free in memory
 profiling

Pools are used so effectively that it becomes difficult to profile their
usage via the regular memory profiling. Let's add new entries for pools
there, named "p_alloc" and "p_free", which correspond to pool_alloc() and
pool_free(). Ideally it would be nice to only report those that fail
cache lookups, but that's complicated, particularly on the free() path,
since free lists are released in clusters to the shared pools.

It's worth noting that the alloc_tot/free_tot fields can easily be
determined by multiplying alloc_calls/free_calls by the pool's size, and
could be better used to store a pointer to the pool itself. However, that
would require significant changes down the code that sorts the output.

If this were to cause a measurable slowdown, an alternate approach could
consist in using a different value of USE_MEMORY_PROFILING to enable
pool profiling. Also, this profiler doesn't depend on intercepting the
regular malloc functions, so we could also imagine enabling either of
them alone, or both.

Tests show that the CPU overhead on QUIC (which is already an extremely
intensive user of pools) jumps from ~7% to ~10%. This is quite
acceptable in most deployments.
---
 include/haproxy/activity-t.h |  2 ++
 src/activity.c               |  2 +-
 src/pool.c                   | 19 +++++++++++++++++++
 3 files changed, 22 insertions(+), 1 deletion(-)

diff --git a/include/haproxy/activity-t.h b/include/haproxy/activity-t.h
index f21f8057c..0f5210f69 100644
--- a/include/haproxy/activity-t.h
+++ b/include/haproxy/activity-t.h
@@ -48,6 +48,8 @@ enum memprof_method {
 	MEMPROF_METH_CALLOC,
 	MEMPROF_METH_REALLOC,
 	MEMPROF_METH_FREE,
+	MEMPROF_METH_P_ALLOC, // pool_alloc()
+	MEMPROF_METH_P_FREE,  // pool_free()
 	MEMPROF_METH_METHODS /* count, must be last */
 };
 
diff --git a/src/activity.c b/src/activity.c
index c020f24d2..ee5577102 100644
--- a/src/activity.c
+++ b/src/activity.c
@@ -53,7 +53,7 @@ struct sched_activity sched_activity[256] __attribute__((aligned(64))) = { };
 
 #ifdef USE_MEMORY_PROFILING
 static const char *const memprof_methods[MEMPROF_METH_METHODS] = {
-	"unknown", "malloc", "calloc", "realloc", "free",
+	"unknown", "malloc", "calloc", "realloc", "free", "p_alloc", "p_free",
 };
 
 /* last one is for hash collisions ("others") and has no caller address */
diff --git a/src/pool.c b/src/pool.c
index 973f8a0f2..6f3e41971 100644
--- a/src/pool.c
+++ b/src/pool.c
@@ -727,6 +727,15 @@ void *__pool_alloc(struct pool_head *pool, unsigned int flags)
 		p = pool_alloc_nocache(pool);
 
 	if (likely(p)) {
+#ifdef USE_MEMORY_PROFILING
+		if (unlikely(profiling & HA_PROF_MEMORY)) {
+			struct memprof_stats *bin;
+
+			bin = memprof_get_bin(__builtin_return_address(0), MEMPROF_METH_P_ALLOC);
+			_HA_ATOMIC_ADD(&bin->alloc_calls, 1);
+			_HA_ATOMIC_ADD(&bin->alloc_tot, pool->size);
+		}
+#endif
 		if (unlikely(flags & POOL_F_MUST_ZERO))
 			memset(p, 0, pool->size);
 		else if (unlikely(!(flags & POOL_F_NO_POISON) && (pool_debugging & POOL_DBG_POISON)))
@@ -747,6 +756,16 @@ void __pool_free(struct pool_head *pool, void *ptr)
 	POOL_DEBUG_CHECK_MARK(pool, ptr);
 	POOL_DEBUG_RESET_MARK(pool, ptr);
 
+#ifdef USE_MEMORY_PROFILING
+	if (unlikely(profiling & HA_PROF_MEMORY) && ptr) {
+		struct memprof_stats *bin;
+
+		bin = memprof_get_bin(__builtin_return_address(0), MEMPROF_METH_P_FREE);
+		_HA_ATOMIC_ADD(&bin->free_calls, 1);
+		_HA_ATOMIC_ADD(&bin->free_tot, pool->size);
+	}
+#endif
+
 	if (unlikely(pool_debugging & POOL_DBG_NO_CACHE)) {
 		pool_free_nocache(pool, ptr);
 		return;
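
Illustration (not part of the patch): since the p_alloc path above adds
pool->size to alloc_tot on each recorded call, a consumer of the profiling
bins can recover a pool's object size by dividing alloc_tot by alloc_calls,
as the commit message points out. The sketch below shows only that
derivation; the memprof_stats_view struct and the p_alloc_obj_size helper
are assumed names for illustration, and only the alloc_calls/alloc_tot
accounting comes from the diff itself.

#include <stddef.h>
#include <stdint.h>

/* Simplified, hypothetical view of a profiling bin: the field names follow
 * the alloc_calls/alloc_tot updates visible in the diff, but this struct is
 * not HAProxy's real memprof_stats definition.
 */
struct memprof_stats_view {
	uint64_t alloc_calls;  /* number of pool_alloc() calls recorded */
	uint64_t alloc_tot;    /* sum of pool->size over those calls    */
};

/* Hypothetical helper: recover the pool's object size from a "p_alloc" bin.
 * This works because alloc_tot grows by exactly pool->size per call.
 */
static inline size_t p_alloc_obj_size(const struct memprof_stats_view *bin)
{
	return bin->alloc_calls ? (size_t)(bin->alloc_tot / bin->alloc_calls) : 0;
}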