BUG/MINOR: pools: Fix the dump of pools info to deal with buffers limitations

The "show pools" CLI command was not designed to dump information exceeding
the size of a buffer. But there is now much more pools than few years ago
and when detailed information are dumped, we exceeds the buffer limit and
the output is truncated.

To fix the issue, the command is refactored to be able to stream its
result. To do so, the array containing the pools info is now part of the
command context and is dynamically allocated. A dedicated function was
created to fill it. In addition, the index of the next pool to dump is also
saved in the command context to properly handle resumption. Finally, global
information about pools is stored in the command context as well, for
convenience.

This patch should fix issue #3067. It must be backported to 3.2. On older
releases, the buffer limit is never reached.
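
In practice, the streaming works as a resumable loop in the CLI I/O handler.
Below is a minimal sketch of that loop, simplified from the diff further down
(the real format string and the stderr fallback are omitted): when
applet_putchk() reports a full output buffer, the current index is saved in
the command context and the handler returns 0 so it is called again later.

    for (i = ctx->pool_idx; i < ctx->nbpools && i < ctx->maxcnt; i++) {
            chunk_printf(&trash, "  - Pool %s ...\n", ctx->pool_info[i].entry->name);
            if (applet_putchk(appctx, &trash) == -1) {
                    ctx->pool_idx = i; /* buffer full: restart from this pool on the next call */
                    return 0;          /* not finished, the I/O handler will be called again */
            }
    }
    return 1; /* dump completed */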
Author: Christopher Faulet
Date:   2025-09-08 15:19:04 +02:00
parent d75718af14
commit 982805e6a3

@@ -105,6 +105,12 @@ struct show_pools_ctx {
 	char *prefix; /* if non-null, match this prefix name for the pool */
 	int how; /* bits 0..3: 0=no sort, 1=by name, 2=by item size, 3=by total alloc */
 	int maxcnt; /* 0=no limit, other=max number of output entries */
+	int pool_idx; /* Index of the pool to restart the dump (-1 means the header must be dumped) */
+	unsigned int nbpools; /* total number of pools */
+	unsigned long long allocated; /* total number of bytes allocated */
+	unsigned long long used; /* total number of bytes used */
+	unsigned long long cached_bytes; /* total number of bytes cached */
+	struct pool_dump_info *pool_info; /* array containing info about all pools (contains nbpools entries) */
 };
 
 static int mem_fail_rate __read_mostly = 0;
@@ -1274,33 +1280,31 @@ static int cmp_dump_pools_usage(const void *a, const void *b)
 	return 0;
 }
 
-/* will not dump more than this number of entries. Anything beyond this will
- * likely not fit into a regular output buffer anyway.
- */
-#define POOLS_MAX_DUMPED_ENTRIES 1024
-
-/* This function dumps memory usage information into the trash buffer.
- * It may sort by a criterion if bits 0..3 of <how> are non-zero, and
- * limit the number of output lines if <max> is non-zero. It may limit
- * only to pools whose names start with <pfx> if <pfx> is non-null.
- */
-void dump_pools_to_trash(int how, int max, const char *pfx)
+/* This function fills the show_pools_ctx context by looping on pools. It will
+ * count the number of pools and allocate the array to store info about each
+ * pool. It returns 0 on success and -1 on error.
+ *
+ * It may sort pools by a criterion if bits 0..3 of <ctx->how> are non-zero, and
+ * limit the number of output lines if <ctx->maxcnt> is non-zero. It may limit
+ * only to pools whose names start with <ctx->prefix> if not null.
+ */
+static int get_show_pools_info(struct show_pools_ctx *ctx)
 {
-	struct pool_dump_info pool_info[POOLS_MAX_DUMPED_ENTRIES];
 	struct pool_head *entry;
-	unsigned long long allocated, used;
-	int nbpools, i;
-	unsigned long long cached_bytes = 0;
-	uint cached = 0;
-	uint alloc_items;
-	int by_what = how & 0xF; // bits 0..3 = sorting criterion
-	int detailed = !!(how & 0x10); // print details
+	int by_what = (ctx->how & 0xF); // bits 0..3 = sorting criterion
+	int n = 0;
 
-	allocated = used = nbpools = 0;
+	ctx->nbpools = 0;
+	list_for_each_entry(entry, &pools, list)
+		ctx->nbpools++;
+
+	ctx->pool_info = calloc(ctx->nbpools, sizeof(*ctx->pool_info));
+	if (ctx->pool_info == NULL)
+		return -1;
 
 	list_for_each_entry(entry, &pools, list) {
-		if (nbpools >= POOLS_MAX_DUMPED_ENTRIES)
-			break;
+		uint cached = 0;
+		uint alloc_items;
 
 		alloc_items = pool_allocated(entry);
 		/* do not dump unused entries when sorting by usage */
@@ -1308,75 +1312,117 @@ void dump_pools_to_trash(int how, int max, const char *pfx)
 			continue;
 
 		/* verify the pool name if a prefix is requested */
-		if (pfx && strncmp(entry->name, pfx, strlen(pfx)) != 0)
+		if (ctx->prefix && strncmp(entry->name, ctx->prefix, strlen(ctx->prefix)) != 0)
 			continue;
 
 		if (!(pool_debugging & POOL_DBG_NO_CACHE)) {
-			for (cached = i = 0; i < global.nbthread; i++)
+			int i;
+
+			for (i = 0; i < global.nbthread; i++)
 				cached += entry->cache[i].count;
 		}
-		pool_info[nbpools].entry = entry;
-		pool_info[nbpools].alloc_items = alloc_items;
-		pool_info[nbpools].alloc_bytes = (ulong)entry->size * alloc_items;
-		pool_info[nbpools].used_items = pool_used(entry);
-		pool_info[nbpools].cached_items = cached;
-		pool_info[nbpools].need_avg = swrate_avg(pool_needed_avg(entry), POOL_AVG_SAMPLES);
-		pool_info[nbpools].failed_items = pool_failed(entry);
-		nbpools++;
+
+		ctx->pool_info[n].entry = entry;
+		ctx->pool_info[n].alloc_items = alloc_items;
+		ctx->pool_info[n].alloc_bytes = (ulong)entry->size * alloc_items;
+		ctx->pool_info[n].used_items = pool_used(entry);
+		ctx->pool_info[n].cached_items = cached;
+		ctx->pool_info[n].need_avg = swrate_avg(pool_needed_avg(entry), POOL_AVG_SAMPLES);
+		ctx->pool_info[n].failed_items = pool_failed(entry);
+
+		ctx->cached_bytes += ctx->pool_info[n].cached_items * (ulong)ctx->pool_info[n].entry->size;
+		ctx->allocated += ctx->pool_info[n].alloc_items * (ulong)ctx->pool_info[n].entry->size;
+		ctx->used += ctx->pool_info[n].used_items * (ulong)ctx->pool_info[n].entry->size;
+
+		n++;
 	}
 
 	if (by_what == 1) /* sort by name */
-		qsort(pool_info, nbpools, sizeof(pool_info[0]), cmp_dump_pools_name);
+		qsort(ctx->pool_info, ctx->nbpools, sizeof(*ctx->pool_info), cmp_dump_pools_name);
 	else if (by_what == 2) /* sort by item size */
-		qsort(pool_info, nbpools, sizeof(pool_info[0]), cmp_dump_pools_size);
+		qsort(ctx->pool_info, ctx->nbpools, sizeof(*ctx->pool_info), cmp_dump_pools_size);
 	else if (by_what == 3) /* sort by total usage */
-		qsort(pool_info, nbpools, sizeof(pool_info[0]), cmp_dump_pools_usage);
+		qsort(ctx->pool_info, ctx->nbpools, sizeof(*ctx->pool_info), cmp_dump_pools_usage);
 
-	chunk_printf(&trash, "Dumping pools usage");
-	if (!max || max >= POOLS_MAX_DUMPED_ENTRIES)
-		max = POOLS_MAX_DUMPED_ENTRIES;
-	if (nbpools >= max)
-		chunk_appendf(&trash, " (limited to the first %u entries)", max);
-	chunk_appendf(&trash, ". Use SIGQUIT to flush them.\n");
-
-	for (i = 0; i < nbpools && i < max; i++) {
-		chunk_appendf(&trash, " - Pool %s (%u bytes/%u) : %lu allocated (%lu bytes), %lu used"
+	if (!ctx->maxcnt || ctx->maxcnt > ctx->nbpools)
+		ctx->maxcnt = ctx->nbpools;
+	ctx->pool_idx = -1;
+	return 0;
+}
+
+/* This function dumps information about pools found in the context <ctx>. If
+ * <appctx> is NULL, it dumps it on stderr. Otherwise the applet is used.
+ */
+int dump_pools_info(struct appctx *appctx, struct show_pools_ctx *ctx)
+{
+	int i;
+
+	if (ctx->pool_idx == -1) {
+		chunk_printf(&trash, "Dumping pools usage");
+		if (ctx->nbpools >= ctx->maxcnt)
+			chunk_appendf(&trash, " (limited to the first %u entries)", ctx->maxcnt);
+		chunk_appendf(&trash, ". Use SIGQUIT to flush them.\n");
+		if (!appctx)
+			qfprintf(stderr, "%s", trash.area);
+		else if (applet_putchk(appctx, &trash) == -1)
+			return 0;
+		ctx->pool_idx = 0;
+	}
+
+	for (i = ctx->pool_idx; i < ctx->nbpools && i < ctx->maxcnt; i++) {
+		chunk_printf(&trash, " - Pool %s (%u bytes/%u) : %lu allocated (%lu bytes), %lu used"
 			      " (~%lu by thread caches)"
 			      ", needed_avg %lu, %lu failures, %u users, @%p%s\n",
-			      pool_info[i].entry->name, pool_info[i].entry->size, pool_info[i].entry->align,
-			      pool_info[i].alloc_items, pool_info[i].alloc_bytes,
-			      pool_info[i].used_items, pool_info[i].cached_items,
-			      pool_info[i].need_avg, pool_info[i].failed_items,
-			      pool_info[i].entry->users, pool_info[i].entry,
-			      (pool_info[i].entry->flags & MEM_F_SHARED) ? " [SHARED]" : "");
+			      ctx->pool_info[i].entry->name, ctx->pool_info[i].entry->size, ctx->pool_info[i].entry->align,
+			      ctx->pool_info[i].alloc_items, ctx->pool_info[i].alloc_bytes,
+			      ctx->pool_info[i].used_items, ctx->pool_info[i].cached_items,
+			      ctx->pool_info[i].need_avg, ctx->pool_info[i].failed_items,
+			      ctx->pool_info[i].entry->users, ctx->pool_info[i].entry,
+			      (ctx->pool_info[i].entry->flags & MEM_F_SHARED) ? " [SHARED]" : "");
 
-		cached_bytes += pool_info[i].cached_items * (ulong)pool_info[i].entry->size;
-		allocated += pool_info[i].alloc_items * (ulong)pool_info[i].entry->size;
-		used += pool_info[i].used_items * (ulong)pool_info[i].entry->size;
-
-		if (detailed) {
+		if (ctx->how & 0x10) { // print details
 			struct pool_registration *reg;
 
-			list_for_each_entry(reg, &pool_info[i].entry->regs, list) {
+			list_for_each_entry(reg, &ctx->pool_info[i].entry->regs, list) {
 				chunk_appendf(&trash, " > %-12s: size=%u flags=%#x align=%u", reg->name, reg->size, reg->flags, reg->align);
 				if (reg->file && reg->line)
 					chunk_appendf(&trash, " [%s:%u]", reg->file, reg->line);
 				chunk_appendf(&trash, "\n");
 			}
 		}
+
+		if (!appctx)
+			qfprintf(stderr, "%s", trash.area);
+		else if (applet_putchk(appctx, &trash) == -1) {
+			ctx->pool_idx = i;
+			return 0;
+		}
 	}
-	chunk_appendf(&trash, "Total: %d pools, %llu bytes allocated, %llu used"
+
+	chunk_printf(&trash, "Total: %d pools, %llu bytes allocated, %llu used"
 		      " (~%llu by thread caches)"
 		      ".\n",
-		      nbpools, allocated, used, cached_bytes
+		      ctx->nbpools, ctx->allocated, ctx->used, ctx->cached_bytes
 		      );
+
+	if (!appctx)
+		qfprintf(stderr, "%s", trash.area);
+	else if (applet_putchk(appctx, &trash) == -1)
+		return 0;
+
+	return 1;
 }
 
 /* Dump statistics on pools usage. */
 void dump_pools(void)
 {
-	dump_pools_to_trash(0, 0, NULL);
-	qfprintf(stderr, "%s", trash.area);
+	struct show_pools_ctx ctx;
+
+	memset(&ctx, 0, sizeof(ctx));
+	if (get_show_pools_info(&ctx) == -1) {
+		qfprintf(stderr, "Failed to get info about pools.\n");
+	}
+	dump_pools_info(NULL, &ctx);
 }
 
 /* This function returns the total number of failed pool allocations */
@@ -1546,6 +1592,9 @@ static int cli_parse_show_pools(char **args, char *payload, struct appctx *appctx
 		else
 			return cli_err(appctx, "Expects either 'byname', 'bysize', 'byusage', 'match <pfx>', 'detailed', or a max number of output lines.\n");
 	}
 
+	if (get_show_pools_info(ctx) == -1)
+		return cli_err(appctx, "Failed to get info about pools.\n");
+
 	return 0;
 }
@@ -1554,21 +1603,19 @@ static void cli_release_show_pools(struct appctx *appctx)
 {
 	struct show_pools_ctx *ctx = appctx->svcctx;
 
+	ha_free(&ctx->pool_info);
 	ha_free(&ctx->prefix);
 }
 
 /* This function dumps memory usage information onto the stream connector's
  * read buffer. It returns 0 as long as it does not complete, non-zero upon
- * completion. No state is used.
+ * completion.
  */
 static int cli_io_handler_dump_pools(struct appctx *appctx)
 {
 	struct show_pools_ctx *ctx = appctx->svcctx;
 
-	dump_pools_to_trash(ctx->how, ctx->maxcnt, ctx->prefix);
-	if (applet_putchk(appctx, &trash) == -1)
-		return 0;
-	return 1;
+	return dump_pools_info(appctx, ctx);
 }
 
 /* callback used to create early pool <name> of size <size> and store the