MINOR: pools: move the failed allocation counter over a few buckets

The failed allocation counter cannot depend on a pointer, but since it's
a perpetually increasing counter and not a gauge, we don't care where
it's incremented. Thus instead we're hashing on the TID. There's no
contention there anyway, but it's better not to waste the space in
the pool's head, and to keep this counter with the other per-bucket counters.
This commit is contained in:
Willy Tarreau 2023-07-24 16:38:09 +02:00
parent da6999f839
commit 8a0b5f783b
3 changed files with 15 additions and 4 deletions

View File

@ -122,7 +122,6 @@ struct pool_head {
THREAD_ALIGN(64);
struct pool_item *free_list; /* list of free shared objects */
unsigned int failed; /* failed allocations */
/* these entries depend on the pointer value, they're used to reduce
* the contention on fast-changing values. The alignment here is
@ -133,6 +132,7 @@ struct pool_head {
unsigned int allocated; /* how many chunks have been allocated */
unsigned int used; /* how many chunks are currently in use */
unsigned int needed_avg;/* floating indicator between used and allocated */
unsigned int failed; /* failed allocations (indexed by hash of TID) */
} buckets[CONFIG_HAP_POOL_BUCKETS];
struct pool_cache_head cache[MAX_THREADS] THREAD_ALIGNED(64); /* pool caches */

View File

@ -171,6 +171,17 @@ static inline uint pool_needed_avg(const struct pool_head *pool)
return ret;
}
/* Returns the cumulative count of failed allocations for <pool>, summed
 * over every bucket. The per-bucket counters are only ever incremented,
 * so reading them one by one without a global lock is safe; each read is
 * done atomically to avoid torn loads.
 */
static inline uint pool_failed(const struct pool_head *pool)
{
	uint total = 0;
	int idx;

	for (idx = 0; idx < CONFIG_HAP_POOL_BUCKETS; idx++)
		total += HA_ATOMIC_LOAD(&pool->buckets[idx].failed);

	return total;
}
/* Returns the max number of entries that may be brought back to the pool
* before it's considered as full. Note that it is only usable for releasing
* objects, hence the function assumes that no more than ->used entries will

View File

@ -395,7 +395,7 @@ void *pool_get_from_os_noinc(struct pool_head *pool)
ptr = pool_alloc_area(pool->alloc_sz);
if (ptr)
return ptr;
_HA_ATOMIC_INC(&pool->failed);
_HA_ATOMIC_INC(&pool->buckets[pool_tbucket()].failed);
}
activity[tid].pool_fail++;
return NULL;
@ -985,7 +985,7 @@ void dump_pools_to_trash(int by_what, int max, const char *pfx)
pool_info[nbpools].used_items = pool_used(entry);
pool_info[nbpools].cached_items = cached;
pool_info[nbpools].need_avg = swrate_avg(pool_needed_avg(entry), POOL_AVG_SAMPLES);
pool_info[nbpools].failed_items = entry->failed;
pool_info[nbpools].failed_items = pool_failed(entry);
nbpools++;
}
@ -1040,7 +1040,7 @@ int pool_total_failures()
int failed = 0;
list_for_each_entry(entry, &pools, list)
failed += entry->failed;
failed += pool_failed(entry);
return failed;
}