mirror of
https://git.haproxy.org/git/haproxy.git/
synced 2026-02-05 01:11:51 +01:00
CLEANUP: pools: use the regular lock for the flush operation on lockless pools
Commit 04f5fe87d3d introduced an rwlock in the pools to deal with the risk that pool_flush() dereferences an area being freed, and commit 899fb8abdcd turned it into a spinlock. The pools already contain a spinlock in case of locked pools, so let's use the same one and simplify the code by removing ifdefs. At this point I'm really suspecting that if pool_flush() were instead to rely on __pool_get_first() to pick entries from the pool, the concurrency problem could never happen, since only one user would get a given entry at once and it thus could not be freed by another user. It's not certain this would be faster, however, because of the number of atomic ops needed to retrieve one entry compared to a locked batch.
This commit is contained in:
parent
2f44a59c7f
commit
21072b9480
@ -81,14 +81,15 @@ struct pool_free_list {
|
||||
};
|
||||
#endif
|
||||
|
||||
/* Note below, in case of lockless pools, we still need the lock only for
|
||||
* the flush() operation.
|
||||
*/
|
||||
struct pool_head {
|
||||
void **free_list;
|
||||
#ifdef CONFIG_HAP_LOCKLESS_POOLS
|
||||
uintptr_t seq;
|
||||
HA_SPINLOCK_T flush_lock;
|
||||
#else
|
||||
__decl_hathreads(HA_SPINLOCK_T lock); /* the spin lock */
|
||||
#endif
|
||||
__decl_hathreads(HA_SPINLOCK_T lock); /* the spin lock */
|
||||
unsigned int used; /* how many chunks are currently in use */
|
||||
unsigned int needed_avg;/* floating indicator between used and allocated */
|
||||
unsigned int allocated; /* how many chunks have been allocated */
|
||||
|
||||
@ -139,11 +139,7 @@ struct pool_head *create_pool(char *name, unsigned int size, unsigned int flags)
|
||||
for (thr = 0; thr < MAX_THREADS; thr++)
|
||||
pool_cache[thr][idx].size = size;
|
||||
}
|
||||
#ifndef CONFIG_HAP_LOCKLESS_POOLS
|
||||
HA_SPIN_INIT(&pool->lock);
|
||||
#else
|
||||
HA_SPIN_INIT(&pool->flush_lock);
|
||||
#endif
|
||||
}
|
||||
pool->users++;
|
||||
return pool;
|
||||
@ -227,7 +223,7 @@ void pool_flush(struct pool_head *pool)
|
||||
|
||||
if (!pool)
|
||||
return;
|
||||
HA_SPIN_LOCK(POOL_LOCK, &pool->flush_lock);
|
||||
HA_SPIN_LOCK(POOL_LOCK, &pool->lock);
|
||||
do {
|
||||
cmp.free_list = pool->free_list;
|
||||
cmp.seq = pool->seq;
|
||||
@ -235,7 +231,7 @@ void pool_flush(struct pool_head *pool)
|
||||
new.seq = cmp.seq + 1;
|
||||
} while (!_HA_ATOMIC_DWCAS(&pool->free_list, &cmp, &new));
|
||||
__ha_barrier_atomic_store();
|
||||
HA_SPIN_UNLOCK(POOL_LOCK, &pool->flush_lock);
|
||||
HA_SPIN_UNLOCK(POOL_LOCK, &pool->lock);
|
||||
next = cmp.free_list;
|
||||
while (next) {
|
||||
temp = next;
|
||||
|
||||
Loading…
x
Reference in New Issue
Block a user