mirror of
https://git.haproxy.org/git/haproxy.git/
synced 2025-08-08 08:07:10 +02:00
BUG/MINOR: pools/threads: don't ignore DEBUG_UAF on double-word CAS capable archs
Since commit cf975d4
("MINOR: pools/threads: Implement lockless memory
pools."), we support lockless pools. However the parts dedicated to
detecting use-after-free are not present in the lockless implementation,
making DEBUG_UAF ineffective in that configuration.
The present patch sets a new define CONFIG_HAP_LOCKLESS_POOLS when such
a compatible architecture is detected, and when pool debugging is not
requested, then makes use of this everywhere in pools and buffers
functions. This way enabling DEBUG_UAF will automatically disable the
lockless version.
No backport is needed as this is purely 1.9-dev.
This commit is contained in:
parent
5e64286bab
commit
f161d0f51e
@ -735,7 +735,7 @@ static inline struct buffer *b_alloc_margin(struct buffer **buf, int margin)
|
|||||||
return *buf;
|
return *buf;
|
||||||
|
|
||||||
*buf = &buf_wanted;
|
*buf = &buf_wanted;
|
||||||
#ifndef HA_HAVE_CAS_DW
|
#ifndef CONFIG_HAP_LOCKLESS_POOLS
|
||||||
HA_SPIN_LOCK(POOL_LOCK, &pool_head_buffer->lock);
|
HA_SPIN_LOCK(POOL_LOCK, &pool_head_buffer->lock);
|
||||||
#endif
|
#endif
|
||||||
|
|
||||||
@ -743,7 +743,7 @@ static inline struct buffer *b_alloc_margin(struct buffer **buf, int margin)
|
|||||||
if ((pool_head_buffer->allocated - pool_head_buffer->used) > margin) {
|
if ((pool_head_buffer->allocated - pool_head_buffer->used) > margin) {
|
||||||
b = __pool_get_first(pool_head_buffer);
|
b = __pool_get_first(pool_head_buffer);
|
||||||
if (likely(b)) {
|
if (likely(b)) {
|
||||||
#ifndef HA_HAVE_CAS_DW
|
#ifndef CONFIG_HAP_LOCKLESS_POOLS
|
||||||
HA_SPIN_UNLOCK(POOL_LOCK, &pool_head_buffer->lock);
|
HA_SPIN_UNLOCK(POOL_LOCK, &pool_head_buffer->lock);
|
||||||
#endif
|
#endif
|
||||||
b->size = pool_head_buffer->size - sizeof(struct buffer);
|
b->size = pool_head_buffer->size - sizeof(struct buffer);
|
||||||
@ -756,7 +756,7 @@ static inline struct buffer *b_alloc_margin(struct buffer **buf, int margin)
|
|||||||
/* slow path, uses malloc() */
|
/* slow path, uses malloc() */
|
||||||
b = __pool_refill_alloc(pool_head_buffer, margin);
|
b = __pool_refill_alloc(pool_head_buffer, margin);
|
||||||
|
|
||||||
#ifndef HA_HAVE_CAS_DW
|
#ifndef CONFIG_HAP_LOCKLESS_POOLS
|
||||||
HA_SPIN_UNLOCK(POOL_LOCK, &pool_head_buffer->lock);
|
HA_SPIN_UNLOCK(POOL_LOCK, &pool_head_buffer->lock);
|
||||||
#endif
|
#endif
|
||||||
|
|
||||||
|
@ -47,6 +47,13 @@
|
|||||||
#define THREAD_LOCAL
|
#define THREAD_LOCAL
|
||||||
#endif
|
#endif
|
||||||
|
|
||||||
|
/* On architectures supporting threads and double-word CAS, we can implement
|
||||||
|
* lock-less memory pools. This isn't supported for debugging modes however.
|
||||||
|
*/
|
||||||
|
#if !defined(DEBUG_NO_LOCKLESS_POOLS) && defined(USE_THREAD) && defined(HA_HAVE_CAS_DW) && !defined(DEBUG_UAF)
|
||||||
|
#define CONFIG_HAP_LOCKLESS_POOLS
|
||||||
|
#endif
|
||||||
|
|
||||||
/* CONFIG_HAP_INLINE_FD_SET
|
/* CONFIG_HAP_INLINE_FD_SET
|
||||||
* This makes use of inline FD_* macros instead of calling equivalent
|
* This makes use of inline FD_* macros instead of calling equivalent
|
||||||
* functions. Benchmarks on a Pentium-M show that using functions is
|
* functions. Benchmarks on a Pentium-M show that using functions is
|
||||||
|
@ -48,7 +48,7 @@
|
|||||||
#define POOL_LINK(pool, item) ((void **)(item))
|
#define POOL_LINK(pool, item) ((void **)(item))
|
||||||
#endif
|
#endif
|
||||||
|
|
||||||
#ifdef HA_HAVE_CAS_DW
|
#ifdef CONFIG_HAP_LOCKLESS_POOLS
|
||||||
struct pool_free_list {
|
struct pool_free_list {
|
||||||
void **free_list;
|
void **free_list;
|
||||||
uintptr_t seq;
|
uintptr_t seq;
|
||||||
@ -57,7 +57,7 @@ struct pool_free_list {
|
|||||||
|
|
||||||
struct pool_head {
|
struct pool_head {
|
||||||
void **free_list;
|
void **free_list;
|
||||||
#ifdef HA_HAVE_CAS_DW
|
#ifdef CONFIG_HAP_LOCKLESS_POOLS
|
||||||
uintptr_t seq;
|
uintptr_t seq;
|
||||||
#else
|
#else
|
||||||
__decl_hathreads(HA_SPINLOCK_T lock); /* the spin lock */
|
__decl_hathreads(HA_SPINLOCK_T lock); /* the spin lock */
|
||||||
@ -123,7 +123,7 @@ void pool_gc(struct pool_head *pool_ctx);
|
|||||||
*/
|
*/
|
||||||
void *pool_destroy(struct pool_head *pool);
|
void *pool_destroy(struct pool_head *pool);
|
||||||
|
|
||||||
#ifdef HA_HAVE_CAS_DW
|
#ifdef CONFIG_HAP_LOCKLESS_POOLS
|
||||||
/*
|
/*
|
||||||
* Returns a pointer to type <type> taken from the pool <pool_type> if
|
* Returns a pointer to type <type> taken from the pool <pool_type> if
|
||||||
* available, otherwise returns NULL. No malloc() is attempted, and poisonning
|
* available, otherwise returns NULL. No malloc() is attempted, and poisonning
|
||||||
@ -226,7 +226,7 @@ static inline void pool_free(struct pool_head *pool, void *ptr)
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
#else
|
#else /* CONFIG_HAP_LOCKLESS_POOLS */
|
||||||
/*
|
/*
|
||||||
* Returns a pointer to type <type> taken from the pool <pool_type> if
|
* Returns a pointer to type <type> taken from the pool <pool_type> if
|
||||||
* available, otherwise returns NULL. No malloc() is attempted, and poisonning
|
* available, otherwise returns NULL. No malloc() is attempted, and poisonning
|
||||||
@ -377,7 +377,7 @@ static inline void pool_free(struct pool_head *pool, void *ptr)
|
|||||||
HA_SPIN_UNLOCK(POOL_LOCK, &pool->lock);
|
HA_SPIN_UNLOCK(POOL_LOCK, &pool->lock);
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
#endif /* HA_HAVE_CAS_DW */
|
#endif /* CONFIG_HAP_LOCKLESS_POOLS */
|
||||||
#endif /* _COMMON_MEMORY_H */
|
#endif /* _COMMON_MEMORY_H */
|
||||||
|
|
||||||
/*
|
/*
|
||||||
|
12
src/memory.c
12
src/memory.c
@ -93,13 +93,13 @@ struct pool_head *create_pool(char *name, unsigned int size, unsigned int flags)
|
|||||||
LIST_ADDQ(start, &pool->list);
|
LIST_ADDQ(start, &pool->list);
|
||||||
}
|
}
|
||||||
pool->users++;
|
pool->users++;
|
||||||
#ifndef HA_HAVE_CAS_DW
|
#ifndef CONFIG_HAP_LOCKLESS_POOLS
|
||||||
HA_SPIN_INIT(&pool->lock);
|
HA_SPIN_INIT(&pool->lock);
|
||||||
#endif
|
#endif
|
||||||
return pool;
|
return pool;
|
||||||
}
|
}
|
||||||
|
|
||||||
#ifdef HA_HAVE_CAS_DW
|
#ifdef CONFIG_HAP_LOCKLESS_POOLS
|
||||||
/* Allocates new entries for pool <pool> until there are at least <avail> + 1
|
/* Allocates new entries for pool <pool> until there are at least <avail> + 1
|
||||||
* available, then returns the last one for immediate use, so that at least
|
* available, then returns the last one for immediate use, so that at least
|
||||||
* <avail> are left available in the pool upon return. NULL is returned if the
|
* <avail> are left available in the pool upon return. NULL is returned if the
|
||||||
@ -221,7 +221,7 @@ void pool_gc(struct pool_head *pool_ctx)
|
|||||||
|
|
||||||
HA_ATOMIC_STORE(&recurse, 0);
|
HA_ATOMIC_STORE(&recurse, 0);
|
||||||
}
|
}
|
||||||
#else
|
#else /* CONFIG_HAP_LOCKLESS_POOLS */
|
||||||
|
|
||||||
/* Allocates new entries for pool <pool> until there are at least <avail> + 1
|
/* Allocates new entries for pool <pool> until there are at least <avail> + 1
|
||||||
* available, then returns the last one for immediate use, so that at least
|
* available, then returns the last one for immediate use, so that at least
|
||||||
@ -352,7 +352,7 @@ void *pool_destroy(struct pool_head *pool)
|
|||||||
pool->users--;
|
pool->users--;
|
||||||
if (!pool->users) {
|
if (!pool->users) {
|
||||||
LIST_DEL(&pool->list);
|
LIST_DEL(&pool->list);
|
||||||
#ifndef HA_HAVE_CAS_DW
|
#ifndef CONFIG_HAP_LOCKLESS_POOLS
|
||||||
HA_SPIN_DESTROY(&pool->lock);
|
HA_SPIN_DESTROY(&pool->lock);
|
||||||
#endif
|
#endif
|
||||||
free(pool);
|
free(pool);
|
||||||
@ -371,7 +371,7 @@ void dump_pools_to_trash()
|
|||||||
allocated = used = nbpools = 0;
|
allocated = used = nbpools = 0;
|
||||||
chunk_printf(&trash, "Dumping pools usage. Use SIGQUIT to flush them.\n");
|
chunk_printf(&trash, "Dumping pools usage. Use SIGQUIT to flush them.\n");
|
||||||
list_for_each_entry(entry, &pools, list) {
|
list_for_each_entry(entry, &pools, list) {
|
||||||
#ifndef HA_HAVE_CAS_DW
|
#ifndef CONFIG_HAP_LOCKLESS_POOLS
|
||||||
HA_SPIN_LOCK(POOL_LOCK, &entry->lock);
|
HA_SPIN_LOCK(POOL_LOCK, &entry->lock);
|
||||||
#endif
|
#endif
|
||||||
chunk_appendf(&trash, " - Pool %s (%d bytes) : %d allocated (%u bytes), %d used, %d failures, %d users%s\n",
|
chunk_appendf(&trash, " - Pool %s (%d bytes) : %d allocated (%u bytes), %d used, %d failures, %d users%s\n",
|
||||||
@ -382,7 +382,7 @@ void dump_pools_to_trash()
|
|||||||
allocated += entry->allocated * entry->size;
|
allocated += entry->allocated * entry->size;
|
||||||
used += entry->used * entry->size;
|
used += entry->used * entry->size;
|
||||||
nbpools++;
|
nbpools++;
|
||||||
#ifndef HA_HAVE_CAS_DW
|
#ifndef CONFIG_HAP_LOCKLESS_POOLS
|
||||||
HA_SPIN_UNLOCK(POOL_LOCK, &entry->lock);
|
HA_SPIN_UNLOCK(POOL_LOCK, &entry->lock);
|
||||||
#endif
|
#endif
|
||||||
}
|
}
|
||||||
|
Loading…
Reference in New Issue
Block a user