mirror of https://git.haproxy.org/git/haproxy.git/
MINOR: pools: replace DEBUG_POOL_TRACING with runtime POOL_DBG_CALLER
This option used to allow storing a pointer to the caller of the last pool_alloc() or pool_free() at the end of the area. Now that we can compute the offsets at runtime, let's check the flag at run time and continue the code simplification. In __pool_alloc() we now always calculate the return address (which is quite cheap), and the POOL_DEBUG_TRACE_CALLER() calls are conditioned on the status of the debugging option.
parent 42705d06b7
commit 0271822f17
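To make the mechanism concrete, here is a minimal standalone sketch of the pattern this series converges on: the caller's return address is always computed, and a runtime flag decides both whether the trailing slot exists and whether it gets written. All names (tiny_pool, tiny_alloc, dbg_flags) are illustrative, not HAProxy's:

#include <stdio.h>
#include <stdlib.h>

/* runtime debug flags, mirroring the pool_debugging bitfield idea */
#define DBG_CALLER 0x00000040u

static unsigned int dbg_flags = DBG_CALLER; /* would normally be set at startup */

struct tiny_pool {
	size_t item_sz;   /* payload size requested by the user */
	size_t alloc_sz;  /* item_sz plus room for the caller pointer, if enabled */
};

/* Allocate one object; when DBG_CALLER is set, record the caller's
 * return address in the extra word reserved past the payload. */
__attribute__((noinline))
static void *tiny_alloc(struct tiny_pool *p)
{
	void *caller = __builtin_return_address(0); /* cheap: one register/stack read */
	char *item = malloc(p->alloc_sz);

	if (item && (dbg_flags & DBG_CALLER))
		*(void **)(item + p->alloc_sz - sizeof(void *)) = caller;
	return item;
}

int main(void)
{
	struct tiny_pool p = { .item_sz = 32 };

	/* reserve the trailing word only when the flag is enabled */
	p.alloc_sz = p.item_sz + ((dbg_flags & DBG_CALLER) ? sizeof(void *) : 0);

	void *obj = tiny_alloc(&p);
	if (obj && (dbg_flags & DBG_CALLER))
		printf("last caller: %p\n",
		       *(void **)((char *)obj + p.alloc_sz - sizeof(void *)));
	free(obj);
	return 0;
}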
@@ -47,6 +47,7 @@
 #define POOL_DBG_INTEGRITY  0x00000008  // perform integrity checks on cache
 #define POOL_DBG_NO_GLOBAL  0x00000010  // disable global pools
 #define POOL_DBG_NO_CACHE   0x00000020  // disable thread-local pool caches
+#define POOL_DBG_CALLER     0x00000040  // trace last caller's location

 /* This is the head of a thread-local cache */
@@ -90,23 +90,17 @@
  * after the end of the area and the optional mark above, which means the
  * end of the allocated array.
  */
-#if defined(DEBUG_POOL_TRACING)
 # define POOL_EXTRA_CALLER (sizeof(void *))
 # define POOL_DEBUG_TRACE_CALLER(pool, item, caller) \
 	do { \
 		typeof(pool) __p = (pool); \
 		typeof(item) __i = (item); \
 		typeof(caller) __c = (caller); \
+		if (likely(!(pool_debugging & POOL_DBG_CALLER))) \
+			break; \
 		*(typeof(caller)*)(((char *)__i) + __p->alloc_sz - sizeof(void*)) = __c; \
 	} while (0)

-#else // DEBUG_POOL_TRACING
-
-# define POOL_EXTRA_CALLER (0)
-# define POOL_DEBUG_TRACE_CALLER(pool, item, caller)   do { } while (0)
-
-#endif
-
 /* poison each newly allocated area with this byte if >= 0 */
 extern int mem_poison_byte;
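The gating added above relies on a property of the do { ... } while (0) macro idiom: a break statement exits the macro body without touching any loop in the caller. A minimal standalone demonstration of that pattern (dbg_flags and TRACE_IF_ENABLED are illustrative names, not HAProxy's):

#include <stdio.h>

static unsigned int dbg_flags; /* 0: tracing disabled */

#define TRACE_IF_ENABLED(msg) do { \
	if (!(dbg_flags & 1)) \
		break; /* leaves the macro body only, not any caller loop */ \
	printf("trace: %s\n", (msg)); \
} while (0)

int main(void)
{
	TRACE_IF_ENABLED("never printed");  /* flag off: break skips the body */
	dbg_flags = 1;
	TRACE_IF_ENABLED("printed once");   /* flag on: body runs */
	return 0;
}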
src/pool.c: 15 changed lines
@@ -53,6 +53,9 @@ uint pool_debugging __read_mostly = /* set of POOL_DBG_* flags */
 #endif
 #ifndef CONFIG_HAP_POOLS
 	POOL_DBG_NO_CACHE |
+#endif
+#if defined(DEBUG_POOL_TRACING)
+	POOL_DBG_CALLER |
 #endif
 	0;

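The `| ... | 0;` construction above is what lets each build-time option contribute one flag to the runtime default set: every #ifdef block ORs in a constant, and the trailing 0 terminates the expression no matter which blocks are compiled in. A small self-contained sketch of the idiom (OPT_A and OPT_B are made-up option names):

#include <stdio.h>

#define OPT_A  /* pretend this build option is enabled */

static unsigned int debug_defaults =
#ifdef OPT_A
	0x01 |
#endif
#ifdef OPT_B
	0x02 |
#endif
	0;     /* terminator: the expression stays valid with no options enabled */

int main(void)
{
	printf("defaults = 0x%02x\n", debug_defaults); /* prints 0x01 */
	return 0;
}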
@@ -204,7 +207,7 @@ struct pool_head *create_pool(char *name, unsigned int size, unsigned int flags)
 	 */
 	extra_mark = POOL_EXTRA_MARK;
-	extra_caller = POOL_EXTRA_CALLER;
+	extra_caller = (pool_debugging & POOL_DBG_CALLER) ? POOL_EXTRA_CALLER : 0;
 	extra = extra_mark + extra_caller;

 	if (!(flags & MEM_F_EXACT)) {
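Since POOL_EXTRA_CALLER is now unconditionally sizeof(void *), this is the point where the runtime flag decides whether each object actually grows by one pointer: disabled deployments pay no per-object overhead. A hedged sketch of the sizing arithmetic with assumed values (the real create_pool() does more, including alignment and pool merging):

#include <stddef.h>
#include <stdio.h>

#define POOL_DBG_CALLER 0x00000040u

int main(void)
{
	unsigned int pool_debugging = POOL_DBG_CALLER; /* runtime debug set */
	size_t size = 32;                              /* requested payload */
	size_t extra_mark = sizeof(void *);            /* mark word, assumed enabled */
	size_t extra_caller = (pool_debugging & POOL_DBG_CALLER) ? sizeof(void *) : 0;

	/* on 64-bit: 48 bytes with caller tracing, 40 without */
	printf("alloc_sz = %zu\n", size + extra_mark + extra_caller);
	return 0;
}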
@@ -679,15 +682,12 @@ void pool_gc(struct pool_head *pool_ctx)
 void *__pool_alloc(struct pool_head *pool, unsigned int flags)
 {
 	void *p = NULL;
-	void *caller = NULL;
+	void *caller = __builtin_return_address(0);

 	if (unlikely(pool_debugging & POOL_DBG_FAIL_ALLOC))
 		if (!(flags & POOL_F_NO_FAIL) && mem_should_fail(pool))
 			return NULL;

-#if defined(DEBUG_POOL_TRACING)
-	caller = __builtin_return_address(0);
-#endif
 	if (likely(!(pool_debugging & POOL_DBG_NO_CACHE)) && !p)
 		p = pool_get_from_cache(pool, caller);

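__builtin_return_address(0) is a GCC/Clang builtin that reads the current function's own return address, typically a single register or stack-slot load, which is why the commit message calls computing it unconditionally "quite cheap". A small standalone example showing that distinct call sites yield distinct addresses (record() is a made-up name; noinline keeps the calls from being folded into main):

#include <stdio.h>

__attribute__((noinline))
static void record(void)
{
	/* the address of the instruction following the call to record() */
	printf("called from %p\n", __builtin_return_address(0));
}

int main(void)
{
	record();  /* prints one address */
	record();  /* prints a different one: the second call site */
	return 0;
}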
@@ -709,11 +709,8 @@ void *__pool_alloc(struct pool_head *pool, unsigned int flags)
  */
 void __pool_free(struct pool_head *pool, void *ptr)
 {
-	const void *caller = NULL;
+	const void *caller = __builtin_return_address(0);

-#if defined(DEBUG_POOL_TRACING)
-	caller = __builtin_return_address(0);
-#endif
 	/* we'll get late corruption if we refill to the wrong pool or double-free */
 	POOL_DEBUG_CHECK_MARK(pool, ptr);
 	POOL_DEBUG_RESET_MARK(pool, ptr);
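Once a caller pointer has been stored this way, it can be mapped back to a source location after the fact, for example with addr2line -e haproxy <address> on a binary built with symbols; that post-mortem payoff is what makes it worth keeping the trace cheap enough to leave enabled at run time.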