CLEANUP: pools: get rid of the POOL_LINK macro

The POOL_LINK macro is now only used for debugging, and it still requires
ifdefs around it, which needlessly complicates the code. Let's replace it
and the calling code with a new pair of macros: POOL_DEBUG_SET_MARK()
and POOL_DEBUG_CHECK_MARK(), that respectively store and check the pool
pointer in the extra location at the end of the object. This removes 4
pairs of ifdefs in the middle of the code.
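
To illustrate the idea, here is a minimal standalone sketch (plain malloc,
with hypothetical my_pool/obj_alloc/obj_free names; this is not the HAProxy
code itself): each object is allocated with one extra pointer at its end,
the allocator writes the owning pool's address there, and the free path
verifies it before accepting the object back.

    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    /* hypothetical pool descriptor, only what the sketch needs */
    struct my_pool {
        const char *name;
        size_t size;                /* usable payload size */
    };

    /* allocate pool->size bytes plus room for the trailing mark */
    static void *obj_alloc(struct my_pool *pool)
    {
        char *item = malloc(pool->size + sizeof(void *));
        if (!item)
            return NULL;
        /* the mark: the owning pool's address, stored right after the payload */
        *(struct my_pool **)(item + pool->size) = pool;
        return item;
    }

    /* verify the mark before releasing; abort on wrong-pool free or corruption */
    static void obj_free(struct my_pool *pool, void *item)
    {
        if (*(struct my_pool **)((char *)item + pool->size) != pool) {
            fprintf(stderr, "object freed into the wrong pool (%s)\n", pool->name);
            abort();
        }
        free(item);
    }

    int main(void)
    {
        struct my_pool buffers  = { "buffers",  64 };
        struct my_pool sessions = { "sessions", 64 };

        void *b = obj_alloc(&buffers);
        memset(b, 'x', buffers.size);   /* the payload never touches the mark */
        obj_free(&buffers, b);          /* mark matches: accepted */

        void *s = obj_alloc(&sessions);
        obj_free(&buffers, s);          /* mark mismatch: aborts on purpose */
        return 0;
    }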
Author: Willy Tarreau
Date:   2022-01-01 17:10:50 +01:00
Commit: 8c4927098e (parent 799f6143ca)
3 changed files with 39 additions and 28 deletions


@@ -28,20 +28,6 @@
 #define MEM_F_SHARED 0x1
 #define MEM_F_EXACT 0x2
 
-/* By default, free objects are linked by a pointer stored at the beginning of
- * the memory area. When DEBUG_MEMORY_POOLS is set, the allocated area is
- * inflated by the size of a pointer so that the link is placed at the end
- * of the objects. Hence free objects in pools remain intact. In addition,
- * this location is used to keep a pointer to the pool the object was
- * allocated from, and verify it's freed into the appropriate one.
- */
-#ifdef DEBUG_MEMORY_POOLS
-#define POOL_EXTRA (sizeof(void *))
-#define POOL_LINK(pool, item) (void **)(((char *)(item)) + ((pool)->size))
-#else
-#define POOL_EXTRA (0)
-#endif
-
 /* A special pointer for the pool's free_list that indicates someone is
  * currently manipulating it. Serves as a short-lived lock.
  */


@@ -45,6 +45,39 @@
 	static struct pool_head *(ptr) __read_mostly; \
 	REGISTER_POOL(&ptr, name, size)
 
+/* By default, free objects are linked by a pointer stored at the beginning of
+ * the memory area. When DEBUG_MEMORY_POOLS is set, the allocated area is
+ * inflated by the size of a pointer so that the link is placed at the end
+ * of the objects. Hence free objects in pools remain intact. In addition,
+ * this location is used to keep a pointer to the pool the object was
+ * allocated from, and verify it's freed into the appropriate one.
+ */
+#ifdef DEBUG_MEMORY_POOLS
+# define POOL_EXTRA (sizeof(void *))
+# define POOL_DEBUG_SET_MARK(pool, item)				\
+	do {								\
+		typeof(pool) __p = (pool);				\
+		typeof(item) __i = (item);				\
+		*(typeof(pool)*)(((char *)__i) + __p->size) = __p;	\
+	} while (0)
+
+# define POOL_DEBUG_CHECK_MARK(pool, item)				\
+	do {								\
+		typeof(pool) __p = (pool);				\
+		typeof(item) __i = (item);				\
+		if (*(typeof(pool)*)(((char *)__i) + __p->size) != __p)	\
+			ABORT_NOW();					\
+	} while (0)
+
+#else // DEBUG_MEMORY_POOLS
+# define POOL_EXTRA (0)
+# define POOL_DEBUG_SET_MARK(pool, item)   do { } while (0)
+# define POOL_DEBUG_CHECK_MARK(pool, item) do { } while (0)
+#endif // DEBUG_MEMORY_POOLS
+
 /* poison each newly allocated area with this byte if >= 0 */
 extern int mem_poison_byte;
@@ -135,11 +168,8 @@ static inline void *pool_get_from_shared_cache(struct pool_head *pool)
 	_HA_ATOMIC_STORE(&pool->free_list, *(void **)ret);
 	_HA_ATOMIC_INC(&pool->used);
-#ifdef DEBUG_MEMORY_POOLS
 	/* keep track of where the element was allocated from */
-	*POOL_LINK(pool, ret) = (void *)pool;
-#endif
+	POOL_DEBUG_SET_MARK(pool, ret);
  out:
 	__ha_barrier_atomic_store();
 	return ret;
@@ -197,10 +227,9 @@ static inline void *pool_get_from_cache(struct pool_head *pool)
 		pool_cache_count--;
 		LIST_DELETE(&item->by_pool);
 		LIST_DELETE(&item->by_lru);
-#ifdef DEBUG_MEMORY_POOLS
 		/* keep track of where the element was allocated from */
-		*POOL_LINK(pool, item) = (void *)pool;
-#endif
+		POOL_DEBUG_SET_MARK(pool, item);
 		return item;
 	}
@@ -282,11 +311,9 @@ static inline void *pool_zalloc(struct pool_head *pool)
 static inline void pool_free(struct pool_head *pool, void *ptr)
 {
 	if (likely(ptr != NULL)) {
-#ifdef DEBUG_MEMORY_POOLS
 		/* we'll get late corruption if we refill to the wrong pool or double-free */
-		if (*POOL_LINK(pool, ptr) != (void *)pool)
-			ABORT_NOW();
-#endif
+		POOL_DEBUG_CHECK_MARK(pool, ptr);
 		if (unlikely(mem_poison_byte >= 0))
 			memset(ptr, mem_poison_byte, pool->size);


@@ -282,10 +282,8 @@ void *pool_alloc_nocache(struct pool_head *pool)
 	swrate_add_scaled(&pool->needed_avg, POOL_AVG_SAMPLES, pool->used, POOL_AVG_SAMPLES/4);
 	_HA_ATOMIC_INC(&pool->used);
-#ifdef DEBUG_MEMORY_POOLS
 	/* keep track of where the element was allocated from */
-	*POOL_LINK(pool, ptr) = (void *)pool;
-#endif
+	POOL_DEBUG_SET_MARK(pool, ptr);
 	return ptr;
 }
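
Note on the no-op variants: the debug macros are multi-statement blocks
wrapped in do { ... } while (0) so each use behaves as a single statement,
and the !DEBUG_MEMORY_POOLS versions keep the same do { } while (0) shape so
every call site compiles identically (and to nothing) whether the option is
set or not. A tiny generic illustration of the idiom (not taken from the
patch, CHECK_NOOP is a made-up name):

    #include <stdio.h>

    /* same shape as the !DEBUG_MEMORY_POOLS variants: one empty statement */
    #define CHECK_NOOP() do { } while (0)

    int main(void)
    {
        int debug = 0;

        if (debug)
            CHECK_NOOP();           /* still a single statement taking the ';' */
        else
            printf("debug checks compiled out\n");

        return 0;
    }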