diff --git a/include/haproxy/pool-t.h b/include/haproxy/pool-t.h index a1a9f6530..217b7542b 100644 --- a/include/haproxy/pool-t.h +++ b/include/haproxy/pool-t.h @@ -44,6 +44,7 @@ #define POOL_DBG_FAIL_ALLOC 0x00000001 // randomly fail memory allocations #define POOL_DBG_DONT_MERGE 0x00000002 // do not merge same-size pools #define POOL_DBG_COLD_FIRST 0x00000004 // pick cold objects first +#define POOL_DBG_INTEGRITY 0x00000008 // perform integrity checks on cache /* This is the head of a thread-local cache */ @@ -52,9 +53,7 @@ struct pool_cache_head { unsigned int count; /* number of objects in this pool */ unsigned int tid; /* thread id, for debugging only */ struct pool_head *pool; /* assigned pool, for debugging only */ -#if defined(DEBUG_POOL_INTEGRITY) ulong fill_pattern; /* pattern used to fill the area on free */ -#endif } THREAD_ALIGNED(64); /* This represents one item stored in the thread-local cache. <by_pool> links diff --git a/include/haproxy/pool.h b/include/haproxy/pool.h index 1d224241b..296747ad1 100644 --- a/include/haproxy/pool.h +++ b/include/haproxy/pool.h @@ -143,6 +143,8 @@ extern THREAD_LOCAL size_t pool_cache_count; /* #cache objects */ void pool_evict_from_local_cache(struct pool_head *pool, int full); void pool_evict_from_local_caches(void); void pool_put_to_cache(struct pool_head *pool, void *ptr, const void *caller); +void pool_fill_pattern(struct pool_cache_head *pch, struct pool_cache_item *item, uint size); +void pool_check_pattern(struct pool_cache_head *pch, struct pool_cache_item *item, uint size); #if defined(CONFIG_HAP_NO_GLOBAL_POOLS) @@ -201,64 +203,6 @@ static inline uint pool_releasable(const struct pool_head *pool) * cache first, then from the second level if it exists. */ -#if defined(DEBUG_POOL_INTEGRITY) - -/* Updates <pch>'s fill_pattern and fills the free area after <item> with it, - * up to <size> bytes. The item part is left untouched. 
- */ -static inline void pool_fill_pattern(struct pool_cache_head *pch, struct pool_cache_item *item, uint size) -{ - ulong *ptr = (ulong *)item; - uint ofs; - ulong u; - - if (size <= sizeof(*item)) - return; - - /* Upgrade the fill_pattern to change about half of the bits - * (to be sure to catch static flag corruption), and apply it. - */ - u = pch->fill_pattern += ~0UL / 3; // 0x55...55 - ofs = sizeof(*item) / sizeof(*ptr); - while (ofs < size / sizeof(*ptr)) - ptr[ofs++] = u; -} - -/* check for a pool_cache_item integrity after extracting it from the cache. It - * must have been previously initialized using pool_fill_pattern(). If any - * corruption is detected, the function provokes an immediate crash. - */ -static inline void pool_check_pattern(struct pool_cache_head *pch, struct pool_cache_item *item, uint size) -{ - const ulong *ptr = (const ulong *)item; - uint ofs; - ulong u; - - if (size <= sizeof(*item)) - return; - - /* let's check that all words past *item are equal */ - ofs = sizeof(*item) / sizeof(*ptr); - u = ptr[ofs++]; - while (ofs < size / sizeof(*ptr)) { - if (unlikely(ptr[ofs] != u)) - ABORT_NOW(); - ofs++; - } -} - -#else - -static inline void pool_fill_pattern(struct pool_cache_head *pch, struct pool_cache_item *item, uint size) -{ -} - -static inline void pool_check_pattern(struct pool_cache_head *pch, struct pool_cache_item *item, uint size) -{ -} - -#endif - /* Tries to retrieve an object from the local pool cache corresponding to pool * <pool>. If none is available, tries to allocate from the shared cache, and * returns NULL if nothing is available. 
@@ -275,19 +219,21 @@ static inline void *pool_get_from_cache(struct pool_head *pool, const void *call return NULL; } - if (unlikely(pool_debugging & POOL_DBG_COLD_FIRST)) { + /* allocate hottest objects first */ + item = LIST_NEXT(&ph->list, typeof(item), by_pool); + + if (unlikely(pool_debugging & (POOL_DBG_COLD_FIRST|POOL_DBG_INTEGRITY))) { /* allocate oldest objects first so as to keep them as long as possible * in the cache before being reused and maximizing the chance to detect * an overwrite. */ - item = LIST_PREV(&ph->list, typeof(item), by_pool); - } else { - /* allocate hottest objects first */ - item = LIST_NEXT(&ph->list, typeof(item), by_pool); + if (pool_debugging & POOL_DBG_COLD_FIRST) + item = LIST_PREV(&ph->list, typeof(item), by_pool); + + if (pool_debugging & POOL_DBG_INTEGRITY) + pool_check_pattern(ph, item, pool->size); } -#if defined(DEBUG_POOL_INTEGRITY) - pool_check_pattern(ph, item, pool->size); -#endif + BUG_ON(&item->by_pool == &ph->list); LIST_DELETE(&item->by_pool); LIST_DELETE(&item->by_lru); diff --git a/src/pool.c b/src/pool.c index 87ab4b65d..4ba7ea799 100644 --- a/src/pool.c +++ b/src/pool.c @@ -46,6 +46,9 @@ uint pool_debugging __read_mostly = /* set of POOL_DBG_* flags */ #endif #ifdef DEBUG_POOL_INTEGRITY POOL_DBG_COLD_FIRST | +#endif +#ifdef DEBUG_POOL_INTEGRITY + POOL_DBG_INTEGRITY | #endif 0; @@ -332,6 +335,50 @@ void pool_free_nocache(struct pool_head *pool, void *ptr) #ifdef CONFIG_HAP_POOLS +/* Updates <pch>'s fill_pattern and fills the free area after <item> with it, + * up to <size> bytes. The item part is left untouched. + */ +void pool_fill_pattern(struct pool_cache_head *pch, struct pool_cache_item *item, uint size) +{ + ulong *ptr = (ulong *)item; + uint ofs; + ulong u; + + if (size <= sizeof(*item)) + return; + + /* Upgrade the fill_pattern to change about half of the bits + * (to be sure to catch static flag corruption), and apply it. 
 */ + u = pch->fill_pattern += ~0UL / 3; // 0x55...55 + ofs = sizeof(*item) / sizeof(*ptr); + while (ofs < size / sizeof(*ptr)) + ptr[ofs++] = u; +} + +/* check for a pool_cache_item integrity after extracting it from the cache. It + * must have been previously initialized using pool_fill_pattern(). If any + * corruption is detected, the function provokes an immediate crash. + */ +void pool_check_pattern(struct pool_cache_head *pch, struct pool_cache_item *item, uint size) +{ + const ulong *ptr = (const ulong *)item; + uint ofs; + ulong u; + + if (size <= sizeof(*item)) + return; + + /* let's check that all words past *item are equal */ + ofs = sizeof(*item) / sizeof(*ptr); + u = ptr[ofs++]; + while (ofs < size / sizeof(*ptr)) { + if (unlikely(ptr[ofs] != u)) + ABORT_NOW(); + ofs++; + } +} + /* removes up to <count> items from the end of the local pool cache for * pool <pool>. The shared pool is refilled with these objects in the limit * of the number of acceptable objects, and the rest will be released to the @@ -351,7 +398,8 @@ static void pool_evict_last_items(struct pool_head *pool, struct pool_cache_head while (released < count && !LIST_ISEMPTY(&ph->list)) { item = LIST_PREV(&ph->list, typeof(item), by_pool); BUG_ON(&item->by_pool == &ph->list); - pool_check_pattern(ph, item, pool->size); + if (unlikely(pool_debugging & POOL_DBG_INTEGRITY)) + pool_check_pattern(ph, item, pool->size); LIST_DELETE(&item->by_pool); LIST_DELETE(&item->by_lru); @@ -440,7 +488,8 @@ void pool_put_to_cache(struct pool_head *pool, void *ptr, const void *caller) LIST_INSERT(&th_ctx->pool_lru_head, &item->by_lru); POOL_DEBUG_TRACE_CALLER(pool, item, caller); ph->count++; - pool_fill_pattern(ph, item, pool->size); + if (unlikely(pool_debugging & POOL_DBG_INTEGRITY)) + pool_fill_pattern(ph, item, pool->size); pool_cache_count++; pool_cache_bytes += pool->size; @@ -510,7 +559,8 @@ void pool_refill_local_from_shared(struct pool_head *pool, struct pool_cache_hea LIST_INSERT(&pch->list, &item->by_pool); 
LIST_INSERT(&th_ctx->pool_lru_head, &item->by_lru); count++; - pool_fill_pattern(pch, item, pool->size); + if (unlikely(pool_debugging & POOL_DBG_INTEGRITY)) + pool_fill_pattern(pch, item, pool->size); } HA_ATOMIC_ADD(&pool->used, count); pch->count += count;