diff --git a/include/haproxy/pool-os.h b/include/haproxy/pool-os.h
index dc86f5318..ef3c9353c 100644
--- a/include/haproxy/pool-os.h
+++ b/include/haproxy/pool-os.h
@@ -22,11 +22,8 @@
 #ifndef _HAPROXY_POOL_OS_H
 #define _HAPROXY_POOL_OS_H
 
-#include <sys/mman.h>
 #include <stdlib.h>
 #include <haproxy/api.h>
-#include <haproxy/pool-t.h>
-#include <haproxy/thread.h>
 
 
 #ifndef DEBUG_UAF
@@ -36,7 +33,7 @@
 /* allocates an area of size <size> and returns it. The semantics are similar
  * to those of malloc().
  */
-static inline void *pool_alloc_area(size_t size)
+static forceinline void *pool_alloc_area(size_t size)
 {
 	return malloc(size);
 }
@@ -45,7 +42,7 @@ static inline void *pool_alloc_area(size_t size)
  * semantics are identical to free() except that the size is specified and
  * may be ignored.
  */
-static inline void pool_free_area(void *area, size_t __maybe_unused size)
+static forceinline void pool_free_area(void *area, size_t __maybe_unused size)
 {
 	free(area);
 }
@@ -54,56 +51,25 @@ static inline void pool_free_area(void *area, size_t __maybe_unused size)
 
 /************* use-after-free allocator *************/
 
-/* allocates an area of size <size> and returns it. The semantics are similar
- * to those of malloc(). However the allocation is rounded up to 4kB so that a
- * full page is allocated. This ensures the object can be freed alone so that
- * future dereferences are easily detected. The returned object is always
- * 16-bytes aligned to avoid issues with unaligned structure objects. In case
- * some padding is added, the area's start address is copied at the end of the
- * padding to help detect underflows.
- */
-static inline void *pool_alloc_area(size_t size)
-{
-	size_t pad = (4096 - size) & 0xFF0;
-	int isolated;
-	void *ret;
+void *pool_alloc_area_uaf(size_t size);
+void pool_free_area_uaf(void *area, size_t size);
 
-	isolated = thread_isolated();
-	if (!isolated)
-		thread_harmless_now();
-	ret = mmap(NULL, (size + 4095) & -4096, PROT_READ|PROT_WRITE, MAP_PRIVATE|MAP_ANONYMOUS, -1, 0);
-	if (ret != MAP_FAILED) {
-		/* let's dereference the page before returning so that the real
-		 * allocation in the system is performed without holding the lock.
-		 */
-		*(int *)ret = 0;
-		if (pad >= sizeof(void *))
-			*(void **)(ret + pad - sizeof(void *)) = ret + pad;
-		ret += pad;
-	} else {
-		ret = NULL;
-	}
-	if (!isolated)
-		thread_harmless_end();
-	return ret;
+
+/* allocates an area of size <size> and returns it. The semantics are similar
+ * to those of malloc().
+ */
+static forceinline void *pool_alloc_area(size_t size)
+{
+	return pool_alloc_area_uaf(size);
 }
 
 /* frees an area <area> of size <size> allocated by pool_alloc_area(). The
- * semantics are identical to free() except that the size must absolutely match
- * the one passed to pool_alloc_area(). In case some padding is added, the
- * area's start address is compared to the one at the end of the padding, and
- * a segfault is triggered if they don't match, indicating an underflow.
+ * semantics are identical to free() except that the size is specified and
+ * may be ignored.
  */
-static inline void pool_free_area(void *area, size_t size)
+static forceinline void pool_free_area(void *area, size_t size)
 {
-	size_t pad = (4096 - size) & 0xFF0;
-
-	if (pad >= sizeof(void *) && *(void **)(area - sizeof(void *)) != area)
-		ABORT_NOW();
-
-	thread_harmless_now();
-	munmap(area - pad, (size + 4095) & -4096);
-	thread_harmless_end();
+	pool_free_area_uaf(area, size);
 }
 
 #endif /* DEBUG_UAF */
diff --git a/src/pool.c b/src/pool.c
index e6669fbae..4bfe01481 100644
--- a/src/pool.c
+++ b/src/pool.c
@@ -9,6 +9,8 @@
  * 2 of the License, or (at your option) any later version.
  *
  */
+
+#include <sys/mman.h>
 
 #include <errno.h>
 #include <haproxy/activity.h>
@@ -420,6 +422,65 @@ void pool_gc(struct pool_head *pool_ctx)
 
 #endif /* CONFIG_HAP_POOLS */
 
+
+#ifdef DEBUG_UAF
+
+/************* use-after-free allocator *************/
+
+/* allocates an area of size <size> and returns it. The semantics are similar
+ * to those of malloc(). However the allocation is rounded up to 4kB so that a
+ * full page is allocated. This ensures the object can be freed alone so that
+ * future dereferences are easily detected. The returned object is always
+ * 16-bytes aligned to avoid issues with unaligned structure objects. In case
+ * some padding is added, the area's start address is copied at the end of the
+ * padding to help detect underflows.
+ */
+void *pool_alloc_area_uaf(size_t size)
+{
+	size_t pad = (4096 - size) & 0xFF0;
+	int isolated;
+	void *ret;
+
+	isolated = thread_isolated();
+	if (!isolated)
+		thread_harmless_now();
+	ret = mmap(NULL, (size + 4095) & -4096, PROT_READ|PROT_WRITE, MAP_PRIVATE|MAP_ANONYMOUS, -1, 0);
+	if (ret != MAP_FAILED) {
+		/* let's dereference the page before returning so that the real
+		 * allocation in the system is performed without holding the lock.
+		 */
+		*(int *)ret = 0;
+		if (pad >= sizeof(void *))
+			*(void **)(ret + pad - sizeof(void *)) = ret + pad;
+		ret += pad;
+	} else {
+		ret = NULL;
+	}
+	if (!isolated)
+		thread_harmless_end();
+	return ret;
+}
+
+/* frees an area <area> of size <size> allocated by pool_alloc_area(). The
+ * semantics are identical to free() except that the size must absolutely match
+ * the one passed to pool_alloc_area(). In case some padding is added, the
+ * area's start address is compared to the one at the end of the padding, and
+ * a segfault is triggered if they don't match, indicating an underflow.
+ */
+void pool_free_area_uaf(void *area, size_t size)
+{
+	size_t pad = (4096 - size) & 0xFF0;
+
+	if (pad >= sizeof(void *) && *(void **)(area - sizeof(void *)) != area)
+		ABORT_NOW();
+
+	thread_harmless_now();
+	munmap(area - pad, (size + 4095) & -4096);
+	thread_harmless_end();
+}
+
+#endif /* DEBUG_UAF */
+
 /*
  * This function destroys a pool by freeing it completely, unless it's still
  * in use. This should be called only under extreme circumstances. It always
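
For anyone who wants to poke at the page-isolation trick outside of HAProxy, here is a minimal standalone sketch of the same allocator. The uaf_alloc()/uaf_free() names are invented for this example; HAProxy's thread_harmless_now()/thread_harmless_end() bracketing, the ABORT_NOW() macro and the page pre-faulting write are omitted to keep it short, and a Linux-style mmap() with MAP_ANONYMOUS is assumed. The logic otherwise mirrors pool_alloc_area_uaf()/pool_free_area_uaf() above.

#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/mman.h>

/* rounds the allocation up to a whole number of 4kB pages and returns a
 * 16-byte aligned object at the end of the mapping, so that munmap() can
 * release it alone and any later dereference faults immediately
 */
static void *uaf_alloc(size_t size)
{
	size_t pad = (4096 - size) & 0xFF0;   /* 16-byte aligned padding */
	void *ret;

	ret = mmap(NULL, (size + 4095) & -4096, PROT_READ | PROT_WRITE,
	           MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	if (ret == MAP_FAILED)
		return NULL;

	/* copy the object's start address at the end of the padding so
	 * that uaf_free() can detect underflows
	 */
	if (pad >= sizeof(void *))
		*(void **)((char *)ret + pad - sizeof(void *)) = (char *)ret + pad;
	return (char *)ret + pad;
}

/* <size> must match the one passed to uaf_alloc(): the padding is
 * recomputed from it to find the start of the mapping again
 */
static void uaf_free(void *area, size_t size)
{
	size_t pad = (4096 - size) & 0xFF0;

	/* a mismatch here means something wrote before the object */
	if (pad >= sizeof(void *) && *(void **)((char *)area - sizeof(void *)) != area)
		abort();

	munmap((char *)area - pad, (size + 4095) & -4096);
}

int main(void)
{
	char *p = uaf_alloc(100);

	if (!p)
		return 1;
	strcpy(p, "still mapped");
	printf("%s\n", p);
	uaf_free(p, 100);
	/* printf("%s\n", p); */   /* use-after-free: would segfault here */
	return 0;
}

Because the padding is recomputed from the size on the free path, the size passed to uaf_free() must exactly match the one given to uaf_alloc(), which is why the comment moved into pool.c insists that it "must absolutely match". Uncommenting the final printf() turns the use-after-free into an immediate SIGSEGV instead of a silent read of stale data, which is the whole point of giving every object its own page.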