Mirror of https://git.haproxy.org/git/haproxy.git/, synced 2026-01-17 14:50:59 +01:00
MEDIUM: pools: respect pool alignment in allocations
Now pool_alloc_area() takes the alignment as an argument and makes use of ha_aligned_alloc() instead of malloc(). pool_alloc_area_uaf() simply applies the alignment before returning the mapped area. The pool_free_area() function calls ha_aligned_free_size() so as to permit the use of a specific API for aligned alloc/free, as mingw requires.

Note that it's possible to warn about insufficient alignment during pool_free() since we know both the pool and the type. In pool_free(), adding just this is sufficient to detect potential offenders:

    WARN_ON(__alignof__(*__ptr) > pool->align);
parent f0d0922aa1
commit ef915e672a
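The detection idea from the message can be sketched as a type-aware free wrapper; pool_free_checked() below is a made-up name for illustration, while WARN_ON(), pool_free() and typeof are the usual HAProxy/GCC facilities:

/* Warn at free time when the pointed-to type requires stricter
 * alignment than the pool was created with.
 */
#define pool_free_checked(pool, ptr) do {			\
	typeof(ptr) __ptr = (ptr);				\
	WARN_ON(__alignof__(*__ptr) > (pool)->align);		\
	pool_free((pool), __ptr);				\
} while (0)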
@@ -25,6 +25,7 @@
 #include <sys/mman.h>
 #include <stdlib.h>
 #include <haproxy/api.h>
+#include <haproxy/tools.h>
 
 
 /************* normal allocator *************/
@@ -32,9 +33,9 @@
 /* allocates an area of size <size> and returns it. The semantics are similar
  * to those of malloc().
  */
-static forceinline void *pool_alloc_area(size_t size)
+static forceinline void *pool_alloc_area(size_t size, size_t align)
 {
-	return malloc(size);
+	return ha_aligned_alloc(align, size);
 }
 
 /* frees an area <area> of size <size> allocated by pool_alloc_area(). The
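For readers without the tree at hand, here is one plausible shape for such an allocator on POSIX systems; this is only a sketch with an invented name, not HAProxy's actual ha_aligned_alloc() from <haproxy/tools.h>, which exists precisely so that platforms like mingw can plug in their own _aligned_malloc()-style API:

#include <stdlib.h>

/* Sketch: return <size> bytes aligned to <align> (a power of two).
 * posix_memalign() also wants the alignment to be a multiple of
 * sizeof(void *), so tiny alignments are rounded up first.
 */
static void *aligned_alloc_sketch(size_t align, size_t size)
{
	void *ret;

	if (align < sizeof(void *))
		align = sizeof(void *);
	if (posix_memalign(&ret, align, size) != 0)
		return NULL;
	return ret;
}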
@@ -43,8 +44,7 @@ static forceinline void *pool_alloc_area(size_t size)
  */
 static forceinline void pool_free_area(void *area, size_t __maybe_unused size)
 {
-	will_free(area, size);
-	free(area);
+	ha_aligned_free_size(area, size);
 }
 
 /************* use-after-free allocator *************/
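The matching free side explains why a dedicated helper is needed at all: memory from posix_memalign() is released with plain free(), but mingw's _aligned_malloc() must be paired with _aligned_free(). A sketch of the POSIX flavor, with the size kept around the way the removed will_free(area, size) call used it for accounting:

#include <stdlib.h>

/* Sketch only: release an area obtained from aligned_alloc_sketch().
 * <size> is unused here; real code can feed it to accounting hooks.
 */
static void aligned_free_size_sketch(void *area, size_t size)
{
	(void)size;
	free(area);
}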
@@ -52,14 +52,15 @@ static forceinline void pool_free_area(void *area, size_t __maybe_unused size)
 /* allocates an area of size <size> and returns it. The semantics are similar
  * to those of malloc(). However the allocation is rounded up to 4kB so that a
  * full page is allocated. This ensures the object can be freed alone so that
- * future dereferences are easily detected. The returned object is always
- * 16-bytes aligned to avoid issues with unaligned structure objects. In case
- * some padding is added, the area's start address is copied at the end of the
- * padding to help detect underflows.
+ * future dereferences are easily detected. The returned object is always at
+ * least 16-bytes aligned to avoid issues with unaligned structure objects, and
+ * in any case, is always at least aligned as required by the pool, though no
+ * more than 4096. In case some padding is added, the area's start address is
+ * copied at the end of the padding to help detect underflows.
  */
-static inline void *pool_alloc_area_uaf(size_t size)
+static inline void *pool_alloc_area_uaf(size_t size, size_t align)
 {
-	size_t pad = (4096 - size) & 0xFF0;
+	size_t pad = (4096 - size) & 0xFF0 & -align;
 	void *ret;
 
 	ret = mmap(NULL, (size + 4095) & -4096, PROT_READ|PROT_WRITE, MAP_PRIVATE|MAP_ANONYMOUS, -1, 0);
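To see what the new pad expression buys, here is a worked example with arbitrary illustrative values (size = 100, align = 64). Since mmap() returns page-aligned memory, ret + pad is aligned to min(align, 4096), matching the comment's "though no more than 4096" cap:

#include <assert.h>
#include <stddef.h>

int main(void)
{
	size_t size = 100, align = 64;	/* illustrative values only */
	size_t pad = (4096 - size) & 0xFF0 & -align;

	/* 4096 - 100 = 0xF9C; & 0xFF0 -> 0xF90 (16-byte grain);
	 * & -64 -> 0xF80 = 3968, also 64-byte aligned.
	 */
	assert(pad == 3968);
	assert(pad % align == 0);	/* start offset stays aligned */
	assert(pad + size <= 4096);	/* object still fits the page */
	return 0;
}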
@@ -490,9 +490,9 @@ void *pool_get_from_os_noinc(struct pool_head *pool)
 	void *ptr;
 
 	if ((pool_debugging & POOL_DBG_UAF) || (pool->flags & MEM_F_UAF))
-		ptr = pool_alloc_area_uaf(pool->alloc_sz);
+		ptr = pool_alloc_area_uaf(pool->alloc_sz, pool->align);
 	else
-		ptr = pool_alloc_area(pool->alloc_sz);
+		ptr = pool_alloc_area(pool->alloc_sz, pool->align);
 	if (ptr)
 		return ptr;
 	_HA_ATOMIC_INC(&pool->buckets[pool_tbucket()].failed);