diff --git a/include/common/memory.h b/include/common/memory.h
index 64a7387f6..cf6df9a88 100644
--- a/include/common/memory.h
+++ b/include/common/memory.h
@@ -22,15 +22,12 @@
 #ifndef _COMMON_MEMORY_H
 #define _COMMON_MEMORY_H
 
-#include <sys/mman.h>
-
-#include <stdlib.h>
 #include <string.h>
-#include <unistd.h>
 
 #include <haproxy/api.h>
 #include <haproxy/freq_ctr.h>
 #include <haproxy/list.h>
+#include <haproxy/pool-os.h>
 #include <haproxy/pool-t.h>
 #include <haproxy/thread.h>
 
@@ -239,7 +236,7 @@ static inline void __pool_free(struct pool_head *pool, void *ptr)
 	_HA_ATOMIC_SUB(&pool->used, 1);
 
 	if (unlikely(pool_is_crowded(pool))) {
-		free(ptr);
+		pool_free_area(ptr, pool->size + POOL_EXTRA);
 		_HA_ATOMIC_SUB(&pool->allocated, 1);
 	} else {
 		do {
@@ -342,82 +339,6 @@ static inline void *pool_alloc_dirty(struct pool_head *pool)
 	return p;
 }
 
-#ifndef DEBUG_UAF /* normal allocator */
-
-/* allocates an area of size <size> and returns it. The semantics are similar
- * to those of malloc().
- */
-static inline void *pool_alloc_area(size_t size)
-{
-	return malloc(size);
-}
-
-/* frees an area <area> of size <size> allocated by pool_alloc_area(). The
- * semantics are identical to free() except that the size is specified and
- * may be ignored.
- */
-static inline void pool_free_area(void *area, size_t __maybe_unused size)
-{
-	free(area);
-}
-
-#else /* use-after-free detector */
-
-/* allocates an area of size <size> and returns it. The semantics are similar
- * to those of malloc(). However the allocation is rounded up to 4kB so that a
- * full page is allocated. This ensures the object can be freed alone so that
- * future dereferences are easily detected. The returned object is always
- * 16-bytes aligned to avoid issues with unaligned structure objects. In case
- * some padding is added, the area's start address is copied at the end of the
- * padding to help detect underflows.
- */
-#include <sys/mman.h>
-static inline void *pool_alloc_area(size_t size)
-{
-	size_t pad = (4096 - size) & 0xFF0;
-	int isolated;
-	void *ret;
-
-	isolated = thread_isolated();
-	if (!isolated)
-		thread_harmless_now();
-	ret = mmap(NULL, (size + 4095) & -4096, PROT_READ|PROT_WRITE, MAP_PRIVATE|MAP_ANONYMOUS, -1, 0);
-	if (ret != MAP_FAILED) {
-		/* let's dereference the page before returning so that the real
-		 * allocation in the system is performed without holding the lock.
-		 */
-		*(int *)ret = 0;
-		if (pad >= sizeof(void *))
-			*(void **)(ret + pad - sizeof(void *)) = ret + pad;
-		ret += pad;
-	} else {
-		ret = NULL;
-	}
-	if (!isolated)
-		thread_harmless_end();
-	return ret;
-}
-
-/* frees an area <area> of size <size> allocated by pool_alloc_area(). The
- * semantics are identical to free() except that the size must absolutely match
- * the one passed to pool_alloc_area(). In case some padding is added, the
- * area's start address is compared to the one at the end of the padding, and
- * a segfault is triggered if they don't match, indicating an underflow.
- */
-static inline void pool_free_area(void *area, size_t size)
-{
-	size_t pad = (4096 - size) & 0xFF0;
-
-	if (pad >= sizeof(void *) && *(void **)(area - sizeof(void *)) != area)
-		*DISGUISE((volatile int *)0) = 0;
-
-	thread_harmless_now();
-	munmap(area - pad, (size + 4095) & -4096);
-	thread_harmless_end();
-}
-
-#endif /* DEBUG_UAF */
-
 /*
  * Returns a pointer to type <type> taken from the pool <pool_type> or
  * dynamically allocated. In the first case, <pool_type> is updated to point to
@@ -457,7 +378,7 @@ static inline void pool_free(struct pool_head *pool, void *ptr)
 		HA_SPIN_LOCK(POOL_LOCK, &pool->lock);
 		pool->used--;
 		if (pool_is_crowded(pool)) {
-			free(ptr);
+			pool_free_area(ptr, pool->size + POOL_EXTRA);
 			pool->allocated--;
 		} else {
 			*POOL_LINK(pool, ptr) = (void *)pool->free_list;
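Why the free() calls above had to become pool_free_area(): with DEBUG_UAF set, pool objects come from mmap(), so handing them back to libc free() is undefined behavior, and munmap() additionally needs the exact mapping length, which free() cannot recover from the pointer alone. That is why the object size (pool->size + POOL_EXTRA) now travels with the pointer on every release path. A minimal standalone sketch of that constraint, assuming 4kB pages and a hypothetical fixed object size SZ (illustration code, not part of the patch):

#include <sys/mman.h>
#include <stdio.h>

#define SZ 128	/* hypothetical object size, standing in for pool->size + POOL_EXTRA */

/* round up to a whole page and map it, as the DEBUG_UAF allocator does */
static void *area_alloc(size_t size)
{
	void *p = mmap(NULL, (size + 4095) & ~(size_t)4095, PROT_READ | PROT_WRITE,
	               MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	return (p == MAP_FAILED) ? NULL : p;
}

/* unlike free(), munmap() cannot recover the length from the pointer,
 * so the caller has to pass the size it allocated with.
 */
static void area_free(void *p, size_t size)
{
	munmap(p, (size + 4095) & ~(size_t)4095);
}

int main(void)
{
	char *p = area_alloc(SZ);

	if (!p)
		return 1;
	p[0] = 'x';
	area_free(p, SZ);	/* calling free(p) here instead would corrupt the heap */
	/* any later access to *p faults: the page is unmapped, not recycled */
	printf("mapped and released one %d-byte object\n", SZ);
	return 0;
}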
diff --git a/include/haproxy/pool-os.h b/include/haproxy/pool-os.h
new file mode 100644
index 000000000..dc86f5318
--- /dev/null
+++ b/include/haproxy/pool-os.h
@@ -0,0 +1,118 @@
+/*
+ * include/haproxy/pool-os.h
+ * OS-level interface for memory management
+ *
+ * Copyright (C) 2000-2020 Willy Tarreau - w@1wt.eu
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation, version 2.1
+ * exclusively.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef _HAPROXY_POOL_OS_H
+#define _HAPROXY_POOL_OS_H
+
+#include <sys/mman.h>
+#include <stdlib.h>
+#include <unistd.h>
+#include <haproxy/api.h>
+#include <haproxy/thread.h>
+
+
+#ifndef DEBUG_UAF
+
+/************* normal allocator *************/
+
+/* allocates an area of size <size> and returns it. The semantics are similar
+ * to those of malloc().
+ */
+static inline void *pool_alloc_area(size_t size)
+{
+	return malloc(size);
+}
+
+/* frees an area <area> of size <size> allocated by pool_alloc_area(). The
+ * semantics are identical to free() except that the size is specified and
+ * may be ignored.
+ */
+static inline void pool_free_area(void *area, size_t __maybe_unused size)
+{
+	free(area);
+}
+
+#else
+
+/************* use-after-free allocator *************/
+
+/* allocates an area of size <size> and returns it. The semantics are similar
+ * to those of malloc(). However the allocation is rounded up to 4kB so that a
+ * full page is allocated. This ensures the object can be freed alone so that
+ * future dereferences are easily detected. The returned object is always
+ * 16-bytes aligned to avoid issues with unaligned structure objects. In case
+ * some padding is added, the area's start address is copied at the end of the
+ * padding to help detect underflows.
+ */
+static inline void *pool_alloc_area(size_t size)
+{
+	size_t pad = (4096 - size) & 0xFF0;
+	int isolated;
+	void *ret;
+
+	isolated = thread_isolated();
+	if (!isolated)
+		thread_harmless_now();
+	ret = mmap(NULL, (size + 4095) & -4096, PROT_READ|PROT_WRITE, MAP_PRIVATE|MAP_ANONYMOUS, -1, 0);
+	if (ret != MAP_FAILED) {
+		/* let's dereference the page before returning so that the real
+		 * allocation in the system is performed without holding the lock.
+		 */
+		*(int *)ret = 0;
+		if (pad >= sizeof(void *))
+			*(void **)(ret + pad - sizeof(void *)) = ret + pad;
+		ret += pad;
+	} else {
+		ret = NULL;
+	}
+	if (!isolated)
+		thread_harmless_end();
+	return ret;
+}
+
+/* frees an area <area> of size <size> allocated by pool_alloc_area(). The
+ * semantics are identical to free() except that the size must absolutely match
+ * the one passed to pool_alloc_area(). In case some padding is added, the
+ * area's start address is compared to the one at the end of the padding, and
+ * a segfault is triggered if they don't match, indicating an underflow.
+ */
+static inline void pool_free_area(void *area, size_t size)
+{
+	size_t pad = (4096 - size) & 0xFF0;
+
+	if (pad >= sizeof(void *) && *(void **)(area - sizeof(void *)) != area)
+		ABORT_NOW();
+
+	thread_harmless_now();
+	munmap(area - pad, (size + 4095) & -4096);
+	thread_harmless_end();
+}
+
+#endif /* DEBUG_UAF */
+
+#endif /* _HAPROXY_POOL_OS_H */
+
+/*
+ * Local variables:
+ *  c-indent-level: 8
+ *  c-basic-offset: 8
+ * End:
+ */
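The padding arithmetic in the DEBUG_UAF branch above rewards a closer look: pad = (4096 - size) & 0xFF0 places the object as close as possible to the end of its own page (in the single-page case) while keeping the returned pointer 16-byte aligned, so an overflow walks off the mapping almost immediately, and the bytes just below the returned address hold a copy of that address as an underflow canary. A small standalone program (illustration only; it assumes the same 4kB page size and constants as the code above) checks those invariants for a few sizes:

#include <stdio.h>

int main(void)
{
	static const size_t sizes[] = { 16, 100, 1000, 4000, 4096 };
	size_t i;

	for (i = 0; i < sizeof(sizes) / sizeof(sizes[0]); i++) {
		size_t size = sizes[i];
		size_t pad  = (4096 - size) & 0xFF0;          /* offset of the returned area */
		size_t map  = (size + 4095) & ~(size_t)4095;  /* bytes actually mmap()ed */

		/* for objects up to a page, the slack past the object stays under 16 bytes */
		printf("size=%4zu pad=%4zu mapped=%4zu end-gap=%2zu aligned=%d\n",
		       size, pad, map, map - (pad + size), (pad & 15) == 0);
	}
	return 0;
}

For size = 100, for instance, pad = (4096 - 100) & 0xFF0 = 3984, so the object occupies bytes 3984..4083 of its page and only 12 slack bytes remain between its end and the unmapped next page.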
diff --git a/src/memory.c b/src/memory.c
index 453160562..106b04a0b 100644
--- a/src/memory.c
+++ b/src/memory.c
@@ -172,7 +172,7 @@ void *__pool_refill_alloc(struct pool_head *pool, unsigned int avail)
 
 	swrate_add_scaled(&pool->needed_avg, POOL_AVG_SAMPLES, pool->allocated, POOL_AVG_SAMPLES/4);
 
-	ptr = malloc(size + POOL_EXTRA);
+	ptr = pool_alloc_area(size + POOL_EXTRA);
 	if (!ptr) {
 		_HA_ATOMIC_ADD(&pool->failed, 1);
 		if (failed) {
@@ -235,7 +235,7 @@ void pool_flush(struct pool_head *pool)
 		temp = next;
 		next = *POOL_LINK(pool, temp);
 		removed++;
-		free(temp);
+		pool_free_area(temp, pool->size + POOL_EXTRA);
 	}
 	pool->free_list = next;
 	_HA_ATOMIC_SUB(&pool->allocated, removed);
@@ -269,7 +269,7 @@ void pool_gc(struct pool_head *pool_ctx)
 			new.seq = cmp.seq + 1;
 			if (HA_ATOMIC_DWCAS(&entry->free_list, &cmp, &new) == 0)
 				continue;
-			free(cmp.free_list);
+			pool_free_area(cmp.free_list, entry->size + POOL_EXTRA);
 			_HA_ATOMIC_SUB(&entry->allocated, 1);
 		}
 	}
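With these three call sites converted, every pool allocation and release (refill, flush and GC) funnels through pool_alloc_area()/pool_free_area(), so a DEBUG_UAF build traps stale accesses to pooled objects no matter which path freed them. The following self-contained demo re-creates both traps in miniature; it uses simplified copies of the pool-os.h primitives, without the thread_harmless dance, and the demo_* names are invented for this sketch (not HAProxy code):

#include <sys/mman.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* simplified copy of the DEBUG_UAF allocator: page-aligned mmap() plus the
 * start-address canary stored right below the returned area
 */
static void *demo_alloc_area(size_t size)
{
	size_t pad = (4096 - size) & 0xFF0;
	char *ret;

	ret = mmap(NULL, (size + 4095) & ~(size_t)4095, PROT_READ | PROT_WRITE,
	           MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	if (ret == MAP_FAILED)
		return NULL;
	if (pad >= sizeof(void *))
		*(void **)(ret + pad - sizeof(void *)) = ret + pad;
	return ret + pad;
}

/* simplified copy of the matching release: verify the canary, then unmap */
static void demo_free_area(void *area, size_t size)
{
	size_t pad = (4096 - size) & 0xFF0;

	if (pad >= sizeof(void *) && *(void **)((char *)area - sizeof(void *)) != area) {
		fprintf(stderr, "underflow detected below %p\n", area);
		abort();
	}
	munmap((char *)area - pad, (size + 4095) & ~(size_t)4095);
}

int main(int argc, char **argv)
{
	char *p = demo_alloc_area(64);

	if (!p)
		return 1;
	memset(p, 0, 64);

	if (argc > 1 && strcmp(argv[1], "underflow") == 0)
		p[-1] = 'X';	/* corrupts the canary: demo_free_area() aborts */

	demo_free_area(p, 64);

	if (argc > 1 && strcmp(argv[1], "uaf") == 0)
		return *(volatile char *)p;	/* page is unmapped: immediate SIGSEGV */

	printf("clean run: object allocated, used and unmapped\n");
	return 0;
}

Run with the argument "uaf" it dies with SIGSEGV on the read through the freed pointer; with "underflow" it aborts in the canary check, which is the behavior this patch extends to all pool frees.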