MINOR: tools: implement ha_aligned_zalloc()
This one is exactly ha_aligned_alloc() followed by a memset(0), which will be convenient in a number of call places as a replacement for calloc(). Note that ideally we should also have a calloc-like version performing basic multiply-overflow checks, but these allocations are essentially a number of threads times small structs, so that's fine, and we already do the same everywhere with malloc() calls.
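As an illustration only (not part of the commit): below is a hypothetical call site using the new helper as a calloc() replacement, followed by a sketch of the overflow-checked calloc-style variant the message alludes to. The names thread_ctx, alloc_thread_ctx and ha_aligned_calloc are invented for the example, and the code assumes the helpers added by this commit are in scope.

#include <stddef.h>
#include <stdint.h>

/* hypothetical per-thread context */
struct thread_ctx {
	unsigned long counters[8];
};

/* before: ctx = calloc(nbthread, sizeof(*ctx));
 * after:  one call returns a zeroed, 64-byte aligned array.
 */
static struct thread_ctx *alloc_thread_ctx(size_t nbthread)
{
	return ha_aligned_zalloc(64, nbthread * sizeof(struct thread_ctx));
}

/* sketch of the calloc-like variant mentioned above (not in this
 * commit): refuse the allocation when count * size would wrap.
 */
static inline void *ha_aligned_calloc(size_t alignment, size_t count, size_t size)
{
	if (size && count > SIZE_MAX / size)
		return NULL; /* multiplication would overflow */
	return ha_aligned_zalloc(alignment, count * size);
}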
commit 746e77d000
parent 55d561042c
@@ -639,6 +639,24 @@ struct mem_stats {
 	_ha_aligned_alloc(__a, __s);				\
 })
 
+#undef ha_aligned_zalloc
+#define ha_aligned_zalloc(a,s) ({				\
+	size_t __a = (a);					\
+	size_t __s = (s);					\
+	static struct mem_stats _ __attribute__((used,__section__("mem_stats"),__aligned__(sizeof(void*)))) = { \
+		.caller = {					\
+			.file = __FILE__, .line = __LINE__,	\
+			.what = MEM_STATS_TYPE_MALLOC,		\
+			.func = __func__,			\
+		},						\
+	};							\
+	HA_WEAK(__start_mem_stats);				\
+	HA_WEAK(__stop_mem_stats);				\
+	_HA_ATOMIC_INC(&_.calls);				\
+	_HA_ATOMIC_ADD(&_.size, __s);				\
+	_ha_aligned_zalloc(__a, __s);				\
+})
+
 #undef ha_aligned_alloc_safe
 #define ha_aligned_alloc_safe(a,s) ({			\
 	size_t __a = (a);					\
@@ -657,6 +675,24 @@ struct mem_stats {
 	_ha_aligned_alloc_safe(__a, __s);			\
 })
 
+#undef ha_aligned_zalloc_safe
+#define ha_aligned_zalloc_safe(a,s) ({			\
+	size_t __a = (a);					\
+	size_t __s = (s);					\
+	static struct mem_stats _ __attribute__((used,__section__("mem_stats"),__aligned__(sizeof(void*)))) = { \
+		.caller = {					\
+			.file = __FILE__, .line = __LINE__,	\
+			.what = MEM_STATS_TYPE_MALLOC,		\
+			.func = __func__,			\
+		},						\
+	};							\
+	HA_WEAK(__start_mem_stats);				\
+	HA_WEAK(__stop_mem_stats);				\
+	_HA_ATOMIC_INC(&_.calls);				\
+	_HA_ATOMIC_ADD(&_.size, __s);				\
+	_ha_aligned_zalloc_safe(__a, __s);			\
+})
+
 #undef ha_aligned_free
 #define ha_aligned_free(x) ({				\
 	typeof(x) __x = (x);					\
@@ -703,7 +739,9 @@ struct mem_stats {
 
 #define will_free(x, y) do { } while (0)
 #define ha_aligned_alloc(a,s) _ha_aligned_alloc(a, s)
+#define ha_aligned_zalloc(a,s) _ha_aligned_zalloc(a, s)
 #define ha_aligned_alloc_safe(a,s) _ha_aligned_alloc_safe(a, s)
+#define ha_aligned_zalloc_safe(a,s) _ha_aligned_zalloc_safe(a, s)
 #define ha_aligned_free(p) _ha_aligned_free(p)
 #define ha_aligned_free_size(p,s) _ha_aligned_free(p)
 
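A note on the pattern above (illustration, not from the commit): when memory-usage stats are enabled, each call site expands to a GNU statement expression embedding a static struct mem_stats placed in the "mem_stats" section, and the linker automatically provides __start_mem_stats/__stop_mem_stats boundary symbols for that section, so all per-call-site records can be walked. A minimal dump sketch, assuming the field names visible in the diff and size_t counters:

#include <stdio.h>

/* boundary symbols generated by the linker for the "mem_stats" section */
extern struct mem_stats __start_mem_stats;
extern struct mem_stats __stop_mem_stats;

static void dump_mem_stats(void)
{
	struct mem_stats *p;

	for (p = &__start_mem_stats; p < &__stop_mem_stats; p++)
		printf("%s:%d(%s): %llu calls, %llu bytes\n",
		       p->caller.file, p->caller.line, p->caller.func,
		       (unsigned long long)p->calls,
		       (unsigned long long)p->size);
}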
@@ -1212,6 +1212,16 @@ static inline void *_ha_aligned_alloc(size_t alignment, size_t size)
 #endif
 }
 
+/* Like above but zeroing the area */
+static inline void *_ha_aligned_zalloc(size_t alignment, size_t size)
+{
+	void *ret = _ha_aligned_alloc(alignment, size);
+
+	if (ret)
+		memset(ret, 0, size);
+	return ret;
+}
+
 /* portable memalign(): tries to accommodate OS specificities, and may fall
  * back to plain malloc() if not supported, meaning that alignment guarantees
  * are only a performance bonus but not granted. The size will automatically be
@@ -1239,6 +1249,16 @@ static inline void *_ha_aligned_alloc_safe(size_t alignment, size_t size)
 	return _ha_aligned_alloc(alignment, size);
 }
 
+/* Like above but zeroing the area */
+static inline void *_ha_aligned_zalloc_safe(size_t alignment, size_t size)
+{
+	void *ret = _ha_aligned_alloc_safe(alignment, size);
+
+	if (ret)
+		memset(ret, 0, size);
+	return ret;
+}
+
 /* To be used to free a pointer returned by _ha_aligned_alloc() or
  * _ha_aligned_alloc_safe(). Please use ha_aligned_free() instead
  * (which does perform accounting).
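Finally, a usage note (illustration, not from the commit): per the comment above, pointers obtained from these helpers should be released with ha_aligned_free(), which performs the matching accounting, rather than plain free(). A hypothetical call site, with invented struct and function names:

/* hypothetical structure and function for the example */
struct job {
	int state;
};

void run_jobs(size_t nbthread)
{
	struct job *jobs;

	/* zeroed, 64-byte aligned array; the _safe variant may fall
	 * back to plain malloc() when alignment isn't supported.
	 */
	jobs = ha_aligned_zalloc_safe(64, nbthread * sizeof(*jobs));
	if (!jobs)
		return;

	/* ... use jobs[0..nbthread-1] ... */

	ha_aligned_free(jobs); /* not plain free(): keeps accounting balanced */
}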