From bc8e14ec238bacbb48a2b4e610b42ff5a95e2ae2 Mon Sep 17 00:00:00 2001
From: Maxime Henrion
Date: Tue, 9 Dec 2025 11:08:58 -0500
Subject: [PATCH] CLEANUP: use the automatic alignment feature

- Use the automatic alignment feature instead of hardcoding 64 all over
  the code.
- This also converts a few bare __attribute__((aligned(X))) to using
  the ALIGNED macro.
---
 dev/haring/haring.c             |  6 +++---
 include/haproxy/bug.h           |  2 +-
 include/haproxy/connection-t.h  |  2 +-
 include/haproxy/fd-t.h          |  2 +-
 include/haproxy/htx-t.h         |  2 +-
 include/haproxy/listener-t.h    |  2 +-
 include/haproxy/pattern-t.h     |  2 +-
 include/haproxy/pool-t.h        |  6 +++---
 include/haproxy/proxy-t.h       |  4 ++--
 include/haproxy/ring-t.h        |  8 ++++----
 include/haproxy/server-t.h      | 10 +++++-----
 include/haproxy/stick_table-t.h |  6 +++---
 include/haproxy/task.h          |  2 +-
 include/haproxy/thread-t.h      |  4 ++--
 src/clock.c                     |  2 +-
 src/hlua.c                      |  2 +-
 src/vars.c                      |  2 +-
 17 files changed, 32 insertions(+), 32 deletions(-)

diff --git a/dev/haring/haring.c b/dev/haring/haring.c
index 4dfdafab5..d9e43070c 100644
--- a/dev/haring/haring.c
+++ b/dev/haring/haring.c
@@ -59,9 +59,9 @@ struct ring_v2 {
 struct ring_v2a {
 	size_t size;       // storage size
 	size_t rsvd;       // header length (used for file-backed maps)
-	size_t tail __attribute__((aligned(64)));  // storage tail
-	size_t head __attribute__((aligned(64)));  // storage head
-	char area[0] __attribute__((aligned(64))); // storage area begins immediately here
+	size_t tail ALIGNED(64);  // storage tail
+	size_t head ALIGNED(64);  // storage head
+	char area[0] ALIGNED(64); // storage area begins immediately here
 };
 
 /* display the message and exit with the code */
diff --git a/include/haproxy/bug.h b/include/haproxy/bug.h
index f3ec08dfb..32db432fb 100644
--- a/include/haproxy/bug.h
+++ b/include/haproxy/bug.h
@@ -537,7 +537,7 @@ struct mem_stats {
 	size_t size;
 	struct ha_caller caller;
 	const void *extra;      // extra info specific to this call (e.g. pool ptr)
-} __attribute__((aligned(sizeof(void*))));
+} ALIGNED(sizeof(void*));
 
 #undef calloc
 #define calloc(x,y)  ({ \
diff --git a/include/haproxy/connection-t.h b/include/haproxy/connection-t.h
index 2435733c1..f364a6f12 100644
--- a/include/haproxy/connection-t.h
+++ b/include/haproxy/connection-t.h
@@ -796,7 +796,7 @@ struct idle_conns {
 	struct mt_list toremove_conns;
 	struct task *cleanup_task;
 	__decl_thread(HA_SPINLOCK_T idle_conns_lock);
-} THREAD_ALIGNED(64);
+} THREAD_ALIGNED();
 
 
 /* Termination events logs:
diff --git a/include/haproxy/fd-t.h b/include/haproxy/fd-t.h
index 38e5339f4..6abd763e6 100644
--- a/include/haproxy/fd-t.h
+++ b/include/haproxy/fd-t.h
@@ -202,7 +202,7 @@ struct fdtab {
 #ifdef DEBUG_FD
 	unsigned int event_count;  /* number of events reported */
 #endif
-} THREAD_ALIGNED(64);
+} THREAD_ALIGNED();
 
 /* polled mask, one bit per thread and per direction for each FD */
 struct polled_mask {
diff --git a/include/haproxy/htx-t.h b/include/haproxy/htx-t.h
index eecd0abad..2a9faf1a4 100644
--- a/include/haproxy/htx-t.h
+++ b/include/haproxy/htx-t.h
@@ -270,7 +270,7 @@ struct htx {
 	/* XXX 4 bytes unused */
 
 	/* Blocks representing the HTTP message itself */
-	char blocks[VAR_ARRAY] __attribute__((aligned(8)));
+	char blocks[VAR_ARRAY] ALIGNED(8);
 };
 
 #endif /* _HAPROXY_HTX_T_H */
diff --git a/include/haproxy/listener-t.h b/include/haproxy/listener-t.h
index c8f1e214b..5f17b34b8 100644
--- a/include/haproxy/listener-t.h
+++ b/include/haproxy/listener-t.h
@@ -309,7 +309,7 @@ struct bind_kw_list {
 struct accept_queue_ring {
 	uint32_t idx;             /* (head << 16) | tail */
 	struct tasklet *tasklet;  /* tasklet of the thread owning this ring */
-	struct connection *entry[ACCEPT_QUEUE_SIZE] __attribute((aligned(64)));
+	struct connection *entry[ACCEPT_QUEUE_SIZE] THREAD_ALIGNED();
 };
 
 
diff --git a/include/haproxy/pattern-t.h b/include/haproxy/pattern-t.h
index de1a308e1..a78c98443 100644
--- a/include/haproxy/pattern-t.h
+++ b/include/haproxy/pattern-t.h
@@ -116,7 +116,7 @@ struct pat_ref {
 	int unique_id;                /* Each pattern reference have unique id. */
 	unsigned long long revision;  /* updated for each update */
 	unsigned long long entry_cnt; /* the total number of entries */
-	THREAD_ALIGN(64);
+	THREAD_ALIGN();
 	__decl_thread(HA_RWLOCK_T lock); /* Lock used to protect pat ref elements */
 	event_hdl_sub_list e_subs;    /* event_hdl: pat_ref's subscribers list (atomically updated) */
 };
diff --git a/include/haproxy/pool-t.h b/include/haproxy/pool-t.h
index a9ef9e828..7f960f15e 100644
--- a/include/haproxy/pool-t.h
+++ b/include/haproxy/pool-t.h
@@ -63,7 +63,7 @@ struct pool_cache_head {
 	unsigned int tid;        /* thread id, for debugging only */
 	struct pool_head *pool;  /* assigned pool, for debugging only */
 	ulong fill_pattern;      /* pattern used to fill the area on free */
-} THREAD_ALIGNED(64);
+} THREAD_ALIGNED();
 
 /* This describes a pool registration, which is what was passed to
  * create_pool() and that might have been merged with an existing pool.
@@ -139,7 +139,7 @@ struct pool_head {
 	struct list regs;        /* registrations: alt names for this pool */
 
 	/* heavily read-write part */
-	THREAD_ALIGN(64);
+	THREAD_ALIGN();
 
 	/* these entries depend on the pointer value, they're used to reduce
 	 * the contention on fast-changing values. The alignment here is
@@ -148,7 +148,7 @@ struct pool_head {
 	 * just meant to shard elements and there are no per-free_list stats.
 	 */
 	struct {
-		THREAD_ALIGN(64);
+		THREAD_ALIGN();
 		struct pool_item *free_list; /* list of free shared objects */
 		unsigned int allocated;      /* how many chunks have been allocated */
 		unsigned int used;           /* how many chunks are currently in use */
diff --git a/include/haproxy/proxy-t.h b/include/haproxy/proxy-t.h
index f02946552..16d6b4b3f 100644
--- a/include/haproxy/proxy-t.h
+++ b/include/haproxy/proxy-t.h
@@ -304,7 +304,7 @@ struct error_snapshot {
 struct proxy_per_tgroup {
 	struct queue queue;
 	struct lbprm_per_tgrp lbprm;
-} THREAD_ALIGNED(64);
+} THREAD_ALIGNED();
 
 struct proxy {
 	enum obj_type obj_type;  /* object type == OBJ_TYPE_PROXY */
@@ -505,7 +505,7 @@ struct proxy {
 	EXTRA_COUNTERS(extra_counters_fe);
 	EXTRA_COUNTERS(extra_counters_be);
 
-	THREAD_ALIGN(64);
+	THREAD_ALIGN();
 	unsigned int queueslength;  /* Sum of the length of each queue */
 	int served;                 /* # of active sessions currently being served */
 	int totpend;                /* total number of pending connections on this instance (for stats) */
diff --git a/include/haproxy/ring-t.h b/include/haproxy/ring-t.h
index 7a9eeca4d..1d9d5b9dd 100644
--- a/include/haproxy/ring-t.h
+++ b/include/haproxy/ring-t.h
@@ -130,11 +130,11 @@ struct ring_wait_cell {
 struct ring_storage {
 	size_t size;  // storage size
 	size_t rsvd;  // header length (used for file-backed maps)
-	THREAD_ALIGN(64);
+	THREAD_ALIGN();
 	size_t tail;  // storage tail
-	THREAD_ALIGN(64);
+	THREAD_ALIGN();
 	size_t head;  // storage head
-	THREAD_ALIGN(64);
+	THREAD_ALIGN();
 	char area[0]; // storage area begins immediately here
 };
 
@@ -149,7 +149,7 @@ struct ring {
 
 	/* keep the queue in a separate cache line below */
 	struct {
-		THREAD_ALIGN(64);
+		THREAD_ALIGN();
 		struct ring_wait_cell *ptr;
 	} queue[RING_WAIT_QUEUES + 1]; // wait queue + 1 spacer
 };
diff --git a/include/haproxy/server-t.h b/include/haproxy/server-t.h
index 7a2a95400..8164cea8e 100644
--- a/include/haproxy/server-t.h
+++ b/include/haproxy/server-t.h
@@ -294,7 +294,7 @@ struct srv_per_tgroup {
 	struct eb_root *lb_tree;  /* For LB algos with split between thread groups, the tree to be used, for each group */
 	unsigned npos, lpos;      /* next and last positions in the LB tree, protected by LB lock */
 	unsigned rweight;         /* remainder of weight in the current LB tree */
-} THREAD_ALIGNED(64);
+} THREAD_ALIGNED();
 
 /* Configure the protocol selection for websocket */
 enum __attribute__((__packed__)) srv_ws_mode {
@@ -396,7 +396,7 @@ struct server {
 	/* The elements below may be changed on every single request by any
 	 * thread, and generally at the same time.
 	 */
-	THREAD_ALIGN(64);
+	THREAD_ALIGN();
 	struct eb32_node idle_node;    /* When to next do cleanup in the idle connections */
 	unsigned int curr_idle_conns;  /* Current number of orphan idling connections, both the idle and the safe lists */
 	unsigned int curr_idle_nb;     /* Current number of connections in the idle list */
@@ -414,7 +414,7 @@ struct server {
 	/* Element below are usd by LB algorithms and must be doable in
 	 * parallel to other threads reusing connections above.
 	 */
-	THREAD_ALIGN(64);
+	THREAD_ALIGN();
 	__decl_thread(HA_SPINLOCK_T lock); /* may enclose the proxy's lock, must not be taken under */
 	union {
 		struct eb32_node lb_node;  /* node used for tree-based load balancing */
@@ -428,7 +428,7 @@ struct server {
 	};
 
 	/* usually atomically updated by any thread during parsing or on end of request */
-	THREAD_ALIGN(64);
+	THREAD_ALIGN();
 	int cur_sess;            /* number of currently active sessions (including syn_sent) */
 	int served;              /* # of active sessions currently being served (ie not pending) */
 	int consecutive_errors;  /* current number of consecutive errors */
@@ -436,7 +436,7 @@ struct server {
 	struct be_counters counters;  /* statistics counters */
 
 	/* Below are some relatively stable settings, only changed under the lock */
-	THREAD_ALIGN(64);
+	THREAD_ALIGN();
 	struct eb_root *lb_tree;   /* we want to know in what tree the server is */
 	struct tree_occ *lb_nodes; /* lb_nodes_tot * struct tree_occ */
 
diff --git a/include/haproxy/stick_table-t.h b/include/haproxy/stick_table-t.h
index a8b906d10..f1784ba50 100644
--- a/include/haproxy/stick_table-t.h
+++ b/include/haproxy/stick_table-t.h
@@ -206,7 +206,7 @@ struct stktable {
 		void *ptr;  /* generic ptr to check if set or not */
 	} write_to;         /* updates received on the source table will also update write_to */
 
-	THREAD_ALIGN(64);
+	THREAD_ALIGN();
 
 	struct {
 		struct eb_root keys;  /* head of sticky session tree */
@@ -221,7 +221,7 @@ struct stktable {
 	unsigned int refcnt;   /* number of local peer over all peers sections attached to this table */
 	unsigned int current;  /* number of sticky sessions currently in table */
 
-	THREAD_ALIGN(64);
+	THREAD_ALIGN();
 	struct eb_root updates;     /* head of sticky updates sequence tree, uses updt_lock */
 	struct mt_list *pend_updts; /* list of updates to be added to the update sequence tree, one per thread-group */
 
@@ -229,7 +229,7 @@ struct stktable {
 	unsigned int localupdate; /* uses updt_lock */
 	struct tasklet *updt_task;/* tasklet responsible for pushing the pending updates into the tree */
 
-	THREAD_ALIGN(64);
+	THREAD_ALIGN();
 	/* this lock is heavily used and must be on its own cache line */
 	__decl_thread(HA_RWLOCK_T updt_lock);  /* lock protecting the updates part */
 
diff --git a/include/haproxy/task.h b/include/haproxy/task.h
index e56af6e99..115f6e794 100644
--- a/include/haproxy/task.h
+++ b/include/haproxy/task.h
@@ -91,7 +91,7 @@ extern struct pool_head *pool_head_task;
 extern struct pool_head *pool_head_tasklet;
 extern struct pool_head *pool_head_notification;
 
-__decl_thread(extern HA_RWLOCK_T wq_lock THREAD_ALIGNED(64));
+__decl_thread(extern HA_RWLOCK_T wq_lock THREAD_ALIGNED());
 
 void __tasklet_wakeup_on(struct tasklet *tl, int thr);
 struct list *__tasklet_wakeup_after(struct list *head, struct tasklet *tl);
diff --git a/include/haproxy/thread-t.h b/include/haproxy/thread-t.h
index 2c99a4cb9..41dc99f91 100644
--- a/include/haproxy/thread-t.h
+++ b/include/haproxy/thread-t.h
@@ -51,7 +51,7 @@
 
 /* declare a self-initializing spinlock, aligned on a cache line */
 #define __decl_aligned_spinlock(lock) \
-	HA_SPINLOCK_T (lock) __attribute__((aligned(64))) = 0;
+	HA_SPINLOCK_T (lock) ALIGNED(64) = 0;
 
 /* declare a self-initializing rwlock */
 #define __decl_rwlock(lock) \
@@ -59,7 +59,7 @@
 
 /* declare a self-initializing rwlock, aligned on a cache line */
 #define __decl_aligned_rwlock(lock) \
-	HA_RWLOCK_T (lock) __attribute__((aligned(64))) = 0;
+	HA_RWLOCK_T (lock) ALIGNED(64) = 0;
 
 #else /* !USE_THREAD */
 
diff --git a/src/clock.c b/src/clock.c
index 6dbbd0ff9..28268026c 100644
--- a/src/clock.c
+++ b/src/clock.c
@@ -35,7 +35,7 @@ volatile uint _global_now_ms; /* locally stored common mono
 volatile uint *global_now_ms;  /* common monotonic date in milliseconds (may wrap), may point to _global_now_ms or shared memory */
 
 /* when CLOCK_MONOTONIC is supported, the offset is applied from th_ctx->prev_mono_time instead */
-THREAD_ALIGNED(64) static llong now_offset; /* global offset between system time and global time in ns */
+THREAD_ALIGNED() static llong now_offset;   /* global offset between system time and global time in ns */
 
 THREAD_LOCAL ullong now_ns;  /* internal monotonic date derived from real clock, in ns (wraps every 585 yr) */
 THREAD_LOCAL uint now_ms;    /* internal monotonic date in milliseconds (may wrap) */
diff --git a/src/hlua.c b/src/hlua.c
index ec2a8ee7b..5109d3c04 100644
--- a/src/hlua.c
+++ b/src/hlua.c
@@ -676,7 +676,7 @@ struct hlua_mem_allocator {
 	size_t limit;
 };
 
-static struct hlua_mem_allocator hlua_global_allocator THREAD_ALIGNED(64);
+static struct hlua_mem_allocator hlua_global_allocator THREAD_ALIGNED();
 
 /* hlua event subscription */
 struct hlua_event_sub {
diff --git a/src/vars.c b/src/vars.c
index 878ad1a5e..1755bf8ad 100644
--- a/src/vars.c
+++ b/src/vars.c
@@ -27,7 +27,7 @@ DECLARE_STATIC_TYPED_POOL(var_pool, "vars", struct var);
 
 
 /* list of variables for the process scope. */
-struct vars proc_vars THREAD_ALIGNED(64);
+struct vars proc_vars THREAD_ALIGNED();
 
 /* This array of int contains the system limits per context. */
 static unsigned int var_global_limit = 0;
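
Note on the helpers this patch relies on: the conversion assumes ALIGNED(x)
takes an explicit byte count, while THREAD_ALIGNED() and THREAD_ALIGN() accept
an optional size, default to the cache-line size (64) when the parentheses are
left empty, and compile out entirely when USE_THREAD is not defined. The
authoritative definitions are the ones in the HAProxy headers; the snippet
below is only a rough sketch of one possible GCC/Clang implementation, and the
_ALIGN_2ND/_ALIGN_VAL helpers and struct example are illustrative names, not
taken from the tree.

/* Rough sketch only -- not the actual HAProxy definitions. Assumes GCC/Clang
 * (", ##__VA_ARGS__" comma deletion and zero-size members are GNU extensions)
 * and a 64-byte cache line.
 */

/* pick the caller's size when one is given, otherwise default to 64 */
#define _ALIGN_2ND(a, b, ...)  b
#define _ALIGN_VAL(...)        _ALIGN_2ND(0, ##__VA_ARGS__, 64)

/* explicit alignment, always honoured */
#define ALIGNED(x)             __attribute__((aligned(x)))

#ifdef USE_THREAD
/* align the current field, variable or type, defaulting to a cache line */
#define THREAD_ALIGNED(...)    __attribute__((aligned(_ALIGN_VAL(__VA_ARGS__))))
/* unnamed zero-size spacer: forces the next struct members onto a new,
 * suitably aligned cache line; may appear several times in one struct.
 */
#define THREAD_ALIGN(...)      union { } ALIGNED(_ALIGN_VAL(__VA_ARGS__))
#else
/* single-threaded builds need neither the padding nor the alignment */
#define THREAD_ALIGNED(...)
#define THREAD_ALIGN(...)
#endif

/* usage mirroring the patch: cold configuration stays packed, hot counters
 * get their own cache line, and the whole object is cache-line aligned.
 */
struct example {
	unsigned int cold_cfg;  /* written once at startup */
	THREAD_ALIGN();         /* next members start on a fresh cache line */
	unsigned int hot_cnt;   /* hammered concurrently by all threads */
} THREAD_ALIGNED();

The point of the empty-parentheses form is that callers no longer encode the
cache-line size themselves, so the default can be changed in a single place;
explicit sizes such as ALIGNED(8) or the mmap layout in haring.c keep their
hardcoded values because those are part of a fixed data layout rather than a
false-sharing workaround.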