mirror of https://git.haproxy.org/git/haproxy.git/
CLEANUP: use the automatic alignment feature
Use the automatic alignment feature instead of hardcoding 64 all over the code.

This also converts a few bare __attribute__((aligned(X))) to using the ALIGNED macro.
parent 74719dc457
commit bc8e14ec23
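For context, the macros this cleanup relies on can be pictured roughly as below. This is only an illustrative sketch, not the actual HAProxy headers: the THREAD_CACHE_LINE name, the zero-length-array padding trick and struct example are assumptions made for the example, and the real definitions (and their default alignment) may differ.

#include <stddef.h>

/* Illustrative sketch only -- the real definitions live in HAProxy's
 * compiler/thread headers.
 */
#define ALIGNED(x)  __attribute__((aligned(x)))   /* explicit alignment */

#define THREAD_CACHE_LINE 64                      /* assumed default */

/* unique padding member name, derived from the line number (assumed helper) */
#define THREAD_PAD_(l)  __pad_##l
#define THREAD_PAD(l)   THREAD_PAD_(l)

#ifdef USE_THREAD
/* align a whole object or struct on a cache line by default */
#  define THREAD_ALIGNED()  ALIGNED(THREAD_CACHE_LINE)
/* make the next struct member start on a fresh cache line */
#  define THREAD_ALIGN()    char THREAD_PAD(__LINE__)[0] ALIGNED(THREAD_CACHE_LINE)
#else
/* single-threaded build: cache-line padding only wastes space */
#  define THREAD_ALIGNED()
#  define THREAD_ALIGN()    char THREAD_PAD(__LINE__)[0]
#endif

/* hypothetical usage, mirroring the hunks below: callers no longer hardcode 64 */
struct example {
	size_t head;
	THREAD_ALIGN();       /* next member starts on its own cache line */
	size_t tail;
} THREAD_ALIGNED();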
@@ -59,9 +59,9 @@ struct ring_v2 {
 struct ring_v2a {
 size_t size; // storage size
 size_t rsvd; // header length (used for file-backed maps)
-size_t tail __attribute__((aligned(64))); // storage tail
-size_t head __attribute__((aligned(64))); // storage head
-char area[0] __attribute__((aligned(64))); // storage area begins immediately here
+size_t tail ALIGNED(64); // storage tail
+size_t head ALIGNED(64); // storage head
+char area[0] ALIGNED(64); // storage area begins immediately here
 };

 /* display the message and exit with the code */
@@ -537,7 +537,7 @@ struct mem_stats {
 size_t size;
 struct ha_caller caller;
 const void *extra; // extra info specific to this call (e.g. pool ptr)
-} __attribute__((aligned(sizeof(void*))));
+} ALIGNED(sizeof(void*));

 #undef calloc
 #define calloc(x,y) ({ \
@@ -796,7 +796,7 @@ struct idle_conns {
 struct mt_list toremove_conns;
 struct task *cleanup_task;
 __decl_thread(HA_SPINLOCK_T idle_conns_lock);
-} THREAD_ALIGNED(64);
+} THREAD_ALIGNED();


 /* Termination events logs:
@@ -202,7 +202,7 @@ struct fdtab {
 #ifdef DEBUG_FD
 unsigned int event_count; /* number of events reported */
 #endif
-} THREAD_ALIGNED(64);
+} THREAD_ALIGNED();

 /* polled mask, one bit per thread and per direction for each FD */
 struct polled_mask {
@@ -270,7 +270,7 @@ struct htx {
 /* XXX 4 bytes unused */

 /* Blocks representing the HTTP message itself */
-char blocks[VAR_ARRAY] __attribute__((aligned(8)));
+char blocks[VAR_ARRAY] ALIGNED(8);
 };

 #endif /* _HAPROXY_HTX_T_H */
@@ -309,7 +309,7 @@ struct bind_kw_list {
 struct accept_queue_ring {
 uint32_t idx; /* (head << 16) | tail */
 struct tasklet *tasklet; /* tasklet of the thread owning this ring */
-struct connection *entry[ACCEPT_QUEUE_SIZE] __attribute((aligned(64)));
+struct connection *entry[ACCEPT_QUEUE_SIZE] THREAD_ALIGNED();
 };


@@ -116,7 +116,7 @@ struct pat_ref {
 int unique_id; /* Each pattern reference have unique id. */
 unsigned long long revision; /* updated for each update */
 unsigned long long entry_cnt; /* the total number of entries */
-THREAD_ALIGN(64);
+THREAD_ALIGN();
 __decl_thread(HA_RWLOCK_T lock); /* Lock used to protect pat ref elements */
 event_hdl_sub_list e_subs; /* event_hdl: pat_ref's subscribers list (atomically updated) */
 };
@@ -63,7 +63,7 @@ struct pool_cache_head {
 unsigned int tid; /* thread id, for debugging only */
 struct pool_head *pool; /* assigned pool, for debugging only */
 ulong fill_pattern; /* pattern used to fill the area on free */
-} THREAD_ALIGNED(64);
+} THREAD_ALIGNED();

 /* This describes a pool registration, which is what was passed to
 * create_pool() and that might have been merged with an existing pool.
@@ -139,7 +139,7 @@ struct pool_head {
 struct list regs; /* registrations: alt names for this pool */

 /* heavily read-write part */
-THREAD_ALIGN(64);
+THREAD_ALIGN();

 /* these entries depend on the pointer value, they're used to reduce
 * the contention on fast-changing values. The alignment here is
@@ -148,7 +148,7 @@ struct pool_head {
 * just meant to shard elements and there are no per-free_list stats.
 */
 struct {
-THREAD_ALIGN(64);
+THREAD_ALIGN();
 struct pool_item *free_list; /* list of free shared objects */
 unsigned int allocated; /* how many chunks have been allocated */
 unsigned int used; /* how many chunks are currently in use */
@@ -304,7 +304,7 @@ struct error_snapshot {
 struct proxy_per_tgroup {
 struct queue queue;
 struct lbprm_per_tgrp lbprm;
-} THREAD_ALIGNED(64);
+} THREAD_ALIGNED();

 struct proxy {
 enum obj_type obj_type; /* object type == OBJ_TYPE_PROXY */
@@ -505,7 +505,7 @@ struct proxy {
 EXTRA_COUNTERS(extra_counters_fe);
 EXTRA_COUNTERS(extra_counters_be);

-THREAD_ALIGN(64);
+THREAD_ALIGN();
 unsigned int queueslength; /* Sum of the length of each queue */
 int served; /* # of active sessions currently being served */
 int totpend; /* total number of pending connections on this instance (for stats) */
@@ -130,11 +130,11 @@ struct ring_wait_cell {
 struct ring_storage {
 size_t size; // storage size
 size_t rsvd; // header length (used for file-backed maps)
-THREAD_ALIGN(64);
+THREAD_ALIGN();
 size_t tail; // storage tail
-THREAD_ALIGN(64);
+THREAD_ALIGN();
 size_t head; // storage head
-THREAD_ALIGN(64);
+THREAD_ALIGN();
 char area[0]; // storage area begins immediately here
 };

@@ -149,7 +149,7 @@ struct ring {

 /* keep the queue in a separate cache line below */
 struct {
-THREAD_ALIGN(64);
+THREAD_ALIGN();
 struct ring_wait_cell *ptr;
 } queue[RING_WAIT_QUEUES + 1]; // wait queue + 1 spacer
 };
@@ -294,7 +294,7 @@ struct srv_per_tgroup {
 struct eb_root *lb_tree; /* For LB algos with split between thread groups, the tree to be used, for each group */
 unsigned npos, lpos; /* next and last positions in the LB tree, protected by LB lock */
 unsigned rweight; /* remainder of weight in the current LB tree */
-} THREAD_ALIGNED(64);
+} THREAD_ALIGNED();

 /* Configure the protocol selection for websocket */
 enum __attribute__((__packed__)) srv_ws_mode {
@@ -396,7 +396,7 @@ struct server {
 /* The elements below may be changed on every single request by any
 * thread, and generally at the same time.
 */
-THREAD_ALIGN(64);
+THREAD_ALIGN();
 struct eb32_node idle_node; /* When to next do cleanup in the idle connections */
 unsigned int curr_idle_conns; /* Current number of orphan idling connections, both the idle and the safe lists */
 unsigned int curr_idle_nb; /* Current number of connections in the idle list */
@@ -414,7 +414,7 @@ struct server {
 /* Element below are usd by LB algorithms and must be doable in
 * parallel to other threads reusing connections above.
 */
-THREAD_ALIGN(64);
+THREAD_ALIGN();
 __decl_thread(HA_SPINLOCK_T lock); /* may enclose the proxy's lock, must not be taken under */
 union {
 struct eb32_node lb_node; /* node used for tree-based load balancing */
@@ -428,7 +428,7 @@ struct server {
 };

 /* usually atomically updated by any thread during parsing or on end of request */
-THREAD_ALIGN(64);
+THREAD_ALIGN();
 int cur_sess; /* number of currently active sessions (including syn_sent) */
 int served; /* # of active sessions currently being served (ie not pending) */
 int consecutive_errors; /* current number of consecutive errors */
@@ -436,7 +436,7 @@ struct server {
 struct be_counters counters; /* statistics counters */

 /* Below are some relatively stable settings, only changed under the lock */
-THREAD_ALIGN(64);
+THREAD_ALIGN();

 struct eb_root *lb_tree; /* we want to know in what tree the server is */
 struct tree_occ *lb_nodes; /* lb_nodes_tot * struct tree_occ */
@@ -206,7 +206,7 @@ struct stktable {
 void *ptr; /* generic ptr to check if set or not */
 } write_to; /* updates received on the source table will also update write_to */

-THREAD_ALIGN(64);
+THREAD_ALIGN();

 struct {
 struct eb_root keys; /* head of sticky session tree */
@@ -221,7 +221,7 @@ struct stktable {
 unsigned int refcnt; /* number of local peer over all peers sections
 attached to this table */
 unsigned int current; /* number of sticky sessions currently in table */
-THREAD_ALIGN(64);
+THREAD_ALIGN();

 struct eb_root updates; /* head of sticky updates sequence tree, uses updt_lock */
 struct mt_list *pend_updts; /* list of updates to be added to the update sequence tree, one per thread-group */
@@ -229,7 +229,7 @@ struct stktable {
 unsigned int localupdate; /* uses updt_lock */
 struct tasklet *updt_task;/* tasklet responsible for pushing the pending updates into the tree */

-THREAD_ALIGN(64);
+THREAD_ALIGN();
 /* this lock is heavily used and must be on its own cache line */
 __decl_thread(HA_RWLOCK_T updt_lock); /* lock protecting the updates part */

@@ -91,7 +91,7 @@ extern struct pool_head *pool_head_task;
 extern struct pool_head *pool_head_tasklet;
 extern struct pool_head *pool_head_notification;

-__decl_thread(extern HA_RWLOCK_T wq_lock THREAD_ALIGNED(64));
+__decl_thread(extern HA_RWLOCK_T wq_lock THREAD_ALIGNED());

 void __tasklet_wakeup_on(struct tasklet *tl, int thr);
 struct list *__tasklet_wakeup_after(struct list *head, struct tasklet *tl);
@@ -51,7 +51,7 @@

 /* declare a self-initializing spinlock, aligned on a cache line */
 #define __decl_aligned_spinlock(lock) \
-HA_SPINLOCK_T (lock) __attribute__((aligned(64))) = 0;
+HA_SPINLOCK_T (lock) ALIGNED(64) = 0;

 /* declare a self-initializing rwlock */
 #define __decl_rwlock(lock) \
@@ -59,7 +59,7 @@

 /* declare a self-initializing rwlock, aligned on a cache line */
 #define __decl_aligned_rwlock(lock) \
-HA_RWLOCK_T (lock) __attribute__((aligned(64))) = 0;
+HA_RWLOCK_T (lock) ALIGNED(64) = 0;

 #else /* !USE_THREAD */

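A hedged usage sketch for the aligned lock declaration macros changed above; everything besides the macro body is illustrative (the HA_SPINLOCK_T stand-in type and the lock name are made up):

/* Hypothetical, self-contained illustration. HA_SPINLOCK_T normally
 * comes from HAProxy's thread API; a plain unsigned long stands in
 * for it here, and "demo_lock" is a made-up name.
 */
#define ALIGNED(x) __attribute__((aligned(x)))
typedef unsigned long HA_SPINLOCK_T;

#define __decl_aligned_spinlock(lock) \
	HA_SPINLOCK_T (lock) ALIGNED(64) = 0;

__decl_aligned_spinlock(demo_lock)   /* lock sits on its own cache line */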
@@ -35,7 +35,7 @@ volatile uint _global_now_ms; /* locally stored common mono
 volatile uint *global_now_ms; /* common monotonic date in milliseconds (may wrap), may point to _global_now_ms or shared memory */

 /* when CLOCK_MONOTONIC is supported, the offset is applied from th_ctx->prev_mono_time instead */
-THREAD_ALIGNED(64) static llong now_offset; /* global offset between system time and global time in ns */
+THREAD_ALIGNED() static llong now_offset; /* global offset between system time and global time in ns */

 THREAD_LOCAL ullong now_ns; /* internal monotonic date derived from real clock, in ns (wraps every 585 yr) */
 THREAD_LOCAL uint now_ms; /* internal monotonic date in milliseconds (may wrap) */
@@ -676,7 +676,7 @@ struct hlua_mem_allocator {
 size_t limit;
 };

-static struct hlua_mem_allocator hlua_global_allocator THREAD_ALIGNED(64);
+static struct hlua_mem_allocator hlua_global_allocator THREAD_ALIGNED();

 /* hlua event subscription */
 struct hlua_event_sub {
@@ -27,7 +27,7 @@
 DECLARE_STATIC_TYPED_POOL(var_pool, "vars", struct var);

 /* list of variables for the process scope. */
-struct vars proc_vars THREAD_ALIGNED(64);
+struct vars proc_vars THREAD_ALIGNED();

 /* This array of int contains the system limits per context. */
 static unsigned int var_global_limit = 0;