mirror of
https://git.haproxy.org/git/haproxy.git/
synced 2025-08-07 07:37:02 +02:00
MEDIUM: init: use self-initializing spinlocks and rwlocks
This patch replaces a number of __decl_hathread() followed by HA_SPIN_INIT or HA_RWLOCK_INIT by the new __decl_spinlock() or __decl_rwlock(), which automatically registers the lock for initialization during the STG_LOCK init stage. A few static modifiers were lost in the process, but since they were not essential at all, it was not worth extending the API to provide such a variant.
This commit is contained in:
parent
90fa97b65e
commit
86abe44e42
@ -24,7 +24,7 @@ struct pool_head *pool_head_buffer;
|
||||
|
||||
/* list of objects waiting for at least one buffer */
|
||||
struct list buffer_wq = LIST_HEAD_INIT(buffer_wq);
|
||||
__decl_hathreads(HA_SPINLOCK_T __attribute__((aligned(64))) buffer_wq_lock);
|
||||
__decl_aligned_spinlock(buffer_wq_lock);
|
||||
|
||||
/* perform minimal intializations, report 0 in case of error, 1 if OK. */
|
||||
int init_buffer()
|
||||
@ -46,8 +46,6 @@ int init_buffer()
|
||||
if (global.tune.buf_limit)
|
||||
pool_head_buffer->limit = global.tune.buf_limit;
|
||||
|
||||
HA_SPIN_INIT(&buffer_wq_lock);
|
||||
|
||||
buffer = pool_refill_alloc(pool_head_buffer, pool_head_buffer->minavail - 1);
|
||||
if (!buffer)
|
||||
return 0;
|
||||
|
@ -1632,7 +1632,7 @@ static int connect_conn_chk(struct task *t)
|
||||
|
||||
static struct list pid_list = LIST_HEAD_INIT(pid_list);
|
||||
static struct pool_head *pool_head_pid_list;
|
||||
__decl_hathreads(HA_SPINLOCK_T pid_list_lock);
|
||||
__decl_spinlock(pid_list_lock);
|
||||
|
||||
void block_sigchld(void)
|
||||
{
|
||||
@ -1736,8 +1736,6 @@ static int init_pid_list(void)
|
||||
return 1;
|
||||
}
|
||||
|
||||
HA_SPIN_INIT(&pid_list_lock);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
|
@ -42,7 +42,7 @@
|
||||
|
||||
|
||||
#if defined(USE_ZLIB)
|
||||
__decl_hathreads(static HA_SPINLOCK_T comp_pool_lock);
|
||||
__decl_spinlock(comp_pool_lock);
|
||||
#endif
|
||||
|
||||
#ifdef USE_ZLIB
|
||||
@ -721,7 +721,6 @@ static void __comp_fetch_init(void)
|
||||
global.maxzlibmem = DEFAULT_MAXZLIBMEM * 1024U * 1024U;
|
||||
#endif
|
||||
#ifdef USE_ZLIB
|
||||
HA_SPIN_INIT(&comp_pool_lock);
|
||||
memprintf(&ptr, "Built with zlib version : " ZLIB_VERSION);
|
||||
memprintf(&ptr, "%s\nRunning on zlib version : %s", ptr, zlibVersion());
|
||||
#elif defined(USE_SLZ)
|
||||
|
@ -121,7 +121,7 @@
|
||||
* and RESET_SAFE_LJMP manipulates the Lua stack, so it will be careful
|
||||
* to set mutex around these functions.
|
||||
*/
|
||||
__decl_hathreads(HA_SPINLOCK_T hlua_global_lock);
|
||||
__decl_spinlock(hlua_global_lock);
|
||||
THREAD_LOCAL jmp_buf safe_ljmp_env;
|
||||
static int hlua_panic_safe(lua_State *L) { return 0; }
|
||||
static int hlua_panic_ljmp(lua_State *L) { longjmp(safe_ljmp_env, 1); }
|
||||
@ -7670,8 +7670,6 @@ void hlua_init(void)
|
||||
};
|
||||
#endif
|
||||
|
||||
HA_SPIN_INIT(&hlua_global_lock);
|
||||
|
||||
/* Initialise struct hlua and com signals pool */
|
||||
pool_head_hlua = create_pool("hlua", sizeof(struct hlua), MEM_F_SHARED);
|
||||
|
||||
|
@ -43,7 +43,7 @@
|
||||
#include <proto/task.h>
|
||||
|
||||
/* listner_queue lock (same for global and per proxy queues) */
|
||||
__decl_hathreads(static HA_SPINLOCK_T lq_lock);
|
||||
__decl_spinlock(lq_lock);
|
||||
|
||||
/* List head of all known bind keywords */
|
||||
static struct bind_kw_list bind_keywords = {
|
||||
@ -1047,12 +1047,6 @@ static struct bind_kw_list bind_kws = { "ALL", { }, {
|
||||
|
||||
INITCALL1(STG_REGISTER, bind_register_keywords, &bind_kws);
|
||||
|
||||
__attribute__((constructor))
|
||||
static void __listener_init(void)
|
||||
{
|
||||
HA_SPIN_INIT(&lq_lock);
|
||||
}
|
||||
|
||||
/*
|
||||
* Local variables:
|
||||
* c-indent-level: 8
|
||||
|
@ -155,7 +155,7 @@ static THREAD_LOCAL struct sample_data static_sample_data;
|
||||
struct list pattern_reference = LIST_HEAD_INIT(pattern_reference);
|
||||
|
||||
static struct lru64_head *pat_lru_tree;
|
||||
__decl_hathreads(HA_SPINLOCK_T pat_lru_tree_lock);
|
||||
__decl_spinlock(pat_lru_tree_lock);
|
||||
static unsigned long long pat_lru_seed;
|
||||
|
||||
/*
|
||||
@ -2696,7 +2696,6 @@ void pattern_finalize_config(void)
|
||||
pat_lru_seed = random();
|
||||
if (global.tune.pattern_cache) {
|
||||
pat_lru_tree = lru64_new(global.tune.pattern_cache);
|
||||
HA_SPIN_INIT(&pat_lru_tree_lock);
|
||||
}
|
||||
|
||||
list_for_each_entry(ref, &pattern_reference, list) {
|
||||
|
@ -23,7 +23,7 @@
|
||||
struct pool_head *pool_head_pipe = NULL;
|
||||
struct pipe *pipes_live = NULL; /* pipes which are still ready to use */
|
||||
|
||||
__decl_hathreads(HA_SPINLOCK_T pipes_lock); /* lock used to protect pipes list */
|
||||
__decl_spinlock(pipes_lock); /* lock used to protect pipes list */
|
||||
|
||||
int pipes_used = 0; /* # of pipes in use (2 fds each) */
|
||||
int pipes_free = 0; /* # of pipes unused */
|
||||
@ -32,9 +32,6 @@ int pipes_free = 0; /* # of pipes unused */
|
||||
static void init_pipe()
|
||||
{
|
||||
pool_head_pipe = create_pool("pipe", sizeof(struct pipe), MEM_F_SHARED);
|
||||
pipes_used = 0;
|
||||
pipes_free = 0;
|
||||
HA_SPIN_INIT(&pipes_lock);
|
||||
}
|
||||
|
||||
/* return a pre-allocated empty pipe. Try to allocate one if there isn't any
|
||||
|
@ -322,7 +322,7 @@ static char *x509v3_ext_values[X509V3_EXT_SIZE] = {
|
||||
static struct lru64_head *ssl_ctx_lru_tree = NULL;
|
||||
static unsigned int ssl_ctx_lru_seed = 0;
|
||||
static unsigned int ssl_ctx_serial;
|
||||
__decl_hathreads(static HA_RWLOCK_T ssl_ctx_lru_rwlock);
|
||||
__decl_rwlock(ssl_ctx_lru_rwlock);
|
||||
|
||||
#endif // SSL_CTRL_SET_TLSEXT_HOSTNAME
|
||||
|
||||
@ -4929,7 +4929,6 @@ ssl_sock_load_ca(struct bind_conf *bind_conf)
|
||||
#if (defined SSL_CTRL_SET_TLSEXT_HOSTNAME && !defined SSL_NO_GENERATE_CERTIFICATES)
|
||||
if (global_ssl.ctx_cache) {
|
||||
ssl_ctx_lru_tree = lru64_new(global_ssl.ctx_cache);
|
||||
HA_RWLOCK_INIT(&ssl_ctx_lru_rwlock);
|
||||
}
|
||||
ssl_ctx_lru_seed = (unsigned int)time(NULL);
|
||||
ssl_ctx_serial = now_ms;
|
||||
|
@ -65,7 +65,7 @@
|
||||
|
||||
struct pool_head *pool_head_stream;
|
||||
struct list streams;
|
||||
__decl_hathreads(HA_SPINLOCK_T streams_lock);
|
||||
__decl_spinlock(streams_lock);
|
||||
|
||||
/* List of all use-service keywords. */
|
||||
static struct list service_keywords = LIST_HEAD_INIT(service_keywords);
|
||||
@ -515,7 +515,6 @@ void stream_release_buffers(struct stream *s)
|
||||
int init_stream()
|
||||
{
|
||||
LIST_INIT(&streams);
|
||||
HA_SPIN_INIT(&streams_lock);
|
||||
pool_head_stream = create_pool("stream", sizeof(struct stream), MEM_F_SHARED);
|
||||
return pool_head_stream != NULL;
|
||||
}
|
||||
|
@ -44,8 +44,8 @@ unsigned int niced_tasks = 0; /* number of niced tasks in the run queue */
|
||||
THREAD_LOCAL struct task *curr_task = NULL; /* task currently running or NULL */
|
||||
THREAD_LOCAL struct eb32sc_node *rq_next = NULL; /* Next task to be potentially run */
|
||||
|
||||
__decl_hathreads(HA_SPINLOCK_T __attribute__((aligned(64))) rq_lock); /* spin lock related to run queue */
|
||||
__decl_hathreads(HA_SPINLOCK_T __attribute__((aligned(64))) wq_lock); /* spin lock related to wait queue */
|
||||
__decl_aligned_spinlock(rq_lock); /* spin lock related to run queue */
|
||||
__decl_aligned_spinlock(wq_lock); /* spin lock related to wait queue */
|
||||
|
||||
#ifdef USE_THREAD
|
||||
struct eb_root timers; /* sorted timers tree, global */
|
||||
@ -478,8 +478,6 @@ int init_task()
|
||||
memset(&timers, 0, sizeof(timers));
|
||||
memset(&rqueue, 0, sizeof(rqueue));
|
||||
#endif
|
||||
HA_SPIN_INIT(&wq_lock);
|
||||
HA_SPIN_INIT(&rq_lock);
|
||||
memset(&task_per_thread, 0, sizeof(task_per_thread));
|
||||
for (i = 0; i < MAX_THREADS; i++) {
|
||||
LIST_INIT(&task_per_thread[i].task_list);
|
||||
|
@ -34,8 +34,7 @@ static unsigned int var_sess_limit = 0;
|
||||
static unsigned int var_txn_limit = 0;
|
||||
static unsigned int var_reqres_limit = 0;
|
||||
|
||||
|
||||
__decl_hathreads(HA_RWLOCK_T var_names_rwlock);
|
||||
__decl_rwlock(var_names_rwlock);
|
||||
|
||||
/* This function adds or remove memory size from the accounting. The inner
|
||||
* pointers may be null when setting the outer ones only.
|
||||
@ -927,6 +926,4 @@ __attribute__((constructor))
|
||||
static void __vars_init(void)
|
||||
{
|
||||
var_pool = create_pool("vars", sizeof(struct var), MEM_F_SHARED);
|
||||
|
||||
HA_RWLOCK_INIT(&var_names_rwlock);
|
||||
}
|
||||
|
Loading…
Reference in New Issue
Block a user