From 86abe44e42fba96e8a46651f1e64a2e8a6d9f514 Mon Sep 17 00:00:00 2001 From: Willy Tarreau Date: Sun, 25 Nov 2018 20:12:18 +0100 Subject: [PATCH] MEDIUM: init: use self-initializing spinlocks and rwlocks This patch replaces a number of __decl_hathreads() followed by HA_SPIN_INIT or HA_RWLOCK_INIT by the new __decl_spinlock() or __decl_rwlock() which automatically registers the lock for initialization during the STG_LOCK init stage. A few static modifiers were lost in the process, but since they were not essential at all it was not worth extending the API to provide such a variant. --- src/buffer.c | 4 +--- src/checks.c | 4 +--- src/compression.c | 3 +-- src/hlua.c | 4 +--- src/listener.c | 8 +------- src/pattern.c | 3 +-- src/pipe.c | 5 +---- src/ssl_sock.c | 3 +-- src/stream.c | 3 +-- src/task.c | 6 ++---- src/vars.c | 5 +---- 11 files changed, 12 insertions(+), 36 deletions(-) diff --git a/src/buffer.c b/src/buffer.c index d6bd242d4..e55c5bdd2 100644 --- a/src/buffer.c +++ b/src/buffer.c @@ -24,7 +24,7 @@ struct pool_head *pool_head_buffer; /* list of objects waiting for at least one buffer */ struct list buffer_wq = LIST_HEAD_INIT(buffer_wq); -__decl_hathreads(HA_SPINLOCK_T __attribute__((aligned(64))) buffer_wq_lock); +__decl_aligned_spinlock(buffer_wq_lock); /* perform minimal intializations, report 0 in case of error, 1 if OK. 
*/ int init_buffer() @@ -46,8 +46,6 @@ int init_buffer() if (global.tune.buf_limit) pool_head_buffer->limit = global.tune.buf_limit; - HA_SPIN_INIT(&buffer_wq_lock); - buffer = pool_refill_alloc(pool_head_buffer, pool_head_buffer->minavail - 1); if (!buffer) return 0; diff --git a/src/checks.c b/src/checks.c index 4829a5446..53950932d 100644 --- a/src/checks.c +++ b/src/checks.c @@ -1632,7 +1632,7 @@ static int connect_conn_chk(struct task *t) static struct list pid_list = LIST_HEAD_INIT(pid_list); static struct pool_head *pool_head_pid_list; -__decl_hathreads(HA_SPINLOCK_T pid_list_lock); +__decl_spinlock(pid_list_lock); void block_sigchld(void) { @@ -1736,8 +1736,6 @@ static int init_pid_list(void) return 1; } - HA_SPIN_INIT(&pid_list_lock); - return 0; } diff --git a/src/compression.c b/src/compression.c index b725e3130..af44f4d6d 100644 --- a/src/compression.c +++ b/src/compression.c @@ -42,7 +42,7 @@ #if defined(USE_ZLIB) -__decl_hathreads(static HA_SPINLOCK_T comp_pool_lock); +__decl_spinlock(comp_pool_lock); #endif #ifdef USE_ZLIB @@ -721,7 +721,6 @@ static void __comp_fetch_init(void) global.maxzlibmem = DEFAULT_MAXZLIBMEM * 1024U * 1024U; #endif #ifdef USE_ZLIB - HA_SPIN_INIT(&comp_pool_lock); memprintf(&ptr, "Built with zlib version : " ZLIB_VERSION); memprintf(&ptr, "%s\nRunning on zlib version : %s", ptr, zlibVersion()); #elif defined(USE_SLZ) diff --git a/src/hlua.c b/src/hlua.c index d34aa798d..820ddf58a 100644 --- a/src/hlua.c +++ b/src/hlua.c @@ -121,7 +121,7 @@ * and RESET_SAFE_LJMP manipulates the Lua stack, so it will be careful * to set mutex around these functions. 
*/ -__decl_hathreads(HA_SPINLOCK_T hlua_global_lock); +__decl_spinlock(hlua_global_lock); THREAD_LOCAL jmp_buf safe_ljmp_env; static int hlua_panic_safe(lua_State *L) { return 0; } static int hlua_panic_ljmp(lua_State *L) { longjmp(safe_ljmp_env, 1); } @@ -7670,8 +7670,6 @@ void hlua_init(void) }; #endif - HA_SPIN_INIT(&hlua_global_lock); - /* Initialise struct hlua and com signals pool */ pool_head_hlua = create_pool("hlua", sizeof(struct hlua), MEM_F_SHARED); diff --git a/src/listener.c b/src/listener.c index 2a38cfd2b..4bcf2d8ac 100644 --- a/src/listener.c +++ b/src/listener.c @@ -43,7 +43,7 @@ #include /* listner_queue lock (same for global and per proxy queues) */ -__decl_hathreads(static HA_SPINLOCK_T lq_lock); +__decl_spinlock(lq_lock); /* List head of all known bind keywords */ static struct bind_kw_list bind_keywords = { @@ -1047,12 +1047,6 @@ static struct bind_kw_list bind_kws = { "ALL", { }, { INITCALL1(STG_REGISTER, bind_register_keywords, &bind_kws); -__attribute__((constructor)) -static void __listener_init(void) -{ - HA_SPIN_INIT(&lq_lock); -} - /* * Local variables: * c-indent-level: 8 diff --git a/src/pattern.c b/src/pattern.c index 6b00385f0..93cdede11 100644 --- a/src/pattern.c +++ b/src/pattern.c @@ -155,7 +155,7 @@ static THREAD_LOCAL struct sample_data static_sample_data; struct list pattern_reference = LIST_HEAD_INIT(pattern_reference); static struct lru64_head *pat_lru_tree; -__decl_hathreads(HA_SPINLOCK_T pat_lru_tree_lock); +__decl_spinlock(pat_lru_tree_lock); static unsigned long long pat_lru_seed; /* @@ -2696,7 +2696,6 @@ void pattern_finalize_config(void) pat_lru_seed = random(); if (global.tune.pattern_cache) { pat_lru_tree = lru64_new(global.tune.pattern_cache); - HA_SPIN_INIT(&pat_lru_tree_lock); } list_for_each_entry(ref, &pattern_reference, list) { diff --git a/src/pipe.c b/src/pipe.c index a3d7ccd18..a82283a93 100644 --- a/src/pipe.c +++ b/src/pipe.c @@ -23,7 +23,7 @@ struct pool_head *pool_head_pipe = NULL; struct pipe 
*pipes_live = NULL; /* pipes which are still ready to use */ -__decl_hathreads(HA_SPINLOCK_T pipes_lock); /* lock used to protect pipes list */ +__decl_spinlock(pipes_lock); /* lock used to protect pipes list */ int pipes_used = 0; /* # of pipes in use (2 fds each) */ int pipes_free = 0; /* # of pipes unused */ @@ -32,9 +32,6 @@ int pipes_free = 0; /* # of pipes unused */ static void init_pipe() { pool_head_pipe = create_pool("pipe", sizeof(struct pipe), MEM_F_SHARED); - pipes_used = 0; - pipes_free = 0; - HA_SPIN_INIT(&pipes_lock); } /* return a pre-allocated empty pipe. Try to allocate one if there isn't any diff --git a/src/ssl_sock.c b/src/ssl_sock.c index 174ed1f95..3be4b2aed 100644 --- a/src/ssl_sock.c +++ b/src/ssl_sock.c @@ -322,7 +322,7 @@ static char *x509v3_ext_values[X509V3_EXT_SIZE] = { static struct lru64_head *ssl_ctx_lru_tree = NULL; static unsigned int ssl_ctx_lru_seed = 0; static unsigned int ssl_ctx_serial; -__decl_hathreads(static HA_RWLOCK_T ssl_ctx_lru_rwlock); +__decl_rwlock(ssl_ctx_lru_rwlock); #endif // SSL_CTRL_SET_TLSEXT_HOSTNAME @@ -4929,7 +4929,6 @@ ssl_sock_load_ca(struct bind_conf *bind_conf) #if (defined SSL_CTRL_SET_TLSEXT_HOSTNAME && !defined SSL_NO_GENERATE_CERTIFICATES) if (global_ssl.ctx_cache) { ssl_ctx_lru_tree = lru64_new(global_ssl.ctx_cache); - HA_RWLOCK_INIT(&ssl_ctx_lru_rwlock); } ssl_ctx_lru_seed = (unsigned int)time(NULL); ssl_ctx_serial = now_ms; diff --git a/src/stream.c b/src/stream.c index 31146d27f..6b097c325 100644 --- a/src/stream.c +++ b/src/stream.c @@ -65,7 +65,7 @@ struct pool_head *pool_head_stream; struct list streams; -__decl_hathreads(HA_SPINLOCK_T streams_lock); +__decl_spinlock(streams_lock); /* List of all use-service keywords. 
*/ static struct list service_keywords = LIST_HEAD_INIT(service_keywords); @@ -515,7 +515,6 @@ void stream_release_buffers(struct stream *s) int init_stream() { LIST_INIT(&streams); - HA_SPIN_INIT(&streams_lock); pool_head_stream = create_pool("stream", sizeof(struct stream), MEM_F_SHARED); return pool_head_stream != NULL; } diff --git a/src/task.c b/src/task.c index 5c5e10438..ef1db82b6 100644 --- a/src/task.c +++ b/src/task.c @@ -44,8 +44,8 @@ unsigned int niced_tasks = 0; /* number of niced tasks in the run queue */ THREAD_LOCAL struct task *curr_task = NULL; /* task currently running or NULL */ THREAD_LOCAL struct eb32sc_node *rq_next = NULL; /* Next task to be potentially run */ -__decl_hathreads(HA_SPINLOCK_T __attribute__((aligned(64))) rq_lock); /* spin lock related to run queue */ -__decl_hathreads(HA_SPINLOCK_T __attribute__((aligned(64))) wq_lock); /* spin lock related to wait queue */ +__decl_aligned_spinlock(rq_lock); /* spin lock related to run queue */ +__decl_aligned_spinlock(wq_lock); /* spin lock related to wait queue */ #ifdef USE_THREAD struct eb_root timers; /* sorted timers tree, global */ @@ -478,8 +478,6 @@ int init_task() memset(&timers, 0, sizeof(timers)); memset(&rqueue, 0, sizeof(rqueue)); #endif - HA_SPIN_INIT(&wq_lock); - HA_SPIN_INIT(&rq_lock); memset(&task_per_thread, 0, sizeof(task_per_thread)); for (i = 0; i < MAX_THREADS; i++) { LIST_INIT(&task_per_thread[i].task_list); diff --git a/src/vars.c b/src/vars.c index e44a78600..2bc46b164 100644 --- a/src/vars.c +++ b/src/vars.c @@ -34,8 +34,7 @@ static unsigned int var_sess_limit = 0; static unsigned int var_txn_limit = 0; static unsigned int var_reqres_limit = 0; - -__decl_hathreads(HA_RWLOCK_T var_names_rwlock); +__decl_rwlock(var_names_rwlock); /* This function adds or remove memory size from the accounting. The inner * pointers may be null when setting the outer ones only. 
@@ -927,6 +926,4 @@ __attribute__((constructor)) static void __vars_init(void) { var_pool = create_pool("vars", sizeof(struct var), MEM_F_SHARED); - - HA_RWLOCK_INIT(&var_names_rwlock); }