MEDIUM: task: replace the global rq_lock with a per-rq one

There's no point having a global rq_lock now that we have one shared RQ
per thread; let's have one lock per runqueue instead.
This commit is contained in:
Willy Tarreau 2022-06-16 16:58:17 +02:00
parent 6f78038d72
commit b17dd6cc19
3 changed files with 8 additions and 8 deletions

View File

@@ -101,7 +101,6 @@ extern struct pool_head *pool_head_notification;
extern struct eb_root timers; /* sorted timers tree, global */
#endif
__decl_thread(extern HA_SPINLOCK_T rq_lock); /* spin lock related to run queue */
__decl_thread(extern HA_RWLOCK_T wq_lock); /* RW lock related to the wait queue */
void __tasklet_wakeup_on(struct tasklet *tl, int thr);

View File

@@ -25,6 +25,7 @@
#include <import/ebtree-t.h>
#include <haproxy/api-t.h>
#include <haproxy/thread-t.h>
/* tasklet classes */
enum {
@@ -104,6 +105,7 @@ struct thread_ctx {
uint64_t prev_mono_time; /* previous system wide monotonic time */
struct eb_root rqueue_shared; /* run queue fed by other threads */
__decl_thread(HA_SPINLOCK_T rqsh_lock); /* lock protecting the shared runqueue */
ALWAYS_ALIGN(128);
};

View File

@@ -38,7 +38,6 @@ DECLARE_POOL(pool_head_notification, "notification", sizeof(struct notification)
volatile unsigned long global_tasks_mask = 0; /* Mask of threads with tasks in the global runqueue */
unsigned int niced_tasks = 0; /* number of niced tasks in the run queue */
__decl_aligned_spinlock(rq_lock); /* spin lock related to run queue */
__decl_aligned_rwlock(wq_lock); /* RW lock related to the wait queue */
#ifdef USE_THREAD
@@ -235,7 +234,7 @@ void __task_wakeup(struct task *t)
root = &ha_thread_ctx[thr].rqueue_shared;
_HA_ATOMIC_INC(&grq_total);
HA_SPIN_LOCK(TASK_RQ_LOCK, &rq_lock);
HA_SPIN_LOCK(TASK_RQ_LOCK, &ha_thread_ctx[thr].rqsh_lock);
if (t->tid < 0)
global_tasks_mask = all_threads_mask;
@@ -265,7 +264,7 @@ void __task_wakeup(struct task *t)
#ifdef USE_THREAD
if (thr != tid) {
HA_SPIN_UNLOCK(TASK_RQ_LOCK, &rq_lock);
HA_SPIN_UNLOCK(TASK_RQ_LOCK, &ha_thread_ctx[thr].rqsh_lock);
/* If all threads that are supposed to handle this task are sleeping,
* wake one.
@@ -835,13 +834,13 @@ void process_runnable_tasks()
while (lpicked + gpicked < budget) {
if ((global_tasks_mask & tid_bit) && !grq) {
#ifdef USE_THREAD
HA_SPIN_LOCK(TASK_RQ_LOCK, &rq_lock);
HA_SPIN_LOCK(TASK_RQ_LOCK, &th_ctx->rqsh_lock);
grq = eb32sc_lookup_ge(&th_ctx->rqueue_shared, _HA_ATOMIC_LOAD(&tt->rqueue_ticks) - TIMER_LOOK_BACK, tid_bit);
if (unlikely(!grq)) {
grq = eb32sc_first(&th_ctx->rqueue_shared, tid_bit);
if (!grq) {
global_tasks_mask &= ~tid_bit;
HA_SPIN_UNLOCK(TASK_RQ_LOCK, &rq_lock);
HA_SPIN_UNLOCK(TASK_RQ_LOCK, &th_ctx->rqsh_lock);
}
}
#endif
@@ -876,7 +875,7 @@ void process_runnable_tasks()
grq = eb32sc_first(&th_ctx->rqueue_shared, tid_bit);
if (!grq) {
global_tasks_mask &= ~tid_bit;
HA_SPIN_UNLOCK(TASK_RQ_LOCK, &rq_lock);
HA_SPIN_UNLOCK(TASK_RQ_LOCK, &th_ctx->rqsh_lock);
}
}
gpicked++;
@@ -891,7 +890,7 @@ void process_runnable_tasks()
/* release the rqueue lock */
if (grq) {
HA_SPIN_UNLOCK(TASK_RQ_LOCK, &rq_lock);
HA_SPIN_UNLOCK(TASK_RQ_LOCK, &th_ctx->rqsh_lock);
grq = NULL;
}