MINOR: task: replace global_tasks_mask with a check for tree's emptiness

This bit field used to be a per-thread cache of the result of the last
lookup for the presence of a task for each thread in the shared run
queue. Now that each thread has its own shared run queue, a simple
emptiness test on that tree is sufficient to decide whether or not it
holds a task for the current thread. Let's just remove this mask.
Author: Willy Tarreau
Date:   2022-06-16 15:59:36 +02:00
Parent: da195e8aab
Commit: c958c70ec8
3 changed files with 8 additions and 19 deletions
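
To make the idea concrete, here is a minimal standalone sketch (plain C, not HAProxy code) contrasting the removed bitmask test with the new per-thread emptiness test. thread_ctx, shared_rqueue_entries and MAX_THREADS are hypothetical stand-ins for th_ctx, the rqueue_shared ebtree and the configured thread count.

/* Standalone sketch, not HAProxy code: the old design kept a global bit
 * field (one bit per thread) saying "the shared run queue may hold a task
 * for this thread"; the new design simply checks whether the thread's own
 * shared tree is empty. */
#include <stdbool.h>
#include <stdio.h>

#define MAX_THREADS 4

struct thread_ctx {
	int shared_rqueue_entries;   /* stands in for the per-thread shared ebtree */
};

static struct thread_ctx threads[MAX_THREADS];
static unsigned long global_tasks_mask;  /* old design: one bit per thread */

/* old check: consult the separately maintained bit field */
static bool shared_has_task_old(int tid)
{
	return !!(global_tasks_mask & (1UL << tid));
}

/* new check: the shared run queue is per-thread, so emptiness is enough */
static bool shared_has_task_new(int tid)
{
	return threads[tid].shared_rqueue_entries != 0;
}

int main(void)
{
	/* queue a "task" for thread 2: the new check needs nothing else,
	 * while the old one also had to keep the mask in sync */
	threads[2].shared_rqueue_entries++;
	global_tasks_mask |= 1UL << 2;

	for (int tid = 0; tid < MAX_THREADS; tid++)
		printf("tid=%d old=%d new=%d\n", tid,
		       shared_has_task_old(tid), shared_has_task_new(tid));
	return 0;
}

The gain is that nothing has to keep global_tasks_mask consistent with the tree any more: the tree itself already reflects every enqueue and every emptying, which is exactly the bookkeeping removed in __task_wakeup() and process_runnable_tasks() below.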


@@ -89,7 +89,6 @@
 /* a few exported variables */
-extern volatile unsigned long global_tasks_mask; /* Mask of threads with tasks in the global runqueue */
 extern unsigned int niced_tasks;  /* number of niced tasks in the run queue */
 extern struct pool_head *pool_head_task;
@@ -182,8 +181,8 @@ static inline int task_in_wq(struct task *t)
 /* returns true if the current thread has some work to do */
 static inline int thread_has_tasks(void)
 {
-	return ((int)!!(global_tasks_mask & tid_bit) |
-	        (int)!eb_is_empty(&th_ctx->rqueue) |
+	return ((int)!eb_is_empty(&th_ctx->rqueue) |
+	        (int)!eb_is_empty(&th_ctx->rqueue_shared) |
 	        (int)!!th_ctx->tl_class_mask |
 	        (int)!MT_LIST_ISEMPTY(&th_ctx->shared_tasklet_list));
 }


@@ -168,7 +168,7 @@ void ha_thread_dump(struct buffer *buf, int thr, int calling_tid)
 	              (thr == calling_tid) ? '*' : ' ', stuck ? '>' : ' ', thr + 1,
 	              ha_get_pthread_id(thr),
 	              thread_has_tasks(),
-	              !!(global_tasks_mask & thr_bit),
+	              !eb_is_empty(&ha_thread_ctx[thr].rqueue_shared),
 	              !eb_is_empty(&ha_thread_ctx[thr].timers),
 	              !eb_is_empty(&ha_thread_ctx[thr].rqueue),
 	              !(LIST_ISEMPTY(&ha_thread_ctx[thr].tasklets[TL_URGENT]) &&


@@ -35,7 +35,6 @@ DECLARE_POOL(pool_head_tasklet, "tasklet", sizeof(struct tasklet));
  */
 DECLARE_POOL(pool_head_notification, "notification", sizeof(struct notification));
 
-volatile unsigned long global_tasks_mask = 0; /* Mask of threads with tasks in the global runqueue */
 unsigned int niced_tasks = 0;  /* number of niced tasks in the run queue */
 
 __decl_aligned_rwlock(wq_lock);   /* RW lock related to the wait queue */
@@ -235,10 +234,6 @@ void __task_wakeup(struct task *t)
 		_HA_ATOMIC_INC(&ha_thread_ctx[thr].rq_total);
 		HA_SPIN_LOCK(TASK_RQ_LOCK, &ha_thread_ctx[thr].rqsh_lock);
-		if (t->tid < 0)
-			global_tasks_mask = all_threads_mask;
-		else
-			global_tasks_mask |= 1UL << thr;
 		t->rq.key = _HA_ATOMIC_ADD_FETCH(&ha_thread_ctx[thr].rqueue_ticks, 1);
 		__ha_barrier_store();
 	} else
@@ -562,8 +557,7 @@ unsigned int run_tasks_from_lists(unsigned int budgets[])
 		if (unlikely(queue > TL_NORMAL &&
 			     budget_mask & (1 << TL_NORMAL) &&
-			     (!eb_is_empty(&th_ctx->rqueue) ||
-			      (global_tasks_mask & tid_bit)))) {
+			     (!eb_is_empty(&th_ctx->rqueue) || !eb_is_empty(&th_ctx->rqueue_shared)))) {
 			/* a task was woken up by a bulk tasklet or another thread */
 			break;
 		}
@@ -784,7 +778,7 @@ void process_runnable_tasks()
 	/* normal tasklets list gets a default weight of ~37% */
 	if ((tt->tl_class_mask & (1 << TL_NORMAL)) ||
-	    !eb_is_empty(&th_ctx->rqueue) || (global_tasks_mask & tid_bit))
+	    !eb_is_empty(&th_ctx->rqueue) || !eb_is_empty(&th_ctx->rqueue_shared))
 		max[TL_NORMAL] = default_weights[TL_NORMAL];
 
 	/* bulk tasklets list gets a default weight of ~13% */
@@ -831,16 +825,14 @@ void process_runnable_tasks()
 		lpicked = gpicked = 0;
 		budget = max[TL_NORMAL] - tt->tasks_in_list;
 		while (lpicked + gpicked < budget) {
-			if ((global_tasks_mask & tid_bit) && !grq) {
+			if (!eb_is_empty(&th_ctx->rqueue_shared) && !grq) {
 #ifdef USE_THREAD
 				HA_SPIN_LOCK(TASK_RQ_LOCK, &th_ctx->rqsh_lock);
 				grq = eb32sc_lookup_ge(&th_ctx->rqueue_shared, _HA_ATOMIC_LOAD(&tt->rqueue_ticks) - TIMER_LOOK_BACK, tid_bit);
 				if (unlikely(!grq)) {
 					grq = eb32sc_first(&th_ctx->rqueue_shared, tid_bit);
-					if (!grq) {
-						global_tasks_mask &= ~tid_bit;
+					if (!grq)
 						HA_SPIN_UNLOCK(TASK_RQ_LOCK, &th_ctx->rqsh_lock);
-					}
 				}
 #endif
 			}
@@ -872,10 +864,8 @@ void process_runnable_tasks()
 				if (unlikely(!grq)) {
 					grq = eb32sc_first(&th_ctx->rqueue_shared, tid_bit);
-					if (!grq) {
-						global_tasks_mask &= ~tid_bit;
+					if (!grq)
 						HA_SPIN_UNLOCK(TASK_RQ_LOCK, &th_ctx->rqsh_lock);
-					}
 				}
 				gpicked++;
 			}