MINOR: task: make grq_total atomic to move it outside of the grq_lock

Instead of decrementing grq_total once per task picked from the global
run queue, let's do it a single time after the loop, as we do for the
other counters. This simplifies the code everywhere. It is not expected
to bring a noticeable improvement, however, since global tasks tend to
be less common nowadays.
Willy Tarreau 2021-02-25 07:51:18 +01:00
parent c03fbeb358
commit 45499c56d3
2 changed files with 9 additions and 8 deletions
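
To make the pattern concrete, here is a minimal standalone sketch of the same idea, using the GCC/Clang __atomic builtins in place of HAProxy's _HA_ATOMIC_* macros (all names below, such as queue, q_lock, push and pick_batch, are illustrative and not from the HAProxy source). The shared structure stays protected by the lock, while the element counter becomes atomic, so it can be incremented before the lock is taken on insertion and decremented once per batch after the pick loop:

/* build: cc -pthread sketch.c */
#include <pthread.h>
#include <stdio.h>

#define QSZ 64

static int queue[QSZ];             /* stand-in for the run queue tree */
static int head, tail;             /* both protected by q_lock */
static unsigned int total;         /* element count, updated atomically */
static pthread_mutex_t q_lock = PTHREAD_MUTEX_INITIALIZER;

/* insertion: the counter no longer needs to be updated inside the
 * critical section, mirroring the __task_wakeup() hunk below */
static void push(int v)
{
	__atomic_add_fetch(&total, 1, __ATOMIC_SEQ_CST);
	pthread_mutex_lock(&q_lock);
	queue[tail++ % QSZ] = v;
	pthread_mutex_unlock(&q_lock);
}

/* batched removal: count picks locally during the loop, then perform a
 * single atomic subtraction, mirroring process_runnable_tasks() below */
static int pick_batch(int *out, int budget)
{
	int picked = 0;

	pthread_mutex_lock(&q_lock);
	while (picked < budget && head != tail)
		out[picked++] = queue[head++ % QSZ];
	pthread_mutex_unlock(&q_lock);

	if (picked)
		__atomic_sub_fetch(&total, picked, __ATOMIC_SEQ_CST);
	return picked;
}

int main(void)
{
	int buf[8];

	for (int i = 0; i < 5; i++)
		push(i);
	printf("picked=%d remaining=%u\n", pick_batch(buf, 8),
	       __atomic_load_n(&total, __ATOMIC_SEQ_CST));
	return 0;
}

The tradeoff is that total may transiently disagree with what a lock holder sees (it is incremented before the element is actually queued, and decremented only after a batch has been extracted). That is acceptable for a counter feeding statistics and scheduling heuristics rather than correctness decisions, which is the case for grq_total.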

include/haproxy/task.h

@@ -88,7 +88,7 @@
 /* a few exported variables */
 extern volatile unsigned long global_tasks_mask; /* Mask of threads with tasks in the global runqueue */
-extern unsigned int grq_total;    /* total number of entries in the global run queue */
+extern unsigned int grq_total;    /* total number of entries in the global run queue, atomic */
 extern unsigned int niced_tasks;  /* number of niced tasks in the run queue */
 extern struct pool_head *pool_head_task;
 extern struct pool_head *pool_head_tasklet;
@@ -324,8 +324,6 @@ static inline struct task *task_unlink_rq(struct task *t)
 	if (likely(task_in_rq(t))) {
 		eb32sc_delete(&t->rq);
-		if (is_global)
-			grq_total--;
 		done = 1;
 	}
@@ -333,8 +331,10 @@ static inline struct task *task_unlink_rq(struct task *t)
 		HA_SPIN_UNLOCK(TASK_RQ_LOCK, &rq_lock);
 
 	if (done) {
-		if (is_global)
+		if (is_global) {
 			_HA_ATOMIC_AND(&t->state, ~TASK_GLOBAL);
+			_HA_ATOMIC_SUB(&grq_total, 1);
+		}
 		else
 			_HA_ATOMIC_SUB(&sched->rq_total, 1);
 		if (t->nice)

src/task.c

@@ -46,7 +46,7 @@ __decl_aligned_rwlock(wq_lock); /* RW lock related to the wait queue */
 #ifdef USE_THREAD
 struct eb_root timers;      /* sorted timers tree, global, accessed under wq_lock */
 struct eb_root rqueue;      /* tree constituting the global run queue, accessed under rq_lock */
-unsigned int grq_total;     /* total number of entries in the global run queue, use grq_lock */
+unsigned int grq_total;     /* total number of entries in the global run queue, atomic */
 static unsigned int global_rqueue_ticks;  /* insertion count in the grq, use rq_lock */
 #endif
@@ -159,10 +159,10 @@ void __task_wakeup(struct task *t)
 	if (t->thread_mask != tid_bit && global.nbthread != 1) {
 		root = &rqueue;
+		_HA_ATOMIC_ADD(&grq_total, 1);
 		HA_SPIN_LOCK(TASK_RQ_LOCK, &rq_lock);
 		global_tasks_mask |= t->thread_mask;
-		grq_total++;
 		t->rq.key = ++global_rqueue_ticks;
 		__ha_barrier_store();
 	} else
@@ -708,7 +708,6 @@ void process_runnable_tasks()
 		else {
 			t = eb32sc_entry(grq, struct task, rq);
 			grq = eb32sc_next(grq, tid_bit);
-			grq_total--;
 			_HA_ATOMIC_AND(&t->state, ~TASK_GLOBAL);
 			eb32sc_delete(&t->rq);
@@ -738,8 +737,10 @@
 	if (lpicked + gpicked) {
 		tt->tl_class_mask |= 1 << TL_NORMAL;
 		_HA_ATOMIC_ADD(&tt->tasks_in_list, lpicked + gpicked);
-		if (gpicked)
+		if (gpicked) {
+			_HA_ATOMIC_SUB(&grq_total, gpicked);
 			_HA_ATOMIC_ADD(&tt->rq_total, gpicked);
+		}
 		activity[tid].tasksw += lpicked + gpicked;
 	}