MINOR: task: split the counts of local and global tasks picked

In process_runnable_tasks() we still call __task_unlink_rq() to pick a
task, and this function has to guess which run queue the task comes
from and which counter to update, even though the caller's context
already knows both. Worse, the number of local tasks is decremented
and then re-credited, doubling the operations. To avoid this we first
need to keep separate counters for the local and global tasks that
were picked, which is what this patch does.
Willy Tarreau 2021-02-25 07:09:08 +01:00
parent baf2273345
commit e7923c1d22
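
The counting pattern described in the message above can be illustrated
with a minimal standalone C sketch: local and global picks are tallied
in separate counters so the shared accounting is credited exactly once
per batch. This is only an illustration, not HAProxy code; the toy
queues and the pick_local()/pick_global() helpers are hypothetical.

/* Not HAProxy code: local_queue/global_queue, pick_local() and
 * pick_global() are toy stand-ins; only the split lpicked/gpicked
 * accounting mirrors the patch. Build with: cc -std=c11 sketch.c */
#include <stdatomic.h>
#include <stdio.h>

static int local_queue[4]  = { 1, 2, 3, 4 };  /* stand-in for the thread-local run queue */
static int global_queue[4] = { 5, 6, 7, 8 };  /* stand-in for the shared run queue */
static int local_head, global_head;

static atomic_uint tasks_in_list;  /* shared accounting, credited once per batch */
static atomic_uint rq_total;

static int pick_local(void)  { return local_head  < 4 ? local_queue[local_head++]   : 0; }
static int pick_global(void) { return global_head < 4 ? global_queue[global_head++] : 0; }

/* Pick up to <budget> tasks, preferring the local queue. The caller knows
 * which queue each task came from, so it bumps the matching counter itself
 * instead of letting an unlink helper guess and decrement/re-credit. */
static void pick_batch(unsigned budget)
{
	unsigned lpicked = 0, gpicked = 0;

	while (lpicked + gpicked < budget) {
		int t = pick_local();
		if (t) {
			lpicked++;
		} else {
			t = pick_global();
			if (!t)
				break;          /* both queues empty */
			gpicked++;
		}
		/* the real code would append <t> to the local task list here */
	}

	/* credit the shared counters exactly once for the whole batch */
	if (lpicked + gpicked) {
		atomic_fetch_add(&tasks_in_list, lpicked + gpicked);
		atomic_fetch_add(&rq_total, lpicked + gpicked);
		printf("picked %u local, %u global\n", lpicked, gpicked);
	}
}

int main(void)
{
	pick_batch(6);   /* prints "picked 4 local, 2 global" */
	return 0;
}

Running it prints "picked 4 local, 2 global"; the split keeps each
source's count available to the caller while still letting it credit
the shared counters in a single pass, as the hunks below do with
lpicked + gpicked.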

@@ -617,7 +617,7 @@ void process_runnable_tasks()
 	struct mt_list *tmp_list;
 	unsigned int queue;
 	int max_processed;
-	int picked;
+	int lpicked, gpicked;
 	int budget;
 
 	ti->flags &= ~TI_FL_STUCK; // this thread is still running
@@ -668,9 +668,9 @@ void process_runnable_tasks()
 
 	/* pick up to max[TL_NORMAL] regular tasks from prio-ordered run queues */
 	/* Note: the grq lock is always held when grq is not null */
-	picked = 0;
+	lpicked = gpicked = 0;
 	budget = max[TL_NORMAL] - tt->tasks_in_list;
-	while (picked < budget) {
+	while (lpicked + gpicked < budget) {
 		if ((global_tasks_mask & tid_bit) && !grq) {
 #ifdef USE_THREAD
 			HA_SPIN_LOCK(TASK_RQ_LOCK, &rq_lock);
@@ -702,6 +702,7 @@ void process_runnable_tasks()
 			t = eb32sc_entry(lrq, struct task, rq);
 			lrq = eb32sc_next(lrq, tid_bit);
 			__task_unlink_rq(t);
+			lpicked++;
 		}
 #ifdef USE_THREAD
 		else {
@@ -715,12 +716,12 @@ void process_runnable_tasks()
 					HA_SPIN_UNLOCK(TASK_RQ_LOCK, &rq_lock);
 				}
 			}
+			gpicked++;
 		}
 #endif
 
 		/* Add it to the local task list */
 		LIST_ADDQ(&tt->tasklets[TL_NORMAL], &((struct tasklet *)t)->list);
-		picked++;
 	}
 
 	/* release the rqueue lock */
@@ -729,11 +730,11 @@ void process_runnable_tasks()
 		grq = NULL;
 	}
 
-	if (picked) {
+	if (lpicked + gpicked) {
 		tt->tl_class_mask |= 1 << TL_NORMAL;
-		_HA_ATOMIC_ADD(&tt->tasks_in_list, picked);
-		_HA_ATOMIC_ADD(&tt->rq_total, picked);
-		activity[tid].tasksw += picked;
+		_HA_ATOMIC_ADD(&tt->tasks_in_list, lpicked + gpicked);
+		_HA_ATOMIC_ADD(&tt->rq_total, lpicked + gpicked);
+		activity[tid].tasksw += lpicked + gpicked;
 	}
 
 	/* Merge the list of tasklets waken up by other threads to the