CLEANUP: task: move the tree root detection from task_wakeup() to __task_wakeup()

Historically, __task_wakeup() used to be called with a known tree root,
but this is no longer the case and the code has remained needlessly
complicated: the root is computed in task_wakeup() only to be passed as
an argument to __task_wakeup(), which then compares it again.

Let's get rid of this and simply move the detection code there. This
eliminates some ifdefs and allows the test conditions to be simplified
quite a bit.
Willy Tarreau 2021-02-24 16:41:11 +01:00
parent 1f3b1417b8
commit 018564eaa2
2 changed files with 11 additions and 19 deletions
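
For readers skimming the diff below: the change simply moves a per-call decision
(which run queue root to use) from every caller into the single callee. The
following is a small self-contained toy sketch of that pattern, not HAProxy's
real code; the toy_task_wakeup() name, the stand-in types and the values are
made up for illustration only.

#include <stdio.h>

/* Toy stand-ins for HAProxy's per-thread and global run queue roots
 * (hypothetical names, only for illustrating the pattern). */
struct toy_root { const char *name; };

static struct toy_root local_rqueue  = { "local run queue" };
static struct toy_root global_rqueue = { "global run queue" };

static unsigned long tid_bit  = 0x1;   /* bit of the calling thread */
static int           nbthread = 4;     /* number of threads */

struct toy_task { unsigned long thread_mask; };

/* The root selection lives in the single callee instead of being
 * computed by every caller and passed down as an argument. */
static void toy_task_wakeup(struct toy_task *t)
{
        struct toy_root *root = &local_rqueue;

        if (t->thread_mask != tid_bit && nbthread != 1)
                root = &global_rqueue;  /* task may run on another thread */

        printf("queueing task on %s\n", root->name);
}

int main(void)
{
        struct toy_task local_only = { .thread_mask = 0x1 };
        struct toy_task any_thread = { .thread_mask = 0xf };

        toy_task_wakeup(&local_only);   /* -> local run queue */
        toy_task_wakeup(&any_thread);   /* -> global run queue */
        return 0;
}

The condition mirrors the one added to __task_wakeup() in the diff: a task bound
to the calling thread only, or a single-thread run, stays on the local queue;
anything else goes to the global one.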

@@ -108,7 +108,7 @@ __decl_thread(extern HA_SPINLOCK_T rq_lock); /* spin lock related to run queue
 __decl_thread(extern HA_RWLOCK_T wq_lock);   /* RW lock related to the wait queue */
 void task_kill(struct task *t);
-void __task_wakeup(struct task *t, struct eb_root *);
+void __task_wakeup(struct task *t);
 void __task_queue(struct task *task, struct eb_root *wq);
 struct work_list *work_list_create(int nbthread,
@@ -195,17 +195,6 @@ static inline void _task_wakeup(struct task *t, unsigned int f, const char *file
 {
         unsigned short state;
 
-#ifdef USE_THREAD
-        struct eb_root *root;
-
-        if (t->thread_mask == tid_bit || global.nbthread == 1)
-                root = &sched->rqueue;
-        else
-                root = &rqueue;
-#else
-        struct eb_root *root = &sched->rqueue;
-#endif
-
         state = _HA_ATOMIC_OR(&t->state, f);
         while (!(state & (TASK_RUNNING | TASK_QUEUED))) {
                 if (_HA_ATOMIC_CAS(&t->state, &state, state | TASK_QUEUED)) {
@@ -216,7 +205,7 @@ static inline void _task_wakeup(struct task *t, unsigned int f, const char *file
                         t->debug.caller_file[t->debug.caller_idx] = file;
                         t->debug.caller_line[t->debug.caller_idx] = line;
 #endif
-                        __task_wakeup(t, root);
+                        __task_wakeup(t);
                         break;
                 }
         }

@@ -115,14 +115,16 @@ void task_kill(struct task *t)
  * The task must not already be in the run queue. If unsure, use the safer
  * task_wakeup() function.
  */
-void __task_wakeup(struct task *t, struct eb_root *root)
+void __task_wakeup(struct task *t)
 {
-#ifdef USE_THREAD
-        if (root == &rqueue) {
-                HA_SPIN_LOCK(TASK_RQ_LOCK, &rq_lock);
-        }
+        struct eb_root *root = &sched->rqueue;
+
+#ifdef USE_THREAD
+        if (t->thread_mask != tid_bit && global.nbthread != 1) {
+                root = &rqueue;
+
+                HA_SPIN_LOCK(TASK_RQ_LOCK, &rq_lock);
 
-        if (root == &rqueue) {
                 global_tasks_mask |= t->thread_mask;
                 grq_total++;
                 t->rq.key = ++global_rqueue_ticks;
@@ -146,6 +148,7 @@ void __task_wakeup(struct task *t, struct eb_root *root)
                 t->call_date = now_mono_time();
         eb32sc_insert(root, &t->rq, t->thread_mask);
 #ifdef USE_THREAD
         if (root == &rqueue) {
                 _HA_ATOMIC_OR(&t->state, TASK_GLOBAL);