BUG/MEDIUM: tasks: Make sure there's no task left before considering inactive.

We may remove the thread's bit in active_tasks_mask even though tasks for
that thread are still present in the global runqueue. To fix that, introduce
global_tasks_mask, and set the corresponding thread bits whenever a task is
added to the global runqueue.
Olivier Houchard, 2018-07-26 15:25:49 +02:00 (committed by Willy Tarreau)
parent 189ea856a7
commit c4aac9effe
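
For context, here is a minimal, self-contained C sketch (not HAProxy code) of
the enqueue/idle handshake this patch establishes. It substitutes C11
stdatomic operations for the HA_ATOMIC_*() macros and the
__ha_barrier_store()/__ha_barrier_load() helpers; the function names
enqueue_global() and maybe_go_idle() and the local_rqueue_size parameter are
hypothetical stand-ins:

#include <stdatomic.h>

static _Atomic unsigned long active_tasks_mask; /* threads with something to run */
static _Atomic unsigned long global_tasks_mask; /* threads with tasks in the shared runqueue */

/* Enqueue side: publish the thread bits in global_tasks_mask first, then
 * mark the threads active (mirrors the USE_THREAD hunk in __task_wakeup()).
 * atomic_fetch_or() is sequentially consistent here, which also covers the
 * __ha_barrier_store() in the patch. */
static void enqueue_global(unsigned long thread_mask)
{
	atomic_fetch_or(&global_tasks_mask, thread_mask);
	atomic_fetch_or(&active_tasks_mask, thread_mask);
}

/* Idle side: clear our active bit only when both the shared and local
 * queues look empty, then re-read global_tasks_mask and undo the clear if a
 * task slipped in concurrently (mirrors the final hunk of
 * process_runnable_tasks()). */
static void maybe_go_idle(unsigned long tid_bit, unsigned int local_rqueue_size)
{
	if ((atomic_load(&global_tasks_mask) & tid_bit) || local_rqueue_size != 0)
		return; /* work still pending for this thread */
	atomic_fetch_and(&active_tasks_mask, ~tid_bit);
	/* stands in for __ha_barrier_load(): re-check after clearing */
	if (atomic_load(&global_tasks_mask) & tid_bit)
		atomic_fetch_or(&active_tasks_mask, tid_bit);
}

int main(void)
{
	enqueue_global(0x1UL);   /* a task arrives for thread 0 */
	maybe_go_idle(0x1UL, 0); /* thread 0 must stay marked active */
	return !(atomic_load(&active_tasks_mask) & 0x1UL); /* 0 = OK */
}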

@@ -34,6 +34,7 @@ struct pool_head *pool_head_notification;
 unsigned int nb_tasks = 0;
 unsigned long active_tasks_mask = 0; /* Mask of threads with active tasks */
+unsigned long global_tasks_mask = 0; /* Mask of threads with tasks in the global runqueue */
 unsigned int tasks_run_queue = 0;
 unsigned int tasks_run_queue_cur = 0; /* copy of the run queue size */
 unsigned int nb_tasks_cur = 0; /* copy of the tasks count */
@@ -118,6 +119,12 @@ redo:
 		return;
 	}
 	HA_ATOMIC_ADD(&tasks_run_queue, 1);
+#ifdef USE_THREAD
+	if (root == &rqueue) {
+		HA_ATOMIC_OR(&global_tasks_mask, t->thread_mask);
+		__ha_barrier_store();
+	}
+#endif
 	HA_ATOMIC_OR(&active_tasks_mask, t->thread_mask);
 	t->rq.key = HA_ATOMIC_ADD(&rqueue_ticks, 1);
@@ -281,8 +288,10 @@ void process_runnable_tasks()
 			 * of the tree now.
 			 */
 			rq_next = eb32sc_first(&rqueue, tid_bit);
-			if (!rq_next)
+			if (!rq_next) {
+				HA_ATOMIC_AND(&global_tasks_mask, ~tid_bit);
 				break;
+			}
 		}
 		t = eb32sc_entry(rq_next, struct task, rq);
@@ -302,7 +311,6 @@ void process_runnable_tasks()
 			return;
 		}
 	}
-	HA_ATOMIC_AND(&active_tasks_mask, ~tid_bit);
 	/* Get some tasks from the run queue, make sure we don't
 	 * get too much in the task list, but put a bit more than
 	 * the max that will be run, to give a bit more fairness
@@ -337,6 +345,12 @@ void process_runnable_tasks()
 		/* And add it to the local task list */
 		task_insert_into_tasklet_list(t);
 	}
+	if (!(global_tasks_mask & tid_bit) && rqueue_size[tid] == 0) {
+		HA_ATOMIC_AND(&active_tasks_mask, ~tid_bit);
+		__ha_barrier_load();
+		if (global_tasks_mask & tid_bit)
+			HA_ATOMIC_OR(&active_tasks_mask, tid_bit);
+	}
 	while (max_processed > 0 && !LIST_ISEMPTY(&task_list[tid])) {
 		struct task *t;
 		unsigned short state;
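
A short note on why the two barriers above suffice: __task_wakeup() sets
global_tasks_mask and issues __ha_barrier_store() before setting
active_tasks_mask, while process_runnable_tasks() clears its
active_tasks_mask bit, issues __ha_barrier_load(), and re-reads
global_tasks_mask. If the dequeuer's clear happens before the enqueuer's OR
of active_tasks_mask, the enqueuer re-sets the bit; if it happens after, the
dequeuer's re-read observes the freshly set global bit and re-arms the bit
itself. Either way a thread can no longer declare itself inactive while a
task for it still sits in the global runqueue.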