mirror of
https://git.haproxy.org/git/haproxy.git/
synced 2025-12-12 13:11:11 +01:00
BUG/MEDIUM: task: Don't free a task that is about to be run.
While running a task, we may try to delete and free a task that is about to be run, because it's part of the local tasks list, or because rq_next points to it. So flag any task that is in the local tasks list to be deleted, instead of being run, by setting t->process to NULL, and re-make rq_next a global, thread-local variable that is modified if we attempt to delete that task. Many thanks to PiBa-NL for reporting this and analysing the problem. This should be backported to 1.8.
This commit is contained in:
parent
336a11f755
commit
9b36cb4a41
@ -90,6 +90,8 @@ extern unsigned int nb_tasks_cur;
|
|||||||
extern unsigned int niced_tasks; /* number of niced tasks in the run queue */
|
extern unsigned int niced_tasks; /* number of niced tasks in the run queue */
|
||||||
extern struct pool_head *pool_head_task;
|
extern struct pool_head *pool_head_task;
|
||||||
extern struct pool_head *pool_head_notification;
|
extern struct pool_head *pool_head_notification;
|
||||||
|
extern THREAD_LOCAL struct task *curr_task; /* task currently running or NULL */
|
||||||
|
extern THREAD_LOCAL struct eb32sc_node *rq_next; /* Next task to be potentially run */
|
||||||
|
|
||||||
__decl_hathreads(extern HA_SPINLOCK_T rq_lock); /* spin lock related to run queue */
|
__decl_hathreads(extern HA_SPINLOCK_T rq_lock); /* spin lock related to run queue */
|
||||||
__decl_hathreads(extern HA_SPINLOCK_T wq_lock); /* spin lock related to wait queue */
|
__decl_hathreads(extern HA_SPINLOCK_T wq_lock); /* spin lock related to wait queue */
|
||||||
@ -177,8 +179,11 @@ static inline struct task *__task_unlink_rq(struct task *t)
|
|||||||
static inline struct task *task_unlink_rq(struct task *t)
|
static inline struct task *task_unlink_rq(struct task *t)
|
||||||
{
|
{
|
||||||
HA_SPIN_LOCK(TASK_RQ_LOCK, &rq_lock);
|
HA_SPIN_LOCK(TASK_RQ_LOCK, &rq_lock);
|
||||||
if (likely(task_in_rq(t)))
|
if (likely(task_in_rq(t))) {
|
||||||
|
if (&t->rq == rq_next)
|
||||||
|
rq_next = eb32sc_next(rq_next, tid_bit);
|
||||||
__task_unlink_rq(t);
|
__task_unlink_rq(t);
|
||||||
|
}
|
||||||
HA_SPIN_UNLOCK(TASK_RQ_LOCK, &rq_lock);
|
HA_SPIN_UNLOCK(TASK_RQ_LOCK, &rq_lock);
|
||||||
return t;
|
return t;
|
||||||
}
|
}
|
||||||
@ -230,7 +235,7 @@ static inline struct task *task_new(unsigned long thread_mask)
|
|||||||
* Free a task. Its context must have been freed since it will be lost.
|
* Free a task. Its context must have been freed since it will be lost.
|
||||||
* The task count is decremented.
|
* The task count is decremented.
|
||||||
*/
|
*/
|
||||||
static inline void task_free(struct task *t)
|
static inline void __task_free(struct task *t)
|
||||||
{
|
{
|
||||||
pool_free(pool_head_task, t);
|
pool_free(pool_head_task, t);
|
||||||
if (unlikely(stopping))
|
if (unlikely(stopping))
|
||||||
@ -238,6 +243,18 @@ static inline void task_free(struct task *t)
|
|||||||
HA_ATOMIC_SUB(&nb_tasks, 1);
|
HA_ATOMIC_SUB(&nb_tasks, 1);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
static inline void task_free(struct task *t)
|
||||||
|
{
|
||||||
|
/* There's no need to protect t->state with a lock, as the task
|
||||||
|
* has to run on the current thread.
|
||||||
|
*/
|
||||||
|
if (t == curr_task || !(t->state & TASK_RUNNING))
|
||||||
|
__task_free(t);
|
||||||
|
else
|
||||||
|
t->process = NULL;
|
||||||
|
}
|
||||||
|
|
||||||
|
|
||||||
/* Place <task> into the wait queue, where it may already be. If the expiration
|
/* Place <task> into the wait queue, where it may already be. If the expiration
|
||||||
* timer is infinite, do nothing and rely on wake_expired_task to clean up.
|
* timer is infinite, do nothing and rely on wake_expired_task to clean up.
|
||||||
*/
|
*/
|
||||||
|
|||||||
18
src/task.c
18
src/task.c
@ -39,6 +39,7 @@ unsigned int nb_tasks_cur = 0; /* copy of the tasks count */
|
|||||||
unsigned int niced_tasks = 0; /* number of niced tasks in the run queue */
|
unsigned int niced_tasks = 0; /* number of niced tasks in the run queue */
|
||||||
|
|
||||||
THREAD_LOCAL struct task *curr_task = NULL; /* task currently running or NULL */
|
THREAD_LOCAL struct task *curr_task = NULL; /* task currently running or NULL */
|
||||||
|
THREAD_LOCAL struct eb32sc_node *rq_next = NULL; /* Next task to be potentially run */
|
||||||
|
|
||||||
__decl_hathreads(HA_SPINLOCK_T __attribute__((aligned(64))) rq_lock); /* spin lock related to run queue */
|
__decl_hathreads(HA_SPINLOCK_T __attribute__((aligned(64))) rq_lock); /* spin lock related to run queue */
|
||||||
__decl_hathreads(HA_SPINLOCK_T __attribute__((aligned(64))) wq_lock); /* spin lock related to wait queue */
|
__decl_hathreads(HA_SPINLOCK_T __attribute__((aligned(64))) wq_lock); /* spin lock related to wait queue */
|
||||||
@ -186,7 +187,6 @@ void process_runnable_tasks()
|
|||||||
struct task *t;
|
struct task *t;
|
||||||
int i;
|
int i;
|
||||||
int max_processed;
|
int max_processed;
|
||||||
struct eb32sc_node *rq_next;
|
|
||||||
struct task *local_tasks[16];
|
struct task *local_tasks[16];
|
||||||
int local_tasks_count;
|
int local_tasks_count;
|
||||||
int final_tasks_count;
|
int final_tasks_count;
|
||||||
@ -227,8 +227,14 @@ void process_runnable_tasks()
|
|||||||
*/
|
*/
|
||||||
if (likely(t->process == process_stream))
|
if (likely(t->process == process_stream))
|
||||||
t = process_stream(t);
|
t = process_stream(t);
|
||||||
else
|
else {
|
||||||
|
if (t->process != NULL)
|
||||||
t = t->process(t);
|
t = t->process(t);
|
||||||
|
else {
|
||||||
|
__task_free(t);
|
||||||
|
t = NULL;
|
||||||
|
}
|
||||||
|
}
|
||||||
curr_task = NULL;
|
curr_task = NULL;
|
||||||
|
|
||||||
if (likely(t != NULL)) {
|
if (likely(t != NULL)) {
|
||||||
@ -309,8 +315,14 @@ void process_runnable_tasks()
|
|||||||
curr_task = t;
|
curr_task = t;
|
||||||
if (likely(t->process == process_stream))
|
if (likely(t->process == process_stream))
|
||||||
t = process_stream(t);
|
t = process_stream(t);
|
||||||
else
|
else {
|
||||||
|
if (t->process != NULL)
|
||||||
t = t->process(t);
|
t = t->process(t);
|
||||||
|
else {
|
||||||
|
__task_free(t);
|
||||||
|
t = NULL;
|
||||||
|
}
|
||||||
|
}
|
||||||
curr_task = NULL;
|
curr_task = NULL;
|
||||||
if (t)
|
if (t)
|
||||||
local_tasks[final_tasks_count++] = t;
|
local_tasks[final_tasks_count++] = t;
|
||||||
|
|||||||
Loading…
x
Reference in New Issue
Block a user