Mirror of https://git.haproxy.org/git/haproxy.git/ (synced 2025-09-22 06:11:32 +02:00)
MINOR: tasks: Don't define rqueue if we're building without threads.
To make sure we don't inadvertently insert a task into the global run queue while only the local run queue is in use (builds without threads), make its definition and usage conditional on USE_THREAD.
parent e13ab8b3c6
commit b1ca58b245
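The change is mechanical: the global run queue (rqueue and its size counter) becomes a threads-only object, while the per-thread rqueue_local[] stays unconditional. A minimal, self-contained sketch of the pattern, with hypothetical names (build with and without -DUSE_THREAD):

    #include <stdio.h>

    #ifdef USE_THREAD
    static int global_rq;        /* global run queue: threaded builds only */
    #endif
    static int local_rq[4];      /* per-thread run queues: always present */

    int main(void)
    {
            local_rq[0]++;       /* always valid */
    #ifdef USE_THREAD
            global_rq++;         /* only compiles with -DUSE_THREAD */
    #endif
            printf("local_rq[0]=%d\n", local_rq[0]);
            return 0;
    }

In a build without -DUSE_THREAD, any leftover reference to global_rq is a compile error, which is exactly the safety net the commit message describes.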
include/proto/task.h

@@ -93,7 +93,9 @@ extern struct pool_head *pool_head_tasklet;
 extern struct pool_head *pool_head_notification;
 extern THREAD_LOCAL struct task *curr_task; /* task currently running or NULL */
 extern THREAD_LOCAL struct eb32sc_node *rq_next; /* Next task to be potentially run */
+#ifdef USE_THREAD
 extern struct eb_root rqueue; /* tree constituting the run queue */
+#endif
 extern struct eb_root rqueue_local[MAX_THREADS]; /* tree constituting the per-thread run queue */
 extern struct list task_list[MAX_THREADS]; /* List of tasks to be run, mixing tasks and tasklets */
 extern int task_list_size[MAX_THREADS]; /* Number of tasks in the task_list */
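Note that the declaration in the header is guarded the same way as the definition below: guarding only the definition would leave the extern visible, and a stray use of rqueue in a non-threaded build would then fail at link time instead of at compile time.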
src/task.c (28 lines changed)
@@ -49,9 +49,11 @@ __decl_hathreads(HA_SPINLOCK_T __attribute__((aligned(64))) rq_lock); /* spin lock related to run queue */
 __decl_hathreads(HA_SPINLOCK_T __attribute__((aligned(64))) wq_lock); /* spin lock related to wait queue */
 
 static struct eb_root timers;  /* sorted timers tree */
+#ifdef USE_THREAD
 struct eb_root rqueue; /* tree constituting the run queue */
-struct eb_root rqueue_local[MAX_THREADS]; /* tree constituting the per-thread run queue */
 static int global_rqueue_size; /* Number of elements in the global runqueue */
+#endif
+struct eb_root rqueue_local[MAX_THREADS]; /* tree constituting the per-thread run queue */
 static int rqueue_size[MAX_THREADS]; /* Number of elements in the per-thread run queue */
 static unsigned int rqueue_ticks;  /* insertion count */
 
@@ -68,10 +70,13 @@ void __task_wakeup(struct task *t, struct eb_root *root)
         void *expected = NULL;
         int *rq_size;
 
+#ifdef USE_THREAD
         if (root == &rqueue) {
                 rq_size = &global_rqueue_size;
                 HA_SPIN_LOCK(TASK_RQ_LOCK, &rq_lock);
-        } else {
+        } else
+#endif
+        {
                 int nb = root - &rqueue_local[0];
                 rq_size = &rqueue_size[nb];
         }
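The dangling `} else` before the #endif, followed by an unconditional `{ ... }` block, is the bracing trick that lets one body serve both builds: with USE_THREAD the preprocessor emits a normal if/else, without it only the bare block survives. A toy reduction (hypothetical names) that compiles either way:

    #include <stdio.h>

    #ifdef USE_THREAD
    static int global_size;
    #endif
    static int local_size[4];

    static int *pick_counter(int idx, int use_global)
    {
            int *p;
    #ifdef USE_THREAD
            if (use_global) {
                    p = &global_size;
            } else
    #endif
            {
                    (void)use_global;  /* silence unused-arg warning without threads */
                    p = &local_size[idx];
            }
            return p;
    }

    int main(void)
    {
            (*pick_counter(1, 0))++;
            printf("local_size[1]=%d\n", local_size[1]);
            return 0;
    }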
@@ -80,8 +85,10 @@ void __task_wakeup(struct task *t, struct eb_root *root)
          */
 redo:
         if (unlikely(!HA_ATOMIC_CAS(&t->rq.node.leaf_p, &expected, (void *)0x1))) {
+#ifdef USE_THREAD
                 if (root == &rqueue)
                         HA_SPIN_UNLOCK(TASK_RQ_LOCK, &rq_lock);
+#endif
                 return;
         }
         /* There's a small race condition, when running a task, the thread
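__task_wakeup() claims the task's tree node with an atomic compare-and-swap before inserting it (leaf_p goes from NULL to the 0x1 placeholder); if the CAS fails the task is already queued, and the only cleanup needed is dropping the global-queue lock, which exists solely in threaded builds, hence the #ifdef. The same unlock guard repeats in the next hunk. A runnable C11 sketch of the claim-or-bail idea:

    #include <stdatomic.h>
    #include <stdio.h>

    /* NULL = not queued; non-NULL = queued or being queued */
    static _Atomic(void *) leaf_p;

    static int try_claim(void)
    {
            void *expected = NULL;
            /* claim with a placeholder value, as __task_wakeup() does with 0x1 */
            return atomic_compare_exchange_strong(&leaf_p, &expected, (void *)0x1);
    }

    int main(void)
    {
            printf("first claim:  %d\n", try_claim());  /* 1: we queue the task */
            printf("second claim: %d\n", try_claim());  /* 0: already queued */
            return 0;
    }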
@@ -104,8 +111,10 @@ redo:
                 state = (volatile unsigned short)(t->state);
                 if (unlikely(state != 0 && !(state & TASK_RUNNING)))
                         goto redo;
+#ifdef USE_THREAD
                 if (root == &rqueue)
                         HA_SPIN_UNLOCK(TASK_RQ_LOCK, &rq_lock);
+#endif
                 return;
         }
         HA_ATOMIC_ADD(&tasks_run_queue, 1);
@@ -124,10 +133,13 @@ redo:
         }
 
         eb32sc_insert(root, &t->rq, t->thread_mask);
+#ifdef USE_THREAD
         if (root == &rqueue) {
                 global_rqueue_size++;
                 HA_SPIN_UNLOCK(TASK_RQ_LOCK, &rq_lock);
-        } else {
+        } else
+#endif
+        {
                 int nb = root - &rqueue_local[0];
 
                 rqueue_size[nb]++;
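Both branches funnel into the same accounting block, and the local-queue index is recovered by pointer arithmetic: since rqueue_local[] is a plain array, `root - &rqueue_local[0]` yields the owning thread's number. A tiny sketch:

    #include <stdio.h>

    struct queue { int size; };
    static struct queue rq_local[4];

    /* recover the thread index from a pointer into the array,
     * mirroring "int nb = root - &rqueue_local[0];" */
    static int queue_index(const struct queue *q)
    {
            return (int)(q - &rq_local[0]);
    }

    int main(void)
    {
            printf("index = %d\n", queue_index(&rq_local[2]));  /* prints 2 */
            return 0;
    }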
@@ -239,7 +251,9 @@ void process_runnable_tasks()
 {
         struct task *t;
         int max_processed;
+#ifdef USE_THREAD
         uint64_t average = 0;
+#endif
 
         tasks_run_queue_cur = tasks_run_queue; /* keep a copy for reporting */
         nb_tasks_cur = nb_tasks;
@@ -253,6 +267,7 @@ void process_runnable_tasks()
                 return;
         }
 
+#ifdef USE_THREAD
         average = tasks_run_queue / global.nbthread;
 
         /* Get some elements from the global run queue and put it in the
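The average is only meaningful with threads: each thread pulls roughly tasks_run_queue / nbthread entries from the global run queue into its local one (the pull loop itself lies between this hunk and the next, outside the visible context; the matching #endif closes the block in the following hunk). A toy sketch of the fair-share idea, with hypothetical names:

    #include <stdio.h>

    #define NBTHREAD 4

    int main(void)
    {
            int global_size = 10;                  /* tasks in the global run queue */
            int local_size  = 0;                   /* this thread's local queue */
            int average     = global_size / NBTHREAD;

            /* pull this thread's fair share of the global work */
            while (local_size < average && global_size > 0) {
                    global_size--;
                    local_size++;
            }
            printf("pulled %d, %d left globally\n", local_size, global_size);
            return 0;
    }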
@@ -284,6 +299,7 @@ void process_runnable_tasks()
                         __task_unlink_rq(t);
                         __task_wakeup(t, &rqueue_local[tid]);
                 }
+#endif
 
                 HA_SPIN_UNLOCK(TASK_RQ_LOCK, &rq_lock);
         } else {
@@ -361,8 +377,12 @@ void process_runnable_tasks()
                 if (t != NULL) {
                         state = HA_ATOMIC_AND(&t->state, ~TASK_RUNNING);
                         if (state)
+#ifdef USE_THREAD
                                 __task_wakeup(t, (t->thread_mask == tid_bit) ?
                                     &rqueue_local[tid] : &rqueue);
+#else
+                                __task_wakeup(t, &rqueue_local[tid]);
+#endif
                         else
                                 task_queue(t);
                 }
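Requeueing a task that still has work pending has to pick a destination: with threads, a task bound to exactly the current thread (thread_mask == tid_bit) goes back to the local queue and anything else to the global one; without threads the local queue is the only option, hence the new #else branch. A reduction of the selection logic:

    #include <stdio.h>

    /* pick the wakeup queue from the task's affinity mask;
     * hypothetical reduction of the process_runnable_tasks() logic */
    static const char *pick_queue(unsigned long thread_mask, unsigned long tid_bit)
    {
            return (thread_mask == tid_bit) ? "local" : "global";
    }

    int main(void)
    {
            unsigned long tid_bit = 1UL << 0;              /* we are thread 0 */
            printf("%s\n", pick_queue(1UL << 0, tid_bit)); /* local: bound to us only */
            printf("%s\n", pick_queue(~0UL, tid_bit));     /* global: may run anywhere */
            return 0;
    }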
@@ -382,7 +402,9 @@ int init_task()
         int i;
 
         memset(&timers, 0, sizeof(timers));
+#ifdef USE_THREAD
         memset(&rqueue, 0, sizeof(rqueue));
+#endif
         HA_SPIN_INIT(&wq_lock);
         HA_SPIN_INIT(&rq_lock);
         for (i = 0; i < MAX_THREADS; i++) {
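Zeroing with memset() is a valid way to produce an empty ebtree root (all branch pointers NULL), so initialization stays a plain memset; without USE_THREAD there is simply no global rqueue left to initialize, and only the per-thread structures set up in the MAX_THREADS loop that follows remain.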