diff --git a/include/proto/task.h b/include/proto/task.h
index 6a89cd964..24edaac0f 100644
--- a/include/proto/task.h
+++ b/include/proto/task.h
@@ -32,6 +32,7 @@
 #include
 #include
+#include <eb32sctree.h>
 #include
 #include
@@ -163,7 +164,7 @@ static inline struct task *task_unlink_wq(struct task *t)
  */
 static inline struct task *__task_unlink_rq(struct task *t)
 {
-	eb32_delete(&t->rq);
+	eb32sc_delete(&t->rq);
 	tasks_run_queue--;
 	if (likely(t->nice))
 		niced_tasks--;
diff --git a/include/types/task.h b/include/types/task.h
index 236324d1b..70fc68119 100644
--- a/include/types/task.h
+++ b/include/types/task.h
@@ -26,6 +26,7 @@
 #include
 #include
+#include <eb32sctree.h>
 #include
@@ -63,7 +64,7 @@ struct notification {
 /* The base for all tasks */
 struct task {
-	struct eb32_node rq;          /* ebtree node used to hold the task in the run queue */
+	struct eb32sc_node rq;        /* ebtree node used to hold the task in the run queue */
 	unsigned short state;         /* task state : bit field of TASK_* */
 	unsigned short pending_state; /* pending states for running talk */
 	short nice;                   /* the task's current nice value from -1024 to +1024 */
diff --git a/src/task.c b/src/task.c
index 1768e075d..0db0dc495 100644
--- a/src/task.c
+++ b/src/task.c
@@ -17,6 +17,7 @@
 #include
 #include
 #include
+#include <eb32sctree.h>
 #include
 #include
@@ -74,7 +75,7 @@ struct task *__task_wakeup(struct task *t)
	 * if task is running
	 */
	t->state = t->pending_state;
-	eb32_insert(&rqueue, &t->rq);
+	eb32sc_insert(&rqueue, &t->rq, t->thread_mask);
	return t;
 }
@@ -188,7 +189,7 @@ void process_runnable_tasks()
	struct task *t;
	int i;
	int max_processed;
-	struct eb32_node *rq_next;
+	struct eb32sc_node *rq_next;
	int rewind;
	struct task *local_tasks[16];
	int local_tasks_count;
@@ -212,13 +213,13 @@
		 */
		rewind = 0;
-		rq_next = eb32_lookup_ge(&rqueue, rqueue_ticks - TIMER_LOOK_BACK);
+		rq_next = eb32sc_lookup_ge(&rqueue, rqueue_ticks - TIMER_LOOK_BACK, tid_bit);
		if (!rq_next) {
			/* we might have reached the end of the tree, typically because
			 * <rqueue_ticks> is in the first half and we're first scanning
			 * the last half. Let's loop back to the beginning of the tree
			 * now.
			 */
-			rq_next = eb32_first(&rqueue);
+			rq_next = eb32sc_first(&rqueue, tid_bit);
			if (!rq_next) {
				break;
			}
@@ -227,8 +228,8 @@
		local_tasks_count = 0;
		while (local_tasks_count < 16) {
-			t = eb32_entry(rq_next, struct task, rq);
-			rq_next = eb32_next(rq_next);
+			t = eb32sc_entry(rq_next, struct task, rq);
+			rq_next = eb32sc_next(rq_next, tid_bit);
			if (t->thread_mask & tid_bit) {
				/* detach the task from the queue */
				__task_unlink_rq(t);
@@ -238,7 +239,7 @@
				local_tasks[local_tasks_count++] = t;
			}
			if (!rq_next) {
-				if (rewind || !(rq_next = eb32_first(&rqueue))) {
+				if (rewind || !(rq_next = eb32sc_first(&rqueue, tid_bit))) {
					break;
				}
				rewind = 1;
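
A note on what the eb32sc_* conversion buys: the scope-aware ebtree variant stores a bitmask (the "scope", here the task's thread_mask) with each node, and every traversal call takes the caller's own bit (here tid_bit), so eb32sc_first(), eb32sc_next() and eb32sc_lookup_ge() only return entries whose scope intersects that bit. The sketch below is a minimal, self-contained illustration of that filtering idea, not HAProxy's ebtree code: it models the run queue as a sorted linked list, and the names snode, sq_insert, sq_first and sq_next are invented for this example.

/*
 * Simplified stand-in for a scope-aware ordered container. Each node keeps
 * a scope bitmask; traversal skips nodes whose scope does not intersect the
 * caller's bit, just like the eb32sc_* calls above do with tid_bit.
 */
#include <stdio.h>

struct snode {
	unsigned int key;     /* ordering key (think rqueue_ticks position) */
	unsigned long scope;  /* bitmask of threads allowed to pick this node */
	struct snode *next;
};

/* insert in ascending key order, keeping the scope attached to the node */
static void sq_insert(struct snode **head, struct snode *n)
{
	while (*head && (*head)->key <= n->key)
		head = &(*head)->next;
	n->next = *head;
	*head = n;
}

/* first node whose scope intersects the caller's bit, or NULL */
static struct snode *sq_first(struct snode *head, unsigned long bit)
{
	while (head && !(head->scope & bit))
		head = head->next;
	return head;
}

/* next node after <n> whose scope intersects the caller's bit, or NULL */
static struct snode *sq_next(struct snode *n, unsigned long bit)
{
	return n ? sq_first(n->next, bit) : NULL;
}

int main(void)
{
	struct snode a = { .key = 10, .scope = 0x1 };  /* thread 1 only */
	struct snode b = { .key = 20, .scope = 0x2 };  /* thread 2 only */
	struct snode c = { .key = 30, .scope = 0x3 };  /* threads 1 and 2 */
	struct snode *head = NULL, *n;
	unsigned long tid_bit = 0x2;                   /* pretend we are thread 2 */

	sq_insert(&head, &a);
	sq_insert(&head, &b);
	sq_insert(&head, &c);

	/* thread 2 only ever sees keys 20 and 30; key 10 is skipped entirely */
	for (n = sq_first(head, tid_bit); n; n = sq_next(n, tid_bit))
		printf("key %u\n", n->key);
	return 0;
}

Built as-is, the program prints only keys 20 and 30 for a caller whose bit is 0x2; the entry with scope 0x1 is never returned by the traversal, which is the same effect the tid_bit argument has on the run-queue scan in the patch above.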