MINOR: task: make rqueue_ticks atomic
The runqueue ticks counter is per-thread and wasn't initially meant to be shared. We'll soon have to share it, so let's make it atomic. It's only updated when waking up a task, and no performance difference was observed. It was also moved within the thread_ctx struct so that it no longer sits on the cache line holding thread-local fields, which other threads would otherwise dirty once they start updating it.
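For illustration, here is a minimal standalone sketch of the pattern this patch applies, written with C11 stdatomic instead of HAProxy's _HA_ATOMIC_ADD_FETCH macro (in HAProxy the _HA_-prefixed macros map to the compiler's relaxed atomic builtins); the struct and function names here are invented for the example:

/* Sketch only, not HAProxy code: a plain per-thread "++counter" becomes an
 * atomic add-and-fetch so other threads may later bump it safely.
 */
#include <stdatomic.h>
#include <stdio.h>

struct thread_ctx_sketch {
	/* before: unsigned int rqueue_ticks; (owner thread only) */
	/* after: atomic, so remote threads can update it too */
	atomic_uint rqueue_ticks;
};

static unsigned int wake_one(struct thread_ctx_sketch *ctx)
{
	/* equivalent of t->rq.key = _HA_ATOMIC_ADD_FETCH(&th_ctx->rqueue_ticks, 1):
	 * atomic_fetch_add returns the old value, so +1 yields the new key.
	 */
	return atomic_fetch_add_explicit(&ctx->rqueue_ticks, 1,
	                                 memory_order_relaxed) + 1;
}

int main(void)
{
	struct thread_ctx_sketch ctx = { .rqueue_ticks = 0 };
	printf("key=%u\n", wake_one(&ctx)); /* prints key=1 */
	return 0;
}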
commit a4fb79b4a2
parent fc5de15baa
include/haproxy/tinfo-t.h:

@@ -79,7 +79,6 @@ struct thread_ctx {
 	struct eb_root timers;              /* tree constituting the per-thread wait queue */
 	struct eb_root rqueue;              /* tree constituting the per-thread run queue */
 	struct task *current;               /* current task (not tasklet) */
-	unsigned int rqueue_ticks;          /* Insertion counter for the run queue */
 	int current_queue;                  /* points to current tasklet list being run, -1 if none */
 	unsigned int nb_tasks;              /* number of tasks allocated on this thread */
 	uint flags;                         /* thread flags, TH_FL_* */
@@ -96,11 +95,13 @@ struct thread_ctx {
 	// third cache line here on 64 bits: accessed mostly using atomic ops
 	ALWAYS_ALIGN(64);
 	struct mt_list shared_tasklet_list; /* Tasklet to be run, woken up by other threads */
+	unsigned int rqueue_ticks;          /* Insertion counter for the run queue */
 	unsigned int rq_total;              /* total size of the run queue, prio_tree + tasklets */
 	int tasks_in_list;                  /* Number of tasks in the per-thread tasklets list */
+	uint idle_pct;                      /* idle to total ratio over last sample (percent) */
+
 	uint64_t prev_cpu_time;             /* previous per thread CPU time */
 	uint64_t prev_mono_time;            /* previous system wide monotonic time */
-	uint idle_pct;                      /* idle to total ratio over last sample (percent) */
 	ALWAYS_ALIGN(128);
 };
 
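As an aside, a small compilable sketch (not HAProxy's code: alignas() stands in for the ALWAYS_ALIGN() macro and the field names are illustrative) of why the field was moved. Keeping atomically-shared fields on their own cache line means remote writers never dirty the owner's hot local line:

/* Sketch of the layout idea behind the struct reshuffle above. */
#include <stdalign.h>
#include <stdatomic.h>
#include <stddef.h>

struct ctx_sketch {
	/* first cache line: local-only state, touched by the owner thread */
	unsigned int nb_tasks;
	int current_queue;

	/* next cache line: state that other threads update with atomic ops */
	alignas(64) atomic_uint rqueue_ticks;
	atomic_uint rq_total;
};

/* the atomic group really does start on its own 64-byte boundary */
_Static_assert(offsetof(struct ctx_sketch, rqueue_ticks) % 64 == 0,
               "atomic fields must start a new cache line");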
src/task.c:

@@ -249,7 +249,7 @@ void __task_wakeup(struct task *t)
 #endif
 	{
 		_HA_ATOMIC_INC(&th_ctx->rq_total);
-		t->rq.key = ++th_ctx->rqueue_ticks;
+		t->rq.key = _HA_ATOMIC_ADD_FETCH(&th_ctx->rqueue_ticks, 1);
 	}
 
 	if (likely(t->nice)) {
@@ -854,7 +854,7 @@ void process_runnable_tasks()
 	 */
 
 	if (!lrq) {
-		lrq = eb32sc_lookup_ge(&tt->rqueue, tt->rqueue_ticks - TIMER_LOOK_BACK, tid_bit);
+		lrq = eb32sc_lookup_ge(&tt->rqueue, _HA_ATOMIC_LOAD(&tt->rqueue_ticks) - TIMER_LOOK_BACK, tid_bit);
 		if (unlikely(!lrq))
 			lrq = eb32sc_first(&tt->rqueue, tid_bit);
 	}
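One detail worth noting in the hunk above: the subtraction of TIMER_LOOK_BACK relies on unsigned wrap-around, so the lookup starts half the 32-bit key space behind the newest insertion and still finds keys even after the counter wraps. A tiny standalone sketch, assuming TIMER_LOOK_BACK is (1U << 31) as defined in HAProxy's ticks.h:

/* Sketch of the wrap-safe look-back window. Unsigned arithmetic wraps
 * modulo 2^32, so (ticks - TIMER_LOOK_BACK) is well defined even when
 * the counter recently wrapped past zero.
 */
#include <stdio.h>

#define TIMER_LOOK_BACK (1U << 31)

int main(void)
{
	unsigned int ticks = 5;   /* counter just wrapped past 0 */
	unsigned int start = ticks - TIMER_LOOK_BACK;

	/* start is 0x80000005: the search window covers the 2^31 keys
	 * preceding the newest insertion, wrap included.
	 */
	printf("start=%#x\n", start);
	return 0;
}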