MAJOR: task: make use of the scope-aware ebtree functions

Currently the task scheduler suffers from an O(n) lookup when skipping
tasks that are not for the current thread. The reason is that
eb32_lookup_ge() has no information about the current thread, so it
revisits many tasks belonging to other threads before finding its own.
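
Schematically, the pre-patch consumer loop looked like the condensed
fragment below (drawn from the old process_runnable_tasks() code shown
in the diff further down; not compilable on its own). Every task owned
by another thread still costs a full node visit:

    rq_next = eb32_lookup_ge(&rqueue, rqueue_ticks - TIMER_LOOK_BACK);
    while (rq_next) {
        t = eb32_entry(rq_next, struct task, rq);
        rq_next = eb32_next(rq_next);
        if (!(t->thread_mask & tid_bit))
            continue; /* not ours: one wasted visit, O(n) of them overall */
        /* ... queue t for local processing ... */
    }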

This is particularly visible with HTTP/2, since the number of
concurrent streams created at once causes long series of tasks for the
same stream in the scheduler. With only 10 connections and 100 streams
each, running on two threads, the performance drops from 640kreq/s to
11.2kreq/s! Lookup metrics show that for only 200000 task lookups, 430
million skips had to be performed, meaning that on average each lookup
visits 2150 nodes.

This commit backports the principle of scope lookups for ebtrees from
the ebtree_v7 development tree. The idea is that each node contains a
mask indicating the union of the scopes of all nodes below it; this
mask is fed during insertion and used during lookups.
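
Here is a minimal sketch of the principle, using a plain (unbalanced)
binary search tree instead of the real eb32sc code; the type and
function names are illustrative, not the actual ebtree API:

    #include <stdint.h>
    #include <stdlib.h>

    struct sc_node {
        uint32_t key;
        unsigned long node_scope;   /* scope bits of this node itself */
        unsigned long tree_scope;   /* union of scopes over the subtree */
        struct sc_node *left, *right;
    };

    /* Insert <key> with <scope>: OR the scope into every node on the
     * way down so that each node keeps the union of all scopes below
     * it. (Allocation failure check omitted for brevity.)
     */
    static void sc_insert(struct sc_node **root, uint32_t key,
                          unsigned long scope)
    {
        while (*root) {
            (*root)->tree_scope |= scope;   /* feed the union downwards */
            root = (key < (*root)->key) ? &(*root)->left : &(*root)->right;
        }
        *root = calloc(1, sizeof(**root));
        (*root)->key = key;
        (*root)->node_scope = scope;
        (*root)->tree_scope = scope;
    }

Deletion is the delicate part: removing a node can leave ancestors
with a stale superset mask, which the real eb32sc code takes care of;
the sketch leaves it out.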

During lookups, branches that do not contain any leaf matching the
requested scope are simply skipped. This maps perfectly onto a thread
mask, allowing a thread to extract only the tasks it cares about from
the run queue, and to always find them in O(log(n)) instead of O(n).
The scheduler thus uses task->thread_mask as the scope at insertion
time and tid_bit as the scope at lookup time.
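
Continuing the same sketch, a scoped "lookup greater-or-equal" records
a candidate only when the node's own scope matches, and abandons any
branch whose subtree mask cannot match; again the names are
illustrative, not the real eb32sc API:

    static struct sc_node *sc_lookup_ge(struct sc_node *n, uint32_t x,
                                        unsigned long scope)
    {
        struct sc_node *best = NULL;

        while (n) {
            if (!(n->tree_scope & scope))
                break;              /* no matching leaf below: prune */
            if (n->key >= x) {
                if (n->node_scope & scope)
                    best = n;       /* candidate; look left for a smaller one */
                n = n->left;
            } else {
                n = n->right;       /* everything on the left is < x */
            }
        }
        return best;                /* NULL if no in-scope key >= x exists */
    }

With task->thread_mask as the insertion scope and tid_bit as the lookup
scope, a thread's descent never enters a branch that only holds other
threads' tasks, which is what restores the O(log(n)) lookup in the
balanced ebtree.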

Using these scope-aware lookups recovered most of the performance, as
can be seen in the test below with two threads, 10 connections, 100
streams each, and 1 million requests in total:

                              Before     After    Gain
              test duration : 89.6s      4.73s     x19
    HTTP requests/s (DEBUG) : 11200     211300     x19
     HTTP requests/s (PROD) : 15900     447000     x28
             spin_lock time : 85.2s      0.46s    /185
            time per lookup : 13us       40ns     /325

Even when going to 6 threads (on 3 hyperthreaded CPU cores), the
performance stays around 284000 req/s, showing that the contention
is much lower.

A test showed that there's no benefit in using this for the wait
queue, though.
Author: Willy Tarreau
Date:   2017-11-05 13:34:20 +01:00
parent  8878b6c4cb
commit  8d38805d3d
3 changed files with 12 additions and 9 deletions

diff --git a/include/proto/task.h b/include/proto/task.h

@@ -32,6 +32,7 @@
 #include <common/ticks.h>
 #include <common/hathreads.h>
+#include <eb32sctree.h>
 #include <eb32tree.h>
 #include <types/global.h>
@@ -163,7 +164,7 @@ static inline struct task *task_unlink_wq(struct task *t)
  */
 static inline struct task *__task_unlink_rq(struct task *t)
 {
-    eb32_delete(&t->rq);
+    eb32sc_delete(&t->rq);
     tasks_run_queue--;
     if (likely(t->nice))
         niced_tasks--;

diff --git a/include/types/task.h b/include/types/task.h

@@ -26,6 +26,7 @@
 #include <common/config.h>
 #include <common/mini-clist.h>
+#include <eb32sctree.h>
 #include <eb32tree.h>
 /* values for task->state */
@@ -63,7 +64,7 @@ struct notification {
 /* The base for all tasks */
 struct task {
-    struct eb32_node rq;            /* ebtree node used to hold the task in the run queue */
+    struct eb32sc_node rq;          /* ebtree node used to hold the task in the run queue */
     unsigned short state;           /* task state : bit field of TASK_* */
     unsigned short pending_state;   /* pending states for running talk */
     short nice;                     /* the task's current nice value from -1024 to +1024 */

diff --git a/src/task.c b/src/task.c

@@ -17,6 +17,7 @@
 #include <common/mini-clist.h>
 #include <common/standard.h>
 #include <common/time.h>
+#include <eb32sctree.h>
 #include <eb32tree.h>
 #include <proto/proxy.h>
@@ -74,7 +75,7 @@ struct task *__task_wakeup(struct task *t)
     * if task is running
     */
    t->state = t->pending_state;
-   eb32_insert(&rqueue, &t->rq);
+   eb32sc_insert(&rqueue, &t->rq, t->thread_mask);
    return t;
 }
@@ -188,7 +189,7 @@ void process_runnable_tasks()
    struct task *t;
    int i;
    int max_processed;
-   struct eb32_node *rq_next;
+   struct eb32sc_node *rq_next;
    int rewind;
    struct task *local_tasks[16];
    int local_tasks_count;
@@ -212,13 +213,13 @@ void process_runnable_tasks()
         */
        rewind = 0;
-       rq_next = eb32_lookup_ge(&rqueue, rqueue_ticks - TIMER_LOOK_BACK);
+       rq_next = eb32sc_lookup_ge(&rqueue, rqueue_ticks - TIMER_LOOK_BACK, tid_bit);
        if (!rq_next) {
            /* we might have reached the end of the tree, typically because
             * <rqueue_ticks> is in the first half and we're first scanning
             * the last half. Let's loop back to the beginning of the tree now.
             */
-           rq_next = eb32_first(&rqueue);
+           rq_next = eb32sc_first(&rqueue, tid_bit);
            if (!rq_next) {
                break;
            }
@@ -227,8 +228,8 @@ void process_runnable_tasks()
        local_tasks_count = 0;
        while (local_tasks_count < 16) {
-           t = eb32_entry(rq_next, struct task, rq);
-           rq_next = eb32_next(rq_next);
+           t = eb32sc_entry(rq_next, struct task, rq);
+           rq_next = eb32sc_next(rq_next, tid_bit);
            if (t->thread_mask & tid_bit) {
                /* detach the task from the queue */
                __task_unlink_rq(t);
@@ -238,7 +239,7 @@ void process_runnable_tasks()
            local_tasks[local_tasks_count++] = t;
        }
        if (!rq_next) {
-           if (rewind || !(rq_next = eb32_first(&rqueue))) {
+           if (rewind || !(rq_next = eb32sc_first(&rqueue, tid_bit))) {
                break;
            }
            rewind = 1;