MEDIUM: muxes: mark idle conns tasklets with TASK_F_USR1

The muxes are touching the idle_conns_lock all the time now because
they need to be careful that no other thread has stolen their tasklet's
context.

This patch addresses that by setting the TASK_F_USR1 flag on the
tasklet before marking a connection idle, and clearing it once the
connection is no longer idle. This guarantees that a tasklet without
this flag cannot be present in an idle list, so its handler does not
need to take this costly lock. This is especially beneficial for front
connections.
This commit is contained in:
Willy Tarreau 2021-03-02 16:51:09 +01:00
parent 6fa8bcdc78
commit e388f2fbca
3 changed files with 112 additions and 68 deletions

View File

@ -2975,15 +2975,18 @@ schedule:
} }
/* this is the tasklet referenced in fconn->wait_event.tasklet */ /* this is the tasklet referenced in fconn->wait_event.tasklet */
struct task *fcgi_io_cb(struct task *t, void *ctx, unsigned int status) struct task *fcgi_io_cb(struct task *t, void *ctx, unsigned int state)
{ {
struct connection *conn; struct connection *conn;
struct fcgi_conn *fconn; struct fcgi_conn *fconn = ctx;
struct tasklet *tl = (struct tasklet *)t; struct tasklet *tl = (struct tasklet *)t;
int conn_in_list; int conn_in_list;
int ret = 0; int ret = 0;
if (state & TASK_F_USR1) {
/* the tasklet was idling on an idle connection, it might have
* been stolen, let's be careful!
*/
HA_SPIN_LOCK(IDLE_CONNS_LOCK, &idle_conns[tid].idle_conns_lock); HA_SPIN_LOCK(IDLE_CONNS_LOCK, &idle_conns[tid].idle_conns_lock);
if (tl->context == NULL) { if (tl->context == NULL) {
/* The connection has been taken over by another thread, /* The connection has been taken over by another thread,
@ -2993,11 +2996,8 @@ struct task *fcgi_io_cb(struct task *t, void *ctx, unsigned int status)
HA_SPIN_UNLOCK(IDLE_CONNS_LOCK, &idle_conns[tid].idle_conns_lock); HA_SPIN_UNLOCK(IDLE_CONNS_LOCK, &idle_conns[tid].idle_conns_lock);
tasklet_free(tl); tasklet_free(tl);
return NULL; return NULL;
} }
fconn = ctx;
conn = fconn->conn; conn = fconn->conn;
TRACE_POINT(FCGI_EV_FCONN_WAKE, conn); TRACE_POINT(FCGI_EV_FCONN_WAKE, conn);
conn_in_list = conn->flags & CO_FL_LIST_MASK; conn_in_list = conn->flags & CO_FL_LIST_MASK;
@ -3005,6 +3005,12 @@ struct task *fcgi_io_cb(struct task *t, void *ctx, unsigned int status)
conn_delete_from_tree(&conn->hash_node->node); conn_delete_from_tree(&conn->hash_node->node);
HA_SPIN_UNLOCK(IDLE_CONNS_LOCK, &idle_conns[tid].idle_conns_lock); HA_SPIN_UNLOCK(IDLE_CONNS_LOCK, &idle_conns[tid].idle_conns_lock);
} else {
/* we're certain the connection was not in an idle list */
conn = fconn->conn;
TRACE_ENTER(FCGI_EV_FCONN_WAKE, conn);
conn_in_list = 0;
}
if (!(fconn->wait_event.events & SUB_RETRY_SEND)) if (!(fconn->wait_event.events & SUB_RETRY_SEND))
ret = fcgi_send(fconn); ret = fcgi_send(fconn);
@ -3480,6 +3486,10 @@ static struct conn_stream *fcgi_attach(struct connection *conn, struct session *
cs_free(cs); cs_free(cs);
goto err; goto err;
} }
/* the connection is not idle anymore, let's mark this */
HA_ATOMIC_AND(&fconn->wait_event.tasklet->state, ~TASK_F_USR1);
TRACE_LEAVE(FCGI_EV_FSTRM_NEW, conn, fstrm); TRACE_LEAVE(FCGI_EV_FSTRM_NEW, conn, fstrm);
return cs; return cs;
@ -3606,6 +3616,10 @@ static void fcgi_detach(struct conn_stream *cs)
fconn->conn->owner = NULL; fconn->conn->owner = NULL;
} }
/* mark that the tasklet may lose its context to another thread and
* that the handler needs to check it under the idle conns lock.
*/
HA_ATOMIC_OR(&fconn->wait_event.tasklet->state, TASK_F_USR1);
if (!srv_add_to_idle_list(objt_server(fconn->conn->target), fconn->conn, 1)) { if (!srv_add_to_idle_list(objt_server(fconn->conn->target), fconn->conn, 1)) {
/* The server doesn't want it, let's kill the connection right away */ /* The server doesn't want it, let's kill the connection right away */
fconn->conn->mux->destroy(fconn); fconn->conn->mux->destroy(fconn);

View File

@ -2799,15 +2799,18 @@ static int h1_process(struct h1c * h1c)
return -1; return -1;
} }
struct task *h1_io_cb(struct task *t, void *ctx, unsigned int status) struct task *h1_io_cb(struct task *t, void *ctx, unsigned int state)
{ {
struct connection *conn; struct connection *conn;
struct tasklet *tl = (struct tasklet *)t; struct tasklet *tl = (struct tasklet *)t;
int conn_in_list; int conn_in_list;
struct h1c *h1c; struct h1c *h1c = ctx;
int ret = 0; int ret = 0;
if (state & TASK_F_USR1) {
/* the tasklet was idling on an idle connection, it might have
* been stolen, let's be careful!
*/
HA_SPIN_LOCK(IDLE_CONNS_LOCK, &idle_conns[tid].idle_conns_lock); HA_SPIN_LOCK(IDLE_CONNS_LOCK, &idle_conns[tid].idle_conns_lock);
if (tl->context == NULL) { if (tl->context == NULL) {
/* The connection has been taken over by another thread, /* The connection has been taken over by another thread,
@ -2818,9 +2821,7 @@ struct task *h1_io_cb(struct task *t, void *ctx, unsigned int status)
tasklet_free(tl); tasklet_free(tl);
return NULL; return NULL;
} }
h1c = ctx;
conn = h1c->conn; conn = h1c->conn;
TRACE_POINT(H1_EV_H1C_WAKE, conn); TRACE_POINT(H1_EV_H1C_WAKE, conn);
/* Remove the connection from the list, to be sure nobody attempts /* Remove the connection from the list, to be sure nobody attempts
@ -2831,6 +2832,12 @@ struct task *h1_io_cb(struct task *t, void *ctx, unsigned int status)
conn_delete_from_tree(&conn->hash_node->node); conn_delete_from_tree(&conn->hash_node->node);
HA_SPIN_UNLOCK(IDLE_CONNS_LOCK, &idle_conns[tid].idle_conns_lock); HA_SPIN_UNLOCK(IDLE_CONNS_LOCK, &idle_conns[tid].idle_conns_lock);
} else {
/* we're certain the connection was not in an idle list */
conn = h1c->conn;
TRACE_ENTER(H1_EV_H1C_WAKE, conn);
conn_in_list = 0;
}
if (!(h1c->wait_event.events & SUB_RETRY_SEND)) if (!(h1c->wait_event.events & SUB_RETRY_SEND))
ret = h1_send(h1c); ret = h1_send(h1c);
@ -2998,6 +3005,9 @@ static struct conn_stream *h1_attach(struct connection *conn, struct session *se
goto err; goto err;
} }
/* the connection is not idle anymore, let's mark this */
HA_ATOMIC_AND(&h1c->wait_event.tasklet->state, ~TASK_F_USR1);
TRACE_LEAVE(H1_EV_STRM_NEW, conn, h1s); TRACE_LEAVE(H1_EV_STRM_NEW, conn, h1s);
return cs; return cs;
err: err:
@ -3090,6 +3100,11 @@ static void h1_detach(struct conn_stream *cs)
else { else {
if (h1c->conn->owner == sess) if (h1c->conn->owner == sess)
h1c->conn->owner = NULL; h1c->conn->owner = NULL;
/* mark that the tasklet may lose its context to another thread and
* that the handler needs to check it under the idle conns lock.
*/
HA_ATOMIC_OR(&h1c->wait_event.tasklet->state, TASK_F_USR1);
h1c->conn->xprt->subscribe(h1c->conn, h1c->conn->xprt_ctx, SUB_RETRY_RECV, &h1c->wait_event); h1c->conn->xprt->subscribe(h1c->conn, h1c->conn->xprt_ctx, SUB_RETRY_RECV, &h1c->wait_event);
if (!srv_add_to_idle_list(objt_server(h1c->conn->target), h1c->conn, is_not_first)) { if (!srv_add_to_idle_list(objt_server(h1c->conn->target), h1c->conn, is_not_first)) {
/* The server doesn't want it, let's kill the connection right away */ /* The server doesn't want it, let's kill the connection right away */

View File

@ -3773,15 +3773,18 @@ schedule:
} }
/* this is the tasklet referenced in h2c->wait_event.tasklet */ /* this is the tasklet referenced in h2c->wait_event.tasklet */
struct task *h2_io_cb(struct task *t, void *ctx, unsigned int status) struct task *h2_io_cb(struct task *t, void *ctx, unsigned int state)
{ {
struct connection *conn; struct connection *conn;
struct tasklet *tl = (struct tasklet *)t; struct tasklet *tl = (struct tasklet *)t;
int conn_in_list; int conn_in_list;
struct h2c *h2c; struct h2c *h2c = ctx;
int ret = 0; int ret = 0;
if (state & TASK_F_USR1) {
/* the tasklet was idling on an idle connection, it might have
* been stolen, let's be careful!
*/
HA_SPIN_LOCK(IDLE_CONNS_LOCK, &idle_conns[tid].idle_conns_lock); HA_SPIN_LOCK(IDLE_CONNS_LOCK, &idle_conns[tid].idle_conns_lock);
if (t->context == NULL) { if (t->context == NULL) {
/* The connection has been taken over by another thread, /* The connection has been taken over by another thread,
@ -3792,9 +3795,7 @@ struct task *h2_io_cb(struct task *t, void *ctx, unsigned int status)
tasklet_free(tl); tasklet_free(tl);
goto leave; goto leave;
} }
h2c = ctx;
conn = h2c->conn; conn = h2c->conn;
TRACE_ENTER(H2_EV_H2C_WAKE, conn); TRACE_ENTER(H2_EV_H2C_WAKE, conn);
conn_in_list = conn->flags & CO_FL_LIST_MASK; conn_in_list = conn->flags & CO_FL_LIST_MASK;
@ -3806,6 +3807,12 @@ struct task *h2_io_cb(struct task *t, void *ctx, unsigned int status)
conn_delete_from_tree(&conn->hash_node->node); conn_delete_from_tree(&conn->hash_node->node);
HA_SPIN_UNLOCK(IDLE_CONNS_LOCK, &idle_conns[tid].idle_conns_lock); HA_SPIN_UNLOCK(IDLE_CONNS_LOCK, &idle_conns[tid].idle_conns_lock);
} else {
/* we're certain the connection was not in an idle list */
conn = h2c->conn;
TRACE_ENTER(H2_EV_H2C_WAKE, conn);
conn_in_list = 0;
}
if (!(h2c->wait_event.events & SUB_RETRY_SEND)) if (!(h2c->wait_event.events & SUB_RETRY_SEND))
ret = h2_send(h2c); ret = h2_send(h2c);
@ -4096,6 +4103,10 @@ static struct conn_stream *h2_attach(struct connection *conn, struct session *se
cs_free(cs); cs_free(cs);
return NULL; return NULL;
} }
/* the connection is not idle anymore, let's mark this */
HA_ATOMIC_AND(&h2c->wait_event.tasklet->state, ~TASK_F_USR1);
TRACE_LEAVE(H2_EV_H2S_NEW, conn, h2s); TRACE_LEAVE(H2_EV_H2S_NEW, conn, h2s);
return cs; return cs;
} }
@ -4239,6 +4250,10 @@ static void h2_detach(struct conn_stream *cs)
h2c->conn->owner = NULL; h2c->conn->owner = NULL;
} }
/* mark that the tasklet may lose its context to another thread and
* that the handler needs to check it under the idle conns lock.
*/
HA_ATOMIC_OR(&h2c->wait_event.tasklet->state, TASK_F_USR1);
if (!srv_add_to_idle_list(objt_server(h2c->conn->target), h2c->conn, 1)) { if (!srv_add_to_idle_list(objt_server(h2c->conn->target), h2c->conn, 1)) {
/* The server doesn't want it, let's kill the connection right away */ /* The server doesn't want it, let's kill the connection right away */
h2c->conn->mux->destroy(h2c); h2c->conn->mux->destroy(h2c);