CLEANUP: task: make sure tasklet handlers always indicate their statuses
When tasklets were derived from tasks, there was no immediate need for the scheduler to know their status after execution, and in a spirit of simplicity they just started to always return NULL. The problem is that this prevents the scheduler from 1) accounting for their execution time, and 2) keeping track of their current execution status. Indeed, a remote wake-up could very well end up manipulating a tasklet that's currently being executed, and this is the reason why those handlers have to take the idle lock before checking their context.

In 2.5 we'll take care of making tasklets and tasks work more similarly, but trouble is to be expected if we continue to propagate the trend of returning NULL everywhere, especially if some fixes relying on a stricter model later need to be backported. For this reason this patch updates all known tasklet handlers to make them return NULL only when the tasklet was freed. It has no effect for now and isn't even guaranteed to always be 100% safe, but it moves the code in the right direction.
This commit is contained in:
parent 4975d1482f
commit 7416314145
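To make the convention concrete before the diff, here is a minimal illustrative sketch (not code from this patch; my_io_cb, my_ctx and my_process are made-up names) of a tasklet handler that returns its pointer while it remains valid and NULL only once it has been freed. It mirrors the pattern applied to fcgi_io_cb(), h1_io_cb(), h2_io_cb() and the other handlers below.

#include <stddef.h>

struct task;                         /* opaque scheduler object */

struct my_ctx {
	int dead;                    /* set when the I/O layer destroyed everything */
};

/* hypothetical processing step: < 0 means the context and its tasklet
 * were released, the way fcgi_process()/h1_process() signal it */
static int my_process(struct my_ctx *ctx)
{
	return ctx->dead ? -1 : 0;
}

/* hypothetical handler following the convention: return the pointer while
 * the tasklet is valid, NULL only once it is gone */
struct task *my_io_cb(struct task *t, void *context, unsigned int state)
{
	struct my_ctx *ctx = context;

	(void)state;
	if (my_process(ctx) < 0)
		t = NULL;            /* freed: the scheduler must not touch it */

	return t;                    /* still valid: report it back */
}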
@@ -114,6 +114,11 @@ struct task_per_thread {
 /* This part is common between struct task and struct tasklet so that tasks
  * can be used as-is as tasklets.
+ *
+ * Note that the process() function must ALWAYS return the task/tasklet's
+ * pointer if the task/tasklet remains valid, and return NULL if it has been
+ * deleted. The scheduler relies on this to know if it should update its state
+ * on return.
  */
 #define TASK_COMMON \
 	struct { \
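For context on why the scheduler cares about this return value, the following is a hedged, self-contained illustration (not HAProxy's scheduler code; tasklet_sketch, run_one and now_mono_ns exist only for this example) of a caller that accounts execution time only when the handler reports that the object still exists.

#include <stddef.h>
#include <stdint.h>
#include <time.h>

/* simplified stand-in for a task/tasklet; not HAProxy's real layout */
struct tasklet_sketch {
	struct tasklet_sketch *(*process)(struct tasklet_sketch *t, void *ctx, unsigned int state);
	void *context;
	unsigned int state;
	uint64_t cpu_time_ns;        /* accumulated execution time */
};

static uint64_t now_mono_ns(void)
{
	struct timespec ts;

	clock_gettime(CLOCK_MONOTONIC, &ts);
	return (uint64_t)ts.tv_sec * 1000000000ULL + (uint64_t)ts.tv_nsec;
}

/* run one handler and update its accounting only if it reports that the
 * object is still alive (non-NULL return) */
static void run_one(struct tasklet_sketch *t)
{
	uint64_t before = now_mono_ns();

	t = t->process(t, t->context, t->state);
	if (!t)
		return;              /* the handler freed it: hands off */

	t->cpu_time_ns += now_mono_ns() - before;
}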
@@ -3024,6 +3024,9 @@ struct task *fcgi_io_cb(struct task *t, void *ctx, unsigned int state)
 	 * the connection (testing !ret is enough, if fcgi_process() wasn't
 	 * called then ret will be 0 anyway.
 	 */
+	if (ret < 0)
+		t = NULL;
+
 	if (!ret && conn_in_list) {
 		struct server *srv = objt_server(conn->target);
 
@@ -3034,7 +3037,7 @@ struct task *fcgi_io_cb(struct task *t, void *ctx, unsigned int state)
 		ebmb_insert(&srv->per_thr[tid].idle_conns, &conn->hash_node->node, sizeof(conn->hash_node->hash));
 		HA_SPIN_UNLOCK(IDLE_CONNS_LOCK, &idle_conns[tid].idle_conns_lock);
 	}
-	return NULL;
+	return t;
 }
 
 /* callback called on any event by the connection handler.
@@ -2845,11 +2845,15 @@ struct task *h1_io_cb(struct task *t, void *ctx, unsigned int state)
 	ret |= h1_recv(h1c);
 	if (ret || b_data(&h1c->ibuf))
 		ret = h1_process(h1c);
+
 	/* If we were in an idle list, we want to add it back into it,
 	 * unless h1_process() returned -1, which mean it has destroyed
 	 * the connection (testing !ret is enough, if h1_process() wasn't
 	 * called then ret will be 0 anyway.
 	 */
+	if (ret < 0)
+		t = NULL;
+
 	if (!ret && conn_in_list) {
 		struct server *srv = objt_server(conn->target);
 
@@ -2860,7 +2864,7 @@ struct task *h1_io_cb(struct task *t, void *ctx, unsigned int state)
 		ebmb_insert(&srv->per_thr[tid].idle_conns, &conn->hash_node->node, sizeof(conn->hash_node->hash));
 		HA_SPIN_UNLOCK(IDLE_CONNS_LOCK, &idle_conns[tid].idle_conns_lock);
 	}
-	return NULL;
+	return t;
 }
 
 static void h1_reset(struct connection *conn)
src/mux_h2.c: 12 lines changed
@@ -3793,6 +3793,7 @@ struct task *h2_io_cb(struct task *t, void *ctx, unsigned int state)
 		 */
 		HA_SPIN_UNLOCK(IDLE_CONNS_LOCK, &idle_conns[tid].idle_conns_lock);
 		tasklet_free(tl);
+		t = NULL;
 		goto leave;
 	}
 	conn = h2c->conn;
@@ -3826,6 +3827,9 @@ struct task *h2_io_cb(struct task *t, void *ctx, unsigned int state)
 	 * the connection (testing !ret is enough, if h2_process() wasn't
 	 * called then ret will be 0 anyway.
 	 */
+	if (ret < 0)
+		t = NULL;
+
 	if (!ret && conn_in_list) {
 		struct server *srv = objt_server(conn->target);
 
@@ -3839,7 +3843,7 @@ struct task *h2_io_cb(struct task *t, void *ctx, unsigned int state)
 
 leave:
 	TRACE_LEAVE(H2_EV_H2C_WAKE);
-	return NULL;
+	return t;
 }
 
 /* callback called on any event by the connection handler.
@@ -4473,13 +4477,15 @@ struct task *h2_deferred_shut(struct task *t, void *ctx, unsigned int state)
 
 		if (!h2s->cs) {
 			h2s_destroy(h2s);
-			if (h2c_is_dead(h2c))
+			if (h2c_is_dead(h2c)) {
 				h2_release(h2c);
+				t = NULL;
+			}
 		}
 	}
  end:
 	TRACE_LEAVE(H2_EV_STRM_SHUT);
-	return NULL;
+	return t;
 }
 
 /* shutr() called by the conn_stream (mux_ops.shutr) */
@@ -75,16 +75,18 @@ struct task *mux_pt_io_cb(struct task *t, void *tctx, unsigned int status)
 			ctx->conn->subs = NULL;
 		} else if (ctx->cs->data_cb->wake)
 			ctx->cs->data_cb->wake(ctx->cs);
-		return NULL;
+		return t;
 	}
 	conn_ctrl_drain(ctx->conn);
-	if (ctx->conn->flags & (CO_FL_ERROR | CO_FL_SOCK_RD_SH | CO_FL_SOCK_WR_SH))
+	if (ctx->conn->flags & (CO_FL_ERROR | CO_FL_SOCK_RD_SH | CO_FL_SOCK_WR_SH)) {
 		mux_pt_destroy(ctx);
+		t = NULL;
+	}
 	else
 		ctx->conn->xprt->subscribe(ctx->conn, ctx->conn->xprt_ctx, SUB_RETRY_RECV,
 					   &ctx->wait_event);
 
-	return NULL;
+	return t;
 }
 
 /* Initialize the mux once it's attached. It is expected that conn->ctx
@@ -5915,7 +5915,7 @@ leave:
 		ebmb_insert(&srv->per_thr[tid].idle_conns, &conn->hash_node->node, sizeof(conn->hash_node->hash));
 		HA_SPIN_UNLOCK(IDLE_CONNS_LOCK, &idle_conns[tid].idle_conns_lock);
 	}
-	return NULL;
+	return t;
 }
 
 /* Receive up to <count> bytes from connection <conn>'s socket and store them
@@ -774,7 +774,7 @@ struct task *si_cs_io_cb(struct task *t, void *ctx, unsigned int state)
 	int ret = 0;
 
 	if (!cs)
-		return NULL;
+		return t;
 
 	if (!(si->wait_event.events & SUB_RETRY_SEND) && !channel_is_empty(si_oc(si)))
 		ret = si_cs_send(cs);
@@ -784,7 +784,7 @@ struct task *si_cs_io_cb(struct task *t, void *ctx, unsigned int state)
 		si_cs_process(cs);
 
 	stream_release_buffers(si_strm(si));
-	return (NULL);
+	return t;
 }
 
 /* This function is designed to be called from within the stream handler to
@@ -122,8 +122,9 @@ out:
 		}
 		tasklet_free(ctx->wait_event.tasklet);
 		pool_free(xprt_handshake_ctx_pool, ctx);
+		t = NULL;
 	}
-	return NULL;
+	return t;
 }
 
 static int xprt_handshake_init(struct connection *conn, void **xprt_ctx)
@@ -3864,7 +3864,7 @@ struct task *quic_conn_io_cb(struct task *t, void *context, unsigned int state)
 		qc_send_ppkts(ctx);
 	}
 
-	return NULL;
+	return t;
 }
 
 /* Receive up to <count> bytes from connection <conn>'s socket and store them