MINOR: conn/muxes/ssl: add ASSUME_NONNULL() prior to _srv_add_idle

When manipulating idle backend connections for input/output processing,
special care is taken to ensure the connection cannot be accessed by
another thread, for example via a takeover. When processing is over, the
connection is reinserted into its original list.

A connection can either be attached to a session (private ones) or a
server idle tree. In the latter case, <srv> is guaranteed to be non-null
prior to _srv_add_idle() thanks to the CO_FL_LIST_MASK comparison with
the connection flags. This patch adds an ASSUME_NONNULL() to better
reflect this.

This should fix Coverity reports from GitHub issue #3095.
This commit is contained in:
Amaury Denoyelle 2025-09-01 13:32:24 +02:00
parent dcf2261612
commit 1868ca9a95
6 changed files with 6 additions and 0 deletions

View File

@ -234,6 +234,7 @@ int conn_notify_mux(struct connection *conn, int old_flags, int forced_wake)
}
}
else {
ASSUME_NONNULL(srv); /* srv is guaranteed by CO_FL_LIST_MASK */
HA_SPIN_LOCK(IDLE_CONNS_LOCK, &idle_conns[tid].idle_conns_lock);
_srv_add_idle(srv, conn, conn_in_list == CO_FL_SAFE_LIST);
HA_SPIN_UNLOCK(IDLE_CONNS_LOCK, &idle_conns[tid].idle_conns_lock);

View File

@ -3106,6 +3106,7 @@ struct task *fcgi_io_cb(struct task *t, void *ctx, unsigned int state)
}
}
else {
ASSUME_NONNULL(srv); /* srv is guaranteed by CO_FL_LIST_MASK */
HA_SPIN_LOCK(IDLE_CONNS_LOCK, &idle_conns[tid].idle_conns_lock);
_srv_add_idle(srv, conn, conn_in_list == CO_FL_SAFE_LIST);
HA_SPIN_UNLOCK(IDLE_CONNS_LOCK, &idle_conns[tid].idle_conns_lock);

View File

@ -4351,6 +4351,7 @@ struct task *h1_io_cb(struct task *t, void *ctx, unsigned int state)
}
}
else {
ASSUME_NONNULL(srv); /* srv is guaranteed by CO_FL_LIST_MASK */
HA_SPIN_LOCK(IDLE_CONNS_LOCK, &idle_conns[tid].idle_conns_lock);
_srv_add_idle(srv, conn, conn_in_list == CO_FL_SAFE_LIST);
HA_SPIN_UNLOCK(IDLE_CONNS_LOCK, &idle_conns[tid].idle_conns_lock);

View File

@ -5010,6 +5010,7 @@ struct task *h2_io_cb(struct task *t, void *ctx, unsigned int state)
}
}
else {
ASSUME_NONNULL(srv); /* srv is guaranteed by CO_FL_LIST_MASK */
HA_SPIN_LOCK(IDLE_CONNS_LOCK, &idle_conns[tid].idle_conns_lock);
_srv_add_idle(srv, conn, conn_in_list == CO_FL_SAFE_LIST);
HA_SPIN_UNLOCK(IDLE_CONNS_LOCK, &idle_conns[tid].idle_conns_lock);

View File

@ -2602,6 +2602,7 @@ static struct task *spop_io_cb(struct task *t, void *ctx, unsigned int state)
}
}
else {
ASSUME_NONNULL(srv); /* srv is guaranteed by CO_FL_LIST_MASK */
HA_SPIN_LOCK(IDLE_CONNS_LOCK, &idle_conns[tid].idle_conns_lock);
_srv_add_idle(srv, conn, conn_in_list == CO_FL_SAFE_LIST);
HA_SPIN_UNLOCK(IDLE_CONNS_LOCK, &idle_conns[tid].idle_conns_lock);

View File

@ -6505,6 +6505,7 @@ leave:
}
}
else {
ASSUME_NONNULL(srv); /* srv is guaranteed by CO_FL_LIST_MASK */
TRACE_DEVEL("adding conn back to idle list", SSL_EV_CONN_IO_CB, conn);
HA_SPIN_LOCK(IDLE_CONNS_LOCK, &idle_conns[tid].idle_conns_lock);
_srv_add_idle(srv, conn, conn_in_list == CO_FL_SAFE_LIST);