MINOR: server: move the lock inside srv_add_idle()

Almost all callers of _srv_add_idle() lock the list and then call the
function. This is not the most efficient approach, and it requires each
caller to manage that lock itself. Let's change this a little bit:
srv_add_idle() now takes the lock and calls _srv_add_idle(), which is
now inlined. This way callers don't have to handle the lock themselves
anymore, and the lock is only held around the sensitive parts, not
across the function call and return.

Interestingly, performance tests show a small increase from 2.28-2.32M RPS
to 2.32-2.37M RPS on a 128-thread system.
Author: Willy Tarreau
Date:   2025-11-06 13:12:04 +01:00
Parent: a8498cde74
Commit: 5fe4677231

9 changed files with 25 additions and 23 deletions
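
To make the idiom concrete, below is a minimal standalone sketch of the
pattern this commit applies: the unlocked helper becomes static inline with a
"caller holds the lock" contract, and a small public wrapper takes the lock
around it. The names here (struct idle_list, _idle_add(), idle_add()) and the
plain POSIX spinlock are illustrative stand-ins, not HAProxy's real structures
or HA_SPIN_LOCK() macros; the actual change is in the diffs below.

#include <pthread.h>

/* Illustrative stand-in for a per-thread idle-connections list. Assume
 * pthread_spin_init(&il->lock, PTHREAD_PROCESS_PRIVATE) was called once
 * at startup.
 */
struct idle_list {
	pthread_spinlock_t lock;
	int nb_conns;            /* stands in for the real trees/lists */
};

/* Unlocked helper: the caller must already hold <il>->lock. Being
 * static inline, the wrapper's critical section below covers only these
 * few instructions, not a function call and return.
 */
static inline void _idle_add(struct idle_list *il)
{
	il->nb_conns++;
}

/* Public entry point: takes the lock itself so that callers no longer
 * have to bracket every insertion with lock/unlock pairs.
 */
void idle_add(struct idle_list *il)
{
	pthread_spin_lock(&il->lock);
	_idle_add(il);
	pthread_spin_unlock(&il->lock);
}

Each caller's former lock/call/unlock triplet then collapses to a single call,
which is exactly what the mux hunks below do with srv_add_idle().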

@@ -99,7 +99,7 @@ void srv_release_conn(struct server *srv, struct connection *conn);
 struct connection *srv_lookup_conn(struct ceb_root **tree, uint64_t hash);
 struct connection *srv_lookup_conn_next(struct ceb_root **tree, struct connection *conn);
-void _srv_add_idle(struct server *srv, struct connection *conn, int is_safe);
+void srv_add_idle(struct server *srv, struct connection *conn, int is_safe);
 int srv_add_to_idle_list(struct server *srv, struct connection *conn, int is_safe);
 void srv_add_to_avail_list(struct server *srv, struct connection *conn);
 struct task *srv_cleanup_toremove_conns(struct task *task, void *context, unsigned int state);

@@ -257,9 +257,7 @@ int conn_notify_mux(struct connection *conn, int old_flags, int forced_wake)
 		}
 		else {
 			ASSUME_NONNULL(srv); /* srv is guaranteed by CO_FL_LIST_MASK */
-			HA_SPIN_LOCK(IDLE_CONNS_LOCK, &idle_conns[tid].idle_conns_lock);
-			_srv_add_idle(srv, conn, conn_in_list == CO_FL_SAFE_LIST);
-			HA_SPIN_UNLOCK(IDLE_CONNS_LOCK, &idle_conns[tid].idle_conns_lock);
+			srv_add_idle(srv, conn, conn_in_list == CO_FL_SAFE_LIST);
 		}
 	}
 }

@@ -3107,9 +3107,7 @@ struct task *fcgi_io_cb(struct task *t, void *ctx, unsigned int state)
 		}
 		else {
 			ASSUME_NONNULL(srv); /* srv is guaranteed by CO_FL_LIST_MASK */
-			HA_SPIN_LOCK(IDLE_CONNS_LOCK, &idle_conns[tid].idle_conns_lock);
-			_srv_add_idle(srv, conn, conn_in_list == CO_FL_SAFE_LIST);
-			HA_SPIN_UNLOCK(IDLE_CONNS_LOCK, &idle_conns[tid].idle_conns_lock);
+			srv_add_idle(srv, conn, conn_in_list == CO_FL_SAFE_LIST);
 		}
 	}
 	else {

@@ -4374,9 +4374,7 @@ struct task *h1_io_cb(struct task *t, void *ctx, unsigned int state)
 		}
 		else {
 			ASSUME_NONNULL(srv); /* srv is guaranteed by CO_FL_LIST_MASK */
-			HA_SPIN_LOCK(IDLE_CONNS_LOCK, &idle_conns[tid].idle_conns_lock);
-			_srv_add_idle(srv, conn, conn_in_list == CO_FL_SAFE_LIST);
-			HA_SPIN_UNLOCK(IDLE_CONNS_LOCK, &idle_conns[tid].idle_conns_lock);
+			srv_add_idle(srv, conn, conn_in_list == CO_FL_SAFE_LIST);
 		}
 	}
 	else {

@@ -5027,9 +5027,7 @@ struct task *h2_io_cb(struct task *t, void *ctx, unsigned int state)
 		}
 		else {
 			ASSUME_NONNULL(srv); /* srv is guaranteed by CO_FL_LIST_MASK */
-			HA_SPIN_LOCK(IDLE_CONNS_LOCK, &idle_conns[tid].idle_conns_lock);
-			_srv_add_idle(srv, conn, conn_in_list == CO_FL_SAFE_LIST);
-			HA_SPIN_UNLOCK(IDLE_CONNS_LOCK, &idle_conns[tid].idle_conns_lock);
+			srv_add_idle(srv, conn, conn_in_list == CO_FL_SAFE_LIST);
 		}
 	}
 	else {

@@ -3448,9 +3448,7 @@ struct task *qcc_io_cb(struct task *t, void *ctx, unsigned int state)
 			}
 		}
 		else {
-			HA_SPIN_LOCK(IDLE_CONNS_LOCK, &idle_conns[tid].idle_conns_lock);
-			_srv_add_idle(srv, conn, conn_in_list == CO_FL_SAFE_LIST);
-			HA_SPIN_UNLOCK(IDLE_CONNS_LOCK, &idle_conns[tid].idle_conns_lock);
+			srv_add_idle(srv, conn, conn_in_list == CO_FL_SAFE_LIST);
 		}
 
 		/* Do not access conn without protection as soon as it is reinserted in idle list. */

@@ -2603,9 +2603,7 @@ static struct task *spop_io_cb(struct task *t, void *ctx, unsigned int state)
 		}
 		else {
 			ASSUME_NONNULL(srv); /* srv is guaranteed by CO_FL_LIST_MASK */
-			HA_SPIN_LOCK(IDLE_CONNS_LOCK, &idle_conns[tid].idle_conns_lock);
-			_srv_add_idle(srv, conn, conn_in_list == CO_FL_SAFE_LIST);
-			HA_SPIN_UNLOCK(IDLE_CONNS_LOCK, &idle_conns[tid].idle_conns_lock);
+			srv_add_idle(srv, conn, conn_in_list == CO_FL_SAFE_LIST);
 		}
 	}
 	else {

@@ -7318,7 +7318,7 @@ struct connection *srv_lookup_conn_next(struct ceb_root **tree, struct connection *conn)
  *
  * Must be called with idle_conns_lock.
  */
-void _srv_add_idle(struct server *srv, struct connection *conn, int is_safe)
+static inline void _srv_add_idle(struct server *srv, struct connection *conn, int is_safe)
 {
 	struct ceb_root **tree = is_safe ? &srv->per_thr[tid].safe_conns :
 	                                   &srv->per_thr[tid].idle_conns;
@@ -7330,6 +7330,22 @@ void _srv_add_idle(struct server *srv, struct connection *conn, int is_safe)
 	LIST_APPEND(&srv->per_thr[tid].idle_conn_list, &conn->idle_list);
 }
 
+/* Add <conn> in <srv> idle trees. Set <is_safe> if connection is deemed safe
+ * for reuse.
+ *
+ * This function is a simple wrapper for tree insert. It should only be used
+ * for internal usage or when removing briefly the connection to avoid takeover
+ * on it before reinserting it with this function. In other context, prefer to
+ * use the full feature srv_add_to_idle_list(). This function takes the idle
+ * conns lock for the current thread (thus the owner must not already have it).
+ */
+void srv_add_idle(struct server *srv, struct connection *conn, int is_safe)
+{
+	HA_SPIN_LOCK(IDLE_CONNS_LOCK, &idle_conns[tid].idle_conns_lock);
+	_srv_add_idle(srv, conn, is_safe);
+	HA_SPIN_UNLOCK(IDLE_CONNS_LOCK, &idle_conns[tid].idle_conns_lock);
+}
+
 /* This adds an idle connection to the server's list if the connection is
  * reusable, not held by any owner anymore, but still has available streams.
  */

@@ -6878,9 +6878,7 @@ leave:
 		else {
 			ASSUME_NONNULL(srv); /* srv is guaranteed by CO_FL_LIST_MASK */
 			TRACE_DEVEL("adding conn back to idle list", SSL_EV_CONN_IO_CB, conn);
-			HA_SPIN_LOCK(IDLE_CONNS_LOCK, &idle_conns[tid].idle_conns_lock);
-			_srv_add_idle(srv, conn, conn_in_list == CO_FL_SAFE_LIST);
-			HA_SPIN_UNLOCK(IDLE_CONNS_LOCK, &idle_conns[tid].idle_conns_lock);
+			srv_add_idle(srv, conn, conn_in_list == CO_FL_SAFE_LIST);
 		}
 	}
 	else {