From 4be073b99bb69a871c74dc5ebb55edb664f4b2ed Mon Sep 17 00:00:00 2001
From: Willy Tarreau <w@1wt.eu>
Date: Tue, 11 Oct 2022 18:10:27 +0000
Subject: [PATCH] MINOR: stick-table: do not take an exclusive lock when
 downing ref_cnt

At plenty of places we decrement ts->ref_cnt under the write lock
because it's held. We don't technically need it to be done that way
if there's contention and an atomic could suffice. However until all
places are turned to atomic, we at least need to do that under a read
lock for now, so that we don't mix atomic and non-atomic uses.

Regardless it already brings ~1.5% req rate improvement with 3 trackers
on the same table under 48 threads at 184k->187k rps.
---
 src/stick_table.c | 12 ++++++++----
 1 file changed, 8 insertions(+), 4 deletions(-)

diff --git a/src/stick_table.c b/src/stick_table.c
index 4e26a3ffd..1a24ed1cd 100644
--- a/src/stick_table.c
+++ b/src/stick_table.c
@@ -460,14 +460,18 @@ void stktable_touch_local(struct stktable *t, struct stksess *ts, int decrefcnt)
 	ts->ref_cnt--;
 	HA_RWLOCK_WRUNLOCK(STK_TABLE_LOCK, &t->lock);
 }
-/* Just decrease the ref_cnt of the current session. Does nothing if <ts> is NULL */
+/* Just decrease the ref_cnt of the current session. Does nothing if <ts> is NULL.
+ * Note that we still need to take the read lock because a number of other places
+ * (including in Lua and peers) update the ref_cnt non-atomically under the write
+ * lock.
+ */
 static void stktable_release(struct stktable *t, struct stksess *ts)
 {
 	if (!ts)
 		return;
-	HA_RWLOCK_WRLOCK(STK_TABLE_LOCK, &t->lock);
-	ts->ref_cnt--;
-	HA_RWLOCK_WRUNLOCK(STK_TABLE_LOCK, &t->lock);
+	HA_RWLOCK_RDLOCK(STK_TABLE_LOCK, &t->lock);
+	HA_ATOMIC_DEC(&ts->ref_cnt);
+	HA_RWLOCK_RDUNLOCK(STK_TABLE_LOCK, &t->lock);
 }

 /* Insert new sticky session in the table. It is assumed that it does not