From e62885237c7515ae6284f54ea07cdb68c133231a Mon Sep 17 00:00:00 2001
From: Willy Tarreau
Date: Wed, 12 Oct 2022 09:13:14 +0000
Subject: [PATCH] MEDIUM: stick-table: make stktable_set_entry() look up under a read lock

On a 24-core machine having some "stick-store response" rules, a lot of
time is spent in the write lock in stktable_set_entry(). Let's apply the
same mechanism as in stktable_get_entry(), which consists in looking up
the value under a read lock and upgrading it to a write lock only to
perform modifications. Here we even have the luxury of upgrading the
lock since there is no alloc/free in this path.

All this increases the performance by 40% (from 363k to 510k rps).
---
 src/stick_table.c | 47 ++++++++++++++++++++++++++---------------------
 1 file changed, 26 insertions(+), 21 deletions(-)

diff --git a/src/stick_table.c b/src/stick_table.c
index 1050a0fa8..f4f1de1eb 100644
--- a/src/stick_table.c
+++ b/src/stick_table.c
@@ -552,35 +552,40 @@ struct stksess *stktable_get_entry(struct stktable *table, struct stktable_key *
 }
 
 /* Lookup for an entry with the same key and store the submitted
- * stksess if not found.
- */
-struct stksess *__stktable_set_entry(struct stktable *table, struct stksess *nts)
-{
-	struct stksess *ts;
-
-	ts = __stktable_lookup(table, nts);
-	if (ts == NULL) {
-		ts = nts;
-		__stktable_store(table, ts);
-	}
-	return ts;
-}
-
-/* Lookup for an entry with the same key and store the submitted
- * stksess if not found.
- * This function locks the table, and the refcount of the entry is increased.
+ * stksess if not found. This function locks the table either shared or
+ * exclusively, and the refcount of the entry is increased.
  */
 struct stksess *stktable_set_entry(struct stktable *table, struct stksess *nts)
 {
 	struct stksess *ts;
 
-	HA_RWLOCK_WRLOCK(STK_TABLE_LOCK, &table->lock);
-	ts = __stktable_set_entry(table, nts);
-	ts->ref_cnt++;
-	HA_RWLOCK_WRUNLOCK(STK_TABLE_LOCK, &table->lock);
+	HA_RWLOCK_RDLOCK(STK_TABLE_LOCK, &table->lock);
+	ts = __stktable_lookup(table, nts);
+	if (ts) {
+		HA_ATOMIC_INC(&ts->ref_cnt);
+		HA_RWLOCK_RDUNLOCK(STK_TABLE_LOCK, &table->lock);
+		return ts;
+	}
+	ts = nts;
+	/* let's increment it before switching to exclusive */
+	HA_ATOMIC_INC(&ts->ref_cnt);
+
+	if (HA_RWLOCK_TRYRDTOSK(STK_TABLE_LOCK, &table->lock) != 0) {
+		/* upgrade to seek lock failed, let's drop and take */
+		HA_RWLOCK_RDUNLOCK(STK_TABLE_LOCK, &table->lock);
+		HA_RWLOCK_WRLOCK(STK_TABLE_LOCK, &table->lock);
+	}
+	else
+		HA_RWLOCK_SKTOWR(STK_TABLE_LOCK, &table->lock);
+
+	/* now we're write-locked */
+
+	__stktable_store(table, ts);
+	HA_RWLOCK_WRUNLOCK(STK_TABLE_LOCK, &table->lock);
 	return ts;
 }
 
+
 /*
  * Trash expired sticky sessions from table <t>. The next expiration date is
  * returned.
+ */
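
Editorial note: for readers less familiar with the HAProxy locking macros, below is a minimal standalone sketch of the pattern this patch applies, namely looking the key up under a shared lock and only going exclusive to insert. It is not HAProxy code: the hash table, the entry layout and the names (table_set_entry(), __table_lookup(), NBUCKETS, ...) are invented for illustration, and plain pthread rwlocks stand in for the HA_RWLOCK_*() macros. Since pthread rwlocks cannot be upgraded the way HA_RWLOCK_TRYRDTOSK()/HA_RWLOCK_SKTOWR() allow, the sketch drops the read lock and repeats the lookup under the write lock instead of upgrading in place.

/*
 * Standalone sketch (not HAProxy code) of the "look up under a read
 * lock, insert under a write lock" pattern from the patch above.
 * The table is a hypothetical fixed-size chained hash map and plain
 * pthread rwlocks replace the HA_RWLOCK_*() macros.
 */
#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define NBUCKETS 256

struct entry {
	struct entry *next;
	atomic_uint ref_cnt;
	char key[64];
};

struct table {
	pthread_rwlock_t lock;
	struct entry *buckets[NBUCKETS];
};

/* trivial FNV-1a hash reduced to a bucket index */
static unsigned int hash_key(const char *key)
{
	unsigned int h = 2166136261u;

	while (*key)
		h = (h ^ (unsigned char)*key++) * 16777619u;
	return h % NBUCKETS;
}

/* lookup only; the caller must hold the lock (shared or exclusive) */
static struct entry *__table_lookup(struct table *t, const char *key)
{
	struct entry *e;

	for (e = t->buckets[hash_key(key)]; e; e = e->next)
		if (strcmp(e->key, key) == 0)
			return e;
	return NULL;
}

/* Look up an entry with the same key as <nts> and insert <nts> if none
 * is found. The returned entry has its refcount increased; if it is not
 * <nts>, the caller still owns <nts> and may free it.
 */
static struct entry *table_set_entry(struct table *t, struct entry *nts)
{
	struct entry *e;

	/* fast path: a shared lock is enough for a pure lookup */
	pthread_rwlock_rdlock(&t->lock);
	e = __table_lookup(t, nts->key);
	if (e) {
		atomic_fetch_add(&e->ref_cnt, 1);
		pthread_rwlock_unlock(&t->lock);
		return e;
	}

	/* not found: drop the shared lock, take the exclusive one and
	 * look up again, since another thread may have inserted the key
	 * in between. The seek-lock upgrade in the patch narrows this
	 * window by not fully releasing the lock on the upgrade path.
	 */
	pthread_rwlock_unlock(&t->lock);
	pthread_rwlock_wrlock(&t->lock);

	e = __table_lookup(t, nts->key);
	if (!e) {
		unsigned int bucket = hash_key(nts->key);

		e = nts;
		e->next = t->buckets[bucket];
		t->buckets[bucket] = e;
	}
	atomic_fetch_add(&e->ref_cnt, 1);
	pthread_rwlock_unlock(&t->lock);
	return e;
}

int main(void)
{
	struct table t = { .lock = PTHREAD_RWLOCK_INITIALIZER };
	struct entry *nts = calloc(1, sizeof(*nts));

	atomic_init(&nts->ref_cnt, 0);
	strcpy(nts->key, "client-1");
	printf("stored %s, ref_cnt=%u\n", table_set_entry(&t, nts)->key,
	       (unsigned)atomic_load(&nts->ref_cnt));
	return 0;
}

A possible build line is "cc -O2 -pthread sketch.c". The trade-off is the same as in the patch: the common case (entry already present) only contends on the shared lock, while the rare insert pays for the exclusive one; without an upgradable lock, the second lookup under the write lock is what prevents two racing threads from inserting duplicates.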