From ae99aeb135ac18b1442ddf9eeccb16cb35d2d04d Mon Sep 17 00:00:00 2001
From: Willy Tarreau
Date: Sat, 17 Oct 2020 18:55:18 +0200
Subject: [PATCH] MINOR: lb/map: use seek lock and read locks where appropriate

- map_get_server_hash() doesn't need a write lock since it only reads
  the array; let's only use a read lock here.

- map_get_server_rr() only needs exclusivity to adjust the rr_idx while
  looking for its entry. Since this index is not used by
  map_get_server_hash(), let's turn this lock into a seek lock that
  doesn't block reads.

With 8 threads, no significant performance difference was noticed given
that lookups are usually instant with this LB algo, so lock contention
is rare.
---
 src/lb_map.c | 8 ++++----
 1 file changed, 4 insertions(+), 4 deletions(-)

diff --git a/src/lb_map.c b/src/lb_map.c
index 143291331..b735678a8 100644
--- a/src/lb_map.c
+++ b/src/lb_map.c
@@ -216,7 +216,7 @@ struct server *map_get_server_rr(struct proxy *px, struct server *srvtoavoid)
 	int newidx, avoididx;
 	struct server *srv, *avoided;
 
-	HA_RWLOCK_WRLOCK(LBPRM_LOCK, &px->lbprm.lock);
+	HA_RWLOCK_SKLOCK(LBPRM_LOCK, &px->lbprm.lock);
 	if (px->lbprm.tot_weight == 0) {
 		avoided = NULL;
 		goto out;
@@ -248,7 +248,7 @@ struct server *map_get_server_rr(struct proxy *px, struct server *srvtoavoid)
 	if (avoided)
 		px->lbprm.map.rr_idx = avoididx;
  out:
-	HA_RWLOCK_WRUNLOCK(LBPRM_LOCK, &px->lbprm.lock);
+	HA_RWLOCK_SKUNLOCK(LBPRM_LOCK, &px->lbprm.lock);
 	/* return NULL or srvtoavoid if found */
 	return avoided;
 }
@@ -265,10 +265,10 @@ struct server *map_get_server_hash(struct proxy *px, unsigned int hash)
 {
 	struct server *srv = NULL;
 
-	HA_RWLOCK_WRLOCK(LBPRM_LOCK, &px->lbprm.lock);
+	HA_RWLOCK_RDLOCK(LBPRM_LOCK, &px->lbprm.lock);
 	if (px->lbprm.tot_weight)
 		srv = px->lbprm.map.srv[hash % px->lbprm.tot_weight];
-	HA_RWLOCK_WRUNLOCK(LBPRM_LOCK, &px->lbprm.lock);
+	HA_RWLOCK_RDUNLOCK(LBPRM_LOCK, &px->lbprm.lock);
 	return srv;
 }
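
A note for readers who haven't met seek locks before: the patch juggles
three lock modes on px->lbprm.lock. R (read) is shared, W (write) is
fully exclusive, and S (seek) excludes other S and W holders while
letting R holders keep running; in HAProxy these are the HA_RWLOCK_RD*/
SK*/WR* macros built on the plock primitives. The following is a minimal
standalone sketch of these semantics using C11 atomics. It is a toy for
illustration only, not HAProxy's implementation (plock is considerably
smarter: fair, upgradable from S to W, and not a pure spin), and every
name in it (struct sklock, rd_lock(), sk_lock(), wr_lock(), the picker
threads) is made up.

/* toy_sklock.c - simplified R/S/W lock sketch (NOT HAProxy's plock).
 * Build: cc -O2 -pthread toy_sklock.c
 *   R (read):  shared, many holders       -> cf. map_get_server_hash()
 *   S (seek):  one holder, excludes W,
 *              does not block R           -> cf. map_get_server_rr()
 *   W (write): fully exclusive            -> cf. map rebuilds
 */
#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

struct sklock {
	atomic_uint readers;  /* number of R holders */
	atomic_flag seek;     /* owned by the single S or W holder */
	atomic_uint write;    /* nonzero while a W holder is present */
};

static void rd_lock(struct sklock *l)
{
	for (;;) {
		atomic_fetch_add(&l->readers, 1);
		if (!atomic_load(&l->write))
			return;                   /* no writer: access granted */
		atomic_fetch_sub(&l->readers, 1); /* writer there: back off */
		while (atomic_load(&l->write))
			;                         /* spin until W is gone */
	}
}

static void rd_unlock(struct sklock *l) { atomic_fetch_sub(&l->readers, 1); }

/* S: excludes other S/W holders only; R holders keep running. */
static void sk_lock(struct sklock *l)
{
	while (atomic_flag_test_and_set(&l->seek))
		;
}

static void sk_unlock(struct sklock *l) { atomic_flag_clear(&l->seek); }

/* W: take the seek slot first, then drain the readers. */
static void wr_lock(struct sklock *l)
{
	sk_lock(l);                     /* exclude other S/W holders */
	atomic_fetch_add(&l->write, 1); /* make new readers back off */
	while (atomic_load(&l->readers))
		;                       /* wait for current readers */
}

static void wr_unlock(struct sklock *l)
{
	atomic_fetch_sub(&l->write, 1);
	sk_unlock(l);
}

/* Mimic the LB map: a fixed server array plus a round-robin index. */
static struct sklock lk = { .seek = ATOMIC_FLAG_INIT };
static int srv_map[4] = { 1, 2, 3, 4 };
static unsigned int rr_idx;

static void *hash_picker(void *arg)      /* cf. map_get_server_hash() */
{
	for (int i = 0; i < 100000; i++) {
		rd_lock(&lk);
		(void)srv_map[i % 4];    /* pure read of the array */
		rd_unlock(&lk);
	}
	return arg;
}

static void *rr_picker(void *arg)        /* cf. map_get_server_rr() */
{
	for (int i = 0; i < 100000; i++) {
		sk_lock(&lk);
		rr_idx = (rr_idx + 1) % 4; /* safe: only S holders touch it */
		(void)srv_map[rr_idx];
		sk_unlock(&lk);
	}
	return arg;
}

int main(void)
{
	pthread_t t[3];

	pthread_create(&t[0], NULL, hash_picker, NULL);
	pthread_create(&t[1], NULL, rr_picker, NULL);
	pthread_create(&t[2], NULL, rr_picker, NULL);
	for (int i = 0; i < 3; i++)
		pthread_join(t[i], NULL);

	wr_lock(&lk);      /* a map rebuild would need full exclusion */
	srv_map[0] = 5;
	wr_unlock(&lk);
	printf("final rr_idx: %u\n", rr_idx);
	return 0;
}

The sketch also shows why the commit message's argument holds: rr_idx is
only ever touched by the round-robin path, and at most one S holder runs
at a time, so updating rr_idx under the seek lock is race-free even
while hash lookups proceed under the read lock. A full write lock
remains necessary only when the srv[] mapping itself changes, e.g. when
the map is rebuilt after a weight change.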