MEDIUM: fd/threads: only grab the fd's lock if the FD has more than one thread

The vast majority of FDs are only seen by one thread. Currently the lock
on FDs costs a lot because it is touched often, even though there should be
very little contention. This patch ensures that the lock is only grabbed if
the FD is shared by more than one thread, since otherwise the situation is
safe: an FD private to a single thread cannot be accessed concurrently.
Doing so resulted in a 15% performance boost in a 12-thread test.
commit 87d54a9a6d
parent 9504dd64c6
Author: Willy Tarreau
Date:   2018-10-15 09:44:46 +02:00

2 changed files with 59 additions and 29 deletions
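
The whole change hinges on one cheap test: atleast2(mask) is non-zero when
a thread mask has more than one bit set, i.e. when the FD is shared between
threads. A minimal sketch of such a test, using the classic clear-the-lowest-
bit trick (the exact spelling in HAProxy may differ, but this is the idea):

/* Non-zero iff <m> has at least two bits set: m & (m - 1) clears the
 * lowest set bit, so something remains only if another bit was set.
 * With one mask bit per thread, "two or more bits" means the FD is
 * visible to several threads and the lock is actually needed.
 */
static inline unsigned long atleast2(unsigned long m)
{
	return m & (m - 1);
}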

include/proto/fd.h

@@ -289,8 +289,10 @@ static inline void fd_stop_recv(int fd)
 	if ((old ^ new) & FD_EV_POLLED_R)
 		updt_fd_polling(fd);
 
-	HA_SPIN_LOCK(FD_LOCK, &fdtab[fd].lock);
+	if (atleast2(fdtab[fd].thread_mask))
+		HA_SPIN_LOCK(FD_LOCK, &fdtab[fd].lock);
 	fd_update_cache(fd); /* need an update entry to change the state */
-	HA_SPIN_UNLOCK(FD_LOCK, &fdtab[fd].lock);
+	if (atleast2(fdtab[fd].thread_mask))
+		HA_SPIN_UNLOCK(FD_LOCK, &fdtab[fd].lock);
 }
 
@@ -310,8 +312,10 @@ static inline void fd_stop_send(int fd)
 	if ((old ^ new) & FD_EV_POLLED_W)
 		updt_fd_polling(fd);
 
-	HA_SPIN_LOCK(FD_LOCK, &fdtab[fd].lock);
+	if (atleast2(fdtab[fd].thread_mask))
+		HA_SPIN_LOCK(FD_LOCK, &fdtab[fd].lock);
 	fd_update_cache(fd); /* need an update entry to change the state */
-	HA_SPIN_UNLOCK(FD_LOCK, &fdtab[fd].lock);
+	if (atleast2(fdtab[fd].thread_mask))
+		HA_SPIN_UNLOCK(FD_LOCK, &fdtab[fd].lock);
 }
 
@@ -331,8 +335,10 @@ static inline void fd_stop_both(int fd)
 	if ((old ^ new) & FD_EV_POLLED_RW)
 		updt_fd_polling(fd);
 
-	HA_SPIN_LOCK(FD_LOCK, &fdtab[fd].lock);
+	if (atleast2(fdtab[fd].thread_mask))
+		HA_SPIN_LOCK(FD_LOCK, &fdtab[fd].lock);
 	fd_update_cache(fd); /* need an update entry to change the state */
-	HA_SPIN_UNLOCK(FD_LOCK, &fdtab[fd].lock);
+	if (atleast2(fdtab[fd].thread_mask))
+		HA_SPIN_UNLOCK(FD_LOCK, &fdtab[fd].lock);
 }
 
@@ -353,8 +359,10 @@ static inline void fd_cant_recv(const int fd)
 	if ((old ^ new) & FD_EV_POLLED_R)
 		updt_fd_polling(fd);
 
-	HA_SPIN_LOCK(FD_LOCK, &fdtab[fd].lock);
+	if (atleast2(fdtab[fd].thread_mask))
+		HA_SPIN_LOCK(FD_LOCK, &fdtab[fd].lock);
 	fd_update_cache(fd); /* need an update entry to change the state */
-	HA_SPIN_UNLOCK(FD_LOCK, &fdtab[fd].lock);
+	if (atleast2(fdtab[fd].thread_mask))
+		HA_SPIN_UNLOCK(FD_LOCK, &fdtab[fd].lock);
 }
 
@@ -364,8 +372,10 @@ static inline void fd_may_recv(const int fd)
 	/* marking ready never changes polled status */
 	HA_ATOMIC_OR(&fdtab[fd].state, FD_EV_READY_R);
 
-	HA_SPIN_LOCK(FD_LOCK, &fdtab[fd].lock);
+	if (atleast2(fdtab[fd].thread_mask))
+		HA_SPIN_LOCK(FD_LOCK, &fdtab[fd].lock);
 	fd_update_cache(fd); /* need an update entry to change the state */
-	HA_SPIN_UNLOCK(FD_LOCK, &fdtab[fd].lock);
+	if (atleast2(fdtab[fd].thread_mask))
+		HA_SPIN_UNLOCK(FD_LOCK, &fdtab[fd].lock);
 }
 
@@ -390,8 +400,10 @@ static inline void fd_done_recv(const int fd)
 	if ((old ^ new) & FD_EV_POLLED_R)
 		updt_fd_polling(fd);
 
-	HA_SPIN_LOCK(FD_LOCK, &fdtab[fd].lock);
+	if (atleast2(fdtab[fd].thread_mask))
+		HA_SPIN_LOCK(FD_LOCK, &fdtab[fd].lock);
 	fd_update_cache(fd); /* need an update entry to change the state */
-	HA_SPIN_UNLOCK(FD_LOCK, &fdtab[fd].lock);
+	if (atleast2(fdtab[fd].thread_mask))
+		HA_SPIN_UNLOCK(FD_LOCK, &fdtab[fd].lock);
 }
 
@@ -412,8 +424,10 @@ static inline void fd_cant_send(const int fd)
 	if ((old ^ new) & FD_EV_POLLED_W)
 		updt_fd_polling(fd);
 
-	HA_SPIN_LOCK(FD_LOCK, &fdtab[fd].lock);
+	if (atleast2(fdtab[fd].thread_mask))
+		HA_SPIN_LOCK(FD_LOCK, &fdtab[fd].lock);
 	fd_update_cache(fd); /* need an update entry to change the state */
-	HA_SPIN_UNLOCK(FD_LOCK, &fdtab[fd].lock);
+	if (atleast2(fdtab[fd].thread_mask))
+		HA_SPIN_UNLOCK(FD_LOCK, &fdtab[fd].lock);
 }
 
@@ -423,8 +437,10 @@ static inline void fd_may_send(const int fd)
 	/* marking ready never changes polled status */
 	HA_ATOMIC_OR(&fdtab[fd].state, FD_EV_READY_W);
 
-	HA_SPIN_LOCK(FD_LOCK, &fdtab[fd].lock);
+	if (atleast2(fdtab[fd].thread_mask))
+		HA_SPIN_LOCK(FD_LOCK, &fdtab[fd].lock);
 	fd_update_cache(fd); /* need an update entry to change the state */
-	HA_SPIN_UNLOCK(FD_LOCK, &fdtab[fd].lock);
+	if (atleast2(fdtab[fd].thread_mask))
+		HA_SPIN_UNLOCK(FD_LOCK, &fdtab[fd].lock);
 }
 
@@ -445,8 +461,10 @@ static inline void fd_want_recv(int fd)
 	if ((old ^ new) & FD_EV_POLLED_R)
 		updt_fd_polling(fd);
 
-	HA_SPIN_LOCK(FD_LOCK, &fdtab[fd].lock);
+	if (atleast2(fdtab[fd].thread_mask))
+		HA_SPIN_LOCK(FD_LOCK, &fdtab[fd].lock);
 	fd_update_cache(fd); /* need an update entry to change the state */
-	HA_SPIN_UNLOCK(FD_LOCK, &fdtab[fd].lock);
+	if (atleast2(fdtab[fd].thread_mask))
+		HA_SPIN_UNLOCK(FD_LOCK, &fdtab[fd].lock);
 }
 
@@ -467,8 +485,10 @@ static inline void fd_want_send(int fd)
 	if ((old ^ new) & FD_EV_POLLED_W)
 		updt_fd_polling(fd);
 
-	HA_SPIN_LOCK(FD_LOCK, &fdtab[fd].lock);
+	if (atleast2(fdtab[fd].thread_mask))
+		HA_SPIN_LOCK(FD_LOCK, &fdtab[fd].lock);
 	fd_update_cache(fd); /* need an update entry to change the state */
-	HA_SPIN_UNLOCK(FD_LOCK, &fdtab[fd].lock);
+	if (atleast2(fdtab[fd].thread_mask))
+		HA_SPIN_UNLOCK(FD_LOCK, &fdtab[fd].lock);
 }
 
@@ -476,9 +496,11 @@ static inline void fd_want_send(int fd)
  * by the poller to set FD_POLL_* flags. */
 static inline void fd_update_events(int fd, int evts)
 {
-	HA_SPIN_LOCK(FD_LOCK, &fdtab[fd].lock);
+	if (atleast2(fdtab[fd].thread_mask))
+		HA_SPIN_LOCK(FD_LOCK, &fdtab[fd].lock);
 	fdtab[fd].ev &= FD_POLL_STICKY;
 	fdtab[fd].ev |= evts;
-	HA_SPIN_UNLOCK(FD_LOCK, &fdtab[fd].lock);
+	if (atleast2(fdtab[fd].thread_mask))
+		HA_SPIN_UNLOCK(FD_LOCK, &fdtab[fd].lock);
 
 	if (fdtab[fd].ev & (FD_POLL_IN | FD_POLL_HUP | FD_POLL_ERR))
@@ -491,6 +513,7 @@ static inline void fd_update_events(int fd, int evts)
 /* Prepares <fd> for being polled */
 static inline void fd_insert(int fd, void *owner, void (*iocb)(int fd), unsigned long thread_mask)
 {
-	HA_SPIN_LOCK(FD_LOCK, &fdtab[fd].lock);
+	if (atleast2(thread_mask))
+		HA_SPIN_LOCK(FD_LOCK, &fdtab[fd].lock);
 	fdtab[fd].owner = owner;
 	fdtab[fd].iocb = iocb;
@@ -501,6 +524,7 @@ static inline void fd_insert(int fd, void *owner, void (*iocb)(int fd), unsigned long thread_mask)
 	/* note: do not reset polled_mask here as it indicates which poller
 	 * still knows this FD from a possible previous round.
 	 */
-	HA_SPIN_UNLOCK(FD_LOCK, &fdtab[fd].lock);
+	if (atleast2(thread_mask))
+		HA_SPIN_UNLOCK(FD_LOCK, &fdtab[fd].lock);
 }
 
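
All the hunks above repeat the same two-line guard around the pre-existing
lock calls. Factored out, the pattern would read as the hypothetical helpers
below (not part of the patch, which inlines the test at every call site):

/* Hypothetical helpers, for illustration only: take or release the
 * per-FD spinlock solely when thread_mask shows the FD is shared.
 * An FD seen by a single thread cannot race with itself, so skipping
 * the lock is safe and removes the atomic traffic from the fast path.
 */
static inline void fd_lock_if_shared(int fd)
{
	if (atleast2(fdtab[fd].thread_mask))
		HA_SPIN_LOCK(FD_LOCK, &fdtab[fd].lock);
}

static inline void fd_unlock_if_shared(int fd)
{
	if (atleast2(fdtab[fd].thread_mask))
		HA_SPIN_UNLOCK(FD_LOCK, &fdtab[fd].lock);
}

Note that both sides re-evaluate atleast2(): this is only correct while
thread_mask cannot change between the lock and the unlock, an assumption
fd_dodelete() below cannot make.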

src/fd.c

@@ -359,6 +359,9 @@ void fd_rm_from_fd_list(volatile struct fdlist *list, int fd, int off)
  */
 static void fd_dodelete(int fd, int do_close)
 {
-	HA_SPIN_LOCK(FD_LOCK, &fdtab[fd].lock);
+	unsigned long locked = atleast2(fdtab[fd].thread_mask);
+
+	if (locked)
+		HA_SPIN_LOCK(FD_LOCK, &fdtab[fd].lock);
 	if (fdtab[fd].linger_risk) {
 		/* this is generally set when connecting to servers */
@@ -379,6 +382,7 @@ static void fd_dodelete(int fd, int do_close)
 		polled_mask[fd] = 0;
 		close(fd);
 	}
-	HA_SPIN_UNLOCK(FD_LOCK, &fdtab[fd].lock);
+	if (locked)
+		HA_SPIN_UNLOCK(FD_LOCK, &fdtab[fd].lock);
 }
 
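
Unlike the inline functions in fd.h, fd_dodelete() evaluates atleast2() once
and keeps the result in a local: deletion tears the FD's state down, so
re-reading thread_mask at unlock time could disagree with the decision made
at lock time and leave the lock held. A self-contained sketch of that
snapshot pattern, with hypothetical names standing in for the HAProxy types:

#include <pthread.h>

struct resource {
	unsigned long thread_mask;
	pthread_spinlock_t lock;
};

static unsigned long atleast2(unsigned long m)
{
	return m & (m - 1);
}

void resource_delete(struct resource *res)
{
	/* Snapshot the decision: the mask is about to be cleared, and
	 * re-testing it before the unlock would skip releasing a lock
	 * that was actually taken.
	 */
	unsigned long locked = atleast2(res->thread_mask);

	if (locked)
		pthread_spin_lock(&res->lock);
	res->thread_mask = 0;	/* tear-down mutates the mask... */
	if (locked)		/* ...so trust only the snapshot */
		pthread_spin_unlock(&res->lock);
}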
@@ -417,7 +421,7 @@ static inline void fdlist_process_cached_events(volatile struct fdlist *fdlist)
 			continue;
 
 		HA_ATOMIC_OR(&fd_cache_mask, tid_bit);
-		if (HA_SPIN_TRYLOCK(FD_LOCK, &fdtab[fd].lock)) {
+		if (atleast2(fdtab[fd].thread_mask) && HA_SPIN_TRYLOCK(FD_LOCK, &fdtab[fd].lock)) {
 			activity[tid].fd_lock++;
 			continue;
 		}
@@ -432,11 +436,13 @@ static inline void fdlist_process_cached_events(volatile struct fdlist *fdlist)
 			fdtab[fd].ev |= FD_POLL_OUT;
 
 		if (fdtab[fd].iocb && fdtab[fd].owner && fdtab[fd].ev) {
-			HA_SPIN_UNLOCK(FD_LOCK, &fdtab[fd].lock);
+			if (atleast2(fdtab[fd].thread_mask))
+				HA_SPIN_UNLOCK(FD_LOCK, &fdtab[fd].lock);
 			fdtab[fd].iocb(fd);
 		}
 		else {
 			fd_release_cache_entry(fd);
-			HA_SPIN_UNLOCK(FD_LOCK, &fdtab[fd].lock);
+			if (atleast2(fdtab[fd].thread_mask))
+				HA_SPIN_UNLOCK(FD_LOCK, &fdtab[fd].lock);
 		}
 	}
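
Finally, in the cached-events loop the test combines with a trylock through
&&, which short-circuits: an FD private to the current thread never touches
the lock word at all, while a shared FD whose lock is busy bumps
activity[tid].fd_lock and is retried on a later pass (the surrounding code
implies HA_SPIN_TRYLOCK follows the pthread_spin_trylock convention of
returning non-zero on failure). The same decision, expressed positively as a
hypothetical self-contained helper:

#include <pthread.h>

struct fdesc {
	unsigned long thread_mask;	/* one bit per thread seeing this FD */
	pthread_spinlock_t lock;
};

static unsigned long atleast2(unsigned long m)
{
	return m & (m - 1);
}

/* Returns 1 when the caller may process the FD now: either it is
 * private to this thread (lock-free path) or the trylock succeeded.
 * Returns 0 when the lock is busy, so the caller counts the miss and
 * revisits the FD on a later pass instead of spinning.
 */
static int acquire_if_shared(struct fdesc *f)
{
	if (!atleast2(f->thread_mask))
		return 1;
	return pthread_spin_trylock(&f->lock) == 0;
}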