diff --git a/Makefile b/Makefile
index aa1a7fee8..c6f047de8 100644
--- a/Makefile
+++ b/Makefile
@@ -165,8 +165,8 @@ SMALL_OPTS =
 #### Debug settings
 # You can enable debugging on specific code parts by setting DEBUG=-DDEBUG_xxx.
 # Currently defined DEBUG macros include DEBUG_FULL, DEBUG_MEMORY, DEBUG_FSM,
-# DEBUG_HASH, DEBUG_AUTH and DEBUG_SPOE. Please check sources for exact meaning
-# or do not use at all.
+# DEBUG_HASH, DEBUG_AUTH, DEBUG_SPOE and DEBUG_THREAD. Please check sources for
+# exact meaning or do not use at all.
 DEBUG =
 
 #### Trace options
@@ -849,7 +849,8 @@ OBJS = src/cfgparse.o src/proto_http.o src/stats.o src/server.o src/stream.o \
        src/regex.o src/queue.o src/frontend.o src/arg.o src/proto_uxst.o \
        src/raw_sock.o src/lb_chash.o src/lb_fwlc.o src/lb_fwrr.o \
        src/lb_fas.o src/applet.o src/hdr_idx.o src/ev_select.o src/hash.o \
-       src/lb_map.o src/base64.o src/sha1.o src/protocol.o src/h1.o src/action.o
+       src/lb_map.o src/base64.o src/sha1.o src/protocol.o src/h1.o \
+       src/action.o src/hathreads.o
 
 EBTREE_OBJS = $(EBTREE_DIR)/ebtree.o \
               $(EBTREE_DIR)/eb32tree.o $(EBTREE_DIR)/eb64tree.o \
diff --git a/include/common/hathreads.h b/include/common/hathreads.h
new file mode 100644
index 000000000..6323e9a36
--- /dev/null
+++ b/include/common/hathreads.h
@@ -0,0 +1,507 @@
+/*
+ * include/common/hathreads.h
+ * definitions, macros and inline functions about threads.
+ *
+ * Copyright (C) 2017 Christopher Faulet - cfaulet@haproxy.com
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation, version 2.1
+ * exclusively.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef _COMMON_HATHREADS_H
+#define _COMMON_HATHREADS_H
+
+#include <common/config.h>
+
+#define MAX_THREADS_MASK ((unsigned long)-1)
+extern THREAD_LOCAL unsigned int tid;     /* The thread id */
+extern THREAD_LOCAL unsigned int tid_bit; /* The bit corresponding to the thread id */
+
+#ifndef USE_THREAD
+
+#define HA_ATOMIC_CAS(val, old, new) ({((*val) == (*old)) ? (*(val) = (new) , 1) : (*(old) = *(val), 0);})
+#define HA_ATOMIC_ADD(val, i)        ({*(val) += (i);})
+#define HA_ATOMIC_SUB(val, i)        ({*(val) -= (i);})
+#define HA_ATOMIC_AND(val, flags)    ({*(val) &= (flags);})
+#define HA_ATOMIC_OR(val, flags)     ({*(val) |= (flags);})
+#define HA_ATOMIC_XCHG(val, new)                                        \
+	({                                                              \
+		typeof(*(val)) __old = *(val);                          \
+		*(val) = new;                                           \
+		__old;                                                  \
+	})
+#define HA_ATOMIC_STORE(val, new)    ({*(val) = new;})
+#define HA_ATOMIC_UPDATE_MAX(val, new)                                  \
+	({                                                              \
+		typeof(*(val)) __new = (new);                           \
+                                                                        \
+		if (*(val) < __new)                                     \
+			*(val) = __new;                                 \
+		*(val);                                                 \
+	})
+
+#define HA_ATOMIC_UPDATE_MIN(val, new)                                  \
+	({                                                              \
+		typeof(*(val)) __new = (new);                           \
+                                                                        \
+		if (*(val) > __new)                                     \
+			*(val) = __new;                                 \
+		*(val);                                                 \
+	})
+
+#define SPIN_INIT(l)          do { /* do nothing */ } while(0)
+#define SPIN_DESTROY(l)       do { /* do nothing */ } while(0)
+#define SPIN_LOCK(lbl, l)     do { /* do nothing */ } while(0)
+#define SPIN_TRYLOCK(lbl, l)  ({ 0; })
+#define SPIN_UNLOCK(lbl, l)   do { /* do nothing */ } while(0)
+
+#define RWLOCK_INIT(l)           do { /* do nothing */ } while(0)
+#define RWLOCK_DESTROY(l)        do { /* do nothing */ } while(0)
+#define RWLOCK_WRLOCK(lbl, l)    do { /* do nothing */ } while(0)
+#define RWLOCK_TRYWRLOCK(lbl, l) ({ 0; })
+#define RWLOCK_WRUNLOCK(lbl, l)  do { /* do nothing */ } while(0)
+#define RWLOCK_RDLOCK(lbl, l)    do { /* do nothing */ } while(0)
+#define RWLOCK_TRYRDLOCK(lbl, l) ({ 0; })
+#define RWLOCK_RDUNLOCK(lbl, l)  do { /* do nothing */ } while(0)
+
+#else /* USE_THREAD */
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <time.h>
+#include <import/plock.h>
+
+/* TODO: thread: For now, we rely on GCC builtins but it could be a good idea to
+ * have a header file grouping all functions dealing with threads. */
+#define HA_ATOMIC_CAS(val, old, new) __atomic_compare_exchange_n(val, old, new, 0, 0, 0)
+#define HA_ATOMIC_ADD(val, i)        __atomic_add_fetch(val, i, 0)
+#define HA_ATOMIC_SUB(val, i)        __atomic_sub_fetch(val, i, 0)
+#define HA_ATOMIC_AND(val, flags)    __atomic_and_fetch(val, flags, 0)
+#define HA_ATOMIC_OR(val, flags)     __atomic_or_fetch(val, flags, 0)
+#define HA_ATOMIC_XCHG(val, new)     __atomic_exchange_n(val, new, 0)
+#define HA_ATOMIC_STORE(val, new)    __atomic_store_n(val, new, 0)
+#define HA_ATOMIC_UPDATE_MAX(val, new)                                  \
+	({                                                              \
+		typeof(*(val)) __old = *(val);                          \
+		typeof(*(val)) __new = (new);                           \
+                                                                        \
+		while (__old < __new && !HA_ATOMIC_CAS(val, &__old, __new)); \
+		(*val);                                                 \
+	})
+#define HA_ATOMIC_UPDATE_MIN(val, new)                                  \
+	({                                                              \
+		typeof((*val)) __old = *(val);                          \
+		typeof((*val)) __new = (new);                           \
+                                                                        \
+		while (__old > __new && !HA_ATOMIC_CAS(val, &__old, __new)); \
+		(*val);                                                 \
+	})
+
+#if defined(DEBUG_THREAD) || defined(DEBUG_FULL)
+
+enum lock_label {
+	LOCK_LABELS = 0
+};
+struct lock_stat {
+	uint64_t nsec_wait_for_write;
+	uint64_t nsec_wait_for_read;
+	uint64_t num_write_locked;
+	uint64_t num_write_unlocked;
+	uint64_t num_read_locked;
+	uint64_t num_read_unlocked;
+};
+
+extern struct lock_stat lock_stats[LOCK_LABELS];
+
+#define __HA_SPINLOCK_T      unsigned long
+
+#define __SPIN_INIT(l)       ({ (*l) = 0; })
+#define __SPIN_DESTROY(l)    ({ (*l) = 0; })
+#define __SPIN_LOCK(l)       pl_take_w(l)
+#define __SPIN_TRYLOCK(l)    !pl_try_w(l)
+#define __SPIN_UNLOCK(l)     pl_drop_w(l)
+
+#define __HA_RWLOCK_T        unsigned long
+
+#define __RWLOCK_INIT(l)      ({ (*l) = 0; })
+#define __RWLOCK_DESTROY(l)   ({ (*l) = 0; })
+#define __RWLOCK_WRLOCK(l)    pl_take_w(l)
+#define __RWLOCK_TRYWRLOCK(l) !pl_try_w(l)
+#define __RWLOCK_WRUNLOCK(l)  pl_drop_w(l)
+#define __RWLOCK_RDLOCK(l)    pl_take_r(l)
+#define __RWLOCK_TRYRDLOCK(l) !pl_try_r(l)
+#define __RWLOCK_RDUNLOCK(l)  pl_drop_r(l)
+
+#define HA_SPINLOCK_T struct ha_spinlock
+
+#define SPIN_INIT(l)     __spin_init(l)
+#define SPIN_DESTROY(l)  __spin_destroy(l)
+
+#define SPIN_LOCK(lbl, l)    __spin_lock(lbl, l, __func__, __FILE__, __LINE__)
+#define SPIN_TRYLOCK(lbl, l) __spin_trylock(lbl, l, __func__, __FILE__, __LINE__)
+#define SPIN_UNLOCK(lbl, l)  __spin_unlock(lbl, l, __func__, __FILE__, __LINE__)
+
+#define HA_RWLOCK_T struct ha_rwlock
+
+#define RWLOCK_INIT(l)          __ha_rwlock_init((l))
+#define RWLOCK_DESTROY(l)       __ha_rwlock_destroy((l))
+#define RWLOCK_WRLOCK(lbl,l)    __ha_rwlock_wrlock(lbl, l, __func__, __FILE__, __LINE__)
+#define RWLOCK_TRYWRLOCK(lbl,l) __ha_rwlock_trywrlock(lbl, l, __func__, __FILE__, __LINE__)
+#define RWLOCK_WRUNLOCK(lbl,l)  __ha_rwlock_wrunlock(lbl, l, __func__, __FILE__, __LINE__)
+#define RWLOCK_RDLOCK(lbl,l)    __ha_rwlock_rdlock(lbl, l)
+#define RWLOCK_TRYRDLOCK(lbl,l) __ha_rwlock_tryrdlock(lbl, l)
+#define RWLOCK_RDUNLOCK(lbl,l)  __ha_rwlock_rdunlock(lbl, l)
+
+struct ha_spinlock {
+	__HA_SPINLOCK_T lock;
+	struct {
+		unsigned long owner; /* a bit is set to 1 << tid for the lock owner */
+		unsigned long waiters; /* a bit is set to 1 << tid for waiting threads */
+		struct {
+			const char *function;
+			const char *file;
+			int line;
+		} last_location; /* location of the last owner */
+	} info;
+};
+
+struct ha_rwlock {
+	__HA_RWLOCK_T lock;
+	struct {
+		unsigned long cur_writer; /* a bit is set to 1 << tid for the lock owner */
+		unsigned long wait_writers; /* a bit is set to 1 << tid for waiting writers */
+		unsigned long cur_readers; /* a bit is set to 1 << tid for current readers */
+		unsigned long wait_readers; /* a bit is set to 1 << tid for waiting readers */
+		struct {
+			const char *function;
+			const char *file;
+			int line;
+		} last_location; /* location of the last write owner */
+	} info;
+};
+
+static inline void show_lock_stats()
+{
+	const char *labels[LOCK_LABELS] = {};
+	int lbl;
+
+	for (lbl = 0; lbl < LOCK_LABELS; lbl++) {
+		fprintf(stderr,
+			"Stats about Lock %s: \n"
+			"\t # write lock  : %lu\n"
+			"\t # write unlock: %lu (%ld)\n"
+			"\t # wait time for write     : %.3f msec\n"
+			"\t # wait time for write/lock: %.3f nsec\n"
+			"\t # read lock   : %lu\n"
+			"\t # read unlock : %lu (%ld)\n"
+			"\t # wait time for read      : %.3f msec\n"
+			"\t # wait time for read/lock : %.3f nsec\n",
+			labels[lbl],
+			lock_stats[lbl].num_write_locked,
+			lock_stats[lbl].num_write_unlocked,
+			lock_stats[lbl].num_write_unlocked - lock_stats[lbl].num_write_locked,
+			(double)lock_stats[lbl].nsec_wait_for_write / 1000000.0,
+			lock_stats[lbl].num_write_locked ? ((double)lock_stats[lbl].nsec_wait_for_write / (double)lock_stats[lbl].num_write_locked) : 0,
+			lock_stats[lbl].num_read_locked,
+			lock_stats[lbl].num_read_unlocked,
+			lock_stats[lbl].num_read_unlocked - lock_stats[lbl].num_read_locked,
+			(double)lock_stats[lbl].nsec_wait_for_read / 1000000.0,
+			lock_stats[lbl].num_read_locked ? ((double)lock_stats[lbl].nsec_wait_for_read / (double)lock_stats[lbl].num_read_locked) : 0);
+	}
+}
+
+/* Following functions are used to collect some stats about locks. We wrap
+ * the lock functions to know how much time we wait in a lock.
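+ *
+ * As an illustrative sketch only (FOO_LOCK below is a hypothetical label;
+ * no label is defined yet in enum lock_label), a caller built with
+ * DEBUG_THREAD would go through these instrumented wrappers like this:
+ *
+ *     struct ha_rwlock foo_lock;
+ *
+ *     RWLOCK_INIT(&foo_lock);
+ *     RWLOCK_WRLOCK(FOO_LOCK, &foo_lock);
+ *     ... protected section ...
+ *     RWLOCK_WRUNLOCK(FOO_LOCK, &foo_lock);
+ *
+ * The wait time and the lock/unlock counts are then accumulated into
+ * lock_stats[FOO_LOCK] and reported by show_lock_stats().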
+ */
+
+static uint64_t nsec_now(void) {
+	struct timespec ts;
+
+	clock_gettime(CLOCK_MONOTONIC, &ts);
+	return ((uint64_t) ts.tv_sec * 1000000000ULL +
+		(uint64_t) ts.tv_nsec);
+}
+
+static inline void __ha_rwlock_init(struct ha_rwlock *l)
+{
+	memset(l, 0, sizeof(struct ha_rwlock));
+	__RWLOCK_INIT(&l->lock);
+}
+
+static inline void __ha_rwlock_destroy(struct ha_rwlock *l)
+{
+	__RWLOCK_DESTROY(&l->lock);
+	memset(l, 0, sizeof(struct ha_rwlock));
+}
+
+
+static inline void __ha_rwlock_wrlock(enum lock_label lbl, struct ha_rwlock *l,
+				      const char *func, const char *file, int line)
+{
+	uint64_t start_time;
+
+	if (unlikely(l->info.cur_writer & tid_bit)) {
+		/* the thread is already owning the lock for write */
+		abort();
+	}
+
+	if (unlikely(l->info.cur_readers & tid_bit)) {
+		/* the thread is already owning the lock for read */
+		abort();
+	}
+
+	HA_ATOMIC_OR(&l->info.wait_writers, tid_bit);
+
+	start_time = nsec_now();
+	__RWLOCK_WRLOCK(&l->lock);
+	HA_ATOMIC_ADD(&lock_stats[lbl].nsec_wait_for_write, (nsec_now() - start_time));
+
+	HA_ATOMIC_ADD(&lock_stats[lbl].num_write_locked, 1);
+
+	l->info.cur_writer = tid_bit;
+	l->info.last_location.function = func;
+	l->info.last_location.file = file;
+	l->info.last_location.line = line;
+
+	HA_ATOMIC_AND(&l->info.wait_writers, ~tid_bit);
+}
+
+static inline int __ha_rwlock_trywrlock(enum lock_label lbl, struct ha_rwlock *l,
+					const char *func, const char *file, int line)
+{
+	uint64_t start_time;
+	int r;
+
+	if (unlikely(l->info.cur_writer & tid_bit)) {
+		/* the thread is already owning the lock for write */
+		abort();
+	}
+
+	if (unlikely(l->info.cur_readers & tid_bit)) {
+		/* the thread is already owning the lock for read */
+		abort();
+	}
+
+	/* We set waiting writer because trywrlock could wait for readers to quit */
+	HA_ATOMIC_OR(&l->info.wait_writers, tid_bit);
+
+	start_time = nsec_now();
+	r = __RWLOCK_TRYWRLOCK(&l->lock);
+	HA_ATOMIC_ADD(&lock_stats[lbl].nsec_wait_for_write, (nsec_now() - start_time));
+	if (unlikely(r)) {
+		HA_ATOMIC_AND(&l->info.wait_writers, ~tid_bit);
+		return r;
+	}
+	HA_ATOMIC_ADD(&lock_stats[lbl].num_write_locked, 1);
+
+	l->info.cur_writer = tid_bit;
+	l->info.last_location.function = func;
+	l->info.last_location.file = file;
+	l->info.last_location.line = line;
+
+	HA_ATOMIC_AND(&l->info.wait_writers, ~tid_bit);
+
+	return 0;
+}
+
+static inline void __ha_rwlock_wrunlock(enum lock_label lbl,struct ha_rwlock *l,
+					const char *func, const char *file, int line)
+{
+	if (unlikely(!(l->info.cur_writer & tid_bit))) {
+		/* the thread is not owning the lock for write */
+		abort();
+	}
+
+	l->info.cur_writer = 0;
+	l->info.last_location.function = func;
+	l->info.last_location.file = file;
+	l->info.last_location.line = line;
+
+	__RWLOCK_WRUNLOCK(&l->lock);
+
+	HA_ATOMIC_ADD(&lock_stats[lbl].num_write_unlocked, 1);
+}
+
+static inline void __ha_rwlock_rdlock(enum lock_label lbl,struct ha_rwlock *l)
+{
+	uint64_t start_time;
+
+	if (unlikely(l->info.cur_writer & tid_bit)) {
+		/* the thread is already owning the lock for write */
+		abort();
+	}
+
+	if (unlikely(l->info.cur_readers & tid_bit)) {
+		/* the thread is already owning the lock for read */
+		abort();
+	}
+
+	HA_ATOMIC_OR(&l->info.wait_readers, tid_bit);
+
+	start_time = nsec_now();
+	__RWLOCK_RDLOCK(&l->lock);
+	HA_ATOMIC_ADD(&lock_stats[lbl].nsec_wait_for_read, (nsec_now() - start_time));
+	HA_ATOMIC_ADD(&lock_stats[lbl].num_read_locked, 1);
+
+	HA_ATOMIC_OR(&l->info.cur_readers, tid_bit);
+
+	HA_ATOMIC_AND(&l->info.wait_readers, ~tid_bit);
+}
+
+static inline int __ha_rwlock_tryrdlock(enum lock_label lbl,struct ha_rwlock *l)
+{
+	int r;
+
+	if (unlikely(l->info.cur_writer & tid_bit)) {
+		/* the thread is already owning the lock for write */
+		abort();
+	}
+
+	if (unlikely(l->info.cur_readers & tid_bit)) {
+		/* the thread is already owning the lock for read */
+		abort();
+	}
+
+	/* try read should never wait */
+	r = __RWLOCK_TRYRDLOCK(&l->lock);
+	if (unlikely(r))
+		return r;
+	HA_ATOMIC_ADD(&lock_stats[lbl].num_read_locked, 1);
+
+	HA_ATOMIC_OR(&l->info.cur_readers, tid_bit);
+
+	return 0;
+}
+
+static inline void __ha_rwlock_rdunlock(enum lock_label lbl,struct ha_rwlock *l)
+{
+	if (unlikely(!(l->info.cur_readers & tid_bit))) {
+		/* the thread is not owning the lock for read */
+		abort();
+	}
+
+	HA_ATOMIC_AND(&l->info.cur_readers, ~tid_bit);
+
+	__RWLOCK_RDUNLOCK(&l->lock);
+
+	HA_ATOMIC_ADD(&lock_stats[lbl].num_read_unlocked, 1);
+}
+
+static inline void __spin_init(struct ha_spinlock *l)
+{
+	memset(l, 0, sizeof(struct ha_spinlock));
+	__SPIN_INIT(&l->lock);
+}
+
+static inline void __spin_destroy(struct ha_spinlock *l)
+{
+	__SPIN_DESTROY(&l->lock);
+	memset(l, 0, sizeof(struct ha_spinlock));
+}
+
+static inline void __spin_lock(enum lock_label lbl, struct ha_spinlock *l,
+			       const char *func, const char *file, int line)
+{
+	uint64_t start_time;
+
+	if (unlikely(l->info.owner & tid_bit)) {
+		/* the thread is already owning the lock */
+		abort();
+	}
+
+	HA_ATOMIC_OR(&l->info.waiters, tid_bit);
+
+	start_time = nsec_now();
+	__SPIN_LOCK(&l->lock);
+	HA_ATOMIC_ADD(&lock_stats[lbl].nsec_wait_for_write, (nsec_now() - start_time));
+
+	HA_ATOMIC_ADD(&lock_stats[lbl].num_write_locked, 1);
+
+
+	l->info.owner = tid_bit;
+	l->info.last_location.function = func;
+	l->info.last_location.file = file;
+	l->info.last_location.line = line;
+
+	HA_ATOMIC_AND(&l->info.waiters, ~tid_bit);
+}
+
+static inline int __spin_trylock(enum lock_label lbl, struct ha_spinlock *l,
+				 const char *func, const char *file, int line)
+{
+	int r;
+
+	if (unlikely(l->info.owner & tid_bit)) {
+		/* the thread is already owning the lock */
+		abort();
+	}
+
+	/* try read should never wait */
+	r = __SPIN_TRYLOCK(&l->lock);
+	if (unlikely(r))
+		return r;
+	HA_ATOMIC_ADD(&lock_stats[lbl].num_write_locked, 1);
+
+	l->info.owner = tid_bit;
+	l->info.last_location.function = func;
+	l->info.last_location.file = file;
+	l->info.last_location.line = line;
+
+	return 0;
+}
+
+static inline void __spin_unlock(enum lock_label lbl, struct ha_spinlock *l,
+				 const char *func, const char *file, int line)
+{
+	if (unlikely(!(l->info.owner & tid_bit))) {
+		/* the thread is not owning the lock */
+		abort();
+	}
+
+	l->info.owner = 0;
+	l->info.last_location.function = func;
+	l->info.last_location.file = file;
+	l->info.last_location.line = line;
+
+	__RWLOCK_WRUNLOCK(&l->lock);
+
+	HA_ATOMIC_ADD(&lock_stats[lbl].num_write_unlocked, 1);
+}
+
+#else /* DEBUG_THREAD */
+
+#define HA_SPINLOCK_T unsigned long
+
+#define SPIN_INIT(l)         ({ (*l) = 0; })
+#define SPIN_DESTROY(l)      ({ (*l) = 0; })
+#define SPIN_LOCK(lbl, l)    pl_take_w(l)
+#define SPIN_TRYLOCK(lbl, l) !pl_try_w(l)
+#define SPIN_UNLOCK(lbl, l)  pl_drop_w(l)
+
+#define HA_RWLOCK_T unsigned long
+
+#define RWLOCK_INIT(l)          ({ (*l) = 0; })
+#define RWLOCK_DESTROY(l)       ({ (*l) = 0; })
+#define RWLOCK_WRLOCK(lbl,l)    pl_take_w(l)
+#define RWLOCK_TRYWRLOCK(lbl,l) !pl_try_w(l)
+#define RWLOCK_WRUNLOCK(lbl,l)  pl_drop_w(l)
+#define RWLOCK_RDLOCK(lbl,l)    pl_take_r(l)
+#define RWLOCK_TRYRDLOCK(lbl,l) !pl_try_r(l)
+#define RWLOCK_RDUNLOCK(lbl,l)  pl_drop_r(l)
+
+#endif /* DEBUG_THREAD */
+
+#endif /* USE_THREAD */
+
+#endif /* _COMMON_HATHREADS_H */
diff --git a/src/hathreads.c b/src/hathreads.c
new file mode 100644
index 000000000..ea48ce58e
--- /dev/null
+++ b/src/hathreads.c
@@ -0,0 +1,34 @@
+/*
+ * functions about threads.
+ *
+ * Copyright (C) 2017 Christopher Faulet - cfaulet@haproxy.com
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ *
+ */
+
+#include <common/hathreads.h>
+
+THREAD_LOCAL unsigned int tid = 0;
+THREAD_LOCAL unsigned int tid_bit = (1UL << 0);
+
+#ifdef USE_THREAD
+
+#if defined(DEBUG_THREAD) || defined(DEBUG_FULL)
+struct lock_stat lock_stats[LOCK_LABELS];
+#endif
+
+__attribute__((constructor))
+static void __hathreads_init(void)
+{
+
+#if defined(DEBUG_THREAD) || defined(DEBUG_FULL)
+	memset(lock_stats, 0, sizeof(lock_stats));
+#endif
+
+}
+
+#endif
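For reference, a minimal usage sketch of the atomic macros introduced above. The conn_counters structure and the two functions are illustrative assumptions, not part of the patch; only HA_ATOMIC_ADD, HA_ATOMIC_SUB and HA_ATOMIC_UPDATE_MAX come from hathreads.h. Both the __atomic_*_fetch builtins used with USE_THREAD and the single-threaded fallbacks evaluate to the updated value, so the same calling code works in either build:

    /* sketch: maintaining a shared counter with the HA_ATOMIC_* macros;
     * names below are hypothetical and only meant to show the call pattern.
     */
    #include <common/hathreads.h>

    static struct {
    	unsigned int cur; /* currently active connections */
    	unsigned int max; /* highest value observed so far */
    } conn_counters;

    void account_connection(void)
    {
    	unsigned int now;

    	/* bump the current counter and keep the recorded maximum up to date */
    	now = HA_ATOMIC_ADD(&conn_counters.cur, 1);
    	HA_ATOMIC_UPDATE_MAX(&conn_counters.max, now);
    }

    void release_connection(void)
    {
    	HA_ATOMIC_SUB(&conn_counters.cur, 1);
    }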