MINOR: thread: add lock level information to the thread_ctx

The new lock_level field counts the locks currently held by the calling
thread. It is maintained as soon as DEBUG_THREAD is at least 1. In
addition, thread_isolate() adds 128, so that combinations of held locks
and isolation can be checked at once. The value is also reported in
thread dumps (warnings and panics).
Author: Willy Tarreau
Date:   2025-09-11 08:29:25 +02:00
parent 503084643f
commit 4b23b2ed32
4 changed files with 53 additions and 43 deletions
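Note: as a rough illustration of the accounting this commit introduces, the field behaves like a per-thread byte counter whose top bit marks isolation. The following is a standalone model with hypothetical names, not HAProxy code:

/* Standalone model of the lock_level accounting: +1 per held lock,
 * +128 while isolated. Hypothetical names; illustration only. */
#include <assert.h>
#include <stdint.h>
#include <stdio.h>

static uint8_t lock_level; /* 0 = unlocked */

int main(void)
{
	lock_level += 1;   /* take one lock    -> 1   */
	lock_level += 128; /* thread_isolate() -> 129 */

	/* decoding "combinations of both" from the single byte assumes
	 * fewer than 128 locks are ever held at once */
	printf("isolated=%d held=%d\n", !!(lock_level & 128), lock_level & 127);

	lock_level -= 128; /* thread_release() -> 1 */
	lock_level -= 1;   /* drop the lock    -> 0 */
	assert(lock_level == 0);
	return 0;
}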

@@ -331,8 +331,8 @@ static inline unsigned long thread_isolated()
 #if (DEBUG_THREAD < 1) && !defined(DEBUG_FULL)
-#define _lock_wait(_LK_, lbl, expr) do { (void)(expr); } while (0)
-#define _lock_cond(_LK_, lbl, expr) ({ typeof(expr) _expr = (expr); _expr; })
+#define _lock_wait(_LK_, bal, lbl, expr) do { (void)(expr); } while (0)
+#define _lock_cond(_LK_, bal, lbl, expr) ({ typeof(expr) _expr = (expr); _expr; })
 #else
@@ -359,21 +359,26 @@ static inline unsigned long thread_isolated()
 		th_ctx->lock_history = (th_ctx->lock_history << 8) + _lck; \
 	} while (0)
-#define _lock_wait(_LK_, lbl, expr) do { \
+#define _lock_wait(_LK_, bal, lbl, expr) do { \
 	uint64_t lock_start = 0; \
 	extern uint64_t now_mono_time(void); \
 	if (_LK_ != _LK_UN) { \
+		th_ctx->lock_level += bal; \
 		if (unlikely(th_ctx->flags & TH_FL_TASK_PROFILING)) \
 			lock_start = now_mono_time(); \
 	} \
 	(void)(expr); \
-	if (_LK_ != _LK_UN && unlikely(lock_start)) \
+	if (_LK_ == _LK_UN) \
+		th_ctx->lock_level += bal; \
+	else if (unlikely(lock_start)) \
 		th_ctx->lock_wait_total += now_mono_time() - lock_start; \
 	if (lbl != OTHER_LOCK) \
 		_lock_wait_common(_LK_, lbl); \
 } while (0)
-#define _lock_cond(_LK_, lbl, expr) ({ \
+#define _lock_cond(_LK_, bal, lbl, expr) ({ \
 	typeof(expr) _expr = (expr); \
+	if (_expr == 0) \
+		th_ctx->lock_level += bal; \
 	if (lbl != OTHER_LOCK && !_expr) \
 		_lock_wait_common(_LK_, lbl); \
 	_expr; \
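The new bal argument is the lock-count delta each wrapper passes down: +1 when acquiring, -1 when releasing, 0 when converting one lock type into another. Note the ordering in _lock_wait(): for acquisitions the delta is applied before evaluating the expression, while for _LK_UN it is applied only once the unlock has completed. _lock_cond() applies the delta only when the wrapped expression returns 0, i.e. when the try-lock succeeded. A minimal sketch of that success convention (hypothetical names, not the real macros):

/* Minimal model of _lock_cond()'s convention: the wrapped expression
 * returns 0 on success, so the balance is only applied when the
 * try-lock actually succeeded. Illustration only. */
#include <assert.h>

static int level;

/* 'expr' stands for the wrapped try-lock result: 0 means success */
static int model_lock_cond(int bal, int expr)
{
	if (expr == 0)
		level += bal;
	return expr;
}

int main(void)
{
	model_lock_cond(+1, 0); /* success: level -> 1    */
	model_lock_cond(+1, 1); /* failure: level stays 1 */
	assert(level == 1);
	return 0;
}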
@@ -414,29 +419,29 @@ static inline uint64_t cshared_read(struct cshared *ctr)
 #define HA_SPIN_INIT(l)             ({ (*l) = 0; })
 #define HA_SPIN_DESTROY(l)          ({ (*l) = 0; })
-#define HA_SPIN_LOCK(lbl, l)        _lock_wait(_LK_SK, lbl, pl_take_s(l))
-#define HA_SPIN_TRYLOCK(lbl, l)     _lock_cond(_LK_SK, lbl, !pl_try_s(l))
-#define HA_SPIN_UNLOCK(lbl, l)      _lock_wait(_LK_UN, lbl, pl_drop_s(l))
+#define HA_SPIN_LOCK(lbl, l)        _lock_wait(_LK_SK, 1, lbl, pl_take_s(l))
+#define HA_SPIN_TRYLOCK(lbl, l)     _lock_cond(_LK_SK, 1, lbl, !pl_try_s(l))
+#define HA_SPIN_UNLOCK(lbl, l)      _lock_wait(_LK_UN, -1, lbl, pl_drop_s(l))
 #define HA_RWLOCK_INIT(l)           ({ (*l) = 0; })
 #define HA_RWLOCK_DESTROY(l)        ({ (*l) = 0; })
-#define HA_RWLOCK_WRLOCK(lbl,l)     _lock_wait(_LK_WR, lbl, pl_take_w(l))
-#define HA_RWLOCK_TRYWRLOCK(lbl,l)  _lock_cond(_LK_WR, lbl, !pl_try_w(l))
-#define HA_RWLOCK_WRUNLOCK(lbl,l)   _lock_wait(_LK_UN, lbl, pl_drop_w(l))
-#define HA_RWLOCK_RDLOCK(lbl,l)     _lock_wait(_LK_RD, lbl, pl_take_r(l))
-#define HA_RWLOCK_TRYRDLOCK(lbl,l)  _lock_cond(_LK_RD, lbl, (!pl_try_r(l)))
-#define HA_RWLOCK_RDUNLOCK(lbl,l)   _lock_wait(_LK_UN, lbl, pl_drop_r(l))
+#define HA_RWLOCK_WRLOCK(lbl,l)     _lock_wait(_LK_WR, 1, lbl, pl_take_w(l))
+#define HA_RWLOCK_TRYWRLOCK(lbl,l)  _lock_cond(_LK_WR, 1, lbl, !pl_try_w(l))
+#define HA_RWLOCK_WRUNLOCK(lbl,l)   _lock_wait(_LK_UN, -1, lbl, pl_drop_w(l))
+#define HA_RWLOCK_RDLOCK(lbl,l)     _lock_wait(_LK_RD, 1, lbl, pl_take_r(l))
+#define HA_RWLOCK_TRYRDLOCK(lbl,l)  _lock_cond(_LK_RD, 1, lbl, (!pl_try_r(l)))
+#define HA_RWLOCK_RDUNLOCK(lbl,l)   _lock_wait(_LK_UN, -1, lbl, pl_drop_r(l))
 /* rwlock upgrades via seek locks */
-#define HA_RWLOCK_SKLOCK(lbl,l)     _lock_wait(_LK_SK, lbl, pl_take_s(l))      /* N --> S */
-#define HA_RWLOCK_SKTOWR(lbl,l)     _lock_wait(_LK_WR, lbl, pl_stow(l))        /* S --> W */
-#define HA_RWLOCK_WRTOSK(lbl,l)     _lock_wait(_LK_SK, lbl, pl_wtos(l))        /* W --> S */
-#define HA_RWLOCK_SKTORD(lbl,l)     _lock_wait(_LK_RD, lbl, pl_stor(l))        /* S --> R */
-#define HA_RWLOCK_WRTORD(lbl,l)     _lock_wait(_LK_RD, lbl, pl_wtor(l))        /* W --> R */
-#define HA_RWLOCK_SKUNLOCK(lbl,l)   _lock_wait(_LK_UN, lbl, pl_drop_s(l))      /* S --> N */
-#define HA_RWLOCK_TRYSKLOCK(lbl,l)  _lock_cond(_LK_SK, lbl, !pl_try_s(l))      /* N -?> S */
-#define HA_RWLOCK_TRYRDTOSK(lbl,l)  _lock_cond(_LK_SK, lbl, !pl_try_rtos(l))   /* R -?> S */
-#define HA_RWLOCK_TRYRDTOWR(lbl, l) _lock_cond(_LK_WR, lbl, !pl_try_rtow(l))   /* R -?> W */
+#define HA_RWLOCK_SKLOCK(lbl,l)     _lock_wait(_LK_SK, 1, lbl, pl_take_s(l))     /* N --> S */
+#define HA_RWLOCK_SKTOWR(lbl,l)     _lock_wait(_LK_WR, 0, lbl, pl_stow(l))       /* S --> W */
+#define HA_RWLOCK_WRTOSK(lbl,l)     _lock_wait(_LK_SK, 0, lbl, pl_wtos(l))       /* W --> S */
+#define HA_RWLOCK_SKTORD(lbl,l)     _lock_wait(_LK_RD, 0, lbl, pl_stor(l))       /* S --> R */
+#define HA_RWLOCK_WRTORD(lbl,l)     _lock_wait(_LK_RD, 0, lbl, pl_wtor(l))       /* W --> R */
+#define HA_RWLOCK_SKUNLOCK(lbl,l)   _lock_wait(_LK_UN, -1, lbl, pl_drop_s(l))    /* S --> N */
+#define HA_RWLOCK_TRYSKLOCK(lbl,l)  _lock_cond(_LK_SK, 1, lbl, !pl_try_s(l))     /* N -?> S */
+#define HA_RWLOCK_TRYRDTOSK(lbl,l)  _lock_cond(_LK_SK, 0, lbl, !pl_try_rtos(l))  /* R -?> S */
+#define HA_RWLOCK_TRYRDTOWR(lbl, l) _lock_cond(_LK_WR, 0, lbl, !pl_try_rtow(l))  /* R -?> W */
 #else /* (DEBUG_THREAD < 2) && !defined(DEBUG_FULL) */
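The balance values in this table (and in the DEBUG_THREAD >= 2 variant below) follow one pattern: lock/unlock pairs pass +1/-1, while upgrades and downgrades (S --> W, W --> S, S --> R, W --> R, R -?> S, R -?> W) pass 0 because they exchange one held lock for another. A quick sanity check of that invariant over a complete cycle (plain C model, illustration only):

/* The balances from the table above sum to zero over any complete
 * N -> S -> W -> S -> N cycle, so lock_level ends where it started. */
#include <assert.h>

int main(void)
{
	int level = 0;
	level += 1;  /* HA_RWLOCK_SKLOCK:   N --> S */
	level += 0;  /* HA_RWLOCK_SKTOWR:   S --> W */
	level += 0;  /* HA_RWLOCK_WRTOSK:   W --> S */
	level -= 1;  /* HA_RWLOCK_SKUNLOCK: S --> N */
	assert(level == 0);
	return 0;
}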
@@ -471,28 +476,28 @@ static inline uint64_t cshared_read(struct cshared *ctr)
 #define HA_SPIN_INIT(l)             __spin_init(l)
 #define HA_SPIN_DESTROY(l)          __spin_destroy(l)
-#define HA_SPIN_LOCK(lbl, l)        _lock_wait(_LK_SK, lbl, __spin_lock(lbl, l, __func__, __FILE__, __LINE__))
-#define HA_SPIN_TRYLOCK(lbl, l)     _lock_cond(_LK_SK, lbl, __spin_trylock(lbl, l, __func__, __FILE__, __LINE__))
-#define HA_SPIN_UNLOCK(lbl, l)      _lock_wait(_LK_UN, lbl, __spin_unlock(lbl, l, __func__, __FILE__, __LINE__))
+#define HA_SPIN_LOCK(lbl, l)        _lock_wait(_LK_SK, 1, lbl, __spin_lock(lbl, l, __func__, __FILE__, __LINE__))
+#define HA_SPIN_TRYLOCK(lbl, l)     _lock_cond(_LK_SK, 1, lbl, __spin_trylock(lbl, l, __func__, __FILE__, __LINE__))
+#define HA_SPIN_UNLOCK(lbl, l)      _lock_wait(_LK_UN, -1, lbl, __spin_unlock(lbl, l, __func__, __FILE__, __LINE__))
 #define HA_RWLOCK_INIT(l)           __ha_rwlock_init((l))
 #define HA_RWLOCK_DESTROY(l)        __ha_rwlock_destroy((l))
-#define HA_RWLOCK_WRLOCK(lbl,l)     _lock_wait(_LK_WR, lbl, __ha_rwlock_wrlock(lbl, l, __func__, __FILE__, __LINE__))
-#define HA_RWLOCK_TRYWRLOCK(lbl,l)  _lock_cond(_LK_WR, lbl, __ha_rwlock_trywrlock(lbl, l, __func__, __FILE__, __LINE__))
-#define HA_RWLOCK_WRUNLOCK(lbl,l)   _lock_wait(_LK_UN, lbl, __ha_rwlock_wrunlock(lbl, l, __func__, __FILE__, __LINE__))
-#define HA_RWLOCK_RDLOCK(lbl,l)     _lock_wait(_LK_RD, lbl, __ha_rwlock_rdlock(lbl, l))
-#define HA_RWLOCK_TRYRDLOCK(lbl,l)  _lock_cond(_LK_RD, lbl, __ha_rwlock_tryrdlock(lbl, l))
-#define HA_RWLOCK_RDUNLOCK(lbl,l)   _lock_wait(_LK_UN, lbl, __ha_rwlock_rdunlock(lbl, l))
+#define HA_RWLOCK_WRLOCK(lbl,l)     _lock_wait(_LK_WR, 1, lbl, __ha_rwlock_wrlock(lbl, l, __func__, __FILE__, __LINE__))
+#define HA_RWLOCK_TRYWRLOCK(lbl,l)  _lock_cond(_LK_WR, 1, lbl, __ha_rwlock_trywrlock(lbl, l, __func__, __FILE__, __LINE__))
+#define HA_RWLOCK_WRUNLOCK(lbl,l)   _lock_wait(_LK_UN, -1, lbl, __ha_rwlock_wrunlock(lbl, l, __func__, __FILE__, __LINE__))
+#define HA_RWLOCK_RDLOCK(lbl,l)     _lock_wait(_LK_RD, 1, lbl, __ha_rwlock_rdlock(lbl, l))
+#define HA_RWLOCK_TRYRDLOCK(lbl,l)  _lock_cond(_LK_RD, 1, lbl, __ha_rwlock_tryrdlock(lbl, l))
+#define HA_RWLOCK_RDUNLOCK(lbl,l)   _lock_wait(_LK_UN, -1, lbl, __ha_rwlock_rdunlock(lbl, l))
-#define HA_RWLOCK_SKLOCK(lbl,l)     _lock_wait(_LK_SK, lbl, __ha_rwlock_sklock(lbl, l, __func__, __FILE__, __LINE__))
-#define HA_RWLOCK_SKTOWR(lbl,l)     _lock_wait(_LK_WR, lbl, __ha_rwlock_sktowr(lbl, l, __func__, __FILE__, __LINE__))
-#define HA_RWLOCK_WRTOSK(lbl,l)     _lock_wait(_LK_SK, lbl, __ha_rwlock_wrtosk(lbl, l, __func__, __FILE__, __LINE__))
-#define HA_RWLOCK_SKTORD(lbl,l)     _lock_wait(_LK_RD, lbl, __ha_rwlock_sktord(lbl, l, __func__, __FILE__, __LINE__))
-#define HA_RWLOCK_WRTORD(lbl,l)     _lock_wait(_LK_RD, lbl, __ha_rwlock_wrtord(lbl, l, __func__, __FILE__, __LINE__))
-#define HA_RWLOCK_SKUNLOCK(lbl,l)   _lock_wait(_LK_UN, lbl, __ha_rwlock_skunlock(lbl, l, __func__, __FILE__, __LINE__))
-#define HA_RWLOCK_TRYSKLOCK(lbl,l)  _lock_cond(_LK_SK, lbl, __ha_rwlock_trysklock(lbl, l, __func__, __FILE__, __LINE__))
-#define HA_RWLOCK_TRYRDTOSK(lbl,l)  _lock_cond(_LK_RD, lbl, __ha_rwlock_tryrdtosk(lbl, l, __func__, __FILE__, __LINE__))
-#define HA_RWLOCK_TRYRDTOWR(lbl,l)  _lock_cond(_LK_WR, lbl, __ha_rwlock_tryrdtowr(lbl, l, __func__, __FILE__, __LINE__))
+#define HA_RWLOCK_SKLOCK(lbl,l)     _lock_wait(_LK_SK, 1, lbl, __ha_rwlock_sklock(lbl, l, __func__, __FILE__, __LINE__))
+#define HA_RWLOCK_SKTOWR(lbl,l)     _lock_wait(_LK_WR, 0, lbl, __ha_rwlock_sktowr(lbl, l, __func__, __FILE__, __LINE__))
+#define HA_RWLOCK_WRTOSK(lbl,l)     _lock_wait(_LK_SK, 0, lbl, __ha_rwlock_wrtosk(lbl, l, __func__, __FILE__, __LINE__))
+#define HA_RWLOCK_SKTORD(lbl,l)     _lock_wait(_LK_RD, 0, lbl, __ha_rwlock_sktord(lbl, l, __func__, __FILE__, __LINE__))
+#define HA_RWLOCK_WRTORD(lbl,l)     _lock_wait(_LK_RD, 0, lbl, __ha_rwlock_wrtord(lbl, l, __func__, __FILE__, __LINE__))
+#define HA_RWLOCK_SKUNLOCK(lbl,l)   _lock_wait(_LK_UN, -1, lbl, __ha_rwlock_skunlock(lbl, l, __func__, __FILE__, __LINE__))
+#define HA_RWLOCK_TRYSKLOCK(lbl,l)  _lock_cond(_LK_SK, 1, lbl, __ha_rwlock_trysklock(lbl, l, __func__, __FILE__, __LINE__))
+#define HA_RWLOCK_TRYRDTOSK(lbl,l)  _lock_cond(_LK_RD, 0, lbl, __ha_rwlock_tryrdtosk(lbl, l, __func__, __FILE__, __LINE__))
+#define HA_RWLOCK_TRYRDTOWR(lbl,l)  _lock_cond(_LK_WR, 0, lbl, __ha_rwlock_tryrdtowr(lbl, l, __func__, __FILE__, __LINE__))
 /* Following functions are used to collect some stats about locks. We wrap
  * pthread functions to known how much time we wait in a lock. */

@@ -142,8 +142,7 @@ struct thread_ctx {
 	uint8_t tl_class_mask;              /* bit mask of non-empty tasklets classes */
 	uint8_t bufq_map;                   /* one bit per non-empty buffer_wq */
 	uint8_t trc_disable_ctr;            /* cumulative counter to temporarily disable tracing */
-	// 1 byte hole here
+	uint8_t lock_level;                 /* locking level for that thread. 0=unlocked, +128=isolated. */
 	unsigned int nb_rhttp_conns;        /* count of current conns used for active reverse HTTP */
 	struct sched_activity *sched_profile_entry; /* profile entry in use by the current task/tasklet, only if sched_wake_date>0 */
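The new member fills what was previously a one-byte alignment hole, so struct thread_ctx keeps its size. A simplified layout sketch (not the real structure; assumes common alignment rules):

/* Three uint8_t members followed by an unsigned int leave a 1-byte
 * hole that lock_level now fills, so the size is unchanged (8 bytes
 * here on common ABIs). Not the real thread_ctx. */
#include <stdint.h>
#include <stdio.h>

struct demo_ctx {
	uint8_t tl_class_mask;
	uint8_t bufq_map;
	uint8_t trc_disable_ctr;
	uint8_t lock_level;      /* previously a 1-byte hole */
	unsigned int nb_rhttp_conns;
};

int main(void)
{
	printf("sizeof(struct demo_ctx) = %zu\n", sizeof(struct demo_ctx));
	return 0;
}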

@@ -348,6 +348,9 @@ void ha_thread_dump_one(struct buffer *buf, int is_caller)
 	              !!(_HA_ATOMIC_LOAD(&tg_ctx->threads_harmless) & ti->ltid_bit),
 	              isolated_thread == tid);
 #endif
+#if (DEBUG_THREAD > 0) || defined(DEBUG_FULL)
+	chunk_appendf(buf, " locks=%d", th_ctx->lock_level);
+#endif
 	chunk_appendf(buf, "\n");
 	chunk_appendf(buf, " cpu_ns: poll=%llu now=%llu diff=%llu\n", p, n, n-p);

@@ -139,6 +139,7 @@ void thread_isolate()
 	 * 1) reset isolated_thread to ~0;
 	 * 2) decrement rdv_requests.
 	 */
+	th_ctx->lock_level += 128;
 }
 
 /* Isolates the current thread : request the ability to work while all other
@@ -212,6 +213,7 @@ void thread_isolate_full()
 	 * 1) reset isolated_thread to ~0;
 	 * 2) decrement rdv_requests.
 	 */
+	th_ctx->lock_level += 128;
 }
 
 /* Cancels the effect of thread_isolate() by resetting the ID of the isolated
@@ -224,6 +226,7 @@ void thread_release()
 {
 	HA_ATOMIC_STORE(&isolated_thread, ~0U);
 	HA_ATOMIC_DEC(&rdv_requests);
+	th_ctx->lock_level -= 128;
 }
 
 /* Sets up threads, signals and masks, and starts threads 2 and above.
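Both isolation entry points add the same 128 offset and thread_release() subtracts it, so paired isolate/release calls leave the held-lock count intact. A standalone check of that pairing (hypothetical names, not HAProxy code):

/* Model of the isolate/release balance added in the hunks above:
 * +128 on isolation, -128 on release, held-lock count untouched. */
#include <assert.h>
#include <stdint.h>

static uint8_t lock_level = 1; /* pretend one lock is already held */

static void model_isolate(void) { lock_level += 128; }
static void model_release(void) { lock_level -= 128; }

int main(void)
{
	model_isolate();
	assert(lock_level == 129); /* isolated while holding one lock */
	model_release();
	assert(lock_level == 1);   /* back to the pre-isolation value */
	return 0;
}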