MINOR: thread: only use atomic ops to touch the flags
The thread flags are occasionally touched by other threads, e.g. the STUCK flag may be set by other ones, and they're occasionally watched by other threads as well. As such we need to use only atomic ops to manipulate them. Most places were already doing so; this commit generalizes the practice. Only ha_thread_dump() does not change because it runs under thread isolation.
commit bdcd32598f
parent 8e079cdd44
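All the hunks below follow the same pattern: a plain load or read-modify-write of th_ctx->flags becomes an _HA_ATOMIC_LOAD(), _HA_ATOMIC_OR() or _HA_ATOMIC_AND(). As a minimal standalone sketch (not HAProxy code) of why the plain form races and what the atomic form guarantees: HAProxy's _HA_ATOMIC_* macros wrap compiler atomics, so plain GCC __atomic builtins stand in for them here, and the flag values are made up for illustration.

/* Sketch only: plain vs. atomic manipulation of a shared flags word.
 * Flag values are illustrative, not HAProxy's real TH_FL_* values.
 */
#include <stdio.h>

#define TH_FL_STUCK           0x01
#define TH_FL_TASK_PROFILING  0x02

static unsigned int flags;

static void unsafe_update(void)
{
	/* Compiles to load+or+store: a concurrent writer hitting the
	 * window between the load and the store loses its update.
	 */
	flags |= TH_FL_STUCK;
}

static void safe_update(void)
{
	/* Single atomic RMW: no window for a concurrent writer. */
	__atomic_or_fetch(&flags, TH_FL_STUCK, __ATOMIC_RELAXED);
	__atomic_and_fetch(&flags, ~TH_FL_STUCK, __ATOMIC_RELAXED);
}

int main(void)
{
	unsafe_update();
	safe_update();

	/* Readers use an atomic load so the compiler cannot cache or
	 * tear the value; this mirrors the _HA_ATOMIC_LOAD() checks
	 * added in this commit.
	 */
	if (__atomic_load_n(&flags, __ATOMIC_RELAXED) & TH_FL_TASK_PROFILING)
		printf("profiling on\n");
	return 0;
}

Relaxed ordering suffices for such advisory flags: the underscore-prefixed _HA_ATOMIC_* macros are the non-barrier variants, and these flags don't publish any other data.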
@@ -367,7 +367,7 @@ static inline void _tasklet_wakeup_on(struct tasklet *tl, int thr, const char *f
 	tl->debug.caller_idx = !tl->debug.caller_idx;
 	tl->debug.caller_file[tl->debug.caller_idx] = file;
 	tl->debug.caller_line[tl->debug.caller_idx] = line;
-	if (th_ctx->flags & TH_FL_TASK_PROFILING)
+	if (_HA_ATOMIC_LOAD(&th_ctx->flags) & TH_FL_TASK_PROFILING)
 		tl->call_date = now_mono_time();
 #endif
 	__tasklet_wakeup_on(tl, thr);
@@ -421,7 +421,7 @@ static inline void _task_instant_wakeup(struct task *t, unsigned int f, const ch
 	tl->debug.caller_idx = !tl->debug.caller_idx;
 	tl->debug.caller_file[tl->debug.caller_idx] = file;
 	tl->debug.caller_line[tl->debug.caller_idx] = line;
-	if (th_ctx->flags & TH_FL_TASK_PROFILING)
+	if (_HA_ATOMIC_LOAD(&th_ctx->flags) & TH_FL_TASK_PROFILING)
 		tl->call_date = now_mono_time();
 #endif
 	__tasklet_wakeup_on(tl, thr);
@@ -383,7 +383,7 @@ void activity_count_runtime(uint32_t run_time)
 	 * profiling to "on" when automatic, and going back below the "down"
 	 * threshold switches to off. The forced modes don't check the load.
 	 */
-	if (!(th_ctx->flags & TH_FL_TASK_PROFILING)) {
+	if (!(_HA_ATOMIC_LOAD(&th_ctx->flags) & TH_FL_TASK_PROFILING)) {
 		if (unlikely((profiling & HA_PROF_TASKS_MASK) == HA_PROF_TASKS_ON ||
 			     ((profiling & HA_PROF_TASKS_MASK) == HA_PROF_TASKS_AON &&
 			      swrate_avg(run_time, TIME_STATS_SAMPLES) >= up)))
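For context, the comment in this hunk describes a hysteresis: automatic profiling switches on when the average run time crosses the "up" threshold and only switches back off below the lower "down" threshold, so it doesn't flap around a single value. A standalone sketch of that switching logic follows, with made-up threshold values standing in for the real swrate_avg()-based computation.

/* Sketch only: up/down hysteresis for toggling a feature based on a
 * running average. Threshold numbers are hypothetical.
 */
#include <stdio.h>

#define UP_THRESHOLD   1000  /* hypothetical: switch profiling on  */
#define DOWN_THRESHOLD  500  /* hypothetical: switch profiling off */

static int profiling_on;

static void update_profiling(unsigned int avg_run_time)
{
	if (!profiling_on && avg_run_time >= UP_THRESHOLD)
		profiling_on = 1;               /* crossed "up": enable  */
	else if (profiling_on && avg_run_time <= DOWN_THRESHOLD)
		profiling_on = 0;               /* fell below "down": disable */
}

int main(void)
{
	/* 800 and 600 sit between the thresholds and change nothing,
	 * which is exactly the flapping the hysteresis avoids.
	 */
	unsigned int samples[] = { 200, 1200, 800, 600, 400, 900 };
	for (unsigned int i = 0; i < sizeof(samples) / sizeof(*samples); i++) {
		update_profiling(samples[i]);
		printf("avg=%u profiling=%s\n", samples[i],
		       profiling_on ? "on" : "off");
	}
	return 0;
}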
@@ -154,6 +154,7 @@ void ha_backtrace_to_stderr(void)
  * The calling thread ID needs to be passed in <calling_tid> to display a star
  * in front of the calling thread's line (usually it's tid). Any stuck thread
  * is also prefixed with a '>'.
+ * It must be called under thread isolation.
  */
 void ha_thread_dump(struct buffer *buf, int thr, int calling_tid)
 {
@@ -1365,7 +1366,7 @@ void debug_handler(int sig, siginfo_t *si, void *arg)
 	 * if it didn't move.
 	 */
 	if (!((threads_harmless_mask|sleeping_thread_mask) & tid_bit))
-		th_ctx->flags |= TH_FL_STUCK;
+		_HA_ATOMIC_OR(&th_ctx->flags, TH_FL_STUCK);
 }

 static int init_debug_per_thread()
src/fd.c (2 changed lines)
@@ -472,7 +472,7 @@ int fd_update_events(int fd, uint evts)
 	uint new_flags, must_stop;
 	ulong rmask, tmask;

-	th_ctx->flags &= ~TH_FL_STUCK; // this thread is still running
+	_HA_ATOMIC_AND(&th_ctx->flags, ~TH_FL_STUCK); // this thread is still running

 	/* do nothing if the FD was taken over under us */
 	do {
@@ -1135,7 +1135,7 @@ void listener_accept(struct listener *l)
 		}
 #endif

-		th_ctx->flags &= ~TH_FL_STUCK; // this thread is still running
+		_HA_ATOMIC_AND(&th_ctx->flags, ~TH_FL_STUCK); // this thread is still running
 	} /* end of for (max_accept--) */

 end:
src/task.c (10 changed lines)
@@ -241,7 +241,7 @@ void __task_wakeup(struct task *t)
 		t->rq.key += offset;
 	}

-	if (th_ctx->flags & TH_FL_TASK_PROFILING)
+	if (_HA_ATOMIC_LOAD(&th_ctx->flags) & TH_FL_TASK_PROFILING)
 		t->call_date = now_mono_time();

 	eb32_insert(root, &t->rq);
@@ -568,7 +568,7 @@ unsigned int run_tasks_from_lists(unsigned int budgets[])
 		process = t->process;
 		t->calls++;
 		th_ctx->current = t;
-		th_ctx->flags &= ~TH_FL_STUCK; // this thread is still running
+		_HA_ATOMIC_AND(&th_ctx->flags, ~TH_FL_STUCK); // this thread is still running

 		_HA_ATOMIC_DEC(&th_ctx->rq_total);
@@ -578,7 +578,7 @@ unsigned int run_tasks_from_lists(unsigned int budgets[])
 			LIST_DEL_INIT(&((struct tasklet *)t)->list);
 			__ha_barrier_store();

-			if (unlikely(th_ctx->flags & TH_FL_TASK_PROFILING)) {
+			if (unlikely(_HA_ATOMIC_LOAD(&th_ctx->flags) & TH_FL_TASK_PROFILING)) {
 				profile_entry = sched_activity_entry(sched_activity, t->process);
 				before = now_mono_time();
 #ifdef DEBUG_TASK
@@ -603,7 +603,7 @@ unsigned int run_tasks_from_lists(unsigned int budgets[])
 			continue;
 		}

-		if (unlikely(th_ctx->flags & TH_FL_TASK_PROFILING)) {
+		if (unlikely(_HA_ATOMIC_LOAD(&th_ctx->flags) & TH_FL_TASK_PROFILING)) {
 			HA_ATOMIC_INC(&profile_entry->calls);
 			HA_ATOMIC_ADD(&profile_entry->cpu_time, now_mono_time() - before);
 		}
@@ -734,7 +734,7 @@ void process_runnable_tasks()
 		int heavy_queued = 0;
 		int budget;

-		th_ctx->flags &= ~TH_FL_STUCK; // this thread is still running
+		_HA_ATOMIC_AND(&th_ctx->flags, ~TH_FL_STUCK); // this thread is still running

 		if (!thread_has_tasks()) {
 			activity[tid].empty_rq++;
@@ -99,7 +99,7 @@ void wdt_handler(int sig, siginfo_t *si, void *arg)
 	 * If it's already set, then it's our second call with no
 	 * progress and the thread is dead.
 	 */
-	if (!(ha_thread_ctx[thr].flags & TH_FL_STUCK)) {
+	if (!(_HA_ATOMIC_LOAD(&ha_thread_ctx[thr].flags) & TH_FL_STUCK)) {
 		_HA_ATOMIC_OR(&ha_thread_ctx[thr].flags, TH_FL_STUCK);
 		goto update_and_leave;
 	}
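The wdt_handler() hunk above shows the watchdog's two-strike protocol: the first expiry finds TH_FL_STUCK clear, sets it and leaves; any progress by the thread clears the flag (the "this thread is still running" lines elsewhere in this commit); a second expiry that still finds the flag set means the thread made no progress. A minimal standalone sketch of that pattern, with hypothetical names and GCC builtins standing in for the _HA_ATOMIC_* macros:

/* Sketch only: two-strike watchdog over an atomically-maintained flag.
 * Names and the "dead" outcome are hypothetical, not HAProxy's.
 */
#include <stdio.h>

#define FL_STUCK 0x01

static unsigned int thread_flags;

/* called by the monitored thread whenever it makes progress */
static void thread_made_progress(void)
{
	__atomic_and_fetch(&thread_flags, ~FL_STUCK, __ATOMIC_RELAXED);
}

/* called by the watchdog on each tick; returns 1 if the thread is dead */
static int watchdog_tick(void)
{
	if (!(__atomic_load_n(&thread_flags, __ATOMIC_RELAXED) & FL_STUCK)) {
		/* first strike: mark it and grant one more interval */
		__atomic_or_fetch(&thread_flags, FL_STUCK, __ATOMIC_RELAXED);
		return 0;
	}
	/* second strike: the flag survived a full interval untouched */
	return 1;
}

int main(void)
{
	printf("tick 1: dead=%d\n", watchdog_tick()); /* first strike */
	thread_made_progress();                       /* thread recovers */
	printf("tick 2: dead=%d\n", watchdog_tick()); /* strike again */
	printf("tick 3: dead=%d\n", watchdog_tick()); /* no progress: dead */
	return 0;
}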