MINOR: task/debug: make task_queue() and task_schedule() possible callers

It's common to see process_stream() being woken up by wake_expired_tasks()
in the profiling output, without knowing which timeout was set to cause
this. By making it possible to record the call places of task_queue()
and task_schedule(), and by making wake_expired_tasks() explicitly not
replace the recorded caller, we'll be able to know which task_queue() or
task_schedule() call armed the timer behind a given wakeup.

For example, in the output below:
  process_stream                51200   311.4ms   6.081us   34.59s    675.6us <- run_tasks_from_lists@src/task.c:659 task_queue
  process_stream                19227   70.00ms   3.640us   9.813m    30.62ms <- sc_notify@src/stconn.c:1136 task_wakeup
  process_stream                 6414   102.3ms   15.95us   8.093m    75.70ms <- stream_new@src/stream.c:578 task_wakeup

The first line makes it visible that the wakeup actually comes from
run_tasks_from_lists() re-queueing the task via task_queue(), based on the
task->expire value returned by the ->process() function itself.
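
The recording mechanism itself is simple: the queueing/scheduling wrappers
become macros that instantiate one static caller descriptor per call place
and atomically swap it into the task, while paths that pass a NULL caller
(such as the timer wakeup in wake_expired_tasks()) leave the previously
recorded one untouched. The snippet below is only a minimal, self-contained
sketch of that idea; every name in it (wake_site, my_task, my_task_queue(),
...) is made up for illustration and is not HAProxy's API, which uses
struct ha_caller, MK_CALLER() and HA_ATOMIC_XCHG() as shown in the diff
further down.

  /* Illustrative sketch only -- not HAProxy code. */
  #include <stdatomic.h>
  #include <stdio.h>

  struct wake_site {
          const char *func;   /* function containing the call place */
          const char *file;
          int line;
  };

  struct my_task {
          _Atomic(const struct wake_site *) caller; /* last site that queued us */
          const struct wake_site *prev_caller;      /* previous one, for debugging */
  };

  static void my_task_queue_from(struct my_task *t, const struct wake_site *site)
  {
          /* A NULL site (e.g. the timer wakeup path) keeps the recorded caller. */
          if (site)
                  t->prev_caller = atomic_exchange(&t->caller, site);
          /* ... a real implementation would insert <t> into the wait queue here ... */
  }

  /* Each expansion creates one static descriptor for this exact call place,
   * in the spirit of the task_queue()/MK_CALLER() wrappers in this patch.
   */
  #define my_task_queue(t) do {                                                  \
          static const struct wake_site here = { __func__, __FILE__, __LINE__ }; \
          my_task_queue_from((t), &here);                                        \
  } while (0)

  int main(void)
  {
          struct my_task t = { 0 };
          const struct wake_site *s;

          my_task_queue(&t);            /* records this exact place */
          my_task_queue_from(&t, NULL); /* timer path: previous caller preserved */

          s = atomic_load(&t.caller);
          printf("queued from %s@%s:%d\n", s->func, s->file, s->line);
          return 0;
  }

With the real macros, this is what lets the profiling output above attribute
a timer wakeup to the task_queue() or task_schedule() call that armed the
timer, instead of only to wake_expired_tasks().
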
Willy Tarreau 2023-11-09 12:05:08 +01:00
parent 0eb0914dba
commit a13f8425f0
3 changed files with 49 additions and 7 deletions

@@ -95,6 +95,8 @@ enum {
         WAKEUP_TYPE_TASKLET_WAKEUP,
         WAKEUP_TYPE_TASKLET_WAKEUP_AFTER,
         WAKEUP_TYPE_TASK_DROP_RUNNING,
+        WAKEUP_TYPE_TASK_SCHEDULE,
+        WAKEUP_TYPE_TASK_QUEUE,
         WAKEUP_TYPE_APPCTX_WAKEUP,
 };

@@ -298,7 +298,10 @@ static inline struct task *task_unlink_wq(struct task *t)
  * protected by the global wq_lock, otherwise by it necessarily belongs to the
  * current thread'sand is queued without locking.
  */
-static inline void task_queue(struct task *task)
+#define task_queue(t) \
+        _task_queue(t, MK_CALLER(WAKEUP_TYPE_TASK_QUEUE, 0, 0))
+
+static inline void _task_queue(struct task *task, const struct ha_caller *caller)
 {
         /* If we already have a place in the wait queue no later than the
          * timeout we're trying to set, we'll stay there, because it is very
@@ -315,17 +318,33 @@ static inline void task_queue(struct task *task)
 #ifdef USE_THREAD
         if (task->tid < 0) {
                 HA_RWLOCK_WRLOCK(TASK_WQ_LOCK, &wq_lock);
-                if (!task_in_wq(task) || tick_is_lt(task->expire, task->wq.key))
+                if (!task_in_wq(task) || tick_is_lt(task->expire, task->wq.key)) {
+                        if (likely(caller)) {
+                                caller = HA_ATOMIC_XCHG(&task->caller, caller);
+                                BUG_ON((ulong)caller & 1);
+#ifdef DEBUG_TASK
+                                HA_ATOMIC_STORE(&task->debug.prev_caller, caller);
+#endif
+                        }
                         __task_queue(task, &tg_ctx->timers);
+                }
                 HA_RWLOCK_WRUNLOCK(TASK_WQ_LOCK, &wq_lock);
         } else
 #endif
         {
                 BUG_ON(task->tid != tid);
-                if (!task_in_wq(task) || tick_is_lt(task->expire, task->wq.key))
+                if (!task_in_wq(task) || tick_is_lt(task->expire, task->wq.key)) {
+                        if (likely(caller)) {
+                                caller = HA_ATOMIC_XCHG(&task->caller, caller);
+                                BUG_ON((ulong)caller & 1);
+#ifdef DEBUG_TASK
+                                HA_ATOMIC_STORE(&task->debug.prev_caller, caller);
+#endif
+                        }
                         __task_queue(task, &th_ctx->timers);
+                }
         }
 }
 
 /* Change the thread affinity of a task to <thr>, which may either be a valid
  * thread number from 0 to nbthread-1, or a negative value to allow the task
@@ -677,7 +696,10 @@ static inline void tasklet_set_tid(struct tasklet *tl, int tid)
  * now_ms without using tick_add() will definitely make this happen once every
  * 49.7 days.
  */
-static inline void task_schedule(struct task *task, int when)
+#define task_schedule(t, w) \
+        _task_schedule(t, w, MK_CALLER(WAKEUP_TYPE_TASK_SCHEDULE, 0, 0))
+
+static inline void _task_schedule(struct task *task, int when, const struct ha_caller *caller)
 {
         /* TODO: mthread, check if there is no tisk with this test */
         if (task_in_rq(task))
@@ -691,8 +713,16 @@ static inline void task_schedule(struct task *task, int when)
                         when = tick_first(when, task->expire);
 
                 task->expire = when;
-                if (!task_in_wq(task) || tick_is_lt(task->expire, task->wq.key))
+                if (!task_in_wq(task) || tick_is_lt(task->expire, task->wq.key)) {
+                        if (likely(caller)) {
+                                caller = HA_ATOMIC_XCHG(&task->caller, caller);
+                                BUG_ON((ulong)caller & 1);
+#ifdef DEBUG_TASK
+                                HA_ATOMIC_STORE(&task->debug.prev_caller, caller);
+#endif
+                        }
                         __task_queue(task, &tg_ctx->timers);
+                }
                 HA_RWLOCK_WRUNLOCK(TASK_WQ_LOCK, &wq_lock);
         } else
 #endif
@@ -702,10 +732,18 @@ static inline void task_schedule(struct task *task, int when)
                         when = tick_first(when, task->expire);
 
                 task->expire = when;
-                if (!task_in_wq(task) || tick_is_lt(task->expire, task->wq.key))
+                if (!task_in_wq(task) || tick_is_lt(task->expire, task->wq.key)) {
+                        if (likely(caller)) {
+                                caller = HA_ATOMIC_XCHG(&task->caller, caller);
+                                BUG_ON((ulong)caller & 1);
+#ifdef DEBUG_TASK
+                                HA_ATOMIC_STORE(&task->debug.prev_caller, caller);
+#endif
+                        }
                         __task_queue(task, &th_ctx->timers);
+                }
         }
 }
 
 /* returns the string corresponding to a task type as found in the task caller
  * locations.
@@ -718,6 +756,8 @@ static inline const char *task_wakeup_type_str(uint t)
         case WAKEUP_TYPE_TASKLET_WAKEUP       : return "tasklet_wakeup";
         case WAKEUP_TYPE_TASKLET_WAKEUP_AFTER : return "tasklet_wakeup_after";
         case WAKEUP_TYPE_TASK_DROP_RUNNING    : return "task_drop_running";
+        case WAKEUP_TYPE_TASK_QUEUE           : return "task_queue";
+        case WAKEUP_TYPE_TASK_SCHEDULE        : return "task_schedule";
         case WAKEUP_TYPE_APPCTX_WAKEUP        : return "appctx_wakeup";
         default                               : return "?";
         }

@@ -347,7 +347,7 @@ void wake_expired_tasks()
                 if (tick_is_expired(task->expire, now_ms)) {
                         /* expired task, wake it up */
                         __task_unlink_wq(task);
-                        task_wakeup(task, TASK_WOKEN_TIMER);
+                        _task_wakeup(task, TASK_WOKEN_TIMER, 0);
                 }
                 else if (task->expire != eb->key) {
                         /* task is not expired but its key doesn't match so let's