Mirror of https://git.haproxy.org/git/haproxy.git/ (synced 2025-08-07 15:47:01 +02:00)
MINOR: task: move the allocated tasks counter to the per-thread struct
The nb_tasks counter was still global: it was incremented and decremented for each task_new()/task_free() and read in process_runnable_tasks(). But it is only used for stats reporting, so updating it this often is pointless and expensive. Let's move it to the task_per_thread struct and have the stats code sum the per-thread values when needed.
This commit is contained in:
parent eeffb3df41
commit 955a11ebfa
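The pattern is simple: each thread bumps its own plain counter on allocation and free, and a reader sums the per-thread values on demand. Below is a minimal standalone sketch of that pattern, not HAProxy code: the struct and function names and the fixed thread count are illustrative assumptions, and the relaxed atomic load stands in for _HA_ATOMIC_LOAD as used by the real total_allocated_tasks().

#include <stdio.h>

#define MAX_THREADS 64

/* simplified stand-in for HAProxy's per-thread scheduler context */
struct sketch_per_thread {
	unsigned int nb_tasks;   /* tasks allocated on this thread */
};

static struct sketch_per_thread sketch_per_thread[MAX_THREADS];
static int sketch_nbthread = 4;  /* assumed thread count for the example */

/* Sum the per-thread counters. This is racy (writers use plain ++/--),
 * which is acceptable because the result is only used for stats reporting.
 */
static unsigned int sketch_total_allocated_tasks(void)
{
	unsigned int ret = 0;
	int thr;

	for (thr = 0; thr < sketch_nbthread; thr++)
		ret += __atomic_load_n(&sketch_per_thread[thr].nb_tasks, __ATOMIC_RELAXED);
	return ret;
}

int main(void)
{
	/* pretend thread 0 allocated 3 tasks and thread 2 allocated 5 */
	sketch_per_thread[0].nb_tasks = 3;
	sketch_per_thread[2].nb_tasks = 5;
	printf("allocated tasks: %u\n", sketch_total_allocated_tasks());
	return 0;
}

The real code does the same thing in total_allocated_tasks() below, iterating over global.nbthread entries of task_per_thread[].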
@@ -81,6 +81,7 @@ struct task_per_thread {
 	int current_queue;       /* points to current tasklet list being run, -1 if none */
 	unsigned int rq_total;   /* total size of the run queue, prio_tree + tasklets */
 	struct task *current;    /* current task (not tasklet) */
+	unsigned int nb_tasks;   /* number of tasks allocated on this thread */
 	uint8_t tl_class_mask;   /* bit mask of non-empty tasklets classes */
 	__attribute__((aligned(64))) char end[0];
 };
@@ -87,10 +87,8 @@
 
 
 /* a few exported variables */
-extern unsigned int nb_tasks;     /* total number of tasks */
 extern volatile unsigned long global_tasks_mask; /* Mask of threads with tasks in the global runqueue */
 extern unsigned int grq_total;    /* total number of entries in the global run queue */
-extern unsigned int nb_tasks_cur;
 extern unsigned int niced_tasks;  /* number of niced tasks in the run queue */
 extern struct pool_head *pool_head_task;
 extern struct pool_head *pool_head_tasklet;
@@ -160,6 +158,19 @@ static inline int total_run_queues()
 	return ret;
 }
 
+/* returns the number of allocated tasks across all threads. Note that this
+ * *is* racy since some threads might be updating their counts while we're
+ * looking, but this is only for statistics reporting.
+ */
+static inline int total_allocated_tasks()
+{
+	int thr, ret;
+
+	for (thr = ret = 0; thr < global.nbthread; thr++)
+		ret += _HA_ATOMIC_LOAD(&task_per_thread[thr].nb_tasks);
+	return ret;
+}
+
 /* return 0 if task is in run queue, otherwise non-zero */
 static inline int task_in_rq(struct task *t)
 {
@@ -496,7 +507,7 @@ static inline struct task *task_new(unsigned long thread_mask)
 {
 	struct task *t = pool_alloc(pool_head_task);
 	if (t) {
-		_HA_ATOMIC_ADD(&nb_tasks, 1);
+		sched->nb_tasks++;
 		task_init(t, thread_mask);
 	}
 	return t;
@@ -521,9 +532,9 @@ static inline void __task_free(struct task *t)
 #endif
 
 	pool_free(pool_head_task, t);
+	sched->nb_tasks--;
 	if (unlikely(stopping))
 		pool_flush(pool_head_task);
-	_HA_ATOMIC_SUB(&nb_tasks, 1);
 }
 
 /* Destroys a task : it's unlinked from the wait queues and is freed if it's
@@ -3339,7 +3339,7 @@ static void stats_dump_html_info(struct stream_interface *si, struct uri_auth *u
 	             actconn, pipes_used, pipes_used+pipes_free, read_freq_ctr(&global.conn_per_sec),
 	             bps >= 1000000000UL ? (bps / 1000000000.0) : bps >= 1000000UL ? (bps / 1000000.0) : (bps / 1000.0),
 	             bps >= 1000000000UL ? 'G' : bps >= 1000000UL ? 'M' : 'k',
-	             total_run_queues(), nb_tasks_cur, ti->idle_pct
+	             total_run_queues(), total_allocated_tasks(), ti->idle_pct
 	             );
 
 	/* scope_txt = search query, appctx->ctx.stats.scope_len is always <= STAT_SCOPE_TXT_MAXLEN */
@@ -4365,7 +4365,7 @@ int stats_fill_info(struct field *info, int len)
 	info[INF_ZLIB_MEM_USAGE] = mkf_u32(0, zlib_used_memory);
 	info[INF_MAX_ZLIB_MEM_USAGE] = mkf_u32(FO_CONFIG|FN_LIMIT, global.maxzlibmem);
 #endif
-	info[INF_TASKS] = mkf_u32(0, nb_tasks_cur);
+	info[INF_TASKS] = mkf_u32(0, total_allocated_tasks());
 	info[INF_RUN_QUEUE] = mkf_u32(0, total_run_queues());
 	info[INF_IDLE_PCT] = mkf_u32(FN_AVG, ti->idle_pct);
 	info[INF_NODE] = mkf_str(FO_CONFIG|FN_OUTPUT|FS_SERVICE, global.node);
@@ -35,9 +35,7 @@ DECLARE_POOL(pool_head_tasklet, "tasklet", sizeof(struct tasklet));
  */
 DECLARE_POOL(pool_head_notification, "notification", sizeof(struct notification));
 
-unsigned int nb_tasks = 0;
 volatile unsigned long global_tasks_mask = 0; /* Mask of threads with tasks in the global runqueue */
-unsigned int nb_tasks_cur = 0; /* copy of the tasks count */
 unsigned int niced_tasks = 0; /* number of niced tasks in the run queue */
 
 THREAD_LOCAL struct task_per_thread *sched = &task_per_thread[0]; /* scheduler context for the current thread */
@@ -591,7 +589,6 @@ void process_runnable_tasks()
 		return;
 	}
 
-	nb_tasks_cur = nb_tasks;
 	max_processed = global.tune.runqueue_depth;
 
 	if (likely(niced_tasks))