diff --git a/include/proto/stream.h b/include/proto/stream.h
index db239566a..b439344f2 100644
--- a/include/proto/stream.h
+++ b/include/proto/stream.h
@@ -302,7 +302,7 @@ static inline void stream_offer_buffers()
 	 */
 	avail = pool2_buffer->allocated - pool2_buffer->used - global.tune.reserved_bufs / 2;
 
-	if (avail > (int)run_queue)
+	if (avail > (int)tasks_run_queue)
 		__stream_offer_buffers(avail);
 }
 
diff --git a/include/proto/task.h b/include/proto/task.h
index 107c961a5..70fd0b31c 100644
--- a/include/proto/task.h
+++ b/include/proto/task.h
@@ -80,8 +80,8 @@
 
 /* a few exported variables */
 extern unsigned int nb_tasks;     /* total number of tasks */
-extern unsigned int run_queue;    /* run queue size */
-extern unsigned int run_queue_cur;
+extern unsigned int tasks_run_queue;    /* run queue size */
+extern unsigned int tasks_run_queue_cur;
 extern unsigned int nb_tasks_cur;
 extern unsigned int niced_tasks;  /* number of niced tasks in the run queue */
 extern struct pool_head *pool2_task;
@@ -132,16 +132,16 @@ static inline struct task *task_unlink_wq(struct task *t)
 }
 
 /*
- * Unlink the task from the run queue. The run_queue size and number of niced
- * tasks are updated too. A pointer to the task itself is returned. The task
- * *must* already be in the run queue before calling this function. If unsure,
- * use the safer task_unlink_rq() function. Note that the pointer to the next
- * run queue entry is neither checked nor updated.
+ * Unlink the task from the run queue. The tasks_run_queue size and number of
+ * niced tasks are updated too. A pointer to the task itself is returned. The
+ * task *must* already be in the run queue before calling this function. If
+ * unsure, use the safer task_unlink_rq() function. Note that the pointer to the
+ * next run queue entry is neither checked nor updated.
  */
 static inline struct task *__task_unlink_rq(struct task *t)
 {
 	eb32_delete(&t->rq);
-	run_queue--;
+	tasks_run_queue--;
 	if (likely(t->nice))
 		niced_tasks--;
 	return t;
diff --git a/src/haproxy.c b/src/haproxy.c
index b403ba1c1..c31ccb077 100644
--- a/src/haproxy.c
+++ b/src/haproxy.c
@@ -1730,7 +1730,7 @@ void run_poll_loop()
 			break;
 
 		/* expire immediately if events are pending */
-		if (fd_cache_num || run_queue || signal_queue_len || applets_active_queue)
+		if (fd_cache_num || tasks_run_queue || signal_queue_len || applets_active_queue)
 			next = now_ms;
 
 		/* The poller will ensure it returns around <next> */
diff --git a/src/stats.c b/src/stats.c
index 0ba6d27ba..1a842e8d9 100644
--- a/src/stats.c
+++ b/src/stats.c
@@ -2083,7 +2083,7 @@ static void stats_dump_html_info(struct stream_interface *si, struct uri_auth *u
 	             global.rlimit_nofile, global.maxsock, global.maxconn, global.maxpipes,
 	             actconn, pipes_used, pipes_used+pipes_free, read_freq_ctr(&global.conn_per_sec),
-	             run_queue_cur, nb_tasks_cur, idle_pct
+	             tasks_run_queue_cur, nb_tasks_cur, idle_pct
 	             );
 
 	/* scope_txt = search query, appctx->ctx.stats.scope_len is always <= STAT_SCOPE_TXT_MAXLEN */
@@ -2996,7 +2996,7 @@ int stats_fill_info(struct field *info, int len)
 	info[INF_MAX_ZLIB_MEM_USAGE]           = mkf_u32(FO_CONFIG|FN_LIMIT, global.maxzlibmem);
 #endif
 	info[INF_TASKS]                        = mkf_u32(0, nb_tasks_cur);
-	info[INF_RUN_QUEUE]                    = mkf_u32(0, run_queue_cur);
+	info[INF_RUN_QUEUE]                    = mkf_u32(0, tasks_run_queue_cur);
 	info[INF_IDLE_PCT]                     = mkf_u32(FN_AVG, idle_pct);
 	info[INF_NODE]                         = mkf_str(FO_CONFIG|FN_OUTPUT|FS_SERVICE, global.node);
 	if (global.desc)
diff --git a/src/stream.c b/src/stream.c
index 08f3aa9de..055cc2350 100644
--- a/src/stream.c
+++ b/src/stream.c
@@ -449,7 +449,7 @@ void __stream_offer_buffers(int rqlimit)
 	struct stream *sess, *bak;
 
 	list_for_each_entry_safe(sess, bak, &buffer_wq, buffer_wait) {
-		if (rqlimit <= run_queue)
+		if (rqlimit <= tasks_run_queue)
 			break;
 
 		if (sess->task->state & TASK_RUNNING)
diff --git a/src/task.c b/src/task.c
index 4a8b9074f..c99cea89c 100644
--- a/src/task.c
+++ b/src/task.c
@@ -26,8 +26,8 @@
 struct pool_head *pool2_task;
 
 unsigned int nb_tasks = 0;
-unsigned int run_queue = 0;
-unsigned int run_queue_cur = 0;    /* copy of the run queue size */
+unsigned int tasks_run_queue = 0;
+unsigned int tasks_run_queue_cur = 0; /* copy of the run queue size */
 unsigned int nb_tasks_cur = 0;     /* copy of the tasks count */
 unsigned int niced_tasks = 0;      /* number of niced tasks in the run queue */
 struct eb32_node *last_timer = NULL; /* optimization: last queued timer */
@@ -39,15 +39,15 @@ static unsigned int rqueue_ticks;  /* insertion count */
 
 /* Puts the task <t> in run queue at a position depending on t->nice. <t> is
  * returned. The nice value assigns boosts in 32th of the run queue size. A
- * nice value of -1024 sets the task to -run_queue*32, while a nice value of
- * 1024 sets the task to run_queue*32. The state flags are cleared, so the
- * caller will have to set its flags after this call.
+ * nice value of -1024 sets the task to -tasks_run_queue*32, while a nice value
+ * of 1024 sets the task to tasks_run_queue*32. The state flags are cleared, so
+ * the caller will have to set its flags after this call.
  * The task must not already be in the run queue. If unsure, use the safer
  * task_wakeup() function.
  */
 struct task *__task_wakeup(struct task *t)
 {
-	run_queue++;
+	tasks_run_queue++;
 	t->rq.key = ++rqueue_ticks;
 
 	if (likely(t->nice)) {
@@ -55,9 +55,9 @@ struct task *__task_wakeup(struct task *t)
 		niced_tasks++;
 		if (likely(t->nice > 0))
-			offset = (unsigned)((run_queue * (unsigned int)t->nice) / 32U);
+			offset = (unsigned)((tasks_run_queue * (unsigned int)t->nice) / 32U);
 		else
-			offset = -(unsigned)((run_queue * (unsigned int)-t->nice) / 32U);
+			offset = -(unsigned)((tasks_run_queue * (unsigned int)-t->nice) / 32U);
 		t->rq.key += offset;
 	}
@@ -191,11 +191,11 @@ void process_runnable_tasks()
 	struct task *t;
 	unsigned int max_processed;
 
-	run_queue_cur = run_queue;    /* keep a copy for reporting */
+	tasks_run_queue_cur = tasks_run_queue; /* keep a copy for reporting */
 	nb_tasks_cur = nb_tasks;
-	max_processed = run_queue;
+	max_processed = tasks_run_queue;
 
-	if (!run_queue)
+	if (!tasks_run_queue)
 		return;
 
 	if (max_processed > 200)
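
A note for reviewers on the __task_wakeup() hunks: the nice-based boost that now
reads from tasks_run_queue is easiest to sanity-check in isolation. Below is a
minimal standalone sketch, not HAProxy code: the tasks_run_queue value is made
up, and the nice_offset() helper is a hypothetical extraction of the inline
arithmetic shown in the patch, kept here purely for illustration.

#include <stdio.h>

/* Stand-in for the renamed HAProxy global; the value is made up. */
static unsigned int tasks_run_queue = 64;

/* Hypothetical helper reproducing the offset arithmetic from
 * __task_wakeup() in src/task.c: a task's queue key is shifted by
 * nice/32 times the current run queue size, so nice = -1024 yields
 * -tasks_run_queue * 32 and nice = 1024 yields tasks_run_queue * 32. */
static int nice_offset(int nice)
{
	int offset;

	if (nice > 0)
		offset = (unsigned)((tasks_run_queue * (unsigned int)nice) / 32U);
	else
		offset = -(unsigned)((tasks_run_queue * (unsigned int)-nice) / 32U);
	return offset;
}

int main(void)
{
	printf("nice -1024 -> %d\n", nice_offset(-1024)); /* -2048 */
	printf("nice     0 -> %d\n", nice_offset(0));     /*     0 */
	printf("nice  1024 -> %d\n", nice_offset(1024));  /*  2048 */
	return 0;
}

With tasks_run_queue = 64 this prints -2048, 0 and 2048: the extreme nice
values shift a task's key by exactly 32 times the run queue size in either
direction, matching the comment rewritten in the src/task.c hunk. The rename
does not change this behaviour, only the name the computation reads from.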