mirror of https://git.haproxy.org/git/haproxy.git/
[MEDIUM] indicate a reason for a task wakeup
It is frequently necessary to know why a task is running. Flags have been added so that a task now knows whether it was woken up due to I/O completion, an expired timer, etc.
parent 75cf17ee30
commit fdccded0e8
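To illustrate the new convention, here is a minimal sketch (not part of the patch; the helper name wake_from_events is hypothetical): every caller of task_wakeup() now passes a TASK_WOKEN_* reason, and the flags accumulate in t->state until the task runs.

/* Minimal sketch assuming the API introduced by this patch; the helper
 * below is hypothetical and only shows the new calling convention.
 */
#include <proto/task.h>

static void wake_from_events(struct task *t)
{
	task_wakeup(t, TASK_WOKEN_IO);      /* e.g. from an fd event handler */
	task_wakeup(t, TASK_WOKEN_TIMER);   /* e.g. after an expired timer */
	/* t->state now contains TASK_IN_RUNQUEUE|TASK_WOKEN_IO|TASK_WOKEN_TIMER;
	 * the second call only ORs its flag since the task is already queued.
	 */
}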
@@ -40,13 +40,14 @@ extern struct task *last_timer; /* optimization: last queued timer */
 /* perform minimal initializations, report 0 in case of error, 1 if OK. */
 int init_task();
 
-/* puts the task <t> in run queue <q>, and returns <t> */
+/* puts the task <t> in run queue with reason flags <f>, and returns <t> */
 struct task *__task_wakeup(struct task *t);
-static inline struct task *task_wakeup(struct task *t)
+static inline struct task *task_wakeup(struct task *t, unsigned int f)
 {
-	if (t->state == TASK_RUNNING)
-		return t;
-	return __task_wakeup(t);
+	if (likely(!(t->state & TASK_IN_RUNQUEUE)))
+		__task_wakeup(t);
+	t->state |= f;
+	return t;
 }
 
 /* removes the task <t> from the run queue if it was in it.
@@ -54,8 +55,8 @@ static inline struct task *task_wakeup(struct task *t)
  */
 static inline struct task *task_sleep(struct task *t)
 {
-	if (t->state == TASK_RUNNING) {
-		t->state = TASK_IDLE;
+	if (t->state & TASK_IN_RUNQUEUE) {
+		t->state = TASK_SLEEPING;
 		eb32_delete(&t->eb);
 		run_queue--;
 		if (likely(t->nice))
@@ -87,7 +88,7 @@ static inline struct task *task_dequeue(struct task *t)
 static inline struct task *task_delete(struct task *t)
 {
 	task_dequeue(t);
-	if (t->state == TASK_RUNNING) {
+	if (t->state & TASK_IN_RUNQUEUE) {
 		run_queue--;
 		if (likely(t->nice))
 			niced_tasks--;
@@ -102,7 +103,7 @@ static inline struct task *task_delete(struct task *t)
 static inline struct task *task_init(struct task *t)
 {
 	t->eb.node.leaf_p = NULL;
-	t->state = TASK_IDLE;
+	t->state = TASK_SLEEPING;
 	t->nice = 0;
 	return t;
 }
@@ -29,13 +29,25 @@
 #include <common/mini-clist.h>
 
 /* values for task->state */
-#define TASK_IDLE	0
-#define TASK_RUNNING	1
+#define TASK_SLEEPING		0x00	/* task sleeping */
+#define TASK_IN_RUNQUEUE	0x01	/* the task is in the run queue */
+#define TASK_WOKEN_INIT		0x02	/* woken up for initialisation purposes */
+#define TASK_WOKEN_TIMER	0x04	/* woken up because of expired timer */
+#define TASK_WOKEN_IO		0x08	/* woken up because of completed I/O */
+#define TASK_WOKEN_SIGNAL	0x10	/* woken up by a system signal */
+#define TASK_WOKEN_MSG		0x20	/* woken up by another task's message */
+#define TASK_WOKEN_RES		0x40	/* woken up because of available resource */
+#define TASK_WOKEN_OTHER	0x80	/* woken up for an unspecified reason */
+
+/* use this to check a task state or to clean it up before queueing */
+#define TASK_WOKEN_ANY		(TASK_WOKEN_OTHER|TASK_WOKEN_INIT|TASK_WOKEN_TIMER| \
+				 TASK_WOKEN_IO|TASK_WOKEN_SIGNAL|TASK_WOKEN_MSG| \
+				 TASK_WOKEN_RES)
 
 /* The base for all tasks */
 struct task {
 	struct eb32_node eb;		/* ebtree node used to hold the task in the wait queue */
-	int state;			/* task state : IDLE or RUNNING */
+	int state;			/* task state : bit field of TASK_* */
 	unsigned int expire;		/* next expiration time for this task */
 	void (*process)(struct task *t, int *next);	/* the function which processes the task */
 	void *context;			/* the task's context */
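For illustration only (not part of the patch), a handler matching the process() prototype above could consume these bits and clear them with the TASK_WOKEN_ANY mask; the name example_process is hypothetical.

/* Hypothetical task handler; it checks why it was woken up and clears
 * the reason bits using TASK_WOKEN_ANY once they have been handled.
 */
static void example_process(struct task *t, int *next)
{
	if (t->state & TASK_WOKEN_TIMER) {
		/* woken by the scheduler because t->expire was reached */
	}
	if (t->state & (TASK_WOKEN_IO | TASK_WOKEN_RES)) {
		/* woken by completed I/O or an available resource */
	}
	t->state &= ~TASK_WOKEN_ANY;	/* clean the reason bits */
}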
@@ -73,7 +73,7 @@ static int redistribute_pending(struct server *s)
 			sess->flags &= ~(SN_DIRECT | SN_ASSIGNED | SN_ADDR_SET);
 
 			pendconn_free(pc);
-			task_wakeup(sess->task);
+			task_wakeup(sess->task, TASK_WOKEN_RES);
 			xferred++;
 		}
 	}
@@ -102,7 +102,7 @@ static int check_for_pending(struct server *s)
 		p->sess->srv = s;
 		sess = p->sess;
 		pendconn_free(p);
-		task_wakeup(sess->task);
+		task_wakeup(sess->task, TASK_WOKEN_RES);
 	}
 	return xferred;
 }
@@ -404,7 +404,7 @@ static int event_srv_chk_w(int fd)
 		}
 	}
 out_wakeup:
-	task_wakeup(t);
+	task_wakeup(t, TASK_WOKEN_IO);
 out_nowake:
 	EV_FD_CLR(fd, DIR_WR);   /* nothing more to write */
 	fdtab[fd].ev &= ~FD_POLL_OUT;
@@ -512,7 +512,7 @@ static int event_srv_chk_r(int fd)
 		fdtab[fd].state = FD_STERROR;
 
 	EV_FD_CLR(fd, DIR_RD);
-	task_wakeup(t);
+	task_wakeup(t, TASK_WOKEN_IO);
 	fdtab[fd].ev &= ~FD_POLL_IN;
 	return 1;
 }
@@ -409,7 +409,7 @@ int event_accept(int fd) {
 		 * priorities to tasks.
 		 */
 		if (p->mode != PR_MODE_HEALTH)
-			task_wakeup(t);
+			task_wakeup(t, TASK_WOKEN_INIT);
 
 		p->feconn++;	/* beconn will be increased later */
 		if (p->feconn > p->feconn_max)
@@ -520,7 +520,7 @@ int uxst_event_accept(int fd) {
 			t->expire = s->req->rex;
 		}
 
-		task_wakeup(t);
+		task_wakeup(t, TASK_WOKEN_INIT);
 
 		l->nbconn++; /* warning! right now, it's up to the handler to decrease this */
 		if (l->nbconn >= l->maxconn) {
@@ -77,7 +77,7 @@ void process_srv_queue(struct server *s)
 		struct session *sess = pendconn_get_next_sess(s, p);
 		if (sess == NULL)
 			break;
-		task_wakeup(sess->task);
+		task_wakeup(sess->task, TASK_WOKEN_RES);
 	}
 }
 
@@ -239,7 +239,7 @@ int stream_sock_read(int fd) {
 	if (!(b->flags & BF_READ_ACTIVITY))
 		goto out_skip_wakeup;
 out_wakeup:
-	task_wakeup(fdtab[fd].owner);
+	task_wakeup(fdtab[fd].owner, TASK_WOKEN_IO);
 
 out_skip_wakeup:
 	fdtab[fd].ev &= ~FD_POLL_IN;
@@ -411,7 +411,7 @@ int stream_sock_write(int fd) {
 	if (!(b->flags & BF_WRITE_ACTIVITY))
 		goto out_skip_wakeup;
 out_wakeup:
-	task_wakeup(fdtab[fd].owner);
+	task_wakeup(fdtab[fd].owner, TASK_WOKEN_IO);
 
 out_skip_wakeup:
 	fdtab[fd].ev &= ~FD_POLL_OUT;
@@ -149,7 +149,8 @@ struct task *__task_wakeup(struct task *t)
 		t->eb.key += offset;
 	}
 
-	t->state = TASK_RUNNING;
+	/* clear state flags at the same time */
+	t->state = TASK_IN_RUNQUEUE;
 
 	eb32_insert(&rqueue[ticks_to_tree(t->eb.key)], &t->eb);
 	return t;
@@ -229,6 +230,7 @@ void wake_expired_tasks(int *next)
 			/* detach the task from the queue and add the task to the run queue */
 			eb = eb32_next(eb);
 			__task_wakeup(task);
+			task->state |= TASK_WOKEN_TIMER;
 		}
 		tree = (tree + 1) & TIMER_TREE_MASK;
 	} while (((tree - now_tree) & TIMER_TREE_MASK) < TIMER_TREES/2);
@@ -286,7 +288,7 @@ void process_runnable_tasks(int *next)
 			run_queue--;
 			if (likely(t->nice))
 				niced_tasks--;
-			t->state = TASK_IDLE;
+			t->state &= ~TASK_IN_RUNQUEUE;
 			task_dequeue(t);
 
 			t->process(t, &temp);