Mirror of https://git.haproxy.org/git/haproxy.git/
CLEANUP: threads: rename process_mask to thread_mask
It was a leftover from the last cleaning session: this mask applies to threads, so calling it process_mask is confusing. The same rename is applied in fd, task and applets.
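For readers unfamiliar with the pattern being renamed, here is a minimal, standalone sketch of how such a per-object mask gates work across threads. The struct and function names (my_task, may_process) are illustrative stand-ins, not HAProxy's; only the thread_mask field and the tid_bit convention (one bit per thread ID) mirror the diff that follows.

/* Hedged sketch: each thread owns a one-bit ID (tid_bit = 1UL << tid),
 * and an object may only be processed by threads whose bit is set in
 * its thread_mask. Names here are illustrative, not HAProxy's. */
#include <stdio.h>

struct my_task {
	unsigned long thread_mask;  /* mask of thread IDs allowed to run it */
};

/* return 1 if the thread with ID <tid> may process <t> */
static int may_process(const struct my_task *t, unsigned int tid)
{
	unsigned long tid_bit = 1UL << tid;  /* this thread's bit */
	return !!(t->thread_mask & tid_bit);
}

int main(void)
{
	struct my_task t = { .thread_mask = 0x5UL };  /* threads 0 and 2 only */
	unsigned int tid;

	for (tid = 0; tid < 4; tid++)
		printf("thread %u: %s\n", tid,
		       may_process(&t, tid) ? "process" : "skip");
	return 0;
}

With thread_mask = 0x5, only threads 0 and 2 pass the check; the rename makes it explicit that the bits index threads, not processes.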
commit f65610a83d
parent 5f4a47b701
--- a/include/proto/applet.h
+++ b/include/proto/applet.h
@@ -50,7 +50,7 @@ static inline void appctx_init(struct appctx *appctx, unsigned long thread_mask)
 {
 	appctx->st0 = appctx->st1 = appctx->st2 = 0;
 	appctx->io_release = NULL;
-	appctx->process_mask = thread_mask;
+	appctx->thread_mask = thread_mask;
 	appctx->state = APPLET_SLEEPING;
 }

--- a/include/proto/fd.h
+++ b/include/proto/fd.h
@@ -404,7 +404,7 @@ static inline void fd_insert(int fd, unsigned long thread_mask)
 	fdtab[fd].linger_risk = 0;
 	fdtab[fd].cloned = 0;
 	fdtab[fd].cache = 0;
-	fdtab[fd].process_mask = thread_mask;
+	fdtab[fd].thread_mask = thread_mask;
 	SPIN_UNLOCK(FD_LOCK, &fdtab[fd].lock);

 	SPIN_LOCK(FDTAB_LOCK, &fdtab_lock);
--- a/include/proto/task.h
+++ b/include/proto/task.h
@@ -127,11 +127,12 @@ static inline struct task *task_wakeup(struct task *t, unsigned int f)
 	return t;
 }

+/* change the thread affinity of a task to <thread_mask> */
 static inline void task_set_affinity(struct task *t, unsigned long thread_mask)
 {
-	t->process_mask = thread_mask;
+	t->thread_mask = thread_mask;
 }

 /*
  * Unlink the task from the wait queue, and possibly update the last_timer
  * pointer. A pointer to the task itself is returned. The task *must* already
@@ -202,7 +203,7 @@ static inline struct task *task_init(struct task *t, unsigned long thread_mask)
 	t->wq.node.leaf_p = NULL;
 	t->rq.node.leaf_p = NULL;
 	t->pending_state = t->state = TASK_SLEEPING;
-	t->process_mask = thread_mask;
+	t->thread_mask = thread_mask;
 	t->nice = 0;
 	t->calls = 0;
 	t->expire = TICK_ETERNITY;
--- a/include/types/applet.h
+++ b/include/types/applet.h
@@ -67,7 +67,7 @@ struct appctx {
 	                           if the command is terminated or the session released */
 	int cli_severity_output;        /* used within the cli_io_handler to format severity output of informational feedback */
 	struct buffer_wait buffer_wait; /* position in the list of objects waiting for a buffer */
-	unsigned long process_mask;     /* mask of thread IDs authorized to process the applet */
+	unsigned long thread_mask;      /* mask of thread IDs authorized to process the applet */

 	union {
 		struct {
--- a/include/types/fd.h
+++ b/include/types/fd.h
@@ -94,7 +94,7 @@ enum fd_states {
 struct fdtab {
 	void (*iocb)(int fd);        /* I/O handler */
 	void *owner;                 /* the connection or listener associated with this fd, NULL if closed */
-	unsigned long process_mask;  /* mask of thread IDs authorized to process the task */
+	unsigned long thread_mask;   /* mask of thread IDs authorized to process the task */
 #ifdef USE_THREAD
 	HA_SPINLOCK_T lock;
 #endif
--- a/include/types/task.h
+++ b/include/types/task.h
@@ -72,7 +72,7 @@ struct task {
 	void *context;               /* the task's context */
 	struct eb32_node wq;         /* ebtree node used to hold the task in the wait queue */
 	int expire;                  /* next expiration date for this task, in ticks */
-	unsigned long process_mask;  /* mask of thread IDs authorized to process the task */
+	unsigned long thread_mask;   /* mask of thread IDs authorized to process the task */
 };

 /*
--- a/src/applet.c
+++ b/src/applet.c
@@ -43,7 +43,7 @@ void applet_run_active()
 	curr = LIST_NEXT(&applet_active_queue, typeof(curr), runq);
 	while (&curr->runq != &applet_active_queue) {
 		next = LIST_NEXT(&curr->runq, typeof(next), runq);
-		if (curr->process_mask & tid_bit) {
+		if (curr->thread_mask & tid_bit) {
 			LIST_DEL(&curr->runq);
 			curr->state = APPLET_RUNNING;
 			LIST_ADDQ(&applet_cur_queue, &curr->runq);
--- a/src/ev_epoll.c
+++ b/src/ev_epoll.c
@@ -136,7 +136,7 @@ REGPRM2 static void _do_poll(struct poller *p, int exp)
 		unsigned int e = epoll_events[count].events;
 		fd = epoll_events[count].data.fd;

-		if (!fdtab[fd].owner || !(fdtab[fd].process_mask & tid_bit))
+		if (!fdtab[fd].owner || !(fdtab[fd].thread_mask & tid_bit))
 			continue;

 		/* it looks complicated but gcc can optimize it away when constants
--- a/src/ev_kqueue.c
+++ b/src/ev_kqueue.c
@@ -122,7 +122,7 @@ REGPRM2 static void _do_poll(struct poller *p, int exp)
 		unsigned int n = 0;
 		fd = kev[count].ident;

-		if (!fdtab[fd].owner || !(fdtab[fd].process_mask & tid_bit))
+		if (!fdtab[fd].owner || !(fdtab[fd].thread_mask & tid_bit))
 			continue;

 		if (kev[count].filter == EVFILT_READ) {
--- a/src/ev_poll.c
+++ b/src/ev_poll.c
@@ -112,7 +112,7 @@ REGPRM2 static void _do_poll(struct poller *p, int exp)

 		for (count = 0, fd = fds * 8*sizeof(**fd_evts); count < 8*sizeof(**fd_evts) && fd < maxfd; count++, fd++) {

-			if (!fdtab[fd].owner || !(fdtab[fd].process_mask & tid_bit))
+			if (!fdtab[fd].owner || !(fdtab[fd].thread_mask & tid_bit))
 				continue;

 			sr = (rn >> count) & 1;
--- a/src/ev_select.c
+++ b/src/ev_select.c
@@ -141,7 +141,7 @@ REGPRM2 static void _do_poll(struct poller *p, int exp)
 			/* if we specify read first, the accepts and zero reads will be
 			 * seen first. Moreover, system buffers will be flushed faster.
 			 */
-			if (!fdtab[fd].owner || !(fdtab[fd].process_mask & tid_bit))
+			if (!fdtab[fd].owner || !(fdtab[fd].thread_mask & tid_bit))
 				continue;

 			if (FD_ISSET(fd, tmp_evts[DIR_RD]))
--- a/src/fd.c
+++ b/src/fd.c
@@ -202,7 +202,7 @@ static void fd_dodelete(int fd, int do_close)
 	fdtab[fd].owner = NULL;
 	fdtab[fd].updated = 0;
 	fdtab[fd].new = 0;
-	fdtab[fd].process_mask = 0;
+	fdtab[fd].thread_mask = 0;
 	if (do_close)
 		close(fd);
 	SPIN_UNLOCK(FD_LOCK, &fdtab[fd].lock);
@@ -245,7 +245,7 @@ void fd_process_cached_events()
 	for (entry = 0; entry < fd_cache_num; ) {
 		fd = fd_cache[entry];

-		if (!(fdtab[fd].process_mask & tid_bit))
+		if (!(fdtab[fd].thread_mask & tid_bit))
 			goto next;
 		if (SPIN_TRYLOCK(FD_LOCK, &fdtab[fd].lock))
 			goto next;
|
@ -229,7 +229,7 @@ void process_runnable_tasks()
|
|||||||
while (local_tasks_count < 16) {
|
while (local_tasks_count < 16) {
|
||||||
t = eb32_entry(rq_next, struct task, rq);
|
t = eb32_entry(rq_next, struct task, rq);
|
||||||
rq_next = eb32_next(rq_next);
|
rq_next = eb32_next(rq_next);
|
||||||
if (t->process_mask & tid_bit) {
|
if (t->thread_mask & tid_bit) {
|
||||||
/* detach the task from the queue */
|
/* detach the task from the queue */
|
||||||
__task_unlink_rq(t);
|
__task_unlink_rq(t);
|
||||||
t->state |= TASK_RUNNING;
|
t->state |= TASK_RUNNING;
|
||||||
|
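As a closing illustration, a hedged usage sketch of the renamed affinity setter. Only task_set_affinity() and the thread_mask field mirror the include/proto/task.h hunk above; the reduced struct and the calling function are hypothetical stand-ins.

/* Illustrative only: exercises task_set_affinity() as defined in the
 * proto/task.h hunk above. The struct is reduced to the one field
 * relevant to the rename; the call sites are hypothetical. */
struct task {
	unsigned long thread_mask;   /* mask of thread IDs authorized to process the task */
};

static inline void task_set_affinity(struct task *t, unsigned long thread_mask)
{
	t->thread_mask = thread_mask;
}

static void pin_examples(struct task *t)
{
	task_set_affinity(t, 1UL << 3); /* only thread 3 may process the task */
	task_set_affinity(t, ~0UL);     /* any thread may process the task */
}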