CLEANUP: atomic/tree-wide: replace single increments/decrements with inc/dec
This patch replaces roughly all occurrences of an HA_ATOMIC_ADD(&foo, 1) or HA_ATOMIC_SUB(&foo, 1) with the equivalent HA_ATOMIC_INC(&foo) and HA_ATOMIC_DEC(&foo) respectively. These are 507 changes over 45 files.
commit 4781b1521a (parent 22d675cb77)
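For readers unfamiliar with these wrappers, the sketch below shows the shape of the change. The macro definitions here are a minimal illustration assuming gcc/clang __atomic builtins; HAProxy's real definitions live in include/haproxy/atomic.h, come in several variants (HA_/_HA_ prefixed, with and without barriers), and differ per platform.

/* Minimal sketch of the wrappers involved (assumption: gcc/clang
 * __atomic builtins; the real definitions are in include/haproxy/atomic.h
 * and vary per platform and barrier requirements). */
#define HA_ATOMIC_ADD(val, i)   __atomic_add_fetch((val), (i), __ATOMIC_SEQ_CST)
#define HA_ATOMIC_SUB(val, i)   __atomic_sub_fetch((val), (i), __ATOMIC_SEQ_CST)
#define HA_ATOMIC_INC(val)      HA_ATOMIC_ADD((val), 1)
#define HA_ATOMIC_DEC(val)      HA_ATOMIC_SUB((val), 1)

static unsigned int nb_applets;

static void example(void)
{
	HA_ATOMIC_ADD(&nb_applets, 1);  /* old form, replaced throughout below */
	HA_ATOMIC_INC(&nb_applets);     /* new form, same semantics, clearer intent */
}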
@@ -78,7 +78,7 @@ static inline struct appctx *appctx_new(struct applet *applet, unsigned long thr
 		LIST_INIT(&appctx->buffer_wait.list);
 		appctx->buffer_wait.target = appctx;
 		appctx->buffer_wait.wakeup_cb = appctx_buf_available;
-		_HA_ATOMIC_ADD(&nb_applets, 1);
+		_HA_ATOMIC_INC(&nb_applets);
 	}
 	return appctx;
 }

@@ -91,7 +91,7 @@ static inline void __appctx_free(struct appctx *appctx)
 		LIST_DEL_INIT(&appctx->buffer_wait.list);

 	pool_free(pool_head_appctx, appctx);
-	_HA_ATOMIC_SUB(&nb_applets, 1);
+	_HA_ATOMIC_DEC(&nb_applets);
 }

 static inline void appctx_free(struct appctx *appctx)

@@ -130,7 +130,7 @@ struct mem_stats {
 	}; \
 	__asm__(".globl __start_mem_stats"); \
 	__asm__(".globl __stop_mem_stats"); \
-	_HA_ATOMIC_ADD(&_.calls, 1); \
+	_HA_ATOMIC_INC(&_.calls); \
 	_HA_ATOMIC_ADD(&_.size, __x * __y); \
 	calloc(__x,__y); \
 })

@@ -148,7 +148,7 @@ struct mem_stats {
 	__asm__(".globl __start_mem_stats"); \
 	__asm__(".globl __stop_mem_stats"); \
 	if (__x) \
-		_HA_ATOMIC_ADD(&_.calls, 1); \
+		_HA_ATOMIC_INC(&_.calls); \
 	free(__x); \
 })

@@ -165,7 +165,7 @@ struct mem_stats {
 		HA_LINK_ERROR(call_to_ha_free_attempts_to_free_a_constant); \
 	} \
 	if (*__x) \
-		_HA_ATOMIC_ADD(&_.calls, 1); \
+		_HA_ATOMIC_INC(&_.calls); \
 	free(*__x); \
 	*__x = NULL; \
 })

@@ -179,7 +179,7 @@ struct mem_stats {
 	}; \
 	__asm__(".globl __start_mem_stats"); \
 	__asm__(".globl __stop_mem_stats"); \
-	_HA_ATOMIC_ADD(&_.calls, 1); \
+	_HA_ATOMIC_INC(&_.calls); \
 	_HA_ATOMIC_ADD(&_.size, __x); \
 	malloc(__x); \
 })

@@ -193,7 +193,7 @@ struct mem_stats {
 	}; \
 	__asm__(".globl __start_mem_stats"); \
 	__asm__(".globl __stop_mem_stats"); \
-	_HA_ATOMIC_ADD(&_.calls, 1); \
+	_HA_ATOMIC_INC(&_.calls); \
 	_HA_ATOMIC_ADD(&_.size, __y); \
 	realloc(__x,__y); \
 })

@@ -207,7 +207,7 @@ struct mem_stats {
 	}; \
 	__asm__(".globl __start_mem_stats"); \
 	__asm__(".globl __stop_mem_stats"); \
-	_HA_ATOMIC_ADD(&_.calls, 1); \
+	_HA_ATOMIC_INC(&_.calls); \
 	_HA_ATOMIC_ADD(&_.size, __y); \
 	strdup(__x); \
 })
@@ -448,7 +448,7 @@ static inline void fd_insert(int fd, void *owner, void (*iocb)(int fd), unsigned

 	/* the two directions are ready until proven otherwise */
 	fd_may_both(fd);
-	_HA_ATOMIC_ADD(&ha_used_fds, 1);
+	_HA_ATOMIC_INC(&ha_used_fds);
 }

 /* Computes the bounded poll() timeout based on the next expiration timer <next>

@@ -163,8 +163,8 @@ static inline void *__pool_get_first(struct pool_head *pool)

 static inline void __pool_free(struct pool_head *pool, void *ptr)
 {
-	_HA_ATOMIC_SUB(&pool->used, 1);
-	_HA_ATOMIC_SUB(&pool->allocated, 1);
+	_HA_ATOMIC_DEC(&pool->used);
+	_HA_ATOMIC_DEC(&pool->allocated);
 	pool_free_area(ptr, pool->size + POOL_EXTRA);
 }

@@ -194,7 +194,7 @@ static inline void *__pool_get_first(struct pool_head *pool)
 	} while (HA_ATOMIC_DWCAS((void *)&pool->free_list, (void *)&cmp, (void *)&new) == 0);
 	__ha_barrier_atomic_store();

-	_HA_ATOMIC_ADD(&pool->used, 1);
+	_HA_ATOMIC_INC(&pool->used);
 #ifdef DEBUG_MEMORY_POOLS
 	/* keep track of where the element was allocated from */
 	*POOL_LINK(pool, cmp.free_list) = (void *)pool;

@@ -210,11 +210,11 @@ static inline void __pool_free(struct pool_head *pool, void *ptr)
 {
 	void **free_list = pool->free_list;

-	_HA_ATOMIC_SUB(&pool->used, 1);
+	_HA_ATOMIC_DEC(&pool->used);

 	if (unlikely(pool_is_crowded(pool))) {
 		pool_free_area(ptr, pool->size + POOL_EXTRA);
-		_HA_ATOMIC_SUB(&pool->allocated, 1);
+		_HA_ATOMIC_DEC(&pool->allocated);
 	} else {
 		do {
 			*POOL_LINK(pool, ptr) = (void *)free_list;

@@ -122,9 +122,9 @@ static inline void proxy_reset_timeouts(struct proxy *proxy)
 /* increase the number of cumulated connections received on the designated frontend */
 static inline void proxy_inc_fe_conn_ctr(struct listener *l, struct proxy *fe)
 {
-	_HA_ATOMIC_ADD(&fe->fe_counters.cum_conn, 1);
+	_HA_ATOMIC_INC(&fe->fe_counters.cum_conn);
 	if (l && l->counters)
-		_HA_ATOMIC_ADD(&l->counters->cum_conn, 1);
+		_HA_ATOMIC_INC(&l->counters->cum_conn);
 	HA_ATOMIC_UPDATE_MAX(&fe->fe_counters.cps_max,
 			     update_freq_ctr(&fe->fe_conn_per_sec, 1));
 }

@@ -133,9 +133,9 @@ static inline void proxy_inc_fe_conn_ctr(struct listener *l, struct proxy *fe)
 static inline void proxy_inc_fe_sess_ctr(struct listener *l, struct proxy *fe)
 {

-	_HA_ATOMIC_ADD(&fe->fe_counters.cum_sess, 1);
+	_HA_ATOMIC_INC(&fe->fe_counters.cum_sess);
 	if (l && l->counters)
-		_HA_ATOMIC_ADD(&l->counters->cum_sess, 1);
+		_HA_ATOMIC_INC(&l->counters->cum_sess);
 	HA_ATOMIC_UPDATE_MAX(&fe->fe_counters.sps_max,
 			     update_freq_ctr(&fe->fe_sess_per_sec, 1));
 }

@@ -143,7 +143,7 @@ static inline void proxy_inc_fe_sess_ctr(struct listener *l, struct proxy *fe)
 /* increase the number of cumulated connections on the designated backend */
 static inline void proxy_inc_be_ctr(struct proxy *be)
 {
-	_HA_ATOMIC_ADD(&be->be_counters.cum_conn, 1);
+	_HA_ATOMIC_INC(&be->be_counters.cum_conn);
 	HA_ATOMIC_UPDATE_MAX(&be->be_counters.sps_max,
 			     update_freq_ctr(&be->be_sess_per_sec, 1));
 }

@@ -151,9 +151,9 @@ static inline void proxy_inc_be_ctr(struct proxy *be)
 /* increase the number of cumulated requests on the designated frontend */
 static inline void proxy_inc_fe_req_ctr(struct listener *l, struct proxy *fe)
 {
-	_HA_ATOMIC_ADD(&fe->fe_counters.p.http.cum_req, 1);
+	_HA_ATOMIC_INC(&fe->fe_counters.p.http.cum_req);
 	if (l && l->counters)
-		_HA_ATOMIC_ADD(&l->counters->p.http.cum_req, 1);
+		_HA_ATOMIC_INC(&l->counters->p.http.cum_req);
 	HA_ATOMIC_UPDATE_MAX(&fe->fe_counters.p.http.rps_max,
 			     update_freq_ctr(&fe->fe_req_per_sec, 1));
 }

@@ -159,7 +159,7 @@ void srv_set_dyncookie(struct server *s);
 /* increase the number of cumulated connections on the designated server */
 static inline void srv_inc_sess_ctr(struct server *s)
 {
-	_HA_ATOMIC_ADD(&s->counters.cum_sess, 1);
+	_HA_ATOMIC_INC(&s->counters.cum_sess);
 	HA_ATOMIC_UPDATE_MAX(&s->counters.sps_max,
 			     update_freq_ctr(&s->sess_per_sec, 1));
 }

@@ -277,15 +277,15 @@ static inline void srv_release_conn(struct server *srv, struct connection *conn)
 		/* The connection is currently in the server's idle list, so tell it
 		 * there's one less connection available in that list.
 		 */
-		_HA_ATOMIC_SUB(&srv->curr_idle_conns, 1);
-		_HA_ATOMIC_SUB(conn->flags & CO_FL_SAFE_LIST ? &srv->curr_safe_nb : &srv->curr_idle_nb, 1);
-		_HA_ATOMIC_SUB(&srv->curr_idle_thr[tid], 1);
+		_HA_ATOMIC_DEC(&srv->curr_idle_conns);
+		_HA_ATOMIC_DEC(conn->flags & CO_FL_SAFE_LIST ? &srv->curr_safe_nb : &srv->curr_idle_nb);
+		_HA_ATOMIC_DEC(&srv->curr_idle_thr[tid]);
 	}
 	else {
 		/* The connection is not private and not in any server's idle
 		 * list, so decrement the current number of used connections
 		 */
-		_HA_ATOMIC_SUB(&srv->curr_used_conns, 1);
+		_HA_ATOMIC_DEC(&srv->curr_used_conns);
 	}

 	/* Remove the connection from any tree (safe, idle or available) */

@@ -320,10 +320,10 @@ static inline int srv_add_to_idle_list(struct server *srv, struct connection *co

 		retadd = _HA_ATOMIC_ADD_FETCH(&srv->curr_idle_conns, 1);
 		if (retadd > srv->max_idle_conns) {
-			_HA_ATOMIC_SUB(&srv->curr_idle_conns, 1);
+			_HA_ATOMIC_DEC(&srv->curr_idle_conns);
 			return 0;
 		}
-		_HA_ATOMIC_SUB(&srv->curr_used_conns, 1);
+		_HA_ATOMIC_DEC(&srv->curr_used_conns);

 		HA_SPIN_LOCK(IDLE_CONNS_LOCK, &idle_conns[tid].idle_conns_lock);
 		conn_delete_from_tree(&conn->hash_node->node);

@@ -331,14 +331,14 @@ static inline int srv_add_to_idle_list(struct server *srv, struct connection *co
 		if (is_safe) {
 			conn->flags = (conn->flags & ~CO_FL_LIST_MASK) | CO_FL_SAFE_LIST;
 			ebmb_insert(&srv->per_thr[tid].safe_conns, &conn->hash_node->node, sizeof(conn->hash_node->hash));
-			_HA_ATOMIC_ADD(&srv->curr_safe_nb, 1);
+			_HA_ATOMIC_INC(&srv->curr_safe_nb);
 		} else {
 			conn->flags = (conn->flags & ~CO_FL_LIST_MASK) | CO_FL_IDLE_LIST;
 			ebmb_insert(&srv->per_thr[tid].idle_conns, &conn->hash_node->node, sizeof(conn->hash_node->hash));
-			_HA_ATOMIC_ADD(&srv->curr_idle_nb, 1);
+			_HA_ATOMIC_INC(&srv->curr_idle_nb);
 		}
 		HA_SPIN_UNLOCK(IDLE_CONNS_LOCK, &idle_conns[tid].idle_conns_lock);
-		_HA_ATOMIC_ADD(&srv->curr_idle_thr[tid], 1);
+		_HA_ATOMIC_INC(&srv->curr_idle_thr[tid]);

 		__ha_barrier_full();
 		if ((volatile void *)srv->idle_node.node.leaf_p == NULL) {

@@ -72,7 +72,7 @@ static inline ssize_t sink_write(struct sink *sink, const struct ist msg[], size

  fail:
 	if (unlikely(sent <= 0))
-		HA_ATOMIC_ADD(&sink->ctx.dropped, 1);
+		HA_ATOMIC_INC(&sink->ctx.dropped);

 	return sent;
 }

@@ -345,8 +345,8 @@ static inline void stream_choose_redispatch(struct stream *s)
 			si->state = SI_ST_REQ;
 		} else {
 			if (objt_server(s->target))
-				_HA_ATOMIC_ADD(&__objt_server(s->target)->counters.retries, 1);
-			_HA_ATOMIC_ADD(&s->be->be_counters.retries, 1);
+				_HA_ATOMIC_INC(&__objt_server(s->target)->counters.retries);
+			_HA_ATOMIC_INC(&s->be->be_counters.retries);
 			si->state = SI_ST_ASS;
 		}

@@ -333,12 +333,12 @@ static inline struct task *task_unlink_rq(struct task *t)
 	if (done) {
 		if (is_global) {
 			_HA_ATOMIC_AND(&t->state, ~TASK_GLOBAL);
-			_HA_ATOMIC_SUB(&grq_total, 1);
+			_HA_ATOMIC_DEC(&grq_total);
 		}
 		else
-			_HA_ATOMIC_SUB(&sched->rq_total, 1);
+			_HA_ATOMIC_DEC(&sched->rq_total);
 		if (t->nice)
-			_HA_ATOMIC_SUB(&niced_tasks, 1);
+			_HA_ATOMIC_DEC(&niced_tasks);
 	}
 	return t;
 }

@@ -405,7 +405,7 @@ static inline void tasklet_remove_from_tasklet_list(struct tasklet *t)
 {
 	if (MT_LIST_DEL((struct mt_list *)&t->list)) {
 		_HA_ATOMIC_AND(&t->state, ~TASK_IN_LIST);
-		_HA_ATOMIC_SUB(&task_per_thread[t->tid >= 0 ? t->tid : tid].rq_total, 1);
+		_HA_ATOMIC_DEC(&task_per_thread[t->tid >= 0 ? t->tid : tid].rq_total);
 	}
 }

@@ -532,7 +532,7 @@ static inline void task_destroy(struct task *t)
 static inline void tasklet_free(struct tasklet *tl)
 {
 	if (MT_LIST_DEL((struct mt_list *)&tl->list))
-		_HA_ATOMIC_SUB(&task_per_thread[tl->tid >= 0 ? tl->tid : tid].rq_total, 1);
+		_HA_ATOMIC_DEC(&task_per_thread[tl->tid >= 0 ? tl->tid : tid].rq_total);

 #ifdef DEBUG_TASK
 	if ((unsigned int)tl->debug.caller_idx > 1)

@@ -559,7 +559,7 @@ static inline void __ha_rwlock_wrlock(enum lock_label lbl, struct ha_rwlock *l,
 	__RWLOCK_WRLOCK(&l->lock);
 	HA_ATOMIC_ADD(&lock_stats[lbl].nsec_wait_for_write, (nsec_now() - start_time));

-	HA_ATOMIC_ADD(&lock_stats[lbl].num_write_locked, 1);
+	HA_ATOMIC_INC(&lock_stats[lbl].num_write_locked);

 	l->info.cur_writer = tid_bit;
 	l->info.last_location.function = func;

@@ -588,7 +588,7 @@ static inline int __ha_rwlock_trywrlock(enum lock_label lbl, struct ha_rwlock *l
 		HA_ATOMIC_AND(&l->info.wait_writers, ~tid_bit);
 		return r;
 	}
-	HA_ATOMIC_ADD(&lock_stats[lbl].num_write_locked, 1);
+	HA_ATOMIC_INC(&lock_stats[lbl].num_write_locked);

 	l->info.cur_writer = tid_bit;
 	l->info.last_location.function = func;

@@ -615,7 +615,7 @@ static inline void __ha_rwlock_wrunlock(enum lock_label lbl,struct ha_rwlock *l,

 	__RWLOCK_WRUNLOCK(&l->lock);

-	HA_ATOMIC_ADD(&lock_stats[lbl].num_write_unlocked, 1);
+	HA_ATOMIC_INC(&lock_stats[lbl].num_write_unlocked);
 }

 static inline void __ha_rwlock_rdlock(enum lock_label lbl,struct ha_rwlock *l)

@@ -630,7 +630,7 @@ static inline void __ha_rwlock_rdlock(enum lock_label lbl,struct ha_rwlock *l)
 	start_time = nsec_now();
 	__RWLOCK_RDLOCK(&l->lock);
 	HA_ATOMIC_ADD(&lock_stats[lbl].nsec_wait_for_read, (nsec_now() - start_time));
-	HA_ATOMIC_ADD(&lock_stats[lbl].num_read_locked, 1);
+	HA_ATOMIC_INC(&lock_stats[lbl].num_read_locked);

 	HA_ATOMIC_OR(&l->info.cur_readers, tid_bit);

@@ -648,7 +648,7 @@ static inline int __ha_rwlock_tryrdlock(enum lock_label lbl,struct ha_rwlock *l)
 	r = __RWLOCK_TRYRDLOCK(&l->lock);
 	if (unlikely(r))
 		return r;
-	HA_ATOMIC_ADD(&lock_stats[lbl].num_read_locked, 1);
+	HA_ATOMIC_INC(&lock_stats[lbl].num_read_locked);

 	HA_ATOMIC_OR(&l->info.cur_readers, tid_bit);

@@ -666,7 +666,7 @@ static inline void __ha_rwlock_rdunlock(enum lock_label lbl,struct ha_rwlock *l)

 	__RWLOCK_RDUNLOCK(&l->lock);

-	HA_ATOMIC_ADD(&lock_stats[lbl].num_read_unlocked, 1);
+	HA_ATOMIC_INC(&lock_stats[lbl].num_read_unlocked);
 }

 static inline void __ha_rwlock_wrtord(enum lock_label lbl, struct ha_rwlock *l,

@@ -686,7 +686,7 @@ static inline void __ha_rwlock_wrtord(enum lock_label lbl, struct ha_rwlock *l,
 	__RWLOCK_WRTORD(&l->lock);
 	HA_ATOMIC_ADD(&lock_stats[lbl].nsec_wait_for_read, (nsec_now() - start_time));

-	HA_ATOMIC_ADD(&lock_stats[lbl].num_read_locked, 1);
+	HA_ATOMIC_INC(&lock_stats[lbl].num_read_locked);

 	HA_ATOMIC_OR(&l->info.cur_readers, tid_bit);
 	HA_ATOMIC_AND(&l->info.cur_writer, ~tid_bit);

@@ -714,7 +714,7 @@ static inline void __ha_rwlock_wrtosk(enum lock_label lbl, struct ha_rwlock *l,
 	__RWLOCK_WRTOSK(&l->lock);
 	HA_ATOMIC_ADD(&lock_stats[lbl].nsec_wait_for_seek, (nsec_now() - start_time));

-	HA_ATOMIC_ADD(&lock_stats[lbl].num_seek_locked, 1);
+	HA_ATOMIC_INC(&lock_stats[lbl].num_seek_locked);

 	HA_ATOMIC_OR(&l->info.cur_seeker, tid_bit);
 	HA_ATOMIC_AND(&l->info.cur_writer, ~tid_bit);

@@ -739,7 +739,7 @@ static inline void __ha_rwlock_sklock(enum lock_label lbl, struct ha_rwlock *l,
 	__RWLOCK_SKLOCK(&l->lock);
 	HA_ATOMIC_ADD(&lock_stats[lbl].nsec_wait_for_seek, (nsec_now() - start_time));

-	HA_ATOMIC_ADD(&lock_stats[lbl].num_seek_locked, 1);
+	HA_ATOMIC_INC(&lock_stats[lbl].num_seek_locked);

 	HA_ATOMIC_OR(&l->info.cur_seeker, tid_bit);
 	l->info.last_location.function = func;

@@ -766,7 +766,7 @@ static inline void __ha_rwlock_sktowr(enum lock_label lbl, struct ha_rwlock *l,
 	__RWLOCK_SKTOWR(&l->lock);
 	HA_ATOMIC_ADD(&lock_stats[lbl].nsec_wait_for_write, (nsec_now() - start_time));

-	HA_ATOMIC_ADD(&lock_stats[lbl].num_write_locked, 1);
+	HA_ATOMIC_INC(&lock_stats[lbl].num_write_locked);

 	HA_ATOMIC_OR(&l->info.cur_writer, tid_bit);
 	HA_ATOMIC_AND(&l->info.cur_seeker, ~tid_bit);

@@ -794,7 +794,7 @@ static inline void __ha_rwlock_sktord(enum lock_label lbl, struct ha_rwlock *l,
 	__RWLOCK_SKTORD(&l->lock);
 	HA_ATOMIC_ADD(&lock_stats[lbl].nsec_wait_for_read, (nsec_now() - start_time));

-	HA_ATOMIC_ADD(&lock_stats[lbl].num_read_locked, 1);
+	HA_ATOMIC_INC(&lock_stats[lbl].num_read_locked);

 	HA_ATOMIC_OR(&l->info.cur_readers, tid_bit);
 	HA_ATOMIC_AND(&l->info.cur_seeker, ~tid_bit);

@@ -818,7 +818,7 @@ static inline void __ha_rwlock_skunlock(enum lock_label lbl,struct ha_rwlock *l,

 	__RWLOCK_SKUNLOCK(&l->lock);

-	HA_ATOMIC_ADD(&lock_stats[lbl].num_seek_unlocked, 1);
+	HA_ATOMIC_INC(&lock_stats[lbl].num_seek_unlocked);
 }

 static inline int __ha_rwlock_trysklock(enum lock_label lbl, struct ha_rwlock *l,

@@ -838,7 +838,7 @@ static inline int __ha_rwlock_trysklock(enum lock_label lbl, struct ha_rwlock *l

 	if (likely(!r)) {
 		/* got the lock ! */
-		HA_ATOMIC_ADD(&lock_stats[lbl].num_seek_locked, 1);
+		HA_ATOMIC_INC(&lock_stats[lbl].num_seek_locked);
 		HA_ATOMIC_OR(&l->info.cur_seeker, tid_bit);
 		l->info.last_location.function = func;
 		l->info.last_location.file = file;

@@ -869,7 +869,7 @@ static inline int __ha_rwlock_tryrdtosk(enum lock_label lbl, struct ha_rwlock *l

 	if (likely(!r)) {
 		/* got the lock ! */
-		HA_ATOMIC_ADD(&lock_stats[lbl].num_seek_locked, 1);
+		HA_ATOMIC_INC(&lock_stats[lbl].num_seek_locked);
 		HA_ATOMIC_OR(&l->info.cur_seeker, tid_bit);
 		HA_ATOMIC_AND(&l->info.cur_readers, ~tid_bit);
 		l->info.last_location.function = func;

@@ -909,7 +909,7 @@ static inline void __spin_lock(enum lock_label lbl, struct ha_spinlock *l,
 	__SPIN_LOCK(&l->lock);
 	HA_ATOMIC_ADD(&lock_stats[lbl].nsec_wait_for_write, (nsec_now() - start_time));

-	HA_ATOMIC_ADD(&lock_stats[lbl].num_write_locked, 1);
+	HA_ATOMIC_INC(&lock_stats[lbl].num_write_locked);


 	l->info.owner = tid_bit;

@@ -934,7 +934,7 @@ static inline int __spin_trylock(enum lock_label lbl, struct ha_spinlock *l,
 	r = __SPIN_TRYLOCK(&l->lock);
 	if (unlikely(r))
 		return r;
-	HA_ATOMIC_ADD(&lock_stats[lbl].num_write_locked, 1);
+	HA_ATOMIC_INC(&lock_stats[lbl].num_write_locked);

 	l->info.owner = tid_bit;
 	l->info.last_location.function = func;

@@ -958,7 +958,7 @@ static inline void __spin_unlock(enum lock_label lbl, struct ha_spinlock *l,
 	l->info.last_location.line = line;

 	__SPIN_UNLOCK(&l->lock);
-	HA_ATOMIC_ADD(&lock_stats[lbl].num_write_unlocked, 1);
+	HA_ATOMIC_INC(&lock_stats[lbl].num_write_unlocked);
 }

 #endif /* DEBUG_THREAD */
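All of the DEBUG_THREAD hunks above share the same instrumentation shape: time the blocking acquisition, account the wait, then count the event. A hedged sketch using pthreads stand-ins follows; lock_stats, nsec_now() and the __RWLOCK_* wrappers are HAProxy internals, so the types and calls below are illustrative assumptions only.

#include <pthread.h>
#include <stdint.h>

struct lock_stats_entry {
	uint64_t nsec_wait_for_write;   /* total time spent blocked on write locks */
	uint64_t num_write_locked;      /* number of write acquisitions */
};

extern struct lock_stats_entry lock_stats[];
extern uint64_t nsec_now(void);

static inline void sketch_wrlock(int lbl, pthread_rwlock_t *l)
{
	uint64_t start_time = nsec_now();

	pthread_rwlock_wrlock(l);  /* stands in for __RWLOCK_WRLOCK() */
	__atomic_add_fetch(&lock_stats[lbl].nsec_wait_for_write,
	                   nsec_now() - start_time, __ATOMIC_RELAXED);
	__atomic_add_fetch(&lock_stats[lbl].num_write_locked, 1, __ATOMIC_RELAXED);
}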
@@ -782,8 +782,8 @@ int assign_server(struct stream *s)
 			goto out;
 		}
 		else if (srv != prev_srv) {
-			_HA_ATOMIC_ADD(&s->be->be_counters.cum_lbconn, 1);
-			_HA_ATOMIC_ADD(&srv->counters.cum_lbconn, 1);
+			_HA_ATOMIC_INC(&s->be->be_counters.cum_lbconn);
+			_HA_ATOMIC_INC(&srv->counters.cum_lbconn);
 		}
 		s->target = &srv->obj_type;
 	}

@@ -963,11 +963,11 @@ int assign_server_and_queue(struct stream *s)
 					s->txn->flags |= TX_CK_DOWN;
 				}
 				s->flags |= SF_REDISP;
-				_HA_ATOMIC_ADD(&prev_srv->counters.redispatches, 1);
-				_HA_ATOMIC_ADD(&s->be->be_counters.redispatches, 1);
+				_HA_ATOMIC_INC(&prev_srv->counters.redispatches);
+				_HA_ATOMIC_INC(&s->be->be_counters.redispatches);
 			} else {
-				_HA_ATOMIC_ADD(&prev_srv->counters.retries, 1);
-				_HA_ATOMIC_ADD(&s->be->be_counters.retries, 1);
+				_HA_ATOMIC_INC(&prev_srv->counters.retries);
+				_HA_ATOMIC_INC(&s->be->be_counters.retries);
 			}
 		}
 	}

@@ -1187,7 +1187,7 @@ static struct connection *conn_backend_get(struct stream *s, struct server *srv,
 		while (conn) {
 			if (conn->mux->takeover && conn->mux->takeover(conn, i) == 0) {
 				conn_delete_from_tree(&conn->hash_node->node);
-				_HA_ATOMIC_ADD(&activity[tid].fd_takeover, 1);
+				_HA_ATOMIC_INC(&activity[tid].fd_takeover);
 				found = 1;
 				break;
 			}

@@ -1200,7 +1200,7 @@ static struct connection *conn_backend_get(struct stream *s, struct server *srv,
 			while (conn) {
 				if (conn->mux->takeover && conn->mux->takeover(conn, i) == 0) {
 					conn_delete_from_tree(&conn->hash_node->node);
-					_HA_ATOMIC_ADD(&activity[tid].fd_takeover, 1);
+					_HA_ATOMIC_INC(&activity[tid].fd_takeover);
 					found = 1;
 					is_safe = 1;
 					break;

@@ -1220,9 +1220,9 @@ static struct connection *conn_backend_get(struct stream *s, struct server *srv,

 	srv_use_conn(srv, conn);

-	_HA_ATOMIC_SUB(&srv->curr_idle_conns, 1);
-	_HA_ATOMIC_SUB(conn->flags & CO_FL_SAFE_LIST ? &srv->curr_safe_nb : &srv->curr_idle_nb, 1);
-	_HA_ATOMIC_SUB(&srv->curr_idle_thr[i], 1);
+	_HA_ATOMIC_DEC(&srv->curr_idle_conns);
+	_HA_ATOMIC_DEC(conn->flags & CO_FL_SAFE_LIST ? &srv->curr_safe_nb : &srv->curr_idle_nb);
+	_HA_ATOMIC_DEC(&srv->curr_idle_thr[i]);
 	conn->flags &= ~CO_FL_LIST_MASK;
 	__ha_barrier_atomic_store();

@@ -1617,13 +1617,13 @@ int connect_server(struct stream *s)
 		s->si[1].flags |= SI_FL_NOLINGER;

 	if (s->flags & SF_SRV_REUSED) {
-		_HA_ATOMIC_ADD(&s->be->be_counters.reuse, 1);
+		_HA_ATOMIC_INC(&s->be->be_counters.reuse);
 		if (srv)
-			_HA_ATOMIC_ADD(&srv->counters.reuse, 1);
+			_HA_ATOMIC_INC(&srv->counters.reuse);
 	} else {
-		_HA_ATOMIC_ADD(&s->be->be_counters.connect, 1);
+		_HA_ATOMIC_INC(&s->be->be_counters.connect);
 		if (srv)
-			_HA_ATOMIC_ADD(&srv->counters.connect, 1);
+			_HA_ATOMIC_INC(&srv->counters.connect);
 	}

 	err = si_connect(&s->si[1], srv_conn);

@@ -1800,8 +1800,8 @@ int srv_redispatch_connect(struct stream *s)
 			s->si[1].err_type = SI_ET_QUEUE_ERR;
 		}

-		_HA_ATOMIC_ADD(&srv->counters.failed_conns, 1);
-		_HA_ATOMIC_ADD(&s->be->be_counters.failed_conns, 1);
+		_HA_ATOMIC_INC(&srv->counters.failed_conns);
+		_HA_ATOMIC_INC(&s->be->be_counters.failed_conns);
 		return 1;

 	case SRV_STATUS_NOSRV:

@@ -1810,7 +1810,7 @@ int srv_redispatch_connect(struct stream *s)
 			s->si[1].err_type = SI_ET_CONN_ERR;
 		}

-		_HA_ATOMIC_ADD(&s->be->be_counters.failed_conns, 1);
+		_HA_ATOMIC_INC(&s->be->be_counters.failed_conns);
 		return 1;

 	case SRV_STATUS_QUEUED:

@@ -1830,8 +1830,8 @@ int srv_redispatch_connect(struct stream *s)
 		if (srv)
 			srv_set_sess_last(srv);
 		if (srv)
-			_HA_ATOMIC_ADD(&srv->counters.failed_conns, 1);
-		_HA_ATOMIC_ADD(&s->be->be_counters.failed_conns, 1);
+			_HA_ATOMIC_INC(&srv->counters.failed_conns);
+		_HA_ATOMIC_INC(&s->be->be_counters.failed_conns);

 		/* release other streams waiting for this server */
 		if (may_dequeue_tasks(srv, s->be))

@@ -1905,8 +1905,8 @@ void back_try_conn_req(struct stream *s)
 		if (srv)
 			srv_set_sess_last(srv);
 		if (srv)
-			_HA_ATOMIC_ADD(&srv->counters.failed_conns, 1);
-		_HA_ATOMIC_ADD(&s->be->be_counters.failed_conns, 1);
+			_HA_ATOMIC_INC(&srv->counters.failed_conns);
+		_HA_ATOMIC_INC(&s->be->be_counters.failed_conns);

 		/* release other streams waiting for this server */
 		sess_change_server(s, NULL);

@@ -1972,8 +1972,8 @@ void back_try_conn_req(struct stream *s)
 		pendconn_cond_unlink(s->pend_pos);

 		if (srv)
-			_HA_ATOMIC_ADD(&srv->counters.failed_conns, 1);
-		_HA_ATOMIC_ADD(&s->be->be_counters.failed_conns, 1);
+			_HA_ATOMIC_INC(&srv->counters.failed_conns);
+		_HA_ATOMIC_INC(&s->be->be_counters.failed_conns);
 		si_shutr(si);
 		si_shutw(si);
 		req->flags |= CF_WRITE_TIMEOUT;

@@ -2201,7 +2201,7 @@ void back_handle_st_cer(struct stream *s)

 		if (s->flags & SF_CURR_SESS) {
 			s->flags &= ~SF_CURR_SESS;
-			_HA_ATOMIC_SUB(&__objt_server(s->target)->cur_sess, 1);
+			_HA_ATOMIC_DEC(&__objt_server(s->target)->cur_sess);
 		}

 		if ((si->flags & SI_FL_ERR) &&

@@ -2235,8 +2235,8 @@ void back_handle_st_cer(struct stream *s)
 		}

 		if (objt_server(s->target))
-			_HA_ATOMIC_ADD(&objt_server(s->target)->counters.failed_conns, 1);
-		_HA_ATOMIC_ADD(&s->be->be_counters.failed_conns, 1);
+			_HA_ATOMIC_INC(&objt_server(s->target)->counters.failed_conns);
+		_HA_ATOMIC_INC(&s->be->be_counters.failed_conns);
 		sess_change_server(s, NULL);
 		if (may_dequeue_tasks(objt_server(s->target), s->be))
 			process_srv_queue(objt_server(s->target));

@@ -2362,7 +2362,7 @@ void back_handle_st_rdy(struct stream *s)
 void set_backend_down(struct proxy *be)
 {
 	be->last_change = now.tv_sec;
-	_HA_ATOMIC_ADD(&be->down_trans, 1);
+	_HA_ATOMIC_INC(&be->down_trans);

 	if (!(global.mode & MODE_STARTING)) {
 		ha_alert("%s '%s' has no server available!\n", proxy_type_str(be), be->id);

@@ -1768,9 +1768,9 @@ enum act_return http_action_req_cache_use(struct act_rule *rule, struct proxy *p
 		return ACT_RET_CONT;

 	if (px == strm_fe(s))
-		_HA_ATOMIC_ADD(&px->fe_counters.p.http.cache_lookups, 1);
+		_HA_ATOMIC_INC(&px->fe_counters.p.http.cache_lookups);
 	else
-		_HA_ATOMIC_ADD(&px->be_counters.p.http.cache_lookups, 1);
+		_HA_ATOMIC_INC(&px->be_counters.p.http.cache_lookups);

 	shctx_lock(shctx_ptr(cache));
 	res = entry_exist(cache, s->txn->cache_hash);

@@ -1822,9 +1822,9 @@ enum act_return http_action_req_cache_use(struct act_rule *rule, struct proxy *p
 		    should_send_notmodified_response(cache, htxbuf(&s->req.buf), res);

 		if (px == strm_fe(s))
-			_HA_ATOMIC_ADD(&px->fe_counters.p.http.cache_hits, 1);
+			_HA_ATOMIC_INC(&px->fe_counters.p.http.cache_hits);
 		else
-			_HA_ATOMIC_ADD(&px->be_counters.p.http.cache_hits, 1);
+			_HA_ATOMIC_INC(&px->be_counters.p.http.cache_hits);
 		return ACT_RET_CONT;
 	} else {
 		shctx_lock(shctx_ptr(cache));

@@ -269,7 +269,7 @@ void set_server_check_status(struct check *check, short status, const char *desc
 		if ((!(check->state & CHK_ST_AGENT) ||
 		    (check->status >= HCHK_STATUS_L57DATA)) &&
 		    (check->health > 0)) {
-			_HA_ATOMIC_ADD(&s->counters.failed_checks, 1);
+			_HA_ATOMIC_INC(&s->counters.failed_checks);
 			report = 1;
 			check->health--;
 			if (check->health < check->rise)

@@ -436,7 +436,7 @@ void __health_adjust(struct server *s, short status)
 		return;
 	}

-	_HA_ATOMIC_ADD(&s->consecutive_errors, 1);
+	_HA_ATOMIC_INC(&s->consecutive_errors);

 	if (s->consecutive_errors < s->consecutive_errors_limit)
 		return;

@@ -486,7 +486,7 @@ void __health_adjust(struct server *s, short status)
 	HA_SPIN_UNLOCK(SERVER_LOCK, &s->lock);

 	s->consecutive_errors = 0;
-	_HA_ATOMIC_ADD(&s->counters.failed_hana, 1);
+	_HA_ATOMIC_INC(&s->counters.failed_hana);

 	if (tick_is_lt(expire, s->check.task->expire)) {
 		/* requeue check task with new expire */

@@ -2497,7 +2497,7 @@ int pcli_wait_for_response(struct stream *s, struct channel *rep, int an_bit)
 	 */

 	if (s->flags & SF_BE_ASSIGNED) {
-		HA_ATOMIC_SUB(&be->beconn, 1);
+		HA_ATOMIC_DEC(&be->beconn);
 		if (unlikely(s->srv_conn))
 			sess_change_server(s, NULL);
 	}

@@ -2542,7 +2542,7 @@ int pcli_wait_for_response(struct stream *s, struct channel *rep, int an_bit)
 	if (objt_server(s->target)) {
 		if (s->flags & SF_CURR_SESS) {
 			s->flags &= ~SF_CURR_SESS;
-			HA_ATOMIC_SUB(&__objt_server(s->target)->cur_sess, 1);
+			HA_ATOMIC_DEC(&__objt_server(s->target)->cur_sess);
 		}
 		if (may_dequeue_tasks(__objt_server(s->target), be))
 			process_srv_queue(__objt_server(s->target));

@@ -2901,7 +2901,7 @@ int mworker_cli_sockpair_new(struct mworker_proc *mworker_proc, int proc)
 	l->accept = session_accept_fd;
 	l->default_target = global.cli_fe->default_target;
 	l->options |= (LI_O_UNLIMITED | LI_O_NOSTOP);
-	HA_ATOMIC_ADD(&unstoppable_jobs, 1);
+	HA_ATOMIC_INC(&unstoppable_jobs);
 	/* it's a sockpair but we don't want to keep the fd in the master */
 	l->rx.flags &= ~RX_F_INHERITED;
 	l->nice = -64;  /* we want to boost priority for local stats */

src/debug.c (26 lines changed)

@@ -320,7 +320,7 @@ static int debug_parse_cli_exit(char **args, char *payload, struct appctx *appct
 	if (!cli_has_level(appctx, ACCESS_LVL_ADMIN))
 		return 1;

-	_HA_ATOMIC_ADD(&debug_commands_issued, 1);
+	_HA_ATOMIC_INC(&debug_commands_issued);
 	exit(code);
 	return 1;
 }

@@ -333,7 +333,7 @@ int debug_parse_cli_bug(char **args, char *payload, struct appctx *appctx, void
 	if (!cli_has_level(appctx, ACCESS_LVL_ADMIN))
 		return 1;

-	_HA_ATOMIC_ADD(&debug_commands_issued, 1);
+	_HA_ATOMIC_INC(&debug_commands_issued);
 	BUG_ON(one > zero);
 	return 1;
 }

@@ -356,7 +356,7 @@ static int debug_parse_cli_close(char **args, char *payload, struct appctx *appc
 	if (!fdtab[fd].owner)
 		return cli_msg(appctx, LOG_INFO, "File descriptor was already closed.\n");

-	_HA_ATOMIC_ADD(&debug_commands_issued, 1);
+	_HA_ATOMIC_INC(&debug_commands_issued);
 	fd_delete(fd);
 	return 1;
 }

@@ -369,7 +369,7 @@ static int debug_parse_cli_delay(char **args, char *payload, struct appctx *appc
 	if (!cli_has_level(appctx, ACCESS_LVL_ADMIN))
 		return 1;

-	_HA_ATOMIC_ADD(&debug_commands_issued, 1);
+	_HA_ATOMIC_INC(&debug_commands_issued);
 	usleep((long)delay * 1000);
 	return 1;
 }

@@ -382,7 +382,7 @@ static int debug_parse_cli_log(char **args, char *payload, struct appctx *appctx
 	if (!cli_has_level(appctx, ACCESS_LVL_ADMIN))
 		return 1;

-	_HA_ATOMIC_ADD(&debug_commands_issued, 1);
+	_HA_ATOMIC_INC(&debug_commands_issued);
 	chunk_reset(&trash);
 	for (arg = 3; *args[arg]; arg++) {
 		if (arg > 3)

@@ -403,7 +403,7 @@ static int debug_parse_cli_loop(char **args, char *payload, struct appctx *appct
 	if (!cli_has_level(appctx, ACCESS_LVL_ADMIN))
 		return 1;

-	_HA_ATOMIC_ADD(&debug_commands_issued, 1);
+	_HA_ATOMIC_INC(&debug_commands_issued);
 	gettimeofday(&curr, NULL);
 	tv_ms_add(&deadline, &curr, loop);

@@ -419,7 +419,7 @@ static int debug_parse_cli_panic(char **args, char *payload, struct appctx *appc
 	if (!cli_has_level(appctx, ACCESS_LVL_ADMIN))
 		return 1;

-	_HA_ATOMIC_ADD(&debug_commands_issued, 1);
+	_HA_ATOMIC_INC(&debug_commands_issued);
 	ha_panic();
 	return 1;
 }

@@ -435,7 +435,7 @@ static int debug_parse_cli_exec(char **args, char *payload, struct appctx *appct
 	if (!cli_has_level(appctx, ACCESS_LVL_ADMIN))
 		return 1;

-	_HA_ATOMIC_ADD(&debug_commands_issued, 1);
+	_HA_ATOMIC_INC(&debug_commands_issued);
 	chunk_reset(&trash);
 	for (arg = 3; *args[arg]; arg++) {
 		if (arg > 3)

@@ -515,7 +515,7 @@ static int debug_parse_cli_hex(char **args, char *payload, struct appctx *appctx
 	if (!start)
 		return cli_err(appctx, "Will not dump from NULL address.\n");

-	_HA_ATOMIC_ADD(&debug_commands_issued, 1);
+	_HA_ATOMIC_INC(&debug_commands_issued);

 	/* by default, dump ~128 till next block of 16 */
 	len = strtoul(args[4], NULL, 0);

@@ -546,7 +546,7 @@ static int debug_parse_cli_tkill(char **args, char *payload, struct appctx *appc
 	if (*args[4])
 		sig = atoi(args[4]);

-	_HA_ATOMIC_ADD(&debug_commands_issued, 1);
+	_HA_ATOMIC_INC(&debug_commands_issued);
 	if (thr)
 		ha_tkill(thr - 1, sig);
 	else

@@ -566,7 +566,7 @@ static int debug_parse_cli_write(char **args, char *payload, struct appctx *appc
 	if (len >= trash.size)
 		return cli_err(appctx, "Output too large, must be <tune.bufsize.\n");

-	_HA_ATOMIC_ADD(&debug_commands_issued, 1);
+	_HA_ATOMIC_INC(&debug_commands_issued);

 	chunk_reset(&trash);
 	trash.data = len;

@@ -612,7 +612,7 @@ static int debug_parse_cli_stream(char **args, char *payload, struct appctx *app
 		);
 	}

-	_HA_ATOMIC_ADD(&debug_commands_issued, 1);
+	_HA_ATOMIC_INC(&debug_commands_issued);
 	for (arg = 3; *args[arg]; arg++) {
 		old = 0;
 		end = word = args[arg];

@@ -809,7 +809,7 @@ static int debug_parse_cli_sched(char **args, char *payload, struct appctx *appc

 	mode = strcmp(args[3], "task") == 0;

-	_HA_ATOMIC_ADD(&debug_commands_issued, 1);
+	_HA_ATOMIC_INC(&debug_commands_issued);
 	for (arg = 4; *args[arg]; arg++) {
 		end = word = args[arg];
 		while (*end && *end != '=' && *end != '^' && *end != '+' && *end != '-')

@@ -86,7 +86,7 @@ struct dict_entry *dict_insert(struct dict *d, char *s)
 	de = __dict_lookup(d, s);
 	HA_RWLOCK_RDUNLOCK(DICT_LOCK, &d->rwlock);
 	if (de) {
-		HA_ATOMIC_ADD(&de->refcount, 1);
+		HA_ATOMIC_INC(&de->refcount);
 		return de;
 	}

src/dns.c (18 lines changed)

@@ -298,7 +298,7 @@ static void dns_resolve_send(struct dgram_conn *dgram)
 	 */
 	if (unlikely(ofs == ~0)) {
 		ofs = 0;
-		HA_ATOMIC_ADD(b_peek(buf, ofs), 1);
+		HA_ATOMIC_INC(b_peek(buf, ofs));
 		ofs += ring->ofs;
 	}

@@ -307,7 +307,7 @@ static void dns_resolve_send(struct dgram_conn *dgram)
 	 */
 	ofs -= ring->ofs;
 	BUG_ON(ofs >= buf->size);
-	HA_ATOMIC_SUB(b_peek(buf, ofs), 1);
+	HA_ATOMIC_DEC(b_peek(buf, ofs));

 	while (ofs + 1 < b_data(buf)) {
 		int ret;

@@ -348,7 +348,7 @@ static void dns_resolve_send(struct dgram_conn *dgram)

 out:

-	HA_ATOMIC_ADD(b_peek(buf, ofs), 1);
+	HA_ATOMIC_INC(b_peek(buf, ofs));
 	ofs += ring->ofs;
 	ns->dgram->ofs_req = ofs;
 	HA_RWLOCK_RDUNLOCK(DNS_LOCK, &ring->lock);

@@ -464,7 +464,7 @@ static void dns_session_io_handler(struct appctx *appctx)
 	if (unlikely(ofs == ~0)) {
 		ofs = 0;

-		HA_ATOMIC_ADD(b_peek(buf, ofs), 1);
+		HA_ATOMIC_INC(b_peek(buf, ofs));
 		ofs += ring->ofs;
 	}

@@ -478,7 +478,7 @@ static void dns_session_io_handler(struct appctx *appctx)
 	 */
 	ofs -= ring->ofs;
 	BUG_ON(ofs >= buf->size);
-	HA_ATOMIC_SUB(b_peek(buf, ofs), 1);
+	HA_ATOMIC_DEC(b_peek(buf, ofs));

 	ret = 1;
 	while (ofs + 1 < b_data(buf)) {

@@ -604,7 +604,7 @@ static void dns_session_io_handler(struct appctx *appctx)
 		ofs += cnt + msg_len;
 	}

-	HA_ATOMIC_ADD(b_peek(buf, ofs), 1);
+	HA_ATOMIC_INC(b_peek(buf, ofs));
 	ofs += ring->ofs;
 	ds->ofs = ofs;
 }

@@ -1084,7 +1084,7 @@ static struct task *dns_process_req(struct task *t, void *context, unsigned int
 	 */
 	if (unlikely(ofs == ~0)) {
 		ofs = 0;
-		HA_ATOMIC_ADD(b_peek(buf, ofs), 1);
+		HA_ATOMIC_INC(b_peek(buf, ofs));
 		ofs += ring->ofs;
 	}

@@ -1093,7 +1093,7 @@ static struct task *dns_process_req(struct task *t, void *context, unsigned int
 	 */
 	ofs -= ring->ofs;
 	BUG_ON(ofs >= buf->size);
-	HA_ATOMIC_SUB(b_peek(buf, ofs), 1);
+	HA_ATOMIC_DEC(b_peek(buf, ofs));

 	while (ofs + 1 < b_data(buf)) {
 		struct ist myist;

@@ -1179,7 +1179,7 @@ static struct task *dns_process_req(struct task *t, void *context, unsigned int
 		ofs += cnt + len;
 	}

-	HA_ATOMIC_ADD(b_peek(buf, ofs), 1);
+	HA_ATOMIC_INC(b_peek(buf, ofs));
 	ofs += ring->ofs;
 	dss->ofs_req = ofs;
 	HA_RWLOCK_RDUNLOCK(DNS_LOCK, &ring->lock);
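The src/dns.c hunks above increment and decrement a byte inside the ring buffer itself rather than a named counter: the byte at a reader's current offset counts the readers parked at that position, so a reader bumps it when attaching and drops it before advancing. A hedged sketch of that convention follows; this interpretation and the stub below are assumptions drawn from the visible pattern, not HAProxy's actual ring API.

#include <stdint.h>

/* pos_old/pos_new stand in for the pointers b_peek(buf, ofs) returns
 * into the ring before and after the reader consumes messages. */
static inline void reader_advance(uint8_t *pos_old, uint8_t *pos_new)
{
	__atomic_sub_fetch(pos_old, 1, __ATOMIC_SEQ_CST);  /* leave the old slot  */
	/* ... consume the messages between the two positions ... */
	__atomic_add_fetch(pos_new, 1, __ATOMIC_SEQ_CST);  /* park at the new one */
}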
@ -223,7 +223,7 @@ static void _do_poll(struct poller *p, int exp, int wake)
|
||||
fd = epoll_events[count].data.fd;
|
||||
|
||||
#ifdef DEBUG_FD
|
||||
_HA_ATOMIC_ADD(&fdtab[fd].event_count, 1);
|
||||
_HA_ATOMIC_INC(&fdtab[fd].event_count);
|
||||
#endif
|
||||
if (!fdtab[fd].owner) {
|
||||
activity[tid].poll_dead_fd++;
|
||||
|
@ -217,7 +217,7 @@ static void _do_poll(struct poller *p, int exp, int wake)
|
||||
events = evports_evlist[i].portev_events;
|
||||
|
||||
#ifdef DEBUG_FD
|
||||
_HA_ATOMIC_ADD(&fdtab[fd].event_count, 1);
|
||||
_HA_ATOMIC_INC(&fdtab[fd].event_count);
|
||||
#endif
|
||||
if (fdtab[fd].owner == NULL) {
|
||||
activity[tid].poll_dead_fd++;
|
||||
|
@ -184,7 +184,7 @@ static void _do_poll(struct poller *p, int exp, int wake)
|
||||
fd = kev[count].ident;
|
||||
|
||||
#ifdef DEBUG_FD
|
||||
_HA_ATOMIC_ADD(&fdtab[fd].event_count, 1);
|
||||
_HA_ATOMIC_INC(&fdtab[fd].event_count);
|
||||
#endif
|
||||
if (!fdtab[fd].owner) {
|
||||
activity[tid].poll_dead_fd++;
|
||||
|
@ -220,7 +220,7 @@ static void _do_poll(struct poller *p, int exp, int wake)
|
||||
fd = poll_events[count].fd;
|
||||
|
||||
#ifdef DEBUG_FD
|
||||
_HA_ATOMIC_ADD(&fdtab[fd].event_count, 1);
|
||||
_HA_ATOMIC_INC(&fdtab[fd].event_count);
|
||||
#endif
|
||||
if (!(e & ( POLLOUT | POLLIN | POLLERR | POLLHUP | POLLRDHUP )))
|
||||
continue;
|
||||
|
@ -198,7 +198,7 @@ static void _do_poll(struct poller *p, int exp, int wake)
|
||||
unsigned int n = 0;
|
||||
|
||||
#ifdef DEBUG_FD
|
||||
_HA_ATOMIC_ADD(&fdtab[fd].event_count, 1);
|
||||
_HA_ATOMIC_INC(&fdtab[fd].event_count);
|
||||
#endif
|
||||
if (!fdtab[fd].owner) {
|
||||
activity[tid].poll_dead_fd++;
|
||||
|
@ -474,12 +474,12 @@ static int fcgi_flt_http_headers(struct stream *s, struct filter *filter, struct
|
||||
goto end;
|
||||
|
||||
rewrite_err:
|
||||
_HA_ATOMIC_ADD(&sess->fe->fe_counters.failed_rewrites, 1);
|
||||
_HA_ATOMIC_ADD(&s->be->be_counters.failed_rewrites, 1);
|
||||
_HA_ATOMIC_INC(&sess->fe->fe_counters.failed_rewrites);
|
||||
_HA_ATOMIC_INC(&s->be->be_counters.failed_rewrites);
|
||||
if (sess->listener && sess->listener->counters)
|
||||
_HA_ATOMIC_ADD(&sess->listener->counters->failed_rewrites, 1);
|
||||
_HA_ATOMIC_INC(&sess->listener->counters->failed_rewrites);
|
||||
if (objt_server(s->target))
|
||||
_HA_ATOMIC_ADD(&__objt_server(s->target)->counters.failed_rewrites, 1);
|
||||
_HA_ATOMIC_INC(&__objt_server(s->target)->counters.failed_rewrites);
|
||||
hdr_rule_err:
|
||||
node = ebpt_first(&hdr_rules);
|
||||
while (node) {
|
||||
|
2
src/fd.c
2
src/fd.c
@ -324,7 +324,7 @@ void _fd_delete_orphan(int fd)
|
||||
* of this FD by any other thread.
|
||||
*/
|
||||
close(fd);
|
||||
_HA_ATOMIC_SUB(&ha_used_fds, 1);
|
||||
_HA_ATOMIC_DEC(&ha_used_fds);
|
||||
}
|
||||
|
||||
#ifndef HA_HAVE_CAS_DW
|
||||
|
@ -281,9 +281,9 @@ comp_http_end(struct stream *s, struct filter *filter,
|
||||
goto end;
|
||||
|
||||
if (strm_fe(s)->mode == PR_MODE_HTTP)
|
||||
_HA_ATOMIC_ADD(&strm_fe(s)->fe_counters.p.http.comp_rsp, 1);
|
||||
_HA_ATOMIC_INC(&strm_fe(s)->fe_counters.p.http.comp_rsp);
|
||||
if ((s->flags & SF_BE_ASSIGNED) && (s->be->mode == PR_MODE_HTTP))
|
||||
_HA_ATOMIC_ADD(&s->be->be_counters.p.http.comp_rsp, 1);
|
||||
_HA_ATOMIC_INC(&s->be->be_counters.p.http.comp_rsp);
|
||||
end:
|
||||
return 1;
|
||||
}
|
||||
|
@ -1230,7 +1230,7 @@ spoe_release_appctx(struct appctx *appctx)
|
||||
__FUNCTION__, appctx);
|
||||
|
||||
/* Remove applet from the list of running applets */
|
||||
_HA_ATOMIC_SUB(&agent->counters.applets, 1);
|
||||
_HA_ATOMIC_DEC(&agent->counters.applets);
|
||||
HA_SPIN_LOCK(SPOE_APPLET_LOCK, &agent->rt[tid].lock);
|
||||
if (!LIST_ISEMPTY(&spoe_appctx->list)) {
|
||||
LIST_DEL(&spoe_appctx->list);
|
||||
@ -1242,7 +1242,7 @@ spoe_release_appctx(struct appctx *appctx)
|
||||
if (appctx->st0 != SPOE_APPCTX_ST_END) {
|
||||
if (appctx->st0 == SPOE_APPCTX_ST_IDLE) {
|
||||
eb32_delete(&spoe_appctx->node);
|
||||
_HA_ATOMIC_SUB(&agent->counters.idles, 1);
|
||||
_HA_ATOMIC_DEC(&agent->counters.idles);
|
||||
}
|
||||
|
||||
appctx->st0 = SPOE_APPCTX_ST_END;
|
||||
@ -1261,7 +1261,7 @@ spoe_release_appctx(struct appctx *appctx)
|
||||
list_for_each_entry_safe(ctx, back, &spoe_appctx->waiting_queue, list) {
|
||||
LIST_DEL(&ctx->list);
|
||||
LIST_INIT(&ctx->list);
|
||||
_HA_ATOMIC_SUB(&agent->counters.nb_waiting, 1);
|
||||
_HA_ATOMIC_DEC(&agent->counters.nb_waiting);
|
||||
spoe_update_stat_time(&ctx->stats.tv_wait, &ctx->stats.t_waiting);
|
||||
ctx->spoe_appctx = NULL;
|
||||
ctx->state = SPOE_CTX_ST_ERROR;
|
||||
@ -1291,7 +1291,7 @@ spoe_release_appctx(struct appctx *appctx)
|
||||
list_for_each_entry_safe(ctx, back, &agent->rt[tid].sending_queue, list) {
|
||||
LIST_DEL(&ctx->list);
|
||||
LIST_INIT(&ctx->list);
|
||||
_HA_ATOMIC_SUB(&agent->counters.nb_sending, 1);
|
||||
_HA_ATOMIC_DEC(&agent->counters.nb_sending);
|
||||
spoe_update_stat_time(&ctx->stats.tv_queue, &ctx->stats.t_queue);
|
||||
ctx->spoe_appctx = NULL;
|
||||
ctx->state = SPOE_CTX_ST_ERROR;
|
||||
@ -1301,7 +1301,7 @@ spoe_release_appctx(struct appctx *appctx)
|
||||
list_for_each_entry_safe(ctx, back, &agent->rt[tid].waiting_queue, list) {
|
||||
LIST_DEL(&ctx->list);
|
||||
LIST_INIT(&ctx->list);
|
||||
_HA_ATOMIC_SUB(&agent->counters.nb_waiting, 1);
|
||||
_HA_ATOMIC_DEC(&agent->counters.nb_waiting);
|
||||
spoe_update_stat_time(&ctx->stats.tv_wait, &ctx->stats.t_waiting);
|
||||
ctx->spoe_appctx = NULL;
|
||||
ctx->state = SPOE_CTX_ST_ERROR;
|
||||
@ -1432,7 +1432,7 @@ spoe_handle_connecting_appctx(struct appctx *appctx)
|
||||
goto stop;
|
||||
|
||||
default:
|
||||
_HA_ATOMIC_ADD(&agent->counters.idles, 1);
|
||||
_HA_ATOMIC_INC(&agent->counters.idles);
|
||||
appctx->st0 = SPOE_APPCTX_ST_IDLE;
|
||||
SPOE_APPCTX(appctx)->node.key = 0;
|
||||
eb32_insert(&agent->rt[tid].idle_applets, &SPOE_APPCTX(appctx)->node);
|
||||
@ -1502,7 +1502,7 @@ spoe_handle_sending_frame_appctx(struct appctx *appctx, int *skip)
|
||||
spoe_release_buffer(&ctx->buffer, &ctx->buffer_wait);
|
||||
LIST_DEL(&ctx->list);
|
||||
LIST_INIT(&ctx->list);
|
||||
_HA_ATOMIC_SUB(&agent->counters.nb_sending, 1);
|
||||
_HA_ATOMIC_DEC(&agent->counters.nb_sending);
|
||||
spoe_update_stat_time(&ctx->stats.tv_queue, &ctx->stats.t_queue);
|
||||
ctx->spoe_appctx = NULL;
|
||||
ctx->state = SPOE_CTX_ST_ERROR;
|
||||
@ -1522,7 +1522,7 @@ spoe_handle_sending_frame_appctx(struct appctx *appctx, int *skip)
|
||||
spoe_release_buffer(&ctx->buffer, &ctx->buffer_wait);
|
||||
LIST_DEL(&ctx->list);
|
||||
LIST_INIT(&ctx->list);
|
||||
_HA_ATOMIC_SUB(&agent->counters.nb_sending, 1);
|
||||
_HA_ATOMIC_DEC(&agent->counters.nb_sending);
|
||||
spoe_update_stat_time(&ctx->stats.tv_queue, &ctx->stats.t_queue);
|
||||
ctx->spoe_appctx = SPOE_APPCTX(appctx);
|
||||
if (!(ctx->flags & SPOE_CTX_FL_FRAGMENTED) ||
|
||||
@ -1557,7 +1557,7 @@ spoe_handle_sending_frame_appctx(struct appctx *appctx, int *skip)
|
||||
*skip = 1;
|
||||
LIST_ADDQ(&SPOE_APPCTX(appctx)->waiting_queue, &ctx->list);
|
||||
}
|
||||
_HA_ATOMIC_ADD(&agent->counters.nb_waiting, 1);
|
||||
_HA_ATOMIC_INC(&agent->counters.nb_waiting);
|
||||
ctx->stats.tv_wait = now;
|
||||
SPOE_APPCTX(appctx)->frag_ctx.ctx = NULL;
|
||||
SPOE_APPCTX(appctx)->frag_ctx.cursid = 0;
|
||||
@ -1613,7 +1613,7 @@ spoe_handle_receiving_frame_appctx(struct appctx *appctx, int *skip)
|
||||
default:
|
||||
LIST_DEL(&ctx->list);
|
||||
LIST_INIT(&ctx->list);
|
||||
_HA_ATOMIC_SUB(&agent->counters.nb_waiting, 1);
|
||||
_HA_ATOMIC_DEC(&agent->counters.nb_waiting);
|
||||
spoe_update_stat_time(&ctx->stats.tv_wait, &ctx->stats.t_waiting);
|
||||
ctx->stats.tv_response = now;
|
||||
if (ctx->spoe_appctx) {
|
||||
@ -1733,7 +1733,7 @@ spoe_handle_processing_appctx(struct appctx *appctx)
|
||||
appctx->st1 = SPOE_APPCTX_ERR_NONE;
|
||||
goto next;
|
||||
}
|
||||
_HA_ATOMIC_ADD(&agent->counters.idles, 1);
|
||||
_HA_ATOMIC_INC(&agent->counters.idles);
|
||||
appctx->st0 = SPOE_APPCTX_ST_IDLE;
|
||||
eb32_insert(&agent->rt[tid].idle_applets, &SPOE_APPCTX(appctx)->node);
|
||||
}
|
||||
@ -1896,7 +1896,7 @@ spoe_handle_appctx(struct appctx *appctx)
|
||||
goto switchstate;
|
||||
|
||||
case SPOE_APPCTX_ST_IDLE:
|
||||
_HA_ATOMIC_SUB(&agent->counters.idles, 1);
|
||||
_HA_ATOMIC_DEC(&agent->counters.idles);
|
||||
eb32_delete(&SPOE_APPCTX(appctx)->node);
|
||||
if (stopping &&
|
||||
LIST_ISEMPTY(&agent->rt[tid].sending_queue) &&
|
||||
@ -2010,7 +2010,7 @@ spoe_create_appctx(struct spoe_config *conf)
|
||||
HA_SPIN_LOCK(SPOE_APPLET_LOCK, &conf->agent->rt[tid].lock);
|
||||
LIST_ADDQ(&conf->agent->rt[tid].applets, &SPOE_APPCTX(appctx)->list);
|
||||
HA_SPIN_UNLOCK(SPOE_APPLET_LOCK, &conf->agent->rt[tid].lock);
|
||||
_HA_ATOMIC_ADD(&conf->agent->counters.applets, 1);
|
||||
_HA_ATOMIC_INC(&conf->agent->counters.applets);
|
||||
|
||||
task_wakeup(SPOE_APPCTX(appctx)->task, TASK_WOKEN_INIT);
|
||||
task_wakeup(strm->task, TASK_WOKEN_INIT);
|
||||
@ -2096,7 +2096,7 @@ spoe_queue_context(struct spoe_context *ctx)
|
||||
/* Add the SPOE context in the sending queue if the stream has no applet
|
||||
* already assigned and wakeup all idle applets. Otherwise, don't queue
|
||||
* it. */
|
||||
_HA_ATOMIC_ADD(&agent->counters.nb_sending, 1);
|
||||
_HA_ATOMIC_INC(&agent->counters.nb_sending);
|
||||
spoe_update_stat_time(&ctx->stats.tv_request, &ctx->stats.t_request);
|
||||
ctx->stats.tv_queue = now;
|
||||
if (ctx->spoe_appctx)
|
||||
@ -2593,7 +2593,7 @@ spoe_stop_processing(struct spoe_agent *agent, struct spoe_context *ctx)
|
||||
|
||||
if (!(ctx->flags & SPOE_CTX_FL_PROCESS))
|
||||
return;
|
||||
_HA_ATOMIC_ADD(&agent->counters.nb_processed, 1);
|
||||
_HA_ATOMIC_INC(&agent->counters.nb_processed);
|
||||
if (sa) {
|
||||
if (sa->frag_ctx.ctx == ctx) {
|
||||
sa->frag_ctx.ctx = NULL;
|
||||
@ -2620,9 +2620,9 @@ spoe_stop_processing(struct spoe_agent *agent, struct spoe_context *ctx)
|
||||
|
||||
if (!LIST_ISEMPTY(&ctx->list)) {
|
||||
if (ctx->state == SPOE_CTX_ST_SENDING_MSGS)
|
||||
_HA_ATOMIC_SUB(&agent->counters.nb_sending, 1);
|
||||
_HA_ATOMIC_DEC(&agent->counters.nb_sending);
|
||||
else
|
||||
_HA_ATOMIC_SUB(&agent->counters.nb_waiting, 1);
|
||||
_HA_ATOMIC_DEC(&agent->counters.nb_waiting);
|
||||
|
||||
LIST_DEL(&ctx->list);
|
||||
LIST_INIT(&ctx->list);
|
||||
@ -2727,7 +2727,7 @@ spoe_process_messages(struct stream *s, struct spoe_context *ctx,
|
||||
spoe_update_stats(s, agent, ctx, dir);
|
||||
spoe_stop_processing(agent, ctx);
|
||||
if (ctx->status_code) {
|
||||
_HA_ATOMIC_ADD(&agent->counters.nb_errors, 1);
|
||||
_HA_ATOMIC_INC(&agent->counters.nb_errors);
|
||||
spoe_handle_processing_error(s, agent, ctx, dir);
|
||||
ret = 1;
|
||||
}
|
||||
|
@ -5889,7 +5889,7 @@ __LJMP static int hlua_txn_done(lua_State *L)
|
||||
/* let's log the request time */
|
||||
s->logs.tv_request = now;
|
||||
if (s->sess->fe == s->be) /* report it if the request was intercepted by the frontend */
|
||||
_HA_ATOMIC_ADD(&s->sess->fe->fe_counters.intercepted_req, 1);
|
||||
_HA_ATOMIC_INC(&s->sess->fe->fe_counters.intercepted_req);
|
||||
}
|
||||
|
||||
done:
|
||||
|
@ -121,13 +121,13 @@ static enum act_return http_action_set_req_line(struct act_rule *rule, struct pr
|
||||
goto leave;
|
||||
|
||||
fail_rewrite:
|
||||
_HA_ATOMIC_ADD(&sess->fe->fe_counters.failed_rewrites, 1);
|
||||
_HA_ATOMIC_INC(&sess->fe->fe_counters.failed_rewrites);
|
||||
if (s->flags & SF_BE_ASSIGNED)
|
||||
_HA_ATOMIC_ADD(&s->be->be_counters.failed_rewrites, 1);
|
||||
_HA_ATOMIC_INC(&s->be->be_counters.failed_rewrites);
|
||||
if (sess->listener && sess->listener->counters)
|
||||
_HA_ATOMIC_ADD(&sess->listener->counters->failed_rewrites, 1);
|
||||
_HA_ATOMIC_INC(&sess->listener->counters->failed_rewrites);
|
||||
if (objt_server(s->target))
|
||||
_HA_ATOMIC_ADD(&__objt_server(s->target)->counters.failed_rewrites, 1);
|
||||
_HA_ATOMIC_INC(&__objt_server(s->target)->counters.failed_rewrites);
|
||||
|
||||
if (!(s->txn->req.flags & HTTP_MSGF_SOFT_RW)) {
|
||||
ret = ACT_RET_ERR;
|
||||
@ -250,13 +250,13 @@ static enum act_return http_action_replace_uri(struct act_rule *rule, struct pro
|
||||
goto leave;
|
||||
|
||||
fail_rewrite:
|
||||
_HA_ATOMIC_ADD(&sess->fe->fe_counters.failed_rewrites, 1);
|
||||
_HA_ATOMIC_INC(&sess->fe->fe_counters.failed_rewrites);
|
||||
if (s->flags & SF_BE_ASSIGNED)
|
||||
_HA_ATOMIC_ADD(&s->be->be_counters.failed_rewrites, 1);
|
||||
_HA_ATOMIC_INC(&s->be->be_counters.failed_rewrites);
|
||||
if (sess->listener && sess->listener->counters)
|
||||
_HA_ATOMIC_ADD(&sess->listener->counters->failed_rewrites, 1);
|
||||
_HA_ATOMIC_INC(&sess->listener->counters->failed_rewrites);
|
||||
if (objt_server(s->target))
|
||||
_HA_ATOMIC_ADD(&__objt_server(s->target)->counters.failed_rewrites, 1);
|
||||
_HA_ATOMIC_INC(&__objt_server(s->target)->counters.failed_rewrites);
|
||||
|
||||
if (!(s->txn->req.flags & HTTP_MSGF_SOFT_RW)) {
|
||||
ret = ACT_RET_ERR;
|
||||
@ -326,13 +326,13 @@ static enum act_return action_http_set_status(struct act_rule *rule, struct prox
|
||||
struct session *sess, struct stream *s, int flags)
|
||||
{
|
||||
if (http_res_set_status(rule->arg.http.i, rule->arg.http.str, s) == -1) {
|
||||
_HA_ATOMIC_ADD(&sess->fe->fe_counters.failed_rewrites, 1);
|
||||
_HA_ATOMIC_INC(&sess->fe->fe_counters.failed_rewrites);
|
||||
if (s->flags & SF_BE_ASSIGNED)
|
||||
_HA_ATOMIC_ADD(&s->be->be_counters.failed_rewrites, 1);
|
||||
_HA_ATOMIC_INC(&s->be->be_counters.failed_rewrites);
|
||||
if (sess->listener && sess->listener->counters)
|
||||
_HA_ATOMIC_ADD(&sess->listener->counters->failed_rewrites, 1);
|
||||
_HA_ATOMIC_INC(&sess->listener->counters->failed_rewrites);
|
||||
if (objt_server(s->target))
|
||||
_HA_ATOMIC_ADD(&__objt_server(s->target)->counters.failed_rewrites, 1);
|
||||
_HA_ATOMIC_INC(&__objt_server(s->target)->counters.failed_rewrites);
|
||||
|
||||
if (!(s->txn->req.flags & HTTP_MSGF_SOFT_RW)) {
|
||||
if (!(s->flags & SF_ERR_MASK))
|
||||
@ -403,10 +403,10 @@ static enum act_return http_action_reject(struct act_rule *rule, struct proxy *p
|
||||
s->req.analysers &= AN_REQ_FLT_END;
|
||||
s->res.analysers &= AN_RES_FLT_END;
|
||||
|
||||
_HA_ATOMIC_ADD(&s->be->be_counters.denied_req, 1);
|
||||
_HA_ATOMIC_ADD(&sess->fe->fe_counters.denied_req, 1);
|
||||
_HA_ATOMIC_INC(&s->be->be_counters.denied_req);
|
||||
_HA_ATOMIC_INC(&sess->fe->fe_counters.denied_req);
|
||||
if (sess->listener && sess->listener->counters)
|
||||
_HA_ATOMIC_ADD(&sess->listener->counters->denied_req, 1);
|
||||
_HA_ATOMIC_INC(&sess->listener->counters->denied_req);
|
||||
|
||||
if (!(s->flags & SF_ERR_MASK))
|
||||
s->flags |= SF_ERR_PRXCOND;
|
||||
@ -964,7 +964,7 @@ static enum act_return http_action_auth(struct act_rule *rule, struct proxy *px,
|
||||
req->analysers &= AN_REQ_FLT_END;
|
||||
|
||||
if (s->sess->fe == s->be) /* report it if the request was intercepted by the frontend */
|
||||
_HA_ATOMIC_ADD(&s->sess->fe->fe_counters.intercepted_req, 1);
|
||||
_HA_ATOMIC_INC(&s->sess->fe->fe_counters.intercepted_req);
|
||||
|
||||
if (!(s->flags & SF_ERR_MASK))
|
||||
s->flags |= SF_ERR_LOCAL;
|
||||
@ -1253,13 +1253,13 @@ static enum act_return http_action_set_header(struct act_rule *rule, struct prox
|
||||
goto leave;
|
||||
|
||||
fail_rewrite:
|
||||
_HA_ATOMIC_ADD(&sess->fe->fe_counters.failed_rewrites, 1);
|
||||
_HA_ATOMIC_INC(&sess->fe->fe_counters.failed_rewrites);
|
||||
if (s->flags & SF_BE_ASSIGNED)
|
||||
_HA_ATOMIC_ADD(&s->be->be_counters.failed_rewrites, 1);
|
||||
_HA_ATOMIC_INC(&s->be->be_counters.failed_rewrites);
|
||||
if (sess->listener && sess->listener->counters)
|
||||
_HA_ATOMIC_ADD(&sess->listener->counters->failed_rewrites, 1);
|
||||
_HA_ATOMIC_INC(&sess->listener->counters->failed_rewrites);
|
||||
if (objt_server(s->target))
|
||||
_HA_ATOMIC_ADD(&__objt_server(s->target)->counters.failed_rewrites, 1);
|
||||
_HA_ATOMIC_INC(&__objt_server(s->target)->counters.failed_rewrites);
|
||||
|
||||
if (!(msg->flags & HTTP_MSGF_SOFT_RW)) {
|
||||
ret = ACT_RET_ERR;
|
||||
@ -1366,13 +1366,13 @@ static enum act_return http_action_replace_header(struct act_rule *rule, struct
|
||||
goto leave;
|
||||
|
||||
fail_rewrite:
|
||||
_HA_ATOMIC_ADD(&sess->fe->fe_counters.failed_rewrites, 1);
|
||||
_HA_ATOMIC_INC(&sess->fe->fe_counters.failed_rewrites);
|
||||
if (s->flags & SF_BE_ASSIGNED)
|
||||
_HA_ATOMIC_ADD(&s->be->be_counters.failed_rewrites, 1);
|
||||
_HA_ATOMIC_INC(&s->be->be_counters.failed_rewrites);
|
||||
if (sess->listener && sess->listener->counters)
|
||||
_HA_ATOMIC_ADD(&sess->listener->counters->failed_rewrites, 1);
|
||||
_HA_ATOMIC_INC(&sess->listener->counters->failed_rewrites);
|
||||
if (objt_server(s->target))
|
||||
_HA_ATOMIC_ADD(&__objt_server(s->target)->counters.failed_rewrites, 1);
|
||||
_HA_ATOMIC_INC(&__objt_server(s->target)->counters.failed_rewrites);
|
||||
|
||||
if (!(msg->flags & HTTP_MSGF_SOFT_RW)) {
|
||||
ret = ACT_RET_ERR;
|
||||
@ -2040,7 +2040,7 @@ static enum act_return http_action_return(struct act_rule *rule, struct proxy *p
|
||||
req->analysers &= AN_REQ_FLT_END;
|
||||
|
||||
if (s->sess->fe == s->be) /* report it if the request was intercepted by the frontend */
|
||||
_HA_ATOMIC_ADD(&s->sess->fe->fe_counters.intercepted_req, 1);
|
||||
_HA_ATOMIC_INC(&s->sess->fe->fe_counters.intercepted_req);
|
||||
}
|
||||
|
||||
if (!(s->flags & SF_ERR_MASK))
|
||||
|
188
src/http_ana.c
188
src/http_ana.c
@ -212,7 +212,7 @@ int http_wait_for_request(struct stream *s, struct channel *req, int an_bit)
struct acl_cond *cond;

s->flags |= SF_MONITOR;
_HA_ATOMIC_ADD(&sess->fe->fe_counters.intercepted_req, 1);
_HA_ATOMIC_INC(&sess->fe->fe_counters.intercepted_req);

/* Check if we want to fail this monitor request or not */
list_for_each_entry(cond, &sess->fe->mon_fail_cond, list) {
@ -310,16 +310,16 @@ int http_wait_for_request(struct stream *s, struct channel *req, int an_bit)
txn->status = 500;
if (!(s->flags & SF_ERR_MASK))
s->flags |= SF_ERR_INTERNAL;
_HA_ATOMIC_ADD(&sess->fe->fe_counters.internal_errors, 1);
_HA_ATOMIC_INC(&sess->fe->fe_counters.internal_errors);
if (sess->listener && sess->listener->counters)
_HA_ATOMIC_ADD(&sess->listener->counters->internal_errors, 1);
_HA_ATOMIC_INC(&sess->listener->counters->internal_errors);
goto return_prx_cond;

return_bad_req:
txn->status = 400;
_HA_ATOMIC_ADD(&sess->fe->fe_counters.failed_req, 1);
_HA_ATOMIC_INC(&sess->fe->fe_counters.failed_req);
if (sess->listener && sess->listener->counters)
_HA_ATOMIC_ADD(&sess->listener->counters->failed_req, 1);
_HA_ATOMIC_INC(&sess->listener->counters->failed_req);
/* fall through */

return_prx_cond:
@ -442,7 +442,7 @@ int http_process_req_common(struct stream *s, struct channel *req, int an_bit, s
/* Proceed with the applets now. */
if (unlikely(objt_applet(s->target))) {
if (sess->fe == s->be) /* report it if the request was intercepted by the frontend */
_HA_ATOMIC_ADD(&sess->fe->fe_counters.intercepted_req, 1);
_HA_ATOMIC_INC(&sess->fe->fe_counters.intercepted_req);

if (http_handle_expect_hdr(s, htx, msg) == -1)
goto return_int_err;
@ -524,11 +524,11 @@ int http_process_req_common(struct stream *s, struct channel *req, int an_bit, s
if (!req->analyse_exp)
req->analyse_exp = tick_add(now_ms, 0);
stream_inc_http_err_ctr(s);
_HA_ATOMIC_ADD(&sess->fe->fe_counters.denied_req, 1);
_HA_ATOMIC_INC(&sess->fe->fe_counters.denied_req);
if (s->flags & SF_BE_ASSIGNED)
_HA_ATOMIC_ADD(&s->be->be_counters.denied_req, 1);
_HA_ATOMIC_INC(&s->be->be_counters.denied_req);
if (sess->listener && sess->listener->counters)
_HA_ATOMIC_ADD(&sess->listener->counters->denied_req, 1);
_HA_ATOMIC_INC(&sess->listener->counters->denied_req);
goto done_without_exp;

deny: /* this request was blocked (denied) */
@ -540,29 +540,29 @@ int http_process_req_common(struct stream *s, struct channel *req, int an_bit, s

s->logs.tv_request = now;
stream_inc_http_err_ctr(s);
_HA_ATOMIC_ADD(&sess->fe->fe_counters.denied_req, 1);
_HA_ATOMIC_INC(&sess->fe->fe_counters.denied_req);
if (s->flags & SF_BE_ASSIGNED)
_HA_ATOMIC_ADD(&s->be->be_counters.denied_req, 1);
_HA_ATOMIC_INC(&s->be->be_counters.denied_req);
if (sess->listener && sess->listener->counters)
_HA_ATOMIC_ADD(&sess->listener->counters->denied_req, 1);
_HA_ATOMIC_INC(&sess->listener->counters->denied_req);
goto return_prx_err;

return_int_err:
txn->status = 500;
if (!(s->flags & SF_ERR_MASK))
s->flags |= SF_ERR_INTERNAL;
_HA_ATOMIC_ADD(&sess->fe->fe_counters.internal_errors, 1);
_HA_ATOMIC_INC(&sess->fe->fe_counters.internal_errors);
if (s->flags & SF_BE_ASSIGNED)
_HA_ATOMIC_ADD(&s->be->be_counters.internal_errors, 1);
_HA_ATOMIC_INC(&s->be->be_counters.internal_errors);
if (sess->listener && sess->listener->counters)
_HA_ATOMIC_ADD(&sess->listener->counters->internal_errors, 1);
_HA_ATOMIC_INC(&sess->listener->counters->internal_errors);
goto return_prx_err;

return_bad_req:
txn->status = 400;
_HA_ATOMIC_ADD(&sess->fe->fe_counters.failed_req, 1);
_HA_ATOMIC_INC(&sess->fe->fe_counters.failed_req);
if (sess->listener && sess->listener->counters)
_HA_ATOMIC_ADD(&sess->listener->counters->failed_req, 1);
_HA_ATOMIC_INC(&sess->listener->counters->failed_req);
/* fall through */

return_prx_err:
@ -824,18 +824,18 @@ int http_process_request(struct stream *s, struct channel *req, int an_bit)
txn->status = 500;
if (!(s->flags & SF_ERR_MASK))
s->flags |= SF_ERR_INTERNAL;
_HA_ATOMIC_ADD(&sess->fe->fe_counters.internal_errors, 1);
_HA_ATOMIC_INC(&sess->fe->fe_counters.internal_errors);
if (s->flags & SF_BE_ASSIGNED)
_HA_ATOMIC_ADD(&s->be->be_counters.internal_errors, 1);
_HA_ATOMIC_INC(&s->be->be_counters.internal_errors);
if (sess->listener && sess->listener->counters)
_HA_ATOMIC_ADD(&sess->listener->counters->internal_errors, 1);
_HA_ATOMIC_INC(&sess->listener->counters->internal_errors);
goto return_prx_cond;

return_bad_req: /* let's centralize all bad requests */
txn->status = 400;
_HA_ATOMIC_ADD(&sess->fe->fe_counters.failed_req, 1);
_HA_ATOMIC_INC(&sess->fe->fe_counters.failed_req);
if (sess->listener && sess->listener->counters)
_HA_ATOMIC_ADD(&sess->listener->counters->failed_req, 1);
_HA_ATOMIC_INC(&sess->listener->counters->failed_req);
/* fall through */

return_prx_cond:
@ -950,18 +950,18 @@ int http_wait_for_request_body(struct stream *s, struct channel *req, int an_bit
txn->status = 500;
if (!(s->flags & SF_ERR_MASK))
s->flags |= SF_ERR_INTERNAL;
_HA_ATOMIC_ADD(&sess->fe->fe_counters.internal_errors, 1);
_HA_ATOMIC_INC(&sess->fe->fe_counters.internal_errors);
if (s->flags & SF_BE_ASSIGNED)
_HA_ATOMIC_ADD(&s->be->be_counters.internal_errors, 1);
_HA_ATOMIC_INC(&s->be->be_counters.internal_errors);
if (sess->listener && sess->listener->counters)
_HA_ATOMIC_ADD(&sess->listener->counters->internal_errors, 1);
_HA_ATOMIC_INC(&sess->listener->counters->internal_errors);
goto return_prx_err;

return_bad_req: /* let's centralize all bad requests */
txn->status = 400;
_HA_ATOMIC_ADD(&sess->fe->fe_counters.failed_req, 1);
_HA_ATOMIC_INC(&sess->fe->fe_counters.failed_req);
if (sess->listener && sess->listener->counters)
_HA_ATOMIC_ADD(&sess->listener->counters->failed_req, 1);
_HA_ATOMIC_INC(&sess->listener->counters->failed_req);
/* fall through */

return_prx_err:
@ -1214,24 +1214,24 @@ int http_request_forward_body(struct stream *s, struct channel *req, int an_bit)
return 0;

return_cli_abort:
_HA_ATOMIC_ADD(&sess->fe->fe_counters.cli_aborts, 1);
_HA_ATOMIC_ADD(&s->be->be_counters.cli_aborts, 1);
_HA_ATOMIC_INC(&sess->fe->fe_counters.cli_aborts);
_HA_ATOMIC_INC(&s->be->be_counters.cli_aborts);
if (sess->listener && sess->listener->counters)
_HA_ATOMIC_ADD(&sess->listener->counters->cli_aborts, 1);
_HA_ATOMIC_INC(&sess->listener->counters->cli_aborts);
if (objt_server(s->target))
_HA_ATOMIC_ADD(&__objt_server(s->target)->counters.cli_aborts, 1);
_HA_ATOMIC_INC(&__objt_server(s->target)->counters.cli_aborts);
if (!(s->flags & SF_ERR_MASK))
s->flags |= SF_ERR_CLICL;
status = 400;
goto return_prx_cond;

return_srv_abort:
_HA_ATOMIC_ADD(&sess->fe->fe_counters.srv_aborts, 1);
_HA_ATOMIC_ADD(&s->be->be_counters.srv_aborts, 1);
_HA_ATOMIC_INC(&sess->fe->fe_counters.srv_aborts);
_HA_ATOMIC_INC(&s->be->be_counters.srv_aborts);
if (sess->listener && sess->listener->counters)
_HA_ATOMIC_ADD(&sess->listener->counters->srv_aborts, 1);
_HA_ATOMIC_INC(&sess->listener->counters->srv_aborts);
if (objt_server(s->target))
_HA_ATOMIC_ADD(&__objt_server(s->target)->counters.srv_aborts, 1);
_HA_ATOMIC_INC(&__objt_server(s->target)->counters.srv_aborts);
if (!(s->flags & SF_ERR_MASK))
s->flags |= SF_ERR_SRVCL;
status = 502;
@ -1240,19 +1240,19 @@ int http_request_forward_body(struct stream *s, struct channel *req, int an_bit)
return_int_err:
if (!(s->flags & SF_ERR_MASK))
s->flags |= SF_ERR_INTERNAL;
_HA_ATOMIC_ADD(&sess->fe->fe_counters.internal_errors, 1);
_HA_ATOMIC_ADD(&s->be->be_counters.internal_errors, 1);
_HA_ATOMIC_INC(&sess->fe->fe_counters.internal_errors);
_HA_ATOMIC_INC(&s->be->be_counters.internal_errors);
if (sess->listener && sess->listener->counters)
_HA_ATOMIC_ADD(&sess->listener->counters->internal_errors, 1);
_HA_ATOMIC_INC(&sess->listener->counters->internal_errors);
if (objt_server(s->target))
_HA_ATOMIC_ADD(&__objt_server(s->target)->counters.internal_errors, 1);
_HA_ATOMIC_INC(&__objt_server(s->target)->counters.internal_errors);
status = 500;
goto return_prx_cond;

return_bad_req:
_HA_ATOMIC_ADD(&sess->fe->fe_counters.failed_req, 1);
_HA_ATOMIC_INC(&sess->fe->fe_counters.failed_req);
if (sess->listener && sess->listener->counters)
_HA_ATOMIC_ADD(&sess->listener->counters->failed_req, 1);
_HA_ATOMIC_INC(&sess->listener->counters->failed_req);
status = 400;
/* fall through */

@ -1290,8 +1290,8 @@ static __inline int do_l7_retry(struct stream *s, struct stream_interface *si)
goto no_retry;

if (objt_server(s->target))
_HA_ATOMIC_ADD(&__objt_server(s->target)->counters.retries, 1);
_HA_ATOMIC_ADD(&s->be->be_counters.retries, 1);
_HA_ATOMIC_INC(&__objt_server(s->target)->counters.retries);
_HA_ATOMIC_INC(&s->be->be_counters.retries);

/* Remove any write error from the request, and read error from the response */
req->flags &= ~(CF_WRITE_ERROR | CF_WRITE_TIMEOUT | CF_SHUTW | CF_SHUTW_NOW);
@ -1397,9 +1397,9 @@ int http_wait_for_response(struct stream *s, struct channel *rep, int an_bit)
if (txn->flags & TX_NOT_FIRST)
goto abort_keep_alive;

_HA_ATOMIC_ADD(&s->be->be_counters.failed_resp, 1);
_HA_ATOMIC_INC(&s->be->be_counters.failed_resp);
if (objt_server(s->target)) {
_HA_ATOMIC_ADD(&__objt_server(s->target)->counters.failed_resp, 1);
_HA_ATOMIC_INC(&__objt_server(s->target)->counters.failed_resp);
health_adjust(__objt_server(s->target), HANA_STATUS_HTTP_READ_ERROR);
}

@ -1444,9 +1444,9 @@ int http_wait_for_response(struct stream *s, struct channel *rep, int an_bit)
return 0;
}
}
_HA_ATOMIC_ADD(&s->be->be_counters.failed_resp, 1);
_HA_ATOMIC_INC(&s->be->be_counters.failed_resp);
if (objt_server(s->target)) {
_HA_ATOMIC_ADD(&__objt_server(s->target)->counters.failed_resp, 1);
_HA_ATOMIC_INC(&__objt_server(s->target)->counters.failed_resp);
health_adjust(__objt_server(s->target), HANA_STATUS_HTTP_READ_TIMEOUT);
}

@ -1467,12 +1467,12 @@ int http_wait_for_response(struct stream *s, struct channel *rep, int an_bit)

/* 3: client abort with an abortonclose */
else if ((rep->flags & CF_SHUTR) && ((s->req.flags & (CF_SHUTR|CF_SHUTW)) == (CF_SHUTR|CF_SHUTW))) {
_HA_ATOMIC_ADD(&sess->fe->fe_counters.cli_aborts, 1);
_HA_ATOMIC_ADD(&s->be->be_counters.cli_aborts, 1);
_HA_ATOMIC_INC(&sess->fe->fe_counters.cli_aborts);
_HA_ATOMIC_INC(&s->be->be_counters.cli_aborts);
if (sess->listener && sess->listener->counters)
_HA_ATOMIC_ADD(&sess->listener->counters->cli_aborts, 1);
_HA_ATOMIC_INC(&sess->listener->counters->cli_aborts);
if (objt_server(s->target))
_HA_ATOMIC_ADD(&__objt_server(s->target)->counters.cli_aborts, 1);
_HA_ATOMIC_INC(&__objt_server(s->target)->counters.cli_aborts);

rep->analysers &= AN_RES_FLT_END;
txn->status = 400;
@ -1503,9 +1503,9 @@ int http_wait_for_response(struct stream *s, struct channel *rep, int an_bit)
if (txn->flags & TX_NOT_FIRST)
goto abort_keep_alive;

_HA_ATOMIC_ADD(&s->be->be_counters.failed_resp, 1);
_HA_ATOMIC_INC(&s->be->be_counters.failed_resp);
if (objt_server(s->target)) {
_HA_ATOMIC_ADD(&__objt_server(s->target)->counters.failed_resp, 1);
_HA_ATOMIC_INC(&__objt_server(s->target)->counters.failed_resp);
health_adjust(__objt_server(s->target), HANA_STATUS_HTTP_BROKEN_PIPE);
}

@ -1529,9 +1529,9 @@ int http_wait_for_response(struct stream *s, struct channel *rep, int an_bit)
if (txn->flags & TX_NOT_FIRST)
goto abort_keep_alive;

_HA_ATOMIC_ADD(&s->be->be_counters.failed_resp, 1);
_HA_ATOMIC_INC(&s->be->be_counters.failed_resp);
if (objt_server(s->target))
_HA_ATOMIC_ADD(&__objt_server(s->target)->counters.failed_resp, 1);
_HA_ATOMIC_INC(&__objt_server(s->target)->counters.failed_resp);
rep->analysers &= AN_RES_FLT_END;

if (!(s->flags & SF_ERR_MASK))
@ -1623,8 +1623,8 @@ int http_wait_for_response(struct stream *s, struct channel *rep, int an_bit)
stream_inc_http_fail_ctr(s);

if (objt_server(s->target)) {
_HA_ATOMIC_ADD(&__objt_server(s->target)->counters.p.http.rsp[n], 1);
_HA_ATOMIC_ADD(&__objt_server(s->target)->counters.p.http.cum_req, 1);
_HA_ATOMIC_INC(&__objt_server(s->target)->counters.p.http.rsp[n]);
_HA_ATOMIC_INC(&__objt_server(s->target)->counters.p.http.cum_req);
}

/* Adjust server's health based on status code. Note: status codes 501
@ -1767,21 +1767,21 @@ int http_wait_for_response(struct stream *s, struct channel *rep, int an_bit)
return 1;

return_int_err:
_HA_ATOMIC_ADD(&sess->fe->fe_counters.internal_errors, 1);
_HA_ATOMIC_ADD(&s->be->be_counters.internal_errors, 1);
_HA_ATOMIC_INC(&sess->fe->fe_counters.internal_errors);
_HA_ATOMIC_INC(&s->be->be_counters.internal_errors);
if (sess->listener && sess->listener->counters)
_HA_ATOMIC_ADD(&sess->listener->counters->internal_errors, 1);
_HA_ATOMIC_INC(&sess->listener->counters->internal_errors);
if (objt_server(s->target))
_HA_ATOMIC_ADD(&__objt_server(s->target)->counters.internal_errors, 1);
_HA_ATOMIC_INC(&__objt_server(s->target)->counters.internal_errors);
txn->status = 500;
if (!(s->flags & SF_ERR_MASK))
s->flags |= SF_ERR_INTERNAL;
goto return_prx_cond;

return_bad_res:
_HA_ATOMIC_ADD(&s->be->be_counters.failed_resp, 1);
_HA_ATOMIC_INC(&s->be->be_counters.failed_resp);
if (objt_server(s->target)) {
_HA_ATOMIC_ADD(&__objt_server(s->target)->counters.failed_resp, 1);
_HA_ATOMIC_INC(&__objt_server(s->target)->counters.failed_resp);
health_adjust(__objt_server(s->target), HANA_STATUS_HTTP_HDRRSP);
}
if ((s->be->retry_type & PR_RE_JUNK_REQUEST) &&
@ -2074,32 +2074,32 @@ int http_process_res_common(struct stream *s, struct channel *rep, int an_bit, s
return 1;

deny:
_HA_ATOMIC_ADD(&sess->fe->fe_counters.denied_resp, 1);
_HA_ATOMIC_ADD(&s->be->be_counters.denied_resp, 1);
_HA_ATOMIC_INC(&sess->fe->fe_counters.denied_resp);
_HA_ATOMIC_INC(&s->be->be_counters.denied_resp);
if (sess->listener && sess->listener->counters)
_HA_ATOMIC_ADD(&sess->listener->counters->denied_resp, 1);
_HA_ATOMIC_INC(&sess->listener->counters->denied_resp);
if (objt_server(s->target))
_HA_ATOMIC_ADD(&__objt_server(s->target)->counters.denied_resp, 1);
_HA_ATOMIC_INC(&__objt_server(s->target)->counters.denied_resp);
goto return_prx_err;

return_int_err:
txn->status = 500;
if (!(s->flags & SF_ERR_MASK))
s->flags |= SF_ERR_INTERNAL;
_HA_ATOMIC_ADD(&sess->fe->fe_counters.internal_errors, 1);
_HA_ATOMIC_ADD(&s->be->be_counters.internal_errors, 1);
_HA_ATOMIC_INC(&sess->fe->fe_counters.internal_errors);
_HA_ATOMIC_INC(&s->be->be_counters.internal_errors);
if (objt_server(s->target))
_HA_ATOMIC_ADD(&__objt_server(s->target)->counters.internal_errors, 1);
_HA_ATOMIC_INC(&__objt_server(s->target)->counters.internal_errors);
if (objt_server(s->target))
_HA_ATOMIC_ADD(&__objt_server(s->target)->counters.internal_errors, 1);
_HA_ATOMIC_INC(&__objt_server(s->target)->counters.internal_errors);
goto return_prx_err;

return_bad_res:
txn->status = 502;
stream_inc_http_fail_ctr(s);
_HA_ATOMIC_ADD(&s->be->be_counters.failed_resp, 1);
_HA_ATOMIC_INC(&s->be->be_counters.failed_resp);
if (objt_server(s->target)) {
_HA_ATOMIC_ADD(&__objt_server(s->target)->counters.failed_resp, 1);
_HA_ATOMIC_INC(&__objt_server(s->target)->counters.failed_resp);
health_adjust(__objt_server(s->target), HANA_STATUS_HTTP_RSP);
}
/* fall through */
@ -2341,43 +2341,43 @@ int http_response_forward_body(struct stream *s, struct channel *res, int an_bit
return 0;

return_srv_abort:
_HA_ATOMIC_ADD(&sess->fe->fe_counters.srv_aborts, 1);
_HA_ATOMIC_ADD(&s->be->be_counters.srv_aborts, 1);
_HA_ATOMIC_INC(&sess->fe->fe_counters.srv_aborts);
_HA_ATOMIC_INC(&s->be->be_counters.srv_aborts);
if (sess->listener && sess->listener->counters)
_HA_ATOMIC_ADD(&sess->listener->counters->srv_aborts, 1);
_HA_ATOMIC_INC(&sess->listener->counters->srv_aborts);
if (objt_server(s->target))
_HA_ATOMIC_ADD(&__objt_server(s->target)->counters.srv_aborts, 1);
_HA_ATOMIC_INC(&__objt_server(s->target)->counters.srv_aborts);
stream_inc_http_fail_ctr(s);
if (!(s->flags & SF_ERR_MASK))
s->flags |= SF_ERR_SRVCL;
goto return_error;

return_cli_abort:
_HA_ATOMIC_ADD(&sess->fe->fe_counters.cli_aborts, 1);
_HA_ATOMIC_ADD(&s->be->be_counters.cli_aborts, 1);
_HA_ATOMIC_INC(&sess->fe->fe_counters.cli_aborts);
_HA_ATOMIC_INC(&s->be->be_counters.cli_aborts);
if (sess->listener && sess->listener->counters)
_HA_ATOMIC_ADD(&sess->listener->counters->cli_aborts, 1);
_HA_ATOMIC_INC(&sess->listener->counters->cli_aborts);
if (objt_server(s->target))
_HA_ATOMIC_ADD(&__objt_server(s->target)->counters.cli_aborts, 1);
_HA_ATOMIC_INC(&__objt_server(s->target)->counters.cli_aborts);
if (!(s->flags & SF_ERR_MASK))
s->flags |= SF_ERR_CLICL;
goto return_error;

return_int_err:
_HA_ATOMIC_ADD(&sess->fe->fe_counters.internal_errors, 1);
_HA_ATOMIC_ADD(&s->be->be_counters.internal_errors, 1);
_HA_ATOMIC_INC(&sess->fe->fe_counters.internal_errors);
_HA_ATOMIC_INC(&s->be->be_counters.internal_errors);
if (sess->listener && sess->listener->counters)
_HA_ATOMIC_ADD(&sess->listener->counters->internal_errors, 1);
_HA_ATOMIC_INC(&sess->listener->counters->internal_errors);
if (objt_server(s->target))
_HA_ATOMIC_ADD(&__objt_server(s->target)->counters.internal_errors, 1);
_HA_ATOMIC_INC(&__objt_server(s->target)->counters.internal_errors);
if (!(s->flags & SF_ERR_MASK))
s->flags |= SF_ERR_INTERNAL;
goto return_error;

return_bad_res:
_HA_ATOMIC_ADD(&s->be->be_counters.failed_resp, 1);
_HA_ATOMIC_INC(&s->be->be_counters.failed_resp);
if (objt_server(s->target)) {
_HA_ATOMIC_ADD(&__objt_server(s->target)->counters.failed_resp, 1);
_HA_ATOMIC_INC(&__objt_server(s->target)->counters.failed_resp);
health_adjust(__objt_server(s->target), HANA_STATUS_HTTP_RSP);
}
stream_inc_http_fail_ctr(s);
@ -2617,7 +2617,7 @@ int http_apply_redirect_rule(struct redirect_rule *rule, struct stream *s, struc
req->analysers &= AN_REQ_FLT_END;

if (s->sess->fe == s->be) /* report it if the request was intercepted by the frontend */
_HA_ATOMIC_ADD(&s->sess->fe->fe_counters.intercepted_req, 1);
_HA_ATOMIC_INC(&s->sess->fe->fe_counters.intercepted_req);
}

if (!(s->flags & SF_ERR_MASK))
@ -4190,9 +4190,9 @@ enum rule_result http_wait_for_msg_body(struct stream *s, struct channel *chn,
s->flags |= SF_ERR_CLITO;
if (!(s->flags & SF_FINST_MASK))
s->flags |= SF_FINST_D;
_HA_ATOMIC_ADD(&sess->fe->fe_counters.failed_req, 1);
_HA_ATOMIC_INC(&sess->fe->fe_counters.failed_req);
if (sess->listener && sess->listener->counters)
_HA_ATOMIC_ADD(&sess->listener->counters->failed_req, 1);
_HA_ATOMIC_INC(&sess->listener->counters->failed_req);
http_reply_and_close(s, txn->status, http_error_message(s));
ret = HTTP_RULE_RES_ABRT;
goto end;
@ -4507,12 +4507,12 @@ static void http_end_response(struct stream *s)
}
else if (chn->flags & CF_SHUTW) {
txn->rsp.msg_state = HTTP_MSG_ERROR;
_HA_ATOMIC_ADD(&strm_sess(s)->fe->fe_counters.cli_aborts, 1);
_HA_ATOMIC_ADD(&s->be->be_counters.cli_aborts, 1);
_HA_ATOMIC_INC(&strm_sess(s)->fe->fe_counters.cli_aborts);
_HA_ATOMIC_INC(&s->be->be_counters.cli_aborts);
if (strm_sess(s)->listener && strm_sess(s)->listener->counters)
_HA_ATOMIC_ADD(&strm_sess(s)->listener->counters->cli_aborts, 1);
_HA_ATOMIC_INC(&strm_sess(s)->listener->counters->cli_aborts);
if (objt_server(s->target))
_HA_ATOMIC_ADD(&__objt_server(s->target)->counters.cli_aborts, 1);
_HA_ATOMIC_INC(&__objt_server(s->target)->counters.cli_aborts);
goto end;
}
DBG_TRACE_LEAVE(STRM_EV_HTTP_ANA, s, txn);

src/listener.c
@ -138,7 +138,7 @@ struct task *accept_queue_process(struct task *t, void *context, unsigned int st
break;

li = __objt_listener(conn->target);
_HA_ATOMIC_ADD(&li->thr_conn[tid], 1);
_HA_ATOMIC_INC(&li->thr_conn[tid]);
ret = li->accept(conn);
if (ret <= 0) {
/* connection was terminated by the application */
@ -213,21 +213,21 @@ void listener_set_state(struct listener *l, enum li_state st)
/* from state */
switch (l->state) {
case LI_NEW: /* first call */
_HA_ATOMIC_ADD(&px->li_all, 1);
_HA_ATOMIC_INC(&px->li_all);
break;
case LI_INIT:
case LI_ASSIGNED:
break;
case LI_PAUSED:
_HA_ATOMIC_SUB(&px->li_paused, 1);
_HA_ATOMIC_DEC(&px->li_paused);
break;
case LI_LISTEN:
_HA_ATOMIC_SUB(&px->li_bound, 1);
_HA_ATOMIC_DEC(&px->li_bound);
break;
case LI_READY:
case LI_FULL:
case LI_LIMITED:
_HA_ATOMIC_SUB(&px->li_ready, 1);
_HA_ATOMIC_DEC(&px->li_ready);
break;
}

@ -239,17 +239,17 @@ void listener_set_state(struct listener *l, enum li_state st)
break;
case LI_PAUSED:
BUG_ON(l->rx.fd == -1);
_HA_ATOMIC_ADD(&px->li_paused, 1);
_HA_ATOMIC_INC(&px->li_paused);
break;
case LI_LISTEN:
BUG_ON(l->rx.fd == -1);
_HA_ATOMIC_ADD(&px->li_bound, 1);
_HA_ATOMIC_INC(&px->li_bound);
break;
case LI_READY:
case LI_FULL:
case LI_LIMITED:
BUG_ON(l->rx.fd == -1);
_HA_ATOMIC_ADD(&px->li_ready, 1);
_HA_ATOMIC_INC(&px->li_ready);
break;
}
}
@ -664,8 +664,8 @@ int create_listeners(struct bind_conf *bc, const struct sockaddr_storage *ss,
l->extra_counters = NULL;

HA_SPIN_INIT(&l->lock);
_HA_ATOMIC_ADD(&jobs, 1);
_HA_ATOMIC_ADD(&listeners, 1);
_HA_ATOMIC_INC(&jobs);
_HA_ATOMIC_INC(&listeners);
}
return 1;
}
@ -683,8 +683,8 @@ void __delete_listener(struct listener *listener)
listener_set_state(listener, LI_INIT);
LIST_DEL(&listener->rx.proto_list);
listener->rx.proto->nb_receivers--;
_HA_ATOMIC_SUB(&jobs, 1);
_HA_ATOMIC_SUB(&listeners, 1);
_HA_ATOMIC_DEC(&jobs);
_HA_ATOMIC_DEC(&listeners);
}
}

@ -860,11 +860,11 @@ void listener_accept(struct listener *l)
goto end;

case CO_AC_RETRY: /* likely a signal */
_HA_ATOMIC_SUB(&l->nbconn, 1);
_HA_ATOMIC_DEC(&l->nbconn);
if (p)
_HA_ATOMIC_SUB(&p->feconn, 1);
_HA_ATOMIC_DEC(&p->feconn);
if (!(l->options & LI_O_UNLIMITED))
_HA_ATOMIC_SUB(&actconn, 1);
_HA_ATOMIC_DEC(&actconn);
continue;

case CO_AC_YIELD:
@ -890,7 +890,7 @@ void listener_accept(struct listener *l)
HA_ATOMIC_UPDATE_MAX(&global.cps_max, count);
}

_HA_ATOMIC_ADD(&activity[tid].accepted, 1);
_HA_ATOMIC_INC(&activity[tid].accepted);

if (unlikely(cli_conn->handle.fd >= global.maxsock)) {
send_log(p, LOG_EMERG,
@ -1015,18 +1015,18 @@ void listener_accept(struct listener *l)
*/
ring = &accept_queue_rings[t];
if (accept_queue_push_mp(ring, cli_conn)) {
_HA_ATOMIC_ADD(&activity[t].accq_pushed, 1);
_HA_ATOMIC_INC(&activity[t].accq_pushed);
tasklet_wakeup(ring->tasklet);
continue;
}
/* If the ring is full we do a synchronous accept on
* the local thread here.
*/
_HA_ATOMIC_ADD(&activity[t].accq_full, 1);
_HA_ATOMIC_INC(&activity[t].accq_full);
}
#endif // USE_THREAD

_HA_ATOMIC_ADD(&l->thr_conn[tid], 1);
_HA_ATOMIC_INC(&l->thr_conn[tid]);
ret = l->accept(cli_conn);
if (unlikely(ret <= 0)) {
/* The connection was closed by stream_accept(). Either
@ -1059,13 +1059,13 @@ void listener_accept(struct listener *l)

end:
if (next_conn)
_HA_ATOMIC_SUB(&l->nbconn, 1);
_HA_ATOMIC_DEC(&l->nbconn);

if (p && next_feconn)
_HA_ATOMIC_SUB(&p->feconn, 1);
_HA_ATOMIC_DEC(&p->feconn);

if (next_actconn)
_HA_ATOMIC_SUB(&actconn, 1);
_HA_ATOMIC_DEC(&actconn);

if ((l->state == LI_FULL && (!l->maxconn || l->nbconn < l->maxconn)) ||
(l->state == LI_LIMITED &&
@ -1123,11 +1123,11 @@ void listener_release(struct listener *l)
struct proxy *fe = l->bind_conf->frontend;

if (!(l->options & LI_O_UNLIMITED))
_HA_ATOMIC_SUB(&actconn, 1);
_HA_ATOMIC_DEC(&actconn);
if (fe)
_HA_ATOMIC_SUB(&fe->feconn, 1);
_HA_ATOMIC_SUB(&l->nbconn, 1);
_HA_ATOMIC_SUB(&l->thr_conn[tid], 1);
_HA_ATOMIC_DEC(&fe->feconn);
_HA_ATOMIC_DEC(&l->nbconn);
_HA_ATOMIC_DEC(&l->thr_conn[tid]);

if (l->state == LI_FULL || l->state == LI_LIMITED)
resume_listener(l);

src/log.c
@ -1918,7 +1918,7 @@ static inline void __do_send_log(struct logsrv *logsrv, int nblogger, int level,
static char once;

if (errno == EAGAIN)
_HA_ATOMIC_ADD(&dropped_logs, 1);
_HA_ATOMIC_INC(&dropped_logs);
else if (!once) {
once = 1; /* note: no need for atomic ops here */
ha_alert("sendmsg()/writev() failed in logger #%d: %s (errno=%d)\n",
@ -3276,7 +3276,7 @@ void strm_log(struct stream *s)

size = build_logline(s, logline, global.max_syslog_len, &sess->fe->logformat);
if (size > 0) {
_HA_ATOMIC_ADD(&sess->fe->log_count, 1);
_HA_ATOMIC_INC(&sess->fe->log_count);
__send_log(&sess->fe->logsrvs, &sess->fe->log_tag, level,
logline, size + 1, logline_rfc5424, sd_size);
s->logs.logwait = 0;
@ -3315,7 +3315,7 @@ void sess_log(struct session *sess)

size = sess_build_logline(sess, NULL, logline, global.max_syslog_len, &sess->fe->logformat);
if (size > 0) {
_HA_ATOMIC_ADD(&sess->fe->log_count, 1);
_HA_ATOMIC_INC(&sess->fe->log_count);
__send_log(&sess->fe->logsrvs, &sess->fe->log_tag, level,
logline, size + 1, logline_rfc5424, sd_size);
}
@ -3704,7 +3704,7 @@ void syslog_fd_handler(int fd)
buf->data = ret;

/* update counters */
_HA_ATOMIC_ADD(&cum_log_messages, 1);
_HA_ATOMIC_INC(&cum_log_messages);
proxy_inc_fe_req_ctr(l, l->bind_conf->frontend);

parse_log_message(buf->area, buf->data, &level, &facility, metadata, &message, &size);
@ -3811,7 +3811,7 @@ static void syslog_io_handler(struct appctx *appctx)
co_skip(si_oc(si), to_skip);

/* update counters */
_HA_ATOMIC_ADD(&cum_log_messages, 1);
_HA_ATOMIC_INC(&cum_log_messages);
proxy_inc_fe_req_ctr(l, frontend);

parse_log_message(buf->area, buf->data, &level, &facility, metadata, &message, &size);
@ -3834,15 +3834,15 @@ static void syslog_io_handler(struct appctx *appctx)

parse_error:
if (l->counters)
_HA_ATOMIC_ADD(&l->counters->failed_req, 1);
_HA_ATOMIC_ADD(&frontend->fe_counters.failed_req, 1);
_HA_ATOMIC_INC(&l->counters->failed_req);
_HA_ATOMIC_INC(&frontend->fe_counters.failed_req);

goto close;

cli_abort:
if (l->counters)
_HA_ATOMIC_ADD(&l->counters->cli_aborts, 1);
_HA_ATOMIC_ADD(&frontend->fe_counters.cli_aborts, 1);
_HA_ATOMIC_INC(&l->counters->cli_aborts);
_HA_ATOMIC_INC(&frontend->fe_counters.cli_aborts);

close:
si_shutw(si);

src/mux_h1.c
@ -2409,10 +2409,10 @@ static int h1_handle_internal_err(struct h1c *h1c)

session_inc_http_req_ctr(sess);
proxy_inc_fe_req_ctr(sess->listener, sess->fe);
_HA_ATOMIC_ADD(&sess->fe->fe_counters.p.http.rsp[5], 1);
_HA_ATOMIC_ADD(&sess->fe->fe_counters.internal_errors, 1);
_HA_ATOMIC_INC(&sess->fe->fe_counters.p.http.rsp[5]);
_HA_ATOMIC_INC(&sess->fe->fe_counters.internal_errors);
if (sess->listener && sess->listener->counters)
_HA_ATOMIC_ADD(&sess->listener->counters->internal_errors, 1);
_HA_ATOMIC_INC(&sess->listener->counters->internal_errors);

h1c->errcode = 500;
ret = h1_send_error(h1c);
@ -2434,10 +2434,10 @@ static int h1_handle_bad_req(struct h1c *h1c)
session_inc_http_req_ctr(sess);
session_inc_http_err_ctr(sess);
proxy_inc_fe_req_ctr(sess->listener, sess->fe);
_HA_ATOMIC_ADD(&sess->fe->fe_counters.p.http.rsp[4], 1);
_HA_ATOMIC_ADD(&sess->fe->fe_counters.failed_req, 1);
_HA_ATOMIC_INC(&sess->fe->fe_counters.p.http.rsp[4]);
_HA_ATOMIC_INC(&sess->fe->fe_counters.failed_req);
if (sess->listener && sess->listener->counters)
_HA_ATOMIC_ADD(&sess->listener->counters->failed_req, 1);
_HA_ATOMIC_INC(&sess->listener->counters->failed_req);

h1c->errcode = 400;
ret = h1_send_error(h1c);
@ -2461,10 +2461,10 @@ static int h1_handle_not_impl_err(struct h1c *h1c)

session_inc_http_req_ctr(sess);
proxy_inc_fe_req_ctr(sess->listener, sess->fe);
_HA_ATOMIC_ADD(&sess->fe->fe_counters.p.http.rsp[4], 1);
_HA_ATOMIC_ADD(&sess->fe->fe_counters.failed_req, 1);
_HA_ATOMIC_INC(&sess->fe->fe_counters.p.http.rsp[4]);
_HA_ATOMIC_INC(&sess->fe->fe_counters.failed_req);
if (sess->listener && sess->listener->counters)
_HA_ATOMIC_ADD(&sess->listener->counters->failed_req, 1);
_HA_ATOMIC_INC(&sess->listener->counters->failed_req);

h1c->errcode = 501;
ret = h1_send_error(h1c);
@ -2487,10 +2487,10 @@ static int h1_handle_req_tout(struct h1c *h1c)

session_inc_http_req_ctr(sess);
proxy_inc_fe_req_ctr(sess->listener, sess->fe);
_HA_ATOMIC_ADD(&sess->fe->fe_counters.p.http.rsp[4], 1);
_HA_ATOMIC_ADD(&sess->fe->fe_counters.failed_req, 1);
_HA_ATOMIC_INC(&sess->fe->fe_counters.p.http.rsp[4]);
_HA_ATOMIC_INC(&sess->fe->fe_counters.failed_req);
if (sess->listener && sess->listener->counters)
_HA_ATOMIC_ADD(&sess->listener->counters->failed_req, 1);
_HA_ATOMIC_INC(&sess->listener->counters->failed_req);

h1c->errcode = 408;
ret = h1_send_error(h1c);

src/mux_h2.c
@ -1007,8 +1007,8 @@ static int h2_init(struct connection *conn, struct proxy *prx, struct session *s
goto fail_stream;
}

HA_ATOMIC_ADD(&h2c->px_counters->open_conns, 1);
HA_ATOMIC_ADD(&h2c->px_counters->total_conns, 1);
HA_ATOMIC_INC(&h2c->px_counters->open_conns);
HA_ATOMIC_INC(&h2c->px_counters->total_conns);

/* prepare to read something */
h2c_restart_reading(h2c, 1);
@ -1091,7 +1091,7 @@ static void h2_release(struct h2c *h2c)
conn->xprt->unsubscribe(conn, conn->xprt_ctx, h2c->wait_event.events,
&h2c->wait_event);

HA_ATOMIC_SUB(&h2c->px_counters->open_conns, 1);
HA_ATOMIC_DEC(&h2c->px_counters->open_conns);

pool_free(pool_head_h2c, h2c);
}
@ -1371,7 +1371,7 @@ static inline void h2s_close(struct h2s *h2s)
if (!(h2s->cs->flags & CS_FL_EOS) && !b_data(&h2s->rxbuf))
h2s_notify_recv(h2s);
}
HA_ATOMIC_SUB(&h2s->h2c->px_counters->open_streams, 1);
HA_ATOMIC_DEC(&h2s->h2c->px_counters->open_streams);

TRACE_LEAVE(H2_EV_H2S_END, h2s->h2c->conn, h2s);
}
@ -1458,8 +1458,8 @@ static struct h2s *h2s_new(struct h2c *h2c, int id)
h2c->nb_streams++;
h2c->stream_cnt++;

HA_ATOMIC_ADD(&h2c->px_counters->open_streams, 1);
HA_ATOMIC_ADD(&h2c->px_counters->total_streams, 1);
HA_ATOMIC_INC(&h2c->px_counters->open_streams);
HA_ATOMIC_INC(&h2c->px_counters->total_streams);

TRACE_LEAVE(H2_EV_H2S_NEW, h2c->conn, h2s);
return h2s;
@ -1684,7 +1684,7 @@ static int h2c_frt_recv_preface(struct h2c *h2c)
if (ret1 < 0 || conn_xprt_read0_pending(h2c->conn)) {
TRACE_ERROR("I/O error or short read", H2_EV_RX_FRAME|H2_EV_RX_PREFACE, h2c->conn);
h2c_error(h2c, H2_ERR_PROTOCOL_ERROR);
HA_ATOMIC_ADD(&h2c->px_counters->conn_proto_err, 1);
HA_ATOMIC_INC(&h2c->px_counters->conn_proto_err);
}
ret2 = 0;
goto out;
@ -1828,7 +1828,7 @@ static int h2c_send_goaway_error(struct h2c *h2c, struct h2s *h2s)
case H2_ERR_CANCEL:
break;
default:
HA_ATOMIC_ADD(&h2c->px_counters->goaway_resp, 1);
HA_ATOMIC_INC(&h2c->px_counters->goaway_resp);
}
out:
TRACE_LEAVE(H2_EV_TX_FRAME|H2_EV_TX_GOAWAY, h2c->conn);
@ -1975,7 +1975,7 @@ static int h2c_send_rst_stream(struct h2c *h2c, struct h2s *h2s)
}

out:
HA_ATOMIC_ADD(&h2c->px_counters->rst_stream_resp, 1);
HA_ATOMIC_INC(&h2c->px_counters->rst_stream_resp);
TRACE_LEAVE(H2_EV_TX_FRAME|H2_EV_TX_RST, h2c->conn, h2s);
return ret;
}
@ -2177,7 +2177,7 @@ static int h2c_handle_settings(struct h2c *h2c)
if (arg < 16384 || arg > 16777215) { // RFC7540#6.5.2
TRACE_ERROR("MAX_FRAME_SIZE out of range", H2_EV_RX_FRAME|H2_EV_RX_SETTINGS, h2c->conn);
error = H2_ERR_PROTOCOL_ERROR;
HA_ATOMIC_ADD(&h2c->px_counters->conn_proto_err, 1);
HA_ATOMIC_INC(&h2c->px_counters->conn_proto_err);
goto fail;
}
h2c->mfs = arg;
@ -2186,7 +2186,7 @@ static int h2c_handle_settings(struct h2c *h2c)
if (arg < 0 || arg > 1) { // RFC7540#6.5.2
TRACE_ERROR("ENABLE_PUSH out of range", H2_EV_RX_FRAME|H2_EV_RX_SETTINGS, h2c->conn);
error = H2_ERR_PROTOCOL_ERROR;
HA_ATOMIC_ADD(&h2c->px_counters->conn_proto_err, 1);
HA_ATOMIC_INC(&h2c->px_counters->conn_proto_err);
goto fail;
}
break;
@ -2461,14 +2461,14 @@ static int h2c_handle_window_update(struct h2c *h2c, struct h2s *h2s)
if (!inc) {
TRACE_ERROR("stream WINDOW_UPDATE inc=0", H2_EV_RX_FRAME|H2_EV_RX_WU, h2c->conn, h2s);
error = H2_ERR_PROTOCOL_ERROR;
HA_ATOMIC_ADD(&h2c->px_counters->strm_proto_err, 1);
HA_ATOMIC_INC(&h2c->px_counters->strm_proto_err);
goto strm_err;
}

if (h2s_mws(h2s) >= 0 && h2s_mws(h2s) + inc < 0) {
TRACE_ERROR("stream WINDOW_UPDATE inc<0", H2_EV_RX_FRAME|H2_EV_RX_WU, h2c->conn, h2s);
error = H2_ERR_FLOW_CONTROL_ERROR;
HA_ATOMIC_ADD(&h2c->px_counters->strm_proto_err, 1);
HA_ATOMIC_INC(&h2c->px_counters->strm_proto_err);
goto strm_err;
}

@ -2486,7 +2486,7 @@ static int h2c_handle_window_update(struct h2c *h2c, struct h2s *h2s)
if (!inc) {
TRACE_ERROR("conn WINDOW_UPDATE inc=0", H2_EV_RX_FRAME|H2_EV_RX_WU, h2c->conn);
error = H2_ERR_PROTOCOL_ERROR;
HA_ATOMIC_ADD(&h2c->px_counters->conn_proto_err, 1);
HA_ATOMIC_INC(&h2c->px_counters->conn_proto_err);
goto conn_err;
}

@ -2559,7 +2559,7 @@ static int h2c_handle_priority(struct h2c *h2c)
/* 7540#5.3 : can't depend on itself */
TRACE_ERROR("PRIORITY depends on itself", H2_EV_RX_FRAME|H2_EV_RX_WU, h2c->conn);
h2c_error(h2c, H2_ERR_PROTOCOL_ERROR);
HA_ATOMIC_ADD(&h2c->px_counters->conn_proto_err, 1);
HA_ATOMIC_INC(&h2c->px_counters->conn_proto_err);
TRACE_DEVEL("leaving on error", H2_EV_RX_FRAME|H2_EV_RX_PRIO, h2c->conn);
return 0;
}
@ -2655,7 +2655,7 @@ static struct h2s *h2c_frt_handle_headers(struct h2c *h2c, struct h2s *h2s)
/* RFC7540#5.1.1 stream id > prev ones, and must be odd here */
error = H2_ERR_PROTOCOL_ERROR;
TRACE_ERROR("HEADERS on invalid stream ID", H2_EV_RX_FRAME|H2_EV_RX_HDR, h2c->conn);
HA_ATOMIC_ADD(&h2c->px_counters->conn_proto_err, 1);
HA_ATOMIC_INC(&h2c->px_counters->conn_proto_err);
sess_log(h2c->conn->owner);
goto conn_err;
}
@ -2778,7 +2778,7 @@ static struct h2s *h2c_bck_handle_headers(struct h2c *h2c, struct h2s *h2s)
TRACE_ERROR("response HEADERS in invalid state", H2_EV_RX_FRAME|H2_EV_RX_HDR, h2c->conn, h2s);
h2s_error(h2s, H2_ERR_STREAM_CLOSED);
h2c->st0 = H2_CS_FRAME_E;
HA_ATOMIC_ADD(&h2c->px_counters->strm_proto_err, 1);
HA_ATOMIC_INC(&h2c->px_counters->strm_proto_err);
goto fail;
}

@ -2790,7 +2790,7 @@ static struct h2s *h2c_bck_handle_headers(struct h2c *h2c, struct h2s *h2s)
TRACE_ERROR("couldn't decode response HEADERS", H2_EV_RX_FRAME|H2_EV_RX_HDR, h2c->conn, h2s);
h2s_error(h2s, H2_ERR_PROTOCOL_ERROR);
h2c->st0 = H2_CS_FRAME_E;
HA_ATOMIC_ADD(&h2c->px_counters->strm_proto_err, 1);
HA_ATOMIC_INC(&h2c->px_counters->strm_proto_err);
goto fail;
}

@ -2867,7 +2867,7 @@ static int h2c_handle_data(struct h2c *h2c, struct h2s *h2s)
/* RFC7540#8.1.2 */
TRACE_ERROR("DATA frame larger than content-length", H2_EV_RX_FRAME|H2_EV_RX_DATA, h2c->conn, h2s);
error = H2_ERR_PROTOCOL_ERROR;
HA_ATOMIC_ADD(&h2c->px_counters->strm_proto_err, 1);
HA_ATOMIC_INC(&h2c->px_counters->strm_proto_err);
goto strm_err;
}
if (!(h2c->flags & H2_CF_IS_BACK) &&
@ -2925,7 +2925,7 @@ static int h2c_handle_data(struct h2c *h2c, struct h2s *h2s)
/* RFC7540#8.1.2 */
TRACE_ERROR("ES on DATA frame before content-length", H2_EV_RX_FRAME|H2_EV_RX_DATA, h2c->conn, h2s);
error = H2_ERR_PROTOCOL_ERROR;
HA_ATOMIC_ADD(&h2c->px_counters->strm_proto_err, 1);
HA_ATOMIC_INC(&h2c->px_counters->strm_proto_err);
goto strm_err;
}
}
@ -2971,7 +2971,7 @@ static int h2_frame_check_vs_state(struct h2c *h2c, struct h2s *h2s)
/* only log if no other stream can report the error */
sess_log(h2c->conn->owner);
}
HA_ATOMIC_ADD(&h2c->px_counters->conn_proto_err, 1);
HA_ATOMIC_INC(&h2c->px_counters->conn_proto_err);
TRACE_DEVEL("leaving in error (idle&!hdrs&!prio)", H2_EV_RX_FRAME|H2_EV_RX_FHDR|H2_EV_PROTO_ERR, h2c->conn, h2s);
return 0;
}
@ -2980,7 +2980,7 @@ static int h2_frame_check_vs_state(struct h2c *h2c, struct h2s *h2s)
/* only PUSH_PROMISE would be permitted here */
TRACE_ERROR("invalid frame type for IDLE state (back)", H2_EV_RX_FRAME|H2_EV_RX_FHDR, h2c->conn, h2s);
h2c_error(h2c, H2_ERR_PROTOCOL_ERROR);
HA_ATOMIC_ADD(&h2c->px_counters->conn_proto_err, 1);
HA_ATOMIC_INC(&h2c->px_counters->conn_proto_err);
TRACE_DEVEL("leaving in error (idle&back)", H2_EV_RX_FRAME|H2_EV_RX_FHDR|H2_EV_PROTO_ERR, h2c->conn, h2s);
return 0;
}
@ -2995,7 +2995,7 @@ static int h2_frame_check_vs_state(struct h2c *h2c, struct h2s *h2s)
if (h2_ft_bit(h2c->dft) & H2_FT_HDR_MASK) {
TRACE_ERROR("invalid frame type for HREM state", H2_EV_RX_FRAME|H2_EV_RX_FHDR, h2c->conn, h2s);
h2c_error(h2c, H2_ERR_PROTOCOL_ERROR);
HA_ATOMIC_ADD(&h2c->px_counters->conn_proto_err, 1);
HA_ATOMIC_INC(&h2c->px_counters->conn_proto_err);
}
else {
h2s_error(h2s, H2_ERR_STREAM_CLOSED);
@ -3145,7 +3145,7 @@ static void h2_process_demux(struct h2c *h2c)
h2c->st0 = H2_CS_ERROR2;
if (!(h2c->flags & H2_CF_IS_BACK))
sess_log(h2c->conn->owner);
HA_ATOMIC_ADD(&h2c->px_counters->conn_proto_err, 1);
HA_ATOMIC_INC(&h2c->px_counters->conn_proto_err);
goto fail;
}

@ -3164,7 +3164,7 @@ static void h2_process_demux(struct h2c *h2c)
* deleted above.
*/
padlen = 0;
HA_ATOMIC_ADD(&h2c->px_counters->settings_rcvd, 1);
HA_ATOMIC_INC(&h2c->px_counters->settings_rcvd);
goto new_frame;
}
}
@ -3197,7 +3197,7 @@ static void h2_process_demux(struct h2c *h2c)
/* only log if no other stream can report the error */
sess_log(h2c->conn->owner);
}
HA_ATOMIC_ADD(&h2c->px_counters->conn_proto_err, 1);
HA_ATOMIC_INC(&h2c->px_counters->conn_proto_err);
break;
}

@ -3215,7 +3215,7 @@ static void h2_process_demux(struct h2c *h2c)
h2c_error(h2c, H2_ERR_FRAME_SIZE_ERROR);
if (!(h2c->flags & H2_CF_IS_BACK))
sess_log(h2c->conn->owner);
HA_ATOMIC_ADD(&h2c->px_counters->conn_proto_err, 1);
HA_ATOMIC_INC(&h2c->px_counters->conn_proto_err);
goto fail;
}
hdr.len--;
@ -3233,7 +3233,7 @@ static void h2_process_demux(struct h2c *h2c)
h2c_error(h2c, H2_ERR_PROTOCOL_ERROR);
if (!(h2c->flags & H2_CF_IS_BACK))
sess_log(h2c->conn->owner);
HA_ATOMIC_ADD(&h2c->px_counters->conn_proto_err, 1);
HA_ATOMIC_INC(&h2c->px_counters->conn_proto_err);
goto fail;
}

@ -3261,7 +3261,7 @@ static void h2_process_demux(struct h2c *h2c)
h2c_error(h2c, ret);
if (!(h2c->flags & H2_CF_IS_BACK))
sess_log(h2c->conn->owner);
HA_ATOMIC_ADD(&h2c->px_counters->conn_proto_err, 1);
HA_ATOMIC_INC(&h2c->px_counters->conn_proto_err);
goto fail;
}
}
@ -3300,7 +3300,7 @@ static void h2_process_demux(struct h2c *h2c)
TRACE_PROTO("receiving H2 SETTINGS frame", H2_EV_RX_FRAME|H2_EV_RX_SETTINGS, h2c->conn, h2s);
ret = h2c_handle_settings(h2c);
}
HA_ATOMIC_ADD(&h2c->px_counters->settings_rcvd, 1);
HA_ATOMIC_INC(&h2c->px_counters->settings_rcvd);

if (h2c->st0 == H2_CS_FRAME_A) {
TRACE_PROTO("sending H2 SETTINGS ACK frame", H2_EV_TX_FRAME|H2_EV_RX_SETTINGS, h2c->conn, h2s);
@ -3337,7 +3337,7 @@ static void h2_process_demux(struct h2c *h2c)
h2c_error(h2c, H2_ERR_PROTOCOL_ERROR);
if (!(h2c->flags & H2_CF_IS_BACK))
sess_log(h2c->conn->owner);
HA_ATOMIC_ADD(&h2c->px_counters->conn_proto_err, 1);
HA_ATOMIC_INC(&h2c->px_counters->conn_proto_err);
goto fail;

case H2_FT_HEADERS:
@ -3352,7 +3352,7 @@ static void h2_process_demux(struct h2c *h2c)
ret = 1;
}
}
HA_ATOMIC_ADD(&h2c->px_counters->headers_rcvd, 1);
HA_ATOMIC_INC(&h2c->px_counters->headers_rcvd);
break;

case H2_FT_DATA:
@ -3360,7 +3360,7 @@ static void h2_process_demux(struct h2c *h2c)
TRACE_PROTO("receiving H2 DATA frame", H2_EV_RX_FRAME|H2_EV_RX_DATA, h2c->conn, h2s);
ret = h2c_handle_data(h2c, h2s);
}
HA_ATOMIC_ADD(&h2c->px_counters->data_rcvd, 1);
HA_ATOMIC_INC(&h2c->px_counters->data_rcvd);

if (h2c->st0 == H2_CS_FRAME_A) {
TRACE_PROTO("sending stream WINDOW_UPDATE frame", H2_EV_TX_FRAME|H2_EV_TX_WU, h2c->conn, h2s);
@ -3380,7 +3380,7 @@ static void h2_process_demux(struct h2c *h2c)
TRACE_PROTO("receiving H2 RST_STREAM frame", H2_EV_RX_FRAME|H2_EV_RX_RST|H2_EV_RX_EOI, h2c->conn, h2s);
ret = h2c_handle_rst_stream(h2c, h2s);
}
HA_ATOMIC_ADD(&h2c->px_counters->rst_stream_rcvd, 1);
HA_ATOMIC_INC(&h2c->px_counters->rst_stream_rcvd);
break;

case H2_FT_GOAWAY:
@ -3388,7 +3388,7 @@ static void h2_process_demux(struct h2c *h2c)
TRACE_PROTO("receiving H2 GOAWAY frame", H2_EV_RX_FRAME|H2_EV_RX_GOAWAY, h2c->conn, h2s);
ret = h2c_handle_goaway(h2c);
}
HA_ATOMIC_ADD(&h2c->px_counters->goaway_rcvd, 1);
HA_ATOMIC_INC(&h2c->px_counters->goaway_rcvd);
break;

/* implement all extra frame types here */
@ -4610,7 +4610,7 @@ static int h2c_decode_headers(struct h2c *h2c, struct buffer *rxbuf, uint32_t *f
/* RFC7540#6.10: frame of unexpected type */
TRACE_STATE("not continuation!", H2_EV_RX_FRAME|H2_EV_RX_FHDR|H2_EV_RX_HDR|H2_EV_RX_CONT|H2_EV_H2C_ERR|H2_EV_PROTO_ERR, h2c->conn);
h2c_error(h2c, H2_ERR_PROTOCOL_ERROR);
HA_ATOMIC_ADD(&h2c->px_counters->conn_proto_err, 1);
HA_ATOMIC_INC(&h2c->px_counters->conn_proto_err);
goto fail;
}

@ -4618,7 +4618,7 @@ static int h2c_decode_headers(struct h2c *h2c, struct buffer *rxbuf, uint32_t *f
/* RFC7540#6.10: frame of different stream */
TRACE_STATE("different stream ID!", H2_EV_RX_FRAME|H2_EV_RX_FHDR|H2_EV_RX_HDR|H2_EV_RX_CONT|H2_EV_H2C_ERR|H2_EV_PROTO_ERR, h2c->conn);
h2c_error(h2c, H2_ERR_PROTOCOL_ERROR);
HA_ATOMIC_ADD(&h2c->px_counters->conn_proto_err, 1);
HA_ATOMIC_INC(&h2c->px_counters->conn_proto_err);
goto fail;
}

@ -4671,7 +4671,7 @@ static int h2c_decode_headers(struct h2c *h2c, struct buffer *rxbuf, uint32_t *f
/* RFC7540#5.3.1 : stream dep may not depend on itself */
TRACE_STATE("invalid stream dependency!", H2_EV_RX_FRAME|H2_EV_RX_HDR|H2_EV_H2C_ERR|H2_EV_PROTO_ERR, h2c->conn);
h2c_error(h2c, H2_ERR_PROTOCOL_ERROR);
HA_ATOMIC_ADD(&h2c->px_counters->conn_proto_err, 1);
HA_ATOMIC_INC(&h2c->px_counters->conn_proto_err);
goto fail;
}

@ -4813,7 +4813,7 @@ static int h2c_decode_headers(struct h2c *h2c, struct buffer *rxbuf, uint32_t *f
/* It's a trailer but it's missing ES flag */
TRACE_STATE("missing EH on trailers frame", H2_EV_RX_FRAME|H2_EV_RX_HDR|H2_EV_H2C_ERR|H2_EV_PROTO_ERR, h2c->conn);
h2c_error(h2c, H2_ERR_PROTOCOL_ERROR);
HA_ATOMIC_ADD(&h2c->px_counters->conn_proto_err, 1);
HA_ATOMIC_INC(&h2c->px_counters->conn_proto_err);
goto fail;
}

src/peers.c
@ -934,9 +934,9 @@ void __peer_session_deinit(struct peer *peer)
return;

if (peer->appctx->st0 == PEER_SESS_ST_WAITMSG)
HA_ATOMIC_SUB(&connected_peers, 1);
HA_ATOMIC_DEC(&connected_peers);

HA_ATOMIC_SUB(&active_peers, 1);
HA_ATOMIC_DEC(&active_peers);

flush_dcache(peer);

@ -1758,7 +1758,7 @@ static int peer_treat_updatemsg(struct appctx *appctx, struct peer *p, int updt,
if (de) {
data_ptr = stktable_data_ptr(st->table, ts, data_type);
if (data_ptr) {
HA_ATOMIC_ADD(&de->refcount, 1);
HA_ATOMIC_INC(&de->refcount);
stktable_data_cast(data_ptr, std_t_dict) = de;
}
}
@ -2546,7 +2546,7 @@ static void peer_io_handler(struct appctx *appctx)
curpeer->flags |= PEER_F_ALIVE;
appctx->ctx.peers.ptr = curpeer;
appctx->st0 = PEER_SESS_ST_SENDSUCCESS;
_HA_ATOMIC_ADD(&active_peers, 1);
_HA_ATOMIC_INC(&active_peers);
}
/* fall through */
case PEER_SESS_ST_SENDSUCCESS: {
@ -2570,7 +2570,7 @@ static void peer_io_handler(struct appctx *appctx)
init_accepted_peer(curpeer, curpeers);

/* switch to waiting message state */
_HA_ATOMIC_ADD(&connected_peers, 1);
_HA_ATOMIC_INC(&connected_peers);
appctx->st0 = PEER_SESS_ST_WAITMSG;
goto switchstate;
}
@ -2635,7 +2635,7 @@ static void peer_io_handler(struct appctx *appctx)
appctx->st0 = PEER_SESS_ST_END;
goto switchstate;
}
_HA_ATOMIC_ADD(&connected_peers, 1);
_HA_ATOMIC_INC(&connected_peers);
appctx->st0 = PEER_SESS_ST_WAITMSG;
}
/* fall through */
@ -2698,7 +2698,7 @@ static void peer_io_handler(struct appctx *appctx)
}
case PEER_SESS_ST_EXIT:
if (prev_state == PEER_SESS_ST_WAITMSG)
_HA_ATOMIC_SUB(&connected_peers, 1);
_HA_ATOMIC_DEC(&connected_peers);
prev_state = appctx->st0;
if (peer_send_status_errormsg(appctx) == -1)
goto out;
@ -2706,7 +2706,7 @@ static void peer_io_handler(struct appctx *appctx)
goto switchstate;
case PEER_SESS_ST_ERRSIZE: {
if (prev_state == PEER_SESS_ST_WAITMSG)
_HA_ATOMIC_SUB(&connected_peers, 1);
_HA_ATOMIC_DEC(&connected_peers);
prev_state = appctx->st0;
if (peer_send_error_size_limitmsg(appctx) == -1)
goto out;
@ -2719,7 +2719,7 @@ static void peer_io_handler(struct appctx *appctx)
if (curpeer)
curpeer->proto_err++;
if (prev_state == PEER_SESS_ST_WAITMSG)
_HA_ATOMIC_SUB(&connected_peers, 1);
_HA_ATOMIC_DEC(&connected_peers);
prev_state = appctx->st0;
if (peer_send_error_protomsg(appctx) == -1) {
TRACE_PROTO("could not send error message", PEERS_EV_PROTOERR);
@ -2731,7 +2731,7 @@ static void peer_io_handler(struct appctx *appctx)
/* fall through */
case PEER_SESS_ST_END: {
if (prev_state == PEER_SESS_ST_WAITMSG)
_HA_ATOMIC_SUB(&connected_peers, 1);
_HA_ATOMIC_DEC(&connected_peers);
prev_state = appctx->st0;
if (curpeer) {
HA_SPIN_UNLOCK(PEER_LOCK, &curpeer->lock);
@ -2853,7 +2853,7 @@ static struct appctx *peer_session_create(struct peers *peers, struct peer *peer

peer->appctx = appctx;
task_wakeup(s->task, TASK_WOKEN_INIT);
_HA_ATOMIC_ADD(&active_peers, 1);
_HA_ATOMIC_INC(&active_peers);
return appctx;

/* Error unrolling */
@ -3036,7 +3036,7 @@ struct task *process_peer_sync(struct task * task, void *context, unsigned int s
/* We've just received the signal */
if (!(peers->flags & PEERS_F_DONOTSTOP)) {
/* add DO NOT STOP flag if not present */
_HA_ATOMIC_ADD(&jobs, 1);
_HA_ATOMIC_INC(&jobs);
peers->flags |= PEERS_F_DONOTSTOP;
ps = peers->local;
for (st = ps->tables; st ; st = st->next)
@ -3060,7 +3060,7 @@ struct task *process_peer_sync(struct task * task, void *context, unsigned int s
if (ps->flags & PEER_F_TEACH_COMPLETE) {
if (peers->flags & PEERS_F_DONOTSTOP) {
/* resync of new process was complete, current process can die now */
_HA_ATOMIC_SUB(&jobs, 1);
_HA_ATOMIC_DEC(&jobs);
peers->flags &= ~PEERS_F_DONOTSTOP;
for (st = ps->tables; st ; st = st->next)
st->table->syncing--;
@ -3084,7 +3084,7 @@ struct task *process_peer_sync(struct task * task, void *context, unsigned int s
/* Other error cases */
if (peers->flags & PEERS_F_DONOTSTOP) {
/* unable to resync new process, current process can die now */
_HA_ATOMIC_SUB(&jobs, 1);
_HA_ATOMIC_DEC(&jobs);
peers->flags &= ~PEERS_F_DONOTSTOP;
for (st = ps->tables; st ; st = st->next)
st->table->syncing--;

src/pipe.c
@ -44,8 +44,8 @@ struct pipe *get_pipe()
if (likely(ret)) {
local_pipes = ret->next;
local_pipes_free--;
HA_ATOMIC_SUB(&pipes_free, 1);
HA_ATOMIC_ADD(&pipes_used, 1);
HA_ATOMIC_DEC(&pipes_free);
HA_ATOMIC_INC(&pipes_used);
goto out;
}

@ -56,13 +56,13 @@ struct pipe *get_pipe()
pipes_live = ret->next;
HA_SPIN_UNLOCK(PIPES_LOCK, &pipes_lock);
if (ret) {
HA_ATOMIC_SUB(&pipes_free, 1);
HA_ATOMIC_ADD(&pipes_used, 1);
HA_ATOMIC_DEC(&pipes_free);
HA_ATOMIC_INC(&pipes_used);
goto out;
}
}

HA_ATOMIC_ADD(&pipes_used, 1);
HA_ATOMIC_INC(&pipes_used);
if (pipes_used + pipes_free >= global.maxpipes)
goto fail;

@ -85,7 +85,7 @@ struct pipe *get_pipe()
return ret;
fail:
pool_free(pool_head_pipe, ret);
HA_ATOMIC_SUB(&pipes_used, 1);
HA_ATOMIC_DEC(&pipes_used);
return NULL;

}
@ -98,7 +98,7 @@ void kill_pipe(struct pipe *p)
close(p->prod);
close(p->cons);
pool_free(pool_head_pipe, p);
HA_ATOMIC_SUB(&pipes_used, 1);
HA_ATOMIC_DEC(&pipes_used);
}

/* put back a unused pipe into the live pool. If it still has data in it, it is
@ -124,8 +124,8 @@ void put_pipe(struct pipe *p)
pipes_live = p;
HA_SPIN_UNLOCK(PIPES_LOCK, &pipes_lock);
out:
HA_ATOMIC_ADD(&pipes_free, 1);
HA_ATOMIC_SUB(&pipes_used, 1);
HA_ATOMIC_INC(&pipes_free);
HA_ATOMIC_DEC(&pipes_used);
}

/*

src/pool.c
@ -181,20 +181,20 @@ void *__pool_refill_alloc(struct pool_head *pool, unsigned int avail)
void *ptr = NULL;

if (limit && allocated >= limit) {
_HA_ATOMIC_ADD(&pool->allocated, 1);
_HA_ATOMIC_INC(&pool->allocated);
activity[tid].pool_fail++;
return NULL;
}

ptr = pool_alloc_area(pool->size + POOL_EXTRA);
if (!ptr) {
_HA_ATOMIC_ADD(&pool->failed, 1);
_HA_ATOMIC_INC(&pool->failed);
activity[tid].pool_fail++;
return NULL;
}

_HA_ATOMIC_ADD(&pool->allocated, 1);
_HA_ATOMIC_ADD(&pool->used, 1);
_HA_ATOMIC_INC(&pool->allocated);
_HA_ATOMIC_INC(&pool->used);

#ifdef DEBUG_MEMORY_POOLS
/* keep track of where the element was allocated from */
@ -257,7 +257,7 @@ void *__pool_refill_alloc(struct pool_head *pool, unsigned int avail)

ptr = pool_alloc_area(size + POOL_EXTRA);
if (!ptr) {
_HA_ATOMIC_ADD(&pool->failed, 1);
_HA_ATOMIC_INC(&pool->failed);
if (failed) {
activity[tid].pool_fail++;
return NULL;
@ -278,7 +278,7 @@ void *__pool_refill_alloc(struct pool_head *pool, unsigned int avail)
__ha_barrier_atomic_store();

_HA_ATOMIC_ADD(&pool->allocated, allocated - allocated_orig);
_HA_ATOMIC_ADD(&pool->used, 1);
_HA_ATOMIC_INC(&pool->used);

#ifdef DEBUG_MEMORY_POOLS
/* keep track of where the element was allocated from */
@ -353,7 +353,7 @@ void pool_gc(struct pool_head *pool_ctx)
if (HA_ATOMIC_DWCAS(&entry->free_list, &cmp, &new) == 0)
continue;
pool_free_area(cmp.free_list, entry->size + POOL_EXTRA);
_HA_ATOMIC_SUB(&entry->allocated, 1);
_HA_ATOMIC_DEC(&entry->allocated);
}
}

src/queue.c
@ -193,8 +193,8 @@ void pendconn_unlink(struct pendconn *p)
}
HA_SPIN_UNLOCK(SERVER_LOCK, &p->srv->lock);
if (done) {
_HA_ATOMIC_SUB(&p->srv->nbpend, 1);
_HA_ATOMIC_SUB(&p->px->totpend, 1);
_HA_ATOMIC_DEC(&p->srv->nbpend);
_HA_ATOMIC_DEC(&p->px->totpend);
}
}
else {
@ -206,8 +206,8 @@ void pendconn_unlink(struct pendconn *p)
}
HA_RWLOCK_WRUNLOCK(PROXY_LOCK, &p->px->lock);
if (done) {
_HA_ATOMIC_SUB(&p->px->nbpend, 1);
_HA_ATOMIC_SUB(&p->px->totpend, 1);
_HA_ATOMIC_DEC(&p->px->nbpend);
_HA_ATOMIC_DEC(&p->px->totpend);
}
}
}
@ -309,22 +309,22 @@ static int pendconn_process_next_strm(struct server *srv, struct proxy *px)
use_pp:
/* Let's switch from the server pendconn to the proxy pendconn */
__pendconn_unlink_prx(pp);
_HA_ATOMIC_SUB(&px->nbpend, 1);
_HA_ATOMIC_SUB(&px->totpend, 1);
_HA_ATOMIC_DEC(&px->nbpend);
_HA_ATOMIC_DEC(&px->totpend);
px->queue_idx++;
p = pp;
goto unlinked;
use_p:
__pendconn_unlink_srv(p);
_HA_ATOMIC_SUB(&srv->nbpend, 1);
_HA_ATOMIC_SUB(&px->totpend, 1);
_HA_ATOMIC_DEC(&srv->nbpend);
_HA_ATOMIC_DEC(&px->totpend);
srv->queue_idx++;
unlinked:
p->strm_flags |= SF_ASSIGNED;
p->target = srv;

_HA_ATOMIC_ADD(&srv->served, 1);
_HA_ATOMIC_ADD(&srv->proxy->served, 1);
_HA_ATOMIC_INC(&srv->served);
_HA_ATOMIC_INC(&srv->proxy->served);
__ha_barrier_atomic_store();
if (px->lbprm.server_take_conn)
px->lbprm.server_take_conn(srv, 1);
@ -431,7 +431,7 @@ struct pendconn *pendconn_add(struct stream *strm)
}
strm->pend_pos = p;

_HA_ATOMIC_ADD(&px->totpend, 1);
_HA_ATOMIC_INC(&px->totpend);
return p;
}

src/ring.c
@ -237,9 +237,9 @@ void ring_detach_appctx(struct ring *ring, struct appctx *appctx, size_t ofs)
ofs -= ring->ofs;
BUG_ON(ofs >= b_size(&ring->buf));
LIST_DEL_INIT(&appctx->wait_entry);
HA_ATOMIC_SUB(b_peek(&ring->buf, ofs), 1);
HA_ATOMIC_DEC(b_peek(&ring->buf, ofs));
}
HA_ATOMIC_SUB(&ring->readers_count, 1);
HA_ATOMIC_DEC(&ring->readers_count);
HA_RWLOCK_WRUNLOCK(LOGSRV_LOCK, &ring->lock);
}

@ -308,7 +308,7 @@ int cli_io_handler_show_ring(struct appctx *appctx)
if (appctx->ctx.cli.i0 & 2)
ofs += b_data(buf) - 1;

HA_ATOMIC_ADD(b_peek(buf, ofs), 1);
HA_ATOMIC_INC(b_peek(buf, ofs));
ofs += ring->ofs;
}

@ -317,7 +317,7 @@ int cli_io_handler_show_ring(struct appctx *appctx)
*/
ofs -= ring->ofs;
BUG_ON(ofs >= buf->size);
HA_ATOMIC_SUB(b_peek(buf, ofs), 1);
HA_ATOMIC_DEC(b_peek(buf, ofs));

/* in this loop, ofs always points to the counter byte that precedes
* the message so that we can take our reference there if we have to
@ -351,7 +351,7 @@ int cli_io_handler_show_ring(struct appctx *appctx)
ofs += cnt + msg_len;
}

HA_ATOMIC_ADD(b_peek(buf, ofs), 1);
HA_ATOMIC_INC(b_peek(buf, ofs));
ofs += ring->ofs;
appctx->ctx.cli.o0 = ofs;
HA_RWLOCK_RDUNLOCK(LOGSRV_LOCK, &ring->lock);
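Note: as the comment in the last hunk says, ofs always points at the one-byte counter that precedes each message; a reader pins its position by bumping that byte and drops the reference before moving on, which is why these ADD/SUB-by-1 calls on b_peek() results became INC/DEC. A minimal sketch of that in-band reference count, assuming C11 atomics:

#include <stdatomic.h>
#include <stdint.h>

/* toy model: one reader parked on the counter byte at its offset */
static inline void ring_pin(_Atomic uint8_t *cnt)
{
	atomic_fetch_add(cnt, 1);  /* writers must not recycle this spot */
}

static inline void ring_unpin(_Atomic uint8_t *cnt)
{
	atomic_fetch_sub(cnt, 1);  /* done here, spot may be recycled */
}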
@ -50,8 +50,8 @@ struct session *session_new(struct proxy *fe, struct listener *li, enum obj_type
sess->task = NULL;
sess->t_handshake = -1; /* handshake not done yet */
sess->t_idle = -1;
_HA_ATOMIC_ADD(&totalconn, 1);
_HA_ATOMIC_ADD(&jobs, 1);
_HA_ATOMIC_INC(&totalconn);
_HA_ATOMIC_INC(&jobs);
LIST_INIT(&sess->srv_list);
sess->idle_conns = 0;
sess->flags = SESS_FL_NONE;
@ -90,7 +90,7 @@ void session_free(struct session *sess)
pool_free(pool_head_sess_srv_list, srv_list);
}
pool_free(pool_head_session, sess);
_HA_ATOMIC_SUB(&jobs, 1);
_HA_ATOMIC_DEC(&jobs);
}

/* callback used from the connection/mux layer to notify that a connection is
@ -118,7 +118,7 @@ static void session_count_new(struct session *sess)

ptr = stktable_data_ptr(stkctr->table, stkctr_entry(stkctr), STKTABLE_DT_SESS_CNT);
if (ptr)
HA_ATOMIC_ADD(&stktable_data_cast(ptr, sess_cnt), 1);
HA_ATOMIC_INC(&stktable_data_cast(ptr, sess_cnt));

ptr = stktable_data_ptr(stkctr->table, stkctr_entry(stkctr), STKTABLE_DT_SESS_RATE);
if (ptr)
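Note: session_new()/session_free() show the lifetime-pairing rule most of these counters follow: whatever is INCed on the creation path must be DECed exactly once on the destruction path, while cumulative counters like totalconn are never decremented. A toy version (illustrative names, C11 atomics):

#include <stdatomic.h>

static atomic_int toy_jobs, toy_totalconn;

static void toy_session_new(void)
{
	atomic_fetch_add(&toy_totalconn, 1);  /* cumulative, never decremented */
	atomic_fetch_add(&toy_jobs, 1);       /* live count, must stay balanced */
}

static void toy_session_free(void)
{
	atomic_fetch_sub(&toy_jobs, 1);
}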
12 src/sink.c
@ -357,7 +357,7 @@ static void sink_forward_io_handler(struct appctx *appctx)
if (unlikely(ofs == ~0)) {
ofs = 0;

HA_ATOMIC_ADD(b_peek(buf, ofs), 1);
HA_ATOMIC_INC(b_peek(buf, ofs));
ofs += ring->ofs;
}

@ -371,7 +371,7 @@ static void sink_forward_io_handler(struct appctx *appctx)
*/
ofs -= ring->ofs;
BUG_ON(ofs >= buf->size);
HA_ATOMIC_SUB(b_peek(buf, ofs), 1);
HA_ATOMIC_DEC(b_peek(buf, ofs));

ret = 1;
while (ofs + 1 < b_data(buf)) {
@ -401,7 +401,7 @@ static void sink_forward_io_handler(struct appctx *appctx)
ofs += cnt + msg_len;
}

HA_ATOMIC_ADD(b_peek(buf, ofs), 1);
HA_ATOMIC_INC(b_peek(buf, ofs));
ofs += ring->ofs;
sft->ofs = ofs;
}
@ -497,7 +497,7 @@ static void sink_forward_oc_io_handler(struct appctx *appctx)
if (unlikely(ofs == ~0)) {
ofs = 0;

HA_ATOMIC_ADD(b_peek(buf, ofs), 1);
HA_ATOMIC_INC(b_peek(buf, ofs));
ofs += ring->ofs;
}

@ -511,7 +511,7 @@ static void sink_forward_oc_io_handler(struct appctx *appctx)
*/
ofs -= ring->ofs;
BUG_ON(ofs >= buf->size);
HA_ATOMIC_SUB(b_peek(buf, ofs), 1);
HA_ATOMIC_DEC(b_peek(buf, ofs));

ret = 1;
while (ofs + 1 < b_data(buf)) {
@ -545,7 +545,7 @@ static void sink_forward_oc_io_handler(struct appctx *appctx)
ofs += cnt + msg_len;
}

HA_ATOMIC_ADD(b_peek(buf, ofs), 1);
HA_ATOMIC_INC(b_peek(buf, ofs));
ofs += ring->ofs;
sft->ofs = ofs;
}
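Note: the ofs -= ring->ofs / ofs += ring->ofs pair in every sink hunk translates between the absolute position a forwarder stores across calls and an index into the current buffer, whose origin advances as old data is released. A hedged sketch of that rebase (toy type; the real fields differ):

#include <stddef.h>

struct toy_ring { size_t ofs; /* bytes already released; origin of the buffer */ };

static size_t abs_to_buf(const struct toy_ring *r, size_t abs) { return abs - r->ofs; }
static size_t buf_to_abs(const struct toy_ring *r, size_t buf) { return buf + r->ofs; }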
@ -752,8 +752,8 @@ void ssl_async_fd_free(int fd)

/* Now we can safely call SSL_free, no more pending job in engines */
SSL_free(ssl);
_HA_ATOMIC_SUB(&sslconns, 1);
_HA_ATOMIC_SUB(&jobs, 1);
_HA_ATOMIC_DEC(&sslconns);
_HA_ATOMIC_DEC(&jobs);
}
/*
* function used to manage a returned SSL_ERROR_WANT_ASYNC
@ -5312,8 +5312,8 @@ static int ssl_sock_init(struct connection *conn, void **xprt_ctx)
/* leave init state and start handshake */
conn->flags |= CO_FL_SSL_WAIT_HS | CO_FL_WAIT_L6_CONN;

_HA_ATOMIC_ADD(&sslconns, 1);
_HA_ATOMIC_ADD(&totalsslconns, 1);
_HA_ATOMIC_INC(&sslconns);
_HA_ATOMIC_INC(&totalsslconns);
*xprt_ctx = ctx;
return 0;
}
@ -5345,8 +5345,8 @@ static int ssl_sock_init(struct connection *conn, void **xprt_ctx)
conn->flags |= CO_FL_EARLY_SSL_HS;
#endif

_HA_ATOMIC_ADD(&sslconns, 1);
_HA_ATOMIC_ADD(&totalsslconns, 1);
_HA_ATOMIC_INC(&sslconns);
_HA_ATOMIC_INC(&totalsslconns);
*xprt_ctx = ctx;
return 0;
}
@ -6253,7 +6253,7 @@ static void ssl_sock_close(struct connection *conn, void *xprt_ctx) {
}
tasklet_free(ctx->wait_event.tasklet);
pool_free(ssl_sock_ctx_pool, ctx);
_HA_ATOMIC_ADD(&jobs, 1);
_HA_ATOMIC_INC(&jobs);
return;
}
/* Else we can remove the fds from the fdtab
@ -6270,7 +6270,7 @@ static void ssl_sock_close(struct connection *conn, void *xprt_ctx) {
b_free(&ctx->early_buf);
tasklet_free(ctx->wait_event.tasklet);
pool_free(ssl_sock_ctx_pool, ctx);
_HA_ATOMIC_SUB(&sslconns, 1);
_HA_ATOMIC_DEC(&sslconns);
}
}
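Note: the sslconns/jobs bookkeeping stays balanced across two asymmetric paths visible here: the synchronous close DECs sslconns directly, while the async-engine path INCs jobs in ssl_sock_close() and leaves the final DEC of both counters to ssl_async_fd_free() once no job is pending in the engine. A toy sketch of that hand-off (names illustrative):

#include <stdatomic.h>
#include <stdbool.h>

static atomic_int toy_sslconns, toy_jobs;

static void toy_close(bool async_pending)
{
	if (async_pending) {
		atomic_fetch_add(&toy_jobs, 1);  /* engine still owns the ctx */
		return;                          /* toy_async_done() finishes later */
	}
	atomic_fetch_sub(&toy_sslconns, 1);
}

static void toy_async_done(void)
{
	atomic_fetch_sub(&toy_sslconns, 1);
	atomic_fetch_sub(&toy_jobs, 1);
}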
110 src/stream.c
@ -620,7 +620,7 @@ static void stream_free(struct stream *s)
if (objt_server(s->target)) { /* there may be requests left pending in queue */
if (s->flags & SF_CURR_SESS) {
s->flags &= ~SF_CURR_SESS;
_HA_ATOMIC_SUB(&__objt_server(s->target)->cur_sess, 1);
_HA_ATOMIC_DEC(&__objt_server(s->target)->cur_sess);
}
if (may_dequeue_tasks(objt_server(s->target), s->be))
process_srv_queue(objt_server(s->target));
@ -969,9 +969,9 @@ static void sess_set_term_flags(struct stream *s)
if (!(s->flags & SF_FINST_MASK)) {
if (s->si[1].state == SI_ST_INI) {
/* anything before REQ in fact */
_HA_ATOMIC_ADD(&strm_fe(s)->fe_counters.failed_req, 1);
_HA_ATOMIC_INC(&strm_fe(s)->fe_counters.failed_req);
if (strm_li(s) && strm_li(s)->counters)
_HA_ATOMIC_ADD(&strm_li(s)->counters->failed_req, 1);
_HA_ATOMIC_INC(&strm_li(s)->counters->failed_req);

s->flags |= SF_FINST_R;
}
@ -1029,7 +1029,7 @@ enum act_return process_use_service(struct act_rule *rule, struct proxy *px,

if (rule->from != ACT_F_HTTP_REQ) {
if (sess->fe == s->be) /* report it if the request was intercepted by the frontend */
_HA_ATOMIC_ADD(&sess->fe->fe_counters.intercepted_req, 1);
_HA_ATOMIC_INC(&sess->fe->fe_counters.intercepted_req);

/* The flag SF_ASSIGNED prevent from server assignment. */
s->flags |= SF_ASSIGNED;
@ -1732,12 +1732,12 @@ struct task *process_stream(struct task *t, void *context, unsigned int state)
si_shutw(si_f);
si_report_error(si_f);
if (!(req->analysers) && !(res->analysers)) {
_HA_ATOMIC_ADD(&s->be->be_counters.cli_aborts, 1);
_HA_ATOMIC_ADD(&sess->fe->fe_counters.cli_aborts, 1);
_HA_ATOMIC_INC(&s->be->be_counters.cli_aborts);
_HA_ATOMIC_INC(&sess->fe->fe_counters.cli_aborts);
if (sess->listener && sess->listener->counters)
_HA_ATOMIC_ADD(&sess->listener->counters->cli_aborts, 1);
_HA_ATOMIC_INC(&sess->listener->counters->cli_aborts);
if (srv)
_HA_ATOMIC_ADD(&srv->counters.cli_aborts, 1);
_HA_ATOMIC_INC(&srv->counters.cli_aborts);
if (!(s->flags & SF_ERR_MASK))
s->flags |= SF_ERR_CLICL;
if (!(s->flags & SF_FINST_MASK))
@ -1751,16 +1751,16 @@ struct task *process_stream(struct task *t, void *context, unsigned int state)
si_shutr(si_b);
si_shutw(si_b);
si_report_error(si_b);
_HA_ATOMIC_ADD(&s->be->be_counters.failed_resp, 1);
_HA_ATOMIC_INC(&s->be->be_counters.failed_resp);
if (srv)
_HA_ATOMIC_ADD(&srv->counters.failed_resp, 1);
_HA_ATOMIC_INC(&srv->counters.failed_resp);
if (!(req->analysers) && !(res->analysers)) {
_HA_ATOMIC_ADD(&s->be->be_counters.srv_aborts, 1);
_HA_ATOMIC_ADD(&sess->fe->fe_counters.srv_aborts, 1);
_HA_ATOMIC_INC(&s->be->be_counters.srv_aborts);
_HA_ATOMIC_INC(&sess->fe->fe_counters.srv_aborts);
if (sess->listener && sess->listener->counters)
_HA_ATOMIC_ADD(&sess->listener->counters->srv_aborts, 1);
_HA_ATOMIC_INC(&sess->listener->counters->srv_aborts);
if (srv)
_HA_ATOMIC_ADD(&srv->counters.srv_aborts, 1);
_HA_ATOMIC_INC(&srv->counters.srv_aborts);
if (!(s->flags & SF_ERR_MASK))
s->flags |= SF_ERR_SRVCL;
if (!(s->flags & SF_FINST_MASK))
@ -1811,7 +1811,7 @@ struct task *process_stream(struct task *t, void *context, unsigned int state)
if (srv) {
if (s->flags & SF_CURR_SESS) {
s->flags &= ~SF_CURR_SESS;
_HA_ATOMIC_SUB(&srv->cur_sess, 1);
_HA_ATOMIC_DEC(&srv->cur_sess);
}
sess_change_server(s, NULL);
if (may_dequeue_tasks(srv, s->be))
@ -2010,39 +2010,39 @@ struct task *process_stream(struct task *t, void *context, unsigned int state)
/* Report it if the client got an error or a read timeout expired */
req->analysers = 0;
if (req->flags & CF_READ_ERROR) {
_HA_ATOMIC_ADD(&s->be->be_counters.cli_aborts, 1);
_HA_ATOMIC_ADD(&sess->fe->fe_counters.cli_aborts, 1);
_HA_ATOMIC_INC(&s->be->be_counters.cli_aborts);
_HA_ATOMIC_INC(&sess->fe->fe_counters.cli_aborts);
if (sess->listener && sess->listener->counters)
_HA_ATOMIC_ADD(&sess->listener->counters->cli_aborts, 1);
_HA_ATOMIC_INC(&sess->listener->counters->cli_aborts);
if (srv)
_HA_ATOMIC_ADD(&srv->counters.cli_aborts, 1);
_HA_ATOMIC_INC(&srv->counters.cli_aborts);
s->flags |= SF_ERR_CLICL;
}
else if (req->flags & CF_READ_TIMEOUT) {
_HA_ATOMIC_ADD(&s->be->be_counters.cli_aborts, 1);
_HA_ATOMIC_ADD(&sess->fe->fe_counters.cli_aborts, 1);
_HA_ATOMIC_INC(&s->be->be_counters.cli_aborts);
_HA_ATOMIC_INC(&sess->fe->fe_counters.cli_aborts);
if (sess->listener && sess->listener->counters)
_HA_ATOMIC_ADD(&sess->listener->counters->cli_aborts, 1);
_HA_ATOMIC_INC(&sess->listener->counters->cli_aborts);
if (srv)
_HA_ATOMIC_ADD(&srv->counters.cli_aborts, 1);
_HA_ATOMIC_INC(&srv->counters.cli_aborts);
s->flags |= SF_ERR_CLITO;
}
else if (req->flags & CF_WRITE_ERROR) {
_HA_ATOMIC_ADD(&s->be->be_counters.srv_aborts, 1);
_HA_ATOMIC_ADD(&sess->fe->fe_counters.srv_aborts, 1);
_HA_ATOMIC_INC(&s->be->be_counters.srv_aborts);
_HA_ATOMIC_INC(&sess->fe->fe_counters.srv_aborts);
if (sess->listener && sess->listener->counters)
_HA_ATOMIC_ADD(&sess->listener->counters->srv_aborts, 1);
_HA_ATOMIC_INC(&sess->listener->counters->srv_aborts);
if (srv)
_HA_ATOMIC_ADD(&srv->counters.srv_aborts, 1);
_HA_ATOMIC_INC(&srv->counters.srv_aborts);
s->flags |= SF_ERR_SRVCL;
}
else {
_HA_ATOMIC_ADD(&s->be->be_counters.srv_aborts, 1);
_HA_ATOMIC_ADD(&sess->fe->fe_counters.srv_aborts, 1);
_HA_ATOMIC_INC(&s->be->be_counters.srv_aborts);
_HA_ATOMIC_INC(&sess->fe->fe_counters.srv_aborts);
if (sess->listener && sess->listener->counters)
_HA_ATOMIC_ADD(&sess->listener->counters->srv_aborts, 1);
_HA_ATOMIC_INC(&sess->listener->counters->srv_aborts);
if (srv)
_HA_ATOMIC_ADD(&srv->counters.srv_aborts, 1);
_HA_ATOMIC_INC(&srv->counters.srv_aborts);
s->flags |= SF_ERR_SRVTO;
}
sess_set_term_flags(s);
@ -2064,39 +2064,39 @@ struct task *process_stream(struct task *t, void *context, unsigned int state)
/* Report it if the server got an error or a read timeout expired */
res->analysers = 0;
if (res->flags & CF_READ_ERROR) {
_HA_ATOMIC_ADD(&s->be->be_counters.srv_aborts, 1);
_HA_ATOMIC_ADD(&sess->fe->fe_counters.srv_aborts, 1);
_HA_ATOMIC_INC(&s->be->be_counters.srv_aborts);
_HA_ATOMIC_INC(&sess->fe->fe_counters.srv_aborts);
if (sess->listener && sess->listener->counters)
_HA_ATOMIC_ADD(&sess->listener->counters->srv_aborts, 1);
_HA_ATOMIC_INC(&sess->listener->counters->srv_aborts);
if (srv)
_HA_ATOMIC_ADD(&srv->counters.srv_aborts, 1);
_HA_ATOMIC_INC(&srv->counters.srv_aborts);
s->flags |= SF_ERR_SRVCL;
}
else if (res->flags & CF_READ_TIMEOUT) {
_HA_ATOMIC_ADD(&s->be->be_counters.srv_aborts, 1);
_HA_ATOMIC_ADD(&sess->fe->fe_counters.srv_aborts, 1);
_HA_ATOMIC_INC(&s->be->be_counters.srv_aborts);
_HA_ATOMIC_INC(&sess->fe->fe_counters.srv_aborts);
if (sess->listener && sess->listener->counters)
_HA_ATOMIC_ADD(&sess->listener->counters->srv_aborts, 1);
_HA_ATOMIC_INC(&sess->listener->counters->srv_aborts);
if (srv)
_HA_ATOMIC_ADD(&srv->counters.srv_aborts, 1);
_HA_ATOMIC_INC(&srv->counters.srv_aborts);
s->flags |= SF_ERR_SRVTO;
}
else if (res->flags & CF_WRITE_ERROR) {
_HA_ATOMIC_ADD(&s->be->be_counters.cli_aborts, 1);
_HA_ATOMIC_ADD(&sess->fe->fe_counters.cli_aborts, 1);
_HA_ATOMIC_INC(&s->be->be_counters.cli_aborts);
_HA_ATOMIC_INC(&sess->fe->fe_counters.cli_aborts);
if (sess->listener && sess->listener->counters)
_HA_ATOMIC_ADD(&sess->listener->counters->cli_aborts, 1);
_HA_ATOMIC_INC(&sess->listener->counters->cli_aborts);
if (srv)
_HA_ATOMIC_ADD(&srv->counters.cli_aborts, 1);
_HA_ATOMIC_INC(&srv->counters.cli_aborts);
s->flags |= SF_ERR_CLICL;
}
else {
_HA_ATOMIC_ADD(&s->be->be_counters.cli_aborts, 1);
_HA_ATOMIC_ADD(&sess->fe->fe_counters.cli_aborts, 1);
_HA_ATOMIC_INC(&s->be->be_counters.cli_aborts);
_HA_ATOMIC_INC(&sess->fe->fe_counters.cli_aborts);
if (sess->listener && sess->listener->counters)
_HA_ATOMIC_ADD(&sess->listener->counters->cli_aborts, 1);
_HA_ATOMIC_INC(&sess->listener->counters->cli_aborts);
if (srv)
_HA_ATOMIC_ADD(&srv->counters.cli_aborts, 1);
_HA_ATOMIC_INC(&srv->counters.cli_aborts);
s->flags |= SF_ERR_CLITO;
}
sess_set_term_flags(s);
@ -2489,7 +2489,7 @@ struct task *process_stream(struct task *t, void *context, unsigned int state)
DBG_TRACE_DEVEL("releasing", STRM_EV_STRM_PROC, s);

if (s->flags & SF_BE_ASSIGNED)
_HA_ATOMIC_SUB(&s->be->beconn, 1);
_HA_ATOMIC_DEC(&s->be->beconn);

if (unlikely((global.mode & MODE_DEBUG) &&
(!(global.mode & MODE_QUIET) || (global.mode & MODE_VERBOSE)))) {
@ -2513,12 +2513,12 @@ struct task *process_stream(struct task *t, void *context, unsigned int state)
n = 0;

if (sess->fe->mode == PR_MODE_HTTP) {
_HA_ATOMIC_ADD(&sess->fe->fe_counters.p.http.rsp[n], 1);
_HA_ATOMIC_INC(&sess->fe->fe_counters.p.http.rsp[n]);
}
if ((s->flags & SF_BE_ASSIGNED) &&
(s->be->mode == PR_MODE_HTTP)) {
_HA_ATOMIC_ADD(&s->be->be_counters.p.http.rsp[n], 1);
_HA_ATOMIC_ADD(&s->be->be_counters.p.http.cum_req, 1);
_HA_ATOMIC_INC(&s->be->be_counters.p.http.rsp[n]);
_HA_ATOMIC_INC(&s->be->be_counters.p.http.cum_req);
}
}

@ -2611,8 +2611,8 @@ void sess_change_server(struct stream *strm, struct server *newsrv)
return;

if (oldsrv) {
_HA_ATOMIC_SUB(&oldsrv->served, 1);
_HA_ATOMIC_SUB(&oldsrv->proxy->served, 1);
_HA_ATOMIC_DEC(&oldsrv->served);
_HA_ATOMIC_DEC(&oldsrv->proxy->served);
__ha_barrier_atomic_store();
if (oldsrv->proxy->lbprm.server_drop_conn)
oldsrv->proxy->lbprm.server_drop_conn(oldsrv, 0);
@ -2620,8 +2620,8 @@ void sess_change_server(struct stream *strm, struct server *newsrv)
}

if (newsrv) {
_HA_ATOMIC_ADD(&newsrv->served, 1);
_HA_ATOMIC_ADD(&newsrv->proxy->served, 1);
_HA_ATOMIC_INC(&newsrv->served);
_HA_ATOMIC_INC(&newsrv->proxy->served);
__ha_barrier_atomic_store();
if (newsrv->proxy->lbprm.server_take_conn)
newsrv->proxy->lbprm.server_take_conn(newsrv, 0);
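Note: sess_change_server() updates the served counters and then issues __ha_barrier_atomic_store() before invoking the load-balancing callbacks, so the callbacks observe the new counts. A rough C11 equivalent using an explicit release fence (toy names; the real barrier macro is architecture-specific):

#include <stdatomic.h>

static atomic_int srv_served, px_served;

static void toy_take_conn(void (*lb_take_conn)(void))
{
	atomic_fetch_add(&srv_served, 1);
	atomic_fetch_add(&px_served, 1);
	atomic_thread_fence(memory_order_release);  /* publish before the callback runs */
	if (lb_take_conn)
		lb_take_conn();
}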
24 src/task.c
@ -94,8 +94,8 @@ void task_kill(struct task *t)
/* Beware: tasks that have never run don't have their ->list empty yet! */
MT_LIST_ADDQ(&task_per_thread[thr].shared_tasklet_list,
(struct mt_list *)&((struct tasklet *)t)->list);
_HA_ATOMIC_ADD(&task_per_thread[thr].rq_total, 1);
_HA_ATOMIC_ADD(&task_per_thread[thr].tasks_in_list, 1);
_HA_ATOMIC_INC(&task_per_thread[thr].rq_total);
_HA_ATOMIC_INC(&task_per_thread[thr].tasks_in_list);
if (sleeping_thread_mask & (1UL << thr)) {
_HA_ATOMIC_AND(&sleeping_thread_mask, ~(1UL << thr));
wake_thread(thr);
@ -135,11 +135,11 @@ void __tasklet_wakeup_on(struct tasklet *tl, int thr)
LIST_ADDQ(&sched->tasklets[sched->current_queue], &tl->list);
sched->tl_class_mask |= 1 << sched->current_queue;
}
_HA_ATOMIC_ADD(&sched->rq_total, 1);
_HA_ATOMIC_INC(&sched->rq_total);
} else {
/* this tasklet runs on a specific thread. */
MT_LIST_ADDQ(&task_per_thread[thr].shared_tasklet_list, (struct mt_list *)&tl->list);
_HA_ATOMIC_ADD(&task_per_thread[thr].rq_total, 1);
_HA_ATOMIC_INC(&task_per_thread[thr].rq_total);
if (sleeping_thread_mask & (1UL << thr)) {
_HA_ATOMIC_AND(&sleeping_thread_mask, ~(1UL << thr));
wake_thread(thr);
@ -163,7 +163,7 @@ void __task_wakeup(struct task *t)
if (t->thread_mask != tid_bit && global.nbthread != 1) {
root = &rqueue;

_HA_ATOMIC_ADD(&grq_total, 1);
_HA_ATOMIC_INC(&grq_total);
HA_SPIN_LOCK(TASK_RQ_LOCK, &rq_lock);

global_tasks_mask |= t->thread_mask;
@ -172,14 +172,14 @@ void __task_wakeup(struct task *t)
} else
#endif
{
_HA_ATOMIC_ADD(&sched->rq_total, 1);
_HA_ATOMIC_INC(&sched->rq_total);
t->rq.key = ++sched->rqueue_ticks;
}

if (likely(t->nice)) {
int offset;

_HA_ATOMIC_ADD(&niced_tasks, 1);
_HA_ATOMIC_INC(&niced_tasks);
offset = t->nice * (int)global.tune.runqueue_depth;
t->rq.key += offset;
}
@ -496,7 +496,7 @@ unsigned int run_tasks_from_lists(unsigned int budgets[])
t->calls++;
sched->current = t;

_HA_ATOMIC_SUB(&sched->rq_total, 1);
_HA_ATOMIC_DEC(&sched->rq_total);

if (state & TASK_F_TASKLET) {
uint64_t before = 0;
@ -521,7 +521,7 @@ unsigned int run_tasks_from_lists(unsigned int budgets[])
process(t, ctx, state);

if (unlikely(task_profiling_mask & tid_bit)) {
HA_ATOMIC_ADD(&profile_entry->calls, 1);
HA_ATOMIC_INC(&profile_entry->calls);
HA_ATOMIC_ADD(&profile_entry->cpu_time, now_mono_time() - before);
}

@ -538,7 +538,7 @@ unsigned int run_tasks_from_lists(unsigned int budgets[])

/* OK then this is a regular task */

_HA_ATOMIC_SUB(&task_per_thread[tid].tasks_in_list, 1);
_HA_ATOMIC_DEC(&task_per_thread[tid].tasks_in_list);
if (unlikely(t->call_date)) {
uint64_t now_ns = now_mono_time();
uint64_t lat = now_ns - t->call_date;
@ -547,7 +547,7 @@ unsigned int run_tasks_from_lists(unsigned int budgets[])
t->call_date = now_ns;
profile_entry = sched_activity_entry(sched_activity, t->process);
HA_ATOMIC_ADD(&profile_entry->lat_time, lat);
HA_ATOMIC_ADD(&profile_entry->calls, 1);
HA_ATOMIC_INC(&profile_entry->calls);
}

__ha_barrier_store();
@ -763,7 +763,7 @@ void process_runnable_tasks()
}
#endif
if (t->nice)
_HA_ATOMIC_SUB(&niced_tasks, 1);
_HA_ATOMIC_DEC(&niced_tasks);

/* Add it to the local task list */
LIST_ADDQ(&tt->tasklets[TL_NORMAL], &((struct tasklet *)t)->list);
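Note: task_kill() and __tasklet_wakeup_on() share the wake-up idiom seen above: test the target thread's bit in sleeping_thread_mask, atomically clear it, then wake the thread. A sketch with C11 atomics (toy names; as in the original, two racing wakers may both issue the wakeup, which is harmless):

#include <stdatomic.h>

static atomic_ulong toy_sleeping_mask;

static void toy_wake_if_sleeping(int thr, void (*wake)(int))
{
	unsigned long bit = 1UL << thr;

	if (atomic_load(&toy_sleeping_mask) & bit) {
		atomic_fetch_and(&toy_sleeping_mask, ~bit);  /* clear before waking */
		wake(thr);
	}
}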
@ -222,16 +222,16 @@ static enum act_return tcp_exec_action_silent_drop(struct act_rule *rule, struct
strm->req.analysers &= AN_REQ_FLT_END;
strm->res.analysers &= AN_RES_FLT_END;
if (strm->flags & SF_BE_ASSIGNED)
_HA_ATOMIC_ADD(&strm->be->be_counters.denied_req, 1);
_HA_ATOMIC_INC(&strm->be->be_counters.denied_req);
if (!(strm->flags & SF_ERR_MASK))
strm->flags |= SF_ERR_PRXCOND;
if (!(strm->flags & SF_FINST_MASK))
strm->flags |= SF_FINST_R;
}

_HA_ATOMIC_ADD(&sess->fe->fe_counters.denied_req, 1);
_HA_ATOMIC_INC(&sess->fe->fe_counters.denied_req);
if (sess->listener && sess->listener->counters)
_HA_ATOMIC_ADD(&sess->listener->counters->denied_req, 1);
_HA_ATOMIC_INC(&sess->listener->counters->denied_req);

return ACT_RET_ABRT;
}
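Note: this hunk shows the counter pattern repeated throughout the deny paths: frontend (and backend, when assigned) counters always exist, while listener counters are optional and sit behind a NULL check. A condensed sketch of the idiom (toy types, C11 atomics):

#include <stdatomic.h>

struct toy_counters { atomic_long denied_req; };

static void toy_count_denied(struct toy_counters *fe, struct toy_counters *li)
{
	atomic_fetch_add(&fe->denied_req, 1);
	if (li)                               /* listener stats may be absent */
		atomic_fetch_add(&li->denied_req, 1);
}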
@ -209,23 +209,23 @@ int tcp_inspect_request(struct stream *s, struct channel *req, int an_bit)
return 0;

deny:
_HA_ATOMIC_ADD(&sess->fe->fe_counters.denied_req, 1);
_HA_ATOMIC_INC(&sess->fe->fe_counters.denied_req);
if (sess->listener && sess->listener->counters)
_HA_ATOMIC_ADD(&sess->listener->counters->denied_req, 1);
_HA_ATOMIC_INC(&sess->listener->counters->denied_req);
goto reject;

internal:
_HA_ATOMIC_ADD(&sess->fe->fe_counters.internal_errors, 1);
_HA_ATOMIC_INC(&sess->fe->fe_counters.internal_errors);
if (sess->listener && sess->listener->counters)
_HA_ATOMIC_ADD(&sess->listener->counters->internal_errors, 1);
_HA_ATOMIC_INC(&sess->listener->counters->internal_errors);
if (!(s->flags & SF_ERR_MASK))
s->flags |= SF_ERR_INTERNAL;
goto reject;

invalid:
_HA_ATOMIC_ADD(&sess->fe->fe_counters.failed_req, 1);
_HA_ATOMIC_INC(&sess->fe->fe_counters.failed_req);
if (sess->listener && sess->listener->counters)
_HA_ATOMIC_ADD(&sess->listener->counters->failed_req, 1);
_HA_ATOMIC_INC(&sess->listener->counters->failed_req);

reject:
si_must_kill_conn(chn_prod(req));
@ -371,29 +371,29 @@ int tcp_inspect_response(struct stream *s, struct channel *rep, int an_bit)
return 0;

deny:
_HA_ATOMIC_ADD(&s->sess->fe->fe_counters.denied_resp, 1);
_HA_ATOMIC_ADD(&s->be->be_counters.denied_resp, 1);
_HA_ATOMIC_INC(&s->sess->fe->fe_counters.denied_resp);
_HA_ATOMIC_INC(&s->be->be_counters.denied_resp);
if (s->sess->listener && s->sess->listener->counters)
_HA_ATOMIC_ADD(&s->sess->listener->counters->denied_resp, 1);
_HA_ATOMIC_INC(&s->sess->listener->counters->denied_resp);
if (objt_server(s->target))
_HA_ATOMIC_ADD(&__objt_server(s->target)->counters.denied_resp, 1);
_HA_ATOMIC_INC(&__objt_server(s->target)->counters.denied_resp);
goto reject;

internal:
_HA_ATOMIC_ADD(&s->sess->fe->fe_counters.internal_errors, 1);
_HA_ATOMIC_ADD(&s->be->be_counters.internal_errors, 1);
_HA_ATOMIC_INC(&s->sess->fe->fe_counters.internal_errors);
_HA_ATOMIC_INC(&s->be->be_counters.internal_errors);
if (s->sess->listener && s->sess->listener->counters)
_HA_ATOMIC_ADD(&s->sess->listener->counters->internal_errors, 1);
_HA_ATOMIC_INC(&s->sess->listener->counters->internal_errors);
if (objt_server(s->target))
_HA_ATOMIC_ADD(&__objt_server(s->target)->counters.internal_errors, 1);
_HA_ATOMIC_INC(&__objt_server(s->target)->counters.internal_errors);
if (!(s->flags & SF_ERR_MASK))
s->flags |= SF_ERR_INTERNAL;
goto reject;

invalid:
_HA_ATOMIC_ADD(&s->be->be_counters.failed_resp, 1);
_HA_ATOMIC_INC(&s->be->be_counters.failed_resp);
if (objt_server(s->target))
_HA_ATOMIC_ADD(&__objt_server(s->target)->counters.failed_resp, 1);
_HA_ATOMIC_INC(&__objt_server(s->target)->counters.failed_resp);

reject:
si_must_kill_conn(chn_prod(rep));
@ -468,9 +468,9 @@ int tcp_exec_l4_rules(struct session *sess)
goto end;
}
else if (rule->action == ACT_ACTION_DENY) {
_HA_ATOMIC_ADD(&sess->fe->fe_counters.denied_conn, 1);
_HA_ATOMIC_INC(&sess->fe->fe_counters.denied_conn);
if (sess->listener && sess->listener->counters)
_HA_ATOMIC_ADD(&sess->listener->counters->denied_conn, 1);
_HA_ATOMIC_INC(&sess->listener->counters->denied_conn);

result = 0;
goto end;
@ -553,9 +553,9 @@ int tcp_exec_l5_rules(struct session *sess)
goto end;
}
else if (rule->action == ACT_ACTION_DENY) {
_HA_ATOMIC_ADD(&sess->fe->fe_counters.denied_sess, 1);
_HA_ATOMIC_INC(&sess->fe->fe_counters.denied_sess);
if (sess->listener && sess->listener->counters)
_HA_ATOMIC_ADD(&sess->listener->counters->denied_sess, 1);
_HA_ATOMIC_INC(&sess->listener->counters->denied_sess);

result = 0;
goto end;