MEDIUM: stats: avoid 1 indirection by storing the shared stats directly in counters struct

Between 3.2 and 3.3-dev we observed a noticeable performance regression
due to stats handling. After bisecting, Willy found that the recent
work to split stats computation across multiple thread groups (stats
sharding) was responsible for it. We're looking at roughly a 20%
performance loss.

More precisely, it is the added indirections, multiplied by the number
of statistics updated for each request, that end up causing a
significant amount of time to be spent resolving pointers.
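
To illustrate, here is what this looks like on a typical per-request
counter update (the two lines are taken verbatim from the hunks below):

    /* before: load the 'shared' pointer, then the tg[] slot, then the counter */
    _HA_ATOMIC_INC(&fe->fe_counters.shared->tg[tgid - 1]->cum_conn);

    /* after: 'shared' is embedded, so only the tg[] slot needs to be dereferenced */
    _HA_ATOMIC_INC(&fe->fe_counters.shared.tg[tgid - 1]->cum_conn);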

We noticed that the fe_counters_shared and be_counters_shared
structures, which have been allocated in dedicated memory since
a0dcab5c ("MAJOR: counters: add shared counters base infrastructure"),
are no longer huge since 16eb0fab31 ("MAJOR: counters: dispatch counters
over thread groups"): they now essentially hold flags plus the per
thread-group pointer mapping, not the counters themselves.

As such, we decided to try merging fe_counters_shared and
be_counters_shared into their parent structures. The cost is a slight
memory overhead for the parent structure, but it gets rid of one
pointer indirection. This patch alone yields visible performance gains
and almost restores 3.2 stats performance.
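
For reference, the structural change is minimal; as the hunks below
show, the shared member merely goes from a pointer to an embedded
struct (be_counters is changed the same way):

    /* before: shared counters allocated separately, reached through a pointer */
    struct fe_counters {
            struct fe_counters_shared *shared;   /* shared counters */
            ...
    };

    /* after: shared counters stored directly in the parent structure */
    struct fe_counters {
            struct fe_counters_shared shared;    /* shared counters */
            ...
    };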

counters_fe_shared_get() was renamed to counters_fe_shared_prepare()
(its backend counterpart became counters_be_shared_init()) and now
returns success or failure instead of a pointer: we no longer need to
retrieve a shared pointer, the function takes care of initializing the
caller's embedded structure in place.
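
Call sites are adjusted accordingly: instead of storing a returned
pointer and checking it against NULL, they pass the embedded structure
and test the return value. A simplified sketch of that pattern, based
on the check_config_validity() hunk below (error handling shortened):

    /* before */
    listener->counters->shared = counters_fe_shared_get(&listener->guid);
    if (!listener->counters->shared) {
            /* allocation failed: free and report the error */
    }

    /* after */
    if (!counters_fe_shared_prepare(&listener->counters->shared, &listener->guid)) {
            /* allocation failed: free and report the error */
    }
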
Author: Aurelien DARRAGON
Date:   2025-07-22 17:15:02 +02:00
Parent: 31adfb6c15
Commit: 75e480d107
29 changed files with 342 additions and 351 deletions

@ -86,7 +86,7 @@ static inline int be_usable_srv(struct proxy *be)
/* set the time of last session on the backend */
static inline void be_set_sess_last(struct proxy *be)
{
HA_ATOMIC_STORE(&be->be_counters.shared->tg[tgid - 1]->last_sess, ns_to_sec(now_ns));
HA_ATOMIC_STORE(&be->be_counters.shared.tg[tgid - 1]->last_sess, ns_to_sec(now_ns));
}
/* This function returns non-zero if the designated server will be

@ -92,7 +92,7 @@ struct fe_counters_shared {
};
struct fe_counters {
struct fe_counters_shared *shared; /* shared counters */
struct fe_counters_shared shared; /* shared counters */
unsigned int conn_max; /* max # of active sessions */
unsigned int cps_max; /* maximum of new connections received per second */
@ -145,7 +145,7 @@ struct be_counters_shared {
/* counters used by servers and backends */
struct be_counters {
struct be_counters_shared *shared; /* shared counters */
struct be_counters_shared shared; /* shared counters */
unsigned int conn_max; /* max # of active sessions */
unsigned int cps_max; /* maximum of new connections received per second */

@ -27,8 +27,8 @@
#include <haproxy/counters-t.h>
#include <haproxy/guid-t.h>
struct fe_counters_shared *counters_fe_shared_get(const struct guid_node *guid);
struct be_counters_shared *counters_be_shared_get(const struct guid_node *guid);
int counters_fe_shared_prepare(struct fe_counters_shared *counters, const struct guid_node *guid);
int counters_be_shared_init(struct be_counters_shared *counters, const struct guid_node *guid);
void counters_fe_shared_drop(struct fe_counters_shared *counters);
void counters_be_shared_drop(struct be_counters_shared *counters);

@ -135,10 +135,10 @@ static inline void proxy_reset_timeouts(struct proxy *proxy)
/* increase the number of cumulated connections received on the designated frontend */
static inline void proxy_inc_fe_conn_ctr(struct listener *l, struct proxy *fe)
{
_HA_ATOMIC_INC(&fe->fe_counters.shared->tg[tgid - 1]->cum_conn);
_HA_ATOMIC_INC(&fe->fe_counters.shared.tg[tgid - 1]->cum_conn);
if (l && l->counters)
_HA_ATOMIC_INC(&l->counters->shared->tg[tgid - 1]->cum_conn);
update_freq_ctr(&fe->fe_counters.shared->tg[tgid - 1]->conn_per_sec, 1);
_HA_ATOMIC_INC(&l->counters->shared.tg[tgid - 1]->cum_conn);
update_freq_ctr(&fe->fe_counters.shared.tg[tgid - 1]->conn_per_sec, 1);
HA_ATOMIC_UPDATE_MAX(&fe->fe_counters.cps_max,
update_freq_ctr(&fe->fe_counters._conn_per_sec, 1));
}
@ -147,10 +147,10 @@ static inline void proxy_inc_fe_conn_ctr(struct listener *l, struct proxy *fe)
static inline void proxy_inc_fe_sess_ctr(struct listener *l, struct proxy *fe)
{
_HA_ATOMIC_INC(&fe->fe_counters.shared->tg[tgid - 1]->cum_sess);
_HA_ATOMIC_INC(&fe->fe_counters.shared.tg[tgid - 1]->cum_sess);
if (l && l->counters)
_HA_ATOMIC_INC(&l->counters->shared->tg[tgid - 1]->cum_sess);
update_freq_ctr(&fe->fe_counters.shared->tg[tgid - 1]->sess_per_sec, 1);
_HA_ATOMIC_INC(&l->counters->shared.tg[tgid - 1]->cum_sess);
update_freq_ctr(&fe->fe_counters.shared.tg[tgid - 1]->sess_per_sec, 1);
HA_ATOMIC_UPDATE_MAX(&fe->fe_counters.sps_max,
update_freq_ctr(&fe->fe_counters._sess_per_sec, 1));
}
@ -162,19 +162,19 @@ static inline void proxy_inc_fe_cum_sess_ver_ctr(struct listener *l, struct prox
unsigned int http_ver)
{
if (http_ver == 0 ||
http_ver > sizeof(fe->fe_counters.shared->tg[tgid - 1]->cum_sess_ver) / sizeof(*fe->fe_counters.shared->tg[tgid - 1]->cum_sess_ver))
http_ver > sizeof(fe->fe_counters.shared.tg[tgid - 1]->cum_sess_ver) / sizeof(*fe->fe_counters.shared.tg[tgid - 1]->cum_sess_ver))
return;
_HA_ATOMIC_INC(&fe->fe_counters.shared->tg[tgid - 1]->cum_sess_ver[http_ver - 1]);
_HA_ATOMIC_INC(&fe->fe_counters.shared.tg[tgid - 1]->cum_sess_ver[http_ver - 1]);
if (l && l->counters)
_HA_ATOMIC_INC(&l->counters->shared->tg[tgid - 1]->cum_sess_ver[http_ver - 1]);
_HA_ATOMIC_INC(&l->counters->shared.tg[tgid - 1]->cum_sess_ver[http_ver - 1]);
}
/* increase the number of cumulated streams on the designated backend */
static inline void proxy_inc_be_ctr(struct proxy *be)
{
_HA_ATOMIC_INC(&be->be_counters.shared->tg[tgid - 1]->cum_sess);
update_freq_ctr(&be->be_counters.shared->tg[tgid - 1]->sess_per_sec, 1);
_HA_ATOMIC_INC(&be->be_counters.shared.tg[tgid - 1]->cum_sess);
update_freq_ctr(&be->be_counters.shared.tg[tgid - 1]->sess_per_sec, 1);
HA_ATOMIC_UPDATE_MAX(&be->be_counters.sps_max,
update_freq_ctr(&be->be_counters._sess_per_sec, 1));
}
@ -186,13 +186,13 @@ static inline void proxy_inc_be_ctr(struct proxy *be)
static inline void proxy_inc_fe_req_ctr(struct listener *l, struct proxy *fe,
unsigned int http_ver)
{
if (http_ver >= sizeof(fe->fe_counters.shared->tg[tgid - 1]->p.http.cum_req) / sizeof(*fe->fe_counters.shared->tg[tgid - 1]->p.http.cum_req))
if (http_ver >= sizeof(fe->fe_counters.shared.tg[tgid - 1]->p.http.cum_req) / sizeof(*fe->fe_counters.shared.tg[tgid - 1]->p.http.cum_req))
return;
_HA_ATOMIC_INC(&fe->fe_counters.shared->tg[tgid - 1]->p.http.cum_req[http_ver]);
_HA_ATOMIC_INC(&fe->fe_counters.shared.tg[tgid - 1]->p.http.cum_req[http_ver]);
if (l && l->counters)
_HA_ATOMIC_INC(&l->counters->shared->tg[tgid - 1]->p.http.cum_req[http_ver]);
update_freq_ctr(&fe->fe_counters.shared->tg[tgid - 1]->req_per_sec, 1);
_HA_ATOMIC_INC(&l->counters->shared.tg[tgid - 1]->p.http.cum_req[http_ver]);
update_freq_ctr(&fe->fe_counters.shared.tg[tgid - 1]->req_per_sec, 1);
HA_ATOMIC_UPDATE_MAX(&fe->fe_counters.p.http.rps_max,
update_freq_ctr(&fe->fe_counters.p.http._req_per_sec, 1));
}

@ -182,8 +182,8 @@ const struct mux_ops *srv_get_ws_proto(struct server *srv);
/* increase the number of cumulated streams on the designated server */
static inline void srv_inc_sess_ctr(struct server *s)
{
_HA_ATOMIC_INC(&s->counters.shared->tg[tgid - 1]->cum_sess);
update_freq_ctr(&s->counters.shared->tg[tgid - 1]->sess_per_sec, 1);
_HA_ATOMIC_INC(&s->counters.shared.tg[tgid - 1]->cum_sess);
update_freq_ctr(&s->counters.shared.tg[tgid - 1]->sess_per_sec, 1);
HA_ATOMIC_UPDATE_MAX(&s->counters.sps_max,
update_freq_ctr(&s->counters._sess_per_sec, 1));
}
@ -191,7 +191,7 @@ static inline void srv_inc_sess_ctr(struct server *s)
/* set the time of last session on the designated server */
static inline void srv_set_sess_last(struct server *s)
{
HA_ATOMIC_STORE(&s->counters.shared->tg[tgid - 1]->last_sess, ns_to_sec(now_ns));
HA_ATOMIC_STORE(&s->counters.shared.tg[tgid - 1]->last_sess, ns_to_sec(now_ns));
}
/* returns the current server throttle rate between 0 and 100% */

@ -362,8 +362,8 @@ static inline void stream_choose_redispatch(struct stream *s)
s->scb->state = SC_ST_REQ;
} else {
if (objt_server(s->target))
_HA_ATOMIC_INC(&__objt_server(s->target)->counters.shared->tg[tgid - 1]->retries);
_HA_ATOMIC_INC(&s->be->be_counters.shared->tg[tgid - 1]->retries);
_HA_ATOMIC_INC(&__objt_server(s->target)->counters.shared.tg[tgid - 1]->retries);
_HA_ATOMIC_INC(&s->be->be_counters.shared.tg[tgid - 1]->retries);
s->scb->state = SC_ST_ASS;
}

@ -825,8 +825,8 @@ int assign_server(struct stream *s)
goto out;
}
else if (srv != prev_srv) {
_HA_ATOMIC_INC(&s->be->be_counters.shared->tg[tgid - 1]->cum_lbconn);
_HA_ATOMIC_INC(&srv->counters.shared->tg[tgid - 1]->cum_lbconn);
_HA_ATOMIC_INC(&s->be->be_counters.shared.tg[tgid - 1]->cum_lbconn);
_HA_ATOMIC_INC(&srv->counters.shared.tg[tgid - 1]->cum_lbconn);
}
s->target = &srv->obj_type;
}
@ -1000,11 +1000,11 @@ int assign_server_and_queue(struct stream *s)
s->txn->flags |= TX_CK_DOWN;
}
s->flags |= SF_REDISP;
_HA_ATOMIC_INC(&prev_srv->counters.shared->tg[tgid - 1]->redispatches);
_HA_ATOMIC_INC(&s->be->be_counters.shared->tg[tgid - 1]->redispatches);
_HA_ATOMIC_INC(&prev_srv->counters.shared.tg[tgid - 1]->redispatches);
_HA_ATOMIC_INC(&s->be->be_counters.shared.tg[tgid - 1]->redispatches);
} else {
_HA_ATOMIC_INC(&prev_srv->counters.shared->tg[tgid - 1]->retries);
_HA_ATOMIC_INC(&s->be->be_counters.shared->tg[tgid - 1]->retries);
_HA_ATOMIC_INC(&prev_srv->counters.shared.tg[tgid - 1]->retries);
_HA_ATOMIC_INC(&s->be->be_counters.shared.tg[tgid - 1]->retries);
}
}
}
@ -2091,13 +2091,13 @@ int connect_server(struct stream *s)
s->scb->flags |= SC_FL_NOLINGER;
if (s->flags & SF_SRV_REUSED) {
_HA_ATOMIC_INC(&s->be->be_counters.shared->tg[tgid - 1]->reuse);
_HA_ATOMIC_INC(&s->be->be_counters.shared.tg[tgid - 1]->reuse);
if (srv)
_HA_ATOMIC_INC(&srv->counters.shared->tg[tgid - 1]->reuse);
_HA_ATOMIC_INC(&srv->counters.shared.tg[tgid - 1]->reuse);
} else {
_HA_ATOMIC_INC(&s->be->be_counters.shared->tg[tgid - 1]->connect);
_HA_ATOMIC_INC(&s->be->be_counters.shared.tg[tgid - 1]->connect);
if (srv)
_HA_ATOMIC_INC(&srv->counters.shared->tg[tgid - 1]->connect);
_HA_ATOMIC_INC(&srv->counters.shared.tg[tgid - 1]->connect);
}
err = do_connect_server(s, srv_conn);
@ -2286,8 +2286,8 @@ int srv_redispatch_connect(struct stream *s)
s->conn_err_type = STRM_ET_QUEUE_ERR;
}
_HA_ATOMIC_INC(&srv->counters.shared->tg[tgid - 1]->failed_conns);
_HA_ATOMIC_INC(&s->be->be_counters.shared->tg[tgid - 1]->failed_conns);
_HA_ATOMIC_INC(&srv->counters.shared.tg[tgid - 1]->failed_conns);
_HA_ATOMIC_INC(&s->be->be_counters.shared.tg[tgid - 1]->failed_conns);
return 1;
case SRV_STATUS_NOSRV:
@ -2296,7 +2296,7 @@ int srv_redispatch_connect(struct stream *s)
s->conn_err_type = STRM_ET_CONN_ERR;
}
_HA_ATOMIC_INC(&s->be->be_counters.shared->tg[tgid - 1]->failed_conns);
_HA_ATOMIC_INC(&s->be->be_counters.shared.tg[tgid - 1]->failed_conns);
return 1;
case SRV_STATUS_QUEUED:
@ -2325,8 +2325,8 @@ int srv_redispatch_connect(struct stream *s)
if (srv)
srv_set_sess_last(srv);
if (srv)
_HA_ATOMIC_INC(&srv->counters.shared->tg[tgid - 1]->failed_conns);
_HA_ATOMIC_INC(&s->be->be_counters.shared->tg[tgid - 1]->failed_conns);
_HA_ATOMIC_INC(&srv->counters.shared.tg[tgid - 1]->failed_conns);
_HA_ATOMIC_INC(&s->be->be_counters.shared.tg[tgid - 1]->failed_conns);
/* release other streams waiting for this server */
if (may_dequeue_tasks(srv, s->be))
@ -2400,8 +2400,8 @@ void back_try_conn_req(struct stream *s)
if (srv)
srv_set_sess_last(srv);
if (srv)
_HA_ATOMIC_INC(&srv->counters.shared->tg[tgid - 1]->failed_conns);
_HA_ATOMIC_INC(&s->be->be_counters.shared->tg[tgid - 1]->failed_conns);
_HA_ATOMIC_INC(&srv->counters.shared.tg[tgid - 1]->failed_conns);
_HA_ATOMIC_INC(&s->be->be_counters.shared.tg[tgid - 1]->failed_conns);
/* release other streams waiting for this server */
sess_change_server(s, NULL);
@ -2467,8 +2467,8 @@ void back_try_conn_req(struct stream *s)
pendconn_cond_unlink(s->pend_pos);
if (srv)
_HA_ATOMIC_INC(&srv->counters.shared->tg[tgid - 1]->failed_conns);
_HA_ATOMIC_INC(&s->be->be_counters.shared->tg[tgid - 1]->failed_conns);
_HA_ATOMIC_INC(&srv->counters.shared.tg[tgid - 1]->failed_conns);
_HA_ATOMIC_INC(&s->be->be_counters.shared.tg[tgid - 1]->failed_conns);
sc_abort(sc);
sc_shutdown(sc);
req->flags |= CF_WRITE_TIMEOUT;
@ -2723,8 +2723,8 @@ void back_handle_st_cer(struct stream *s)
}
if (objt_server(s->target))
_HA_ATOMIC_INC(&objt_server(s->target)->counters.shared->tg[tgid - 1]->failed_conns);
_HA_ATOMIC_INC(&s->be->be_counters.shared->tg[tgid - 1]->failed_conns);
_HA_ATOMIC_INC(&objt_server(s->target)->counters.shared.tg[tgid - 1]->failed_conns);
_HA_ATOMIC_INC(&s->be->be_counters.shared.tg[tgid - 1]->failed_conns);
sess_change_server(s, NULL);
if (may_dequeue_tasks(objt_server(s->target), s->be))
process_srv_queue(objt_server(s->target));
@ -2756,8 +2756,8 @@ void back_handle_st_cer(struct stream *s)
s->conn_err_type = STRM_ET_CONN_OTHER;
if (objt_server(s->target))
_HA_ATOMIC_INC(&objt_server(s->target)->counters.shared->tg[tgid - 1]->internal_errors);
_HA_ATOMIC_INC(&s->be->be_counters.shared->tg[tgid - 1]->internal_errors);
_HA_ATOMIC_INC(&objt_server(s->target)->counters.shared.tg[tgid - 1]->internal_errors);
_HA_ATOMIC_INC(&s->be->be_counters.shared.tg[tgid - 1]->internal_errors);
sess_change_server(s, NULL);
if (may_dequeue_tasks(objt_server(s->target), s->be))
process_srv_queue(objt_server(s->target));
@ -2895,8 +2895,8 @@ void back_handle_st_rdy(struct stream *s)
void set_backend_down(struct proxy *be)
{
be->last_change = ns_to_sec(now_ns);
HA_ATOMIC_STORE(&be->be_counters.shared->tg[tgid - 1]->last_state_change, be->last_change);
_HA_ATOMIC_INC(&be->be_counters.shared->tg[tgid - 1]->down_trans);
HA_ATOMIC_STORE(&be->be_counters.shared.tg[tgid - 1]->last_state_change, be->last_change);
_HA_ATOMIC_INC(&be->be_counters.shared.tg[tgid - 1]->down_trans);
if (!(global.mode & MODE_STARTING)) {
ha_alert("%s '%s' has no server available!\n", proxy_type_str(be), be->id);
@ -3409,7 +3409,7 @@ smp_fetch_be_sess_rate(const struct arg *args, struct sample *smp, const char *k
smp->flags = SMP_F_VOL_TEST;
smp->data.type = SMP_T_SINT;
smp->data.u.sint = COUNTERS_SHARED_TOTAL(px->be_counters.shared->tg, sess_per_sec, read_freq_ctr);
smp->data.u.sint = COUNTERS_SHARED_TOTAL(px->be_counters.shared.tg, sess_per_sec, read_freq_ctr);
return 1;
}
@ -3592,7 +3592,7 @@ smp_fetch_srv_sess_rate(const struct arg *args, struct sample *smp, const char *
{
smp->flags = SMP_F_VOL_TEST;
smp->data.type = SMP_T_SINT;
smp->data.u.sint = COUNTERS_SHARED_TOTAL(args->data.srv->counters.shared->tg, sess_per_sec, read_freq_ctr);
smp->data.u.sint = COUNTERS_SHARED_TOTAL(args->data.srv->counters.shared.tg, sess_per_sec, read_freq_ctr);
return 1;
}

@ -2133,9 +2133,9 @@ enum act_return http_action_req_cache_use(struct act_rule *rule, struct proxy *p
return ACT_RET_CONT;
if (px == strm_fe(s))
_HA_ATOMIC_INC(&px->fe_counters.shared->tg[tgid - 1]->p.http.cache_lookups);
_HA_ATOMIC_INC(&px->fe_counters.shared.tg[tgid - 1]->p.http.cache_lookups);
else
_HA_ATOMIC_INC(&px->be_counters.shared->tg[tgid - 1]->p.http.cache_lookups);
_HA_ATOMIC_INC(&px->be_counters.shared.tg[tgid - 1]->p.http.cache_lookups);
cache_tree = get_cache_tree_from_hash(cache, read_u32(s->txn->cache_hash));
@ -2222,9 +2222,9 @@ enum act_return http_action_req_cache_use(struct act_rule *rule, struct proxy *p
should_send_notmodified_response(cache, htxbuf(&s->req.buf), res);
if (px == strm_fe(s))
_HA_ATOMIC_INC(&px->fe_counters.shared->tg[tgid - 1]->p.http.cache_hits);
_HA_ATOMIC_INC(&px->fe_counters.shared.tg[tgid - 1]->p.http.cache_hits);
else
_HA_ATOMIC_INC(&px->be_counters.shared->tg[tgid - 1]->p.http.cache_hits);
_HA_ATOMIC_INC(&px->be_counters.shared.tg[tgid - 1]->p.http.cache_hits);
return ACT_RET_CONT;
} else {
s->target = NULL;

@ -4281,8 +4281,7 @@ int check_config_validity()
if (curproxy->options2 & PR_O2_SOCKSTAT) {
listener->counters = calloc(1, sizeof(*listener->counters));
if (listener->counters) {
listener->counters->shared = counters_fe_shared_get(&listener->guid);
if (!listener->counters->shared) {
if (!counters_fe_shared_prepare(&listener->counters->shared, &listener->guid)) {
ha_free(&listener->counters);
ha_alert("config: %s '%s': out of memory.\n",
proxy_type_str(curproxy), curproxy->id);

@ -513,7 +513,7 @@ void set_server_check_status(struct check *check, short status, const char *desc
if ((!(check->state & CHK_ST_AGENT) ||
(check->status >= HCHK_STATUS_L57DATA)) &&
(check->health > 0)) {
_HA_ATOMIC_INC(&s->counters.shared->tg[tgid - 1]->failed_checks);
_HA_ATOMIC_INC(&s->counters.shared.tg[tgid - 1]->failed_checks);
report = 1;
check->health--;
if (check->health < check->rise)
@ -740,7 +740,7 @@ void __health_adjust(struct server *s, short status)
HA_SPIN_UNLOCK(SERVER_LOCK, &s->lock);
HA_ATOMIC_STORE(&s->consecutive_errors, 0);
_HA_ATOMIC_INC(&s->counters.shared->tg[tgid - 1]->failed_hana);
_HA_ATOMIC_INC(&s->counters.shared.tg[tgid - 1]->failed_hana);
if (s->check.fastinter) {
/* timer might need to be advanced, it might also already be

@ -38,7 +38,6 @@ static void _counters_shared_drop(void *counters)
free(shared->tg[it]);
it += 1;
}
free(counters);
}
/* release a shared fe counters struct */
@ -58,24 +57,20 @@ void counters_be_shared_drop(struct be_counters_shared *counters)
* if <guid> is not set, then sharing is disabled
* Returns the pointer on success or NULL on failure
*/
static void*_counters_shared_get(const struct guid_node *guid, size_t size)
static int _counters_shared_init(struct counters_shared *shared, const struct guid_node *guid, size_t size)
{
struct counters_shared *shared;
int it = 0;
/* no shared memory for now, simply allocate a memory block
* for the counters (zero-initialized), ignore guid
*/
shared = calloc(1, sizeof(*shared));
if (!shared)
return NULL;
if (!guid->node.key)
shared->flags |= COUNTERS_SHARED_F_LOCAL;
while (it < global.nbtgroups) {
shared->tg[it] = calloc(1, size);
if (!shared->tg[it]) {
_counters_shared_drop(shared);
return NULL;
return 0;
}
it += 1;
}
@ -84,17 +79,17 @@ static void*_counters_shared_get(const struct guid_node *guid, size_t size)
* only set one group, only latest value is considered
*/
HA_ATOMIC_STORE(&shared->tg[0]->last_state_change, ns_to_sec(now_ns));
return shared;
return 1;
}
/* retrieve shared fe counters pointer for a given <guid> object */
struct fe_counters_shared *counters_fe_shared_get(const struct guid_node *guid)
/* prepare shared fe counters pointer for a given <guid> object */
int counters_fe_shared_prepare(struct fe_counters_shared *shared, const struct guid_node *guid)
{
return _counters_shared_get(guid, sizeof(struct fe_counters_shared_tg));
return _counters_shared_init((struct counters_shared *)shared, guid, sizeof(struct fe_counters_shared_tg));
}
/* retrieve shared be counters pointer for a given <guid> object */
struct be_counters_shared *counters_be_shared_get(const struct guid_node *guid)
/* prepare shared be counters pointer for a given <guid> object */
int counters_be_shared_init(struct be_counters_shared *shared, const struct guid_node *guid)
{
return _counters_shared_get(guid, sizeof(struct be_counters_shared_tg));
return _counters_shared_init((struct counters_shared *)shared, guid, sizeof(struct be_counters_shared_tg));
}

@ -446,12 +446,12 @@ static int fcgi_flt_http_headers(struct stream *s, struct filter *filter, struct
goto end;
rewrite_err:
_HA_ATOMIC_INC(&sess->fe->fe_counters.shared->tg[tgid - 1]->failed_rewrites);
_HA_ATOMIC_INC(&s->be->be_counters.shared->tg[tgid - 1]->failed_rewrites);
_HA_ATOMIC_INC(&sess->fe->fe_counters.shared.tg[tgid - 1]->failed_rewrites);
_HA_ATOMIC_INC(&s->be->be_counters.shared.tg[tgid - 1]->failed_rewrites);
if (sess->listener && sess->listener->counters)
_HA_ATOMIC_INC(&sess->listener->counters->shared->tg[tgid - 1]->failed_rewrites);
_HA_ATOMIC_INC(&sess->listener->counters->shared.tg[tgid - 1]->failed_rewrites);
if (objt_server(s->target))
_HA_ATOMIC_INC(&__objt_server(s->target)->counters.shared->tg[tgid - 1]->failed_rewrites);
_HA_ATOMIC_INC(&__objt_server(s->target)->counters.shared.tg[tgid - 1]->failed_rewrites);
hdr_rule_err:
node = ebpt_first(&hdr_rules);
while (node) {

@ -393,14 +393,14 @@ comp_http_payload(struct stream *s, struct filter *filter, struct http_msg *msg,
if (st->comp_ctx[dir] && st->comp_ctx[dir]->cur_lvl > 0) {
update_freq_ctr(&global.comp_bps_in, consumed);
_HA_ATOMIC_ADD(&strm_fe(s)->fe_counters.shared->tg[tgid - 1]->comp_in[dir], consumed);
_HA_ATOMIC_ADD(&s->be->be_counters.shared->tg[tgid - 1]->comp_in[dir], consumed);
_HA_ATOMIC_ADD(&strm_fe(s)->fe_counters.shared.tg[tgid - 1]->comp_in[dir], consumed);
_HA_ATOMIC_ADD(&s->be->be_counters.shared.tg[tgid - 1]->comp_in[dir], consumed);
update_freq_ctr(&global.comp_bps_out, to_forward);
_HA_ATOMIC_ADD(&strm_fe(s)->fe_counters.shared->tg[tgid - 1]->comp_out[dir], to_forward);
_HA_ATOMIC_ADD(&s->be->be_counters.shared->tg[tgid - 1]->comp_out[dir], to_forward);
_HA_ATOMIC_ADD(&strm_fe(s)->fe_counters.shared.tg[tgid - 1]->comp_out[dir], to_forward);
_HA_ATOMIC_ADD(&s->be->be_counters.shared.tg[tgid - 1]->comp_out[dir], to_forward);
} else {
_HA_ATOMIC_ADD(&strm_fe(s)->fe_counters.shared->tg[tgid - 1]->comp_byp[dir], consumed);
_HA_ATOMIC_ADD(&s->be->be_counters.shared->tg[tgid - 1]->comp_byp[dir], consumed);
_HA_ATOMIC_ADD(&strm_fe(s)->fe_counters.shared.tg[tgid - 1]->comp_byp[dir], consumed);
_HA_ATOMIC_ADD(&s->be->be_counters.shared.tg[tgid - 1]->comp_byp[dir], consumed);
}
return to_forward;
@ -419,9 +419,9 @@ comp_http_end(struct stream *s, struct filter *filter,
goto end;
if (strm_fe(s)->mode == PR_MODE_HTTP)
_HA_ATOMIC_INC(&strm_fe(s)->fe_counters.shared->tg[tgid - 1]->p.http.comp_rsp);
_HA_ATOMIC_INC(&strm_fe(s)->fe_counters.shared.tg[tgid - 1]->p.http.comp_rsp);
if ((s->flags & SF_BE_ASSIGNED) && (s->be->mode == PR_MODE_HTTP))
_HA_ATOMIC_INC(&s->be->be_counters.shared->tg[tgid - 1]->p.http.comp_rsp);
_HA_ATOMIC_INC(&s->be->be_counters.shared.tg[tgid - 1]->p.http.comp_rsp);
end:
return 1;
}

@ -261,7 +261,7 @@ smp_fetch_fe_req_rate(const struct arg *args, struct sample *smp, const char *kw
smp->flags = SMP_F_VOL_TEST;
smp->data.type = SMP_T_SINT;
smp->data.u.sint = COUNTERS_SHARED_TOTAL(px->fe_counters.shared->tg, req_per_sec, read_freq_ctr);
smp->data.u.sint = COUNTERS_SHARED_TOTAL(px->fe_counters.shared.tg, req_per_sec, read_freq_ctr);
return 1;
}
@ -281,7 +281,7 @@ smp_fetch_fe_sess_rate(const struct arg *args, struct sample *smp, const char *k
smp->flags = SMP_F_VOL_TEST;
smp->data.type = SMP_T_SINT;
smp->data.u.sint = COUNTERS_SHARED_TOTAL(px->fe_counters.shared->tg, sess_per_sec, read_freq_ctr);
smp->data.u.sint = COUNTERS_SHARED_TOTAL(px->fe_counters.shared.tg, sess_per_sec, read_freq_ctr);
return 1;
}

@ -819,7 +819,7 @@ static void sig_dump_state(struct sig_handler *sh)
"SIGHUP: Server %s/%s is %s. Conn: %d act, %d pend, %llu tot.",
p->id, s->id,
(s->cur_state != SRV_ST_STOPPED) ? "UP" : "DOWN",
s->cur_sess, s->queueslength, (ullong)COUNTERS_SHARED_TOTAL(s->counters.shared->tg, cum_sess, HA_ATOMIC_LOAD));
s->cur_sess, s->queueslength, (ullong)COUNTERS_SHARED_TOTAL(s->counters.shared.tg, cum_sess, HA_ATOMIC_LOAD));
ha_warning("%s\n", trash.area);
send_log(p, LOG_NOTICE, "%s\n", trash.area);
s = s->next;
@ -830,19 +830,19 @@ static void sig_dump_state(struct sig_handler *sh)
chunk_printf(&trash,
"SIGHUP: Proxy %s has no servers. Conn: act(FE+BE): %d+%d, %d pend (%d unass), tot(FE+BE): %llu+%llu.",
p->id,
p->feconn, p->beconn, p->totpend, p->queueslength, (ullong)COUNTERS_SHARED_TOTAL(p->fe_counters.shared->tg, cum_conn, HA_ATOMIC_LOAD), (ullong)COUNTERS_SHARED_TOTAL(p->be_counters.shared->tg, cum_sess, HA_ATOMIC_LOAD));
p->feconn, p->beconn, p->totpend, p->queueslength, (ullong)COUNTERS_SHARED_TOTAL(p->fe_counters.shared.tg, cum_conn, HA_ATOMIC_LOAD), (ullong)COUNTERS_SHARED_TOTAL(p->be_counters.shared.tg, cum_sess, HA_ATOMIC_LOAD));
} else if (p->srv_act == 0) {
chunk_printf(&trash,
"SIGHUP: Proxy %s %s ! Conn: act(FE+BE): %d+%d, %d pend (%d unass), tot(FE+BE): %llu+%llu.",
p->id,
(p->srv_bck) ? "is running on backup servers" : "has no server available",
p->feconn, p->beconn, p->totpend, p->queueslength, (ullong)COUNTERS_SHARED_TOTAL(p->fe_counters.shared->tg, cum_conn, HA_ATOMIC_LOAD), (ullong)COUNTERS_SHARED_TOTAL(p->be_counters.shared->tg, cum_sess, HA_ATOMIC_LOAD));
p->feconn, p->beconn, p->totpend, p->queueslength, (ullong)COUNTERS_SHARED_TOTAL(p->fe_counters.shared.tg, cum_conn, HA_ATOMIC_LOAD), (ullong)COUNTERS_SHARED_TOTAL(p->be_counters.shared.tg, cum_sess, HA_ATOMIC_LOAD));
} else {
chunk_printf(&trash,
"SIGHUP: Proxy %s has %d active servers and %d backup servers available."
" Conn: act(FE+BE): %d+%d, %d pend (%d unass), tot(FE+BE): %llu+%llu.",
p->id, p->srv_act, p->srv_bck,
p->feconn, p->beconn, p->totpend, p->queueslength, (ullong)COUNTERS_SHARED_TOTAL(p->fe_counters.shared->tg, cum_conn, HA_ATOMIC_LOAD), (ullong)COUNTERS_SHARED_TOTAL(p->be_counters.shared->tg, cum_sess, HA_ATOMIC_LOAD));
p->feconn, p->beconn, p->totpend, p->queueslength, (ullong)COUNTERS_SHARED_TOTAL(p->fe_counters.shared.tg, cum_conn, HA_ATOMIC_LOAD), (ullong)COUNTERS_SHARED_TOTAL(p->be_counters.shared.tg, cum_sess, HA_ATOMIC_LOAD));
}
ha_warning("%s\n", trash.area);
send_log(p, LOG_NOTICE, "%s\n", trash.area);

@ -8922,7 +8922,7 @@ __LJMP static int hlua_txn_done(lua_State *L)
/* let's log the request time */
s->logs.request_ts = now_ns;
if (s->sess->fe == s->be) /* report it if the request was intercepted by the frontend */
_HA_ATOMIC_INC(&s->sess->fe->fe_counters.shared->tg[tgid - 1]->intercepted_req);
_HA_ATOMIC_INC(&s->sess->fe->fe_counters.shared.tg[tgid - 1]->intercepted_req);
}
done:

@ -116,13 +116,13 @@ static enum act_return http_action_set_req_line(struct act_rule *rule, struct pr
goto leave;
fail_rewrite:
_HA_ATOMIC_INC(&sess->fe->fe_counters.shared->tg[tgid - 1]->failed_rewrites);
_HA_ATOMIC_INC(&sess->fe->fe_counters.shared.tg[tgid - 1]->failed_rewrites);
if (s->flags & SF_BE_ASSIGNED)
_HA_ATOMIC_INC(&s->be->be_counters.shared->tg[tgid - 1]->failed_rewrites);
_HA_ATOMIC_INC(&s->be->be_counters.shared.tg[tgid - 1]->failed_rewrites);
if (sess->listener && sess->listener->counters)
_HA_ATOMIC_INC(&sess->listener->counters->shared->tg[tgid - 1]->failed_rewrites);
_HA_ATOMIC_INC(&sess->listener->counters->shared.tg[tgid - 1]->failed_rewrites);
if (objt_server(s->target))
_HA_ATOMIC_INC(&__objt_server(s->target)->counters.shared->tg[tgid - 1]->failed_rewrites);
_HA_ATOMIC_INC(&__objt_server(s->target)->counters.shared.tg[tgid - 1]->failed_rewrites);
if (!(s->txn->req.flags & HTTP_MSGF_SOFT_RW)) {
ret = ACT_RET_ERR;
@ -386,13 +386,13 @@ static enum act_return http_action_normalize_uri(struct act_rule *rule, struct p
goto leave;
fail_rewrite:
_HA_ATOMIC_ADD(&sess->fe->fe_counters.shared->tg[tgid - 1]->failed_rewrites, 1);
_HA_ATOMIC_ADD(&sess->fe->fe_counters.shared.tg[tgid - 1]->failed_rewrites, 1);
if (s->flags & SF_BE_ASSIGNED)
_HA_ATOMIC_ADD(&s->be->be_counters.shared->tg[tgid - 1]->failed_rewrites, 1);
_HA_ATOMIC_ADD(&s->be->be_counters.shared.tg[tgid - 1]->failed_rewrites, 1);
if (sess->listener && sess->listener->counters)
_HA_ATOMIC_ADD(&sess->listener->counters->shared->tg[tgid - 1]->failed_rewrites, 1);
_HA_ATOMIC_ADD(&sess->listener->counters->shared.tg[tgid - 1]->failed_rewrites, 1);
if (objt_server(s->target))
_HA_ATOMIC_ADD(&__objt_server(s->target)->counters.shared->tg[tgid - 1]->failed_rewrites, 1);
_HA_ATOMIC_ADD(&__objt_server(s->target)->counters.shared.tg[tgid - 1]->failed_rewrites, 1);
if (!(s->txn->req.flags & HTTP_MSGF_SOFT_RW)) {
ret = ACT_RET_ERR;
@ -562,13 +562,13 @@ static enum act_return http_action_replace_uri(struct act_rule *rule, struct pro
goto leave;
fail_rewrite:
_HA_ATOMIC_INC(&sess->fe->fe_counters.shared->tg[tgid - 1]->failed_rewrites);
_HA_ATOMIC_INC(&sess->fe->fe_counters.shared.tg[tgid - 1]->failed_rewrites);
if (s->flags & SF_BE_ASSIGNED)
_HA_ATOMIC_INC(&s->be->be_counters.shared->tg[tgid - 1]->failed_rewrites);
_HA_ATOMIC_INC(&s->be->be_counters.shared.tg[tgid - 1]->failed_rewrites);
if (sess->listener && sess->listener->counters)
_HA_ATOMIC_INC(&sess->listener->counters->shared->tg[tgid - 1]->failed_rewrites);
_HA_ATOMIC_INC(&sess->listener->counters->shared.tg[tgid - 1]->failed_rewrites);
if (objt_server(s->target))
_HA_ATOMIC_INC(&__objt_server(s->target)->counters.shared->tg[tgid - 1]->failed_rewrites);
_HA_ATOMIC_INC(&__objt_server(s->target)->counters.shared.tg[tgid - 1]->failed_rewrites);
if (!(s->txn->req.flags & HTTP_MSGF_SOFT_RW)) {
ret = ACT_RET_ERR;
@ -642,13 +642,13 @@ static enum act_return action_http_set_status(struct act_rule *rule, struct prox
struct session *sess, struct stream *s, int flags)
{
if (http_res_set_status(rule->arg.http.i, rule->arg.http.str, s) == -1) {
_HA_ATOMIC_INC(&sess->fe->fe_counters.shared->tg[tgid - 1]->failed_rewrites);
_HA_ATOMIC_INC(&sess->fe->fe_counters.shared.tg[tgid - 1]->failed_rewrites);
if (s->flags & SF_BE_ASSIGNED)
_HA_ATOMIC_INC(&s->be->be_counters.shared->tg[tgid - 1]->failed_rewrites);
_HA_ATOMIC_INC(&s->be->be_counters.shared.tg[tgid - 1]->failed_rewrites);
if (sess->listener && sess->listener->counters)
_HA_ATOMIC_INC(&sess->listener->counters->shared->tg[tgid - 1]->failed_rewrites);
_HA_ATOMIC_INC(&sess->listener->counters->shared.tg[tgid - 1]->failed_rewrites);
if (objt_server(s->target))
_HA_ATOMIC_INC(&__objt_server(s->target)->counters.shared->tg[tgid - 1]->failed_rewrites);
_HA_ATOMIC_INC(&__objt_server(s->target)->counters.shared.tg[tgid - 1]->failed_rewrites);
if (!(s->txn->req.flags & HTTP_MSGF_SOFT_RW)) {
if (!(s->flags & SF_ERR_MASK))
@ -717,10 +717,10 @@ static enum act_return http_action_reject(struct act_rule *rule, struct proxy *p
s->req.analysers &= AN_REQ_FLT_END;
s->res.analysers &= AN_RES_FLT_END;
_HA_ATOMIC_INC(&s->be->be_counters.shared->tg[tgid - 1]->denied_req);
_HA_ATOMIC_INC(&sess->fe->fe_counters.shared->tg[tgid - 1]->denied_req);
_HA_ATOMIC_INC(&s->be->be_counters.shared.tg[tgid - 1]->denied_req);
_HA_ATOMIC_INC(&sess->fe->fe_counters.shared.tg[tgid - 1]->denied_req);
if (sess->listener && sess->listener->counters)
_HA_ATOMIC_INC(&sess->listener->counters->shared->tg[tgid - 1]->denied_req);
_HA_ATOMIC_INC(&sess->listener->counters->shared.tg[tgid - 1]->denied_req);
if (!(s->flags & SF_ERR_MASK))
s->flags |= SF_ERR_PRXCOND;
@ -1281,7 +1281,7 @@ static enum act_return http_action_auth(struct act_rule *rule, struct proxy *px,
req->analysers &= AN_REQ_FLT_END;
if (s->sess->fe == s->be) /* report it if the request was intercepted by the frontend */
_HA_ATOMIC_INC(&s->sess->fe->fe_counters.shared->tg[tgid - 1]->intercepted_req);
_HA_ATOMIC_INC(&s->sess->fe->fe_counters.shared.tg[tgid - 1]->intercepted_req);
if (!(s->flags & SF_ERR_MASK))
s->flags |= SF_ERR_LOCAL;
@ -1449,13 +1449,13 @@ static enum act_return http_action_set_header(struct act_rule *rule, struct prox
goto leave;
fail_rewrite:
_HA_ATOMIC_INC(&sess->fe->fe_counters.shared->tg[tgid - 1]->failed_rewrites);
_HA_ATOMIC_INC(&sess->fe->fe_counters.shared.tg[tgid - 1]->failed_rewrites);
if (s->flags & SF_BE_ASSIGNED)
_HA_ATOMIC_INC(&s->be->be_counters.shared->tg[tgid - 1]->failed_rewrites);
_HA_ATOMIC_INC(&s->be->be_counters.shared.tg[tgid - 1]->failed_rewrites);
if (sess->listener && sess->listener->counters)
_HA_ATOMIC_INC(&sess->listener->counters->shared->tg[tgid - 1]->failed_rewrites);
_HA_ATOMIC_INC(&sess->listener->counters->shared.tg[tgid - 1]->failed_rewrites);
if (objt_server(s->target))
_HA_ATOMIC_INC(&__objt_server(s->target)->counters.shared->tg[tgid - 1]->failed_rewrites);
_HA_ATOMIC_INC(&__objt_server(s->target)->counters.shared.tg[tgid - 1]->failed_rewrites);
if (!(msg->flags & HTTP_MSGF_SOFT_RW)) {
ret = ACT_RET_ERR;
@ -1581,13 +1581,13 @@ static enum act_return http_action_replace_header(struct act_rule *rule, struct
goto leave;
fail_rewrite:
_HA_ATOMIC_INC(&sess->fe->fe_counters.shared->tg[tgid - 1]->failed_rewrites);
_HA_ATOMIC_INC(&sess->fe->fe_counters.shared.tg[tgid - 1]->failed_rewrites);
if (s->flags & SF_BE_ASSIGNED)
_HA_ATOMIC_INC(&s->be->be_counters.shared->tg[tgid - 1]->failed_rewrites);
_HA_ATOMIC_INC(&s->be->be_counters.shared.tg[tgid - 1]->failed_rewrites);
if (sess->listener && sess->listener->counters)
_HA_ATOMIC_INC(&sess->listener->counters->shared->tg[tgid - 1]->failed_rewrites);
_HA_ATOMIC_INC(&sess->listener->counters->shared.tg[tgid - 1]->failed_rewrites);
if (objt_server(s->target))
_HA_ATOMIC_INC(&__objt_server(s->target)->counters.shared->tg[tgid - 1]->failed_rewrites);
_HA_ATOMIC_INC(&__objt_server(s->target)->counters.shared.tg[tgid - 1]->failed_rewrites);
if (!(msg->flags & HTTP_MSGF_SOFT_RW)) {
ret = ACT_RET_ERR;
@ -2319,7 +2319,7 @@ static enum act_return http_action_return(struct act_rule *rule, struct proxy *p
req->analysers &= AN_REQ_FLT_END;
if (s->sess->fe == s->be) /* report it if the request was intercepted by the frontend */
_HA_ATOMIC_INC(&s->sess->fe->fe_counters.shared->tg[tgid - 1]->intercepted_req);
_HA_ATOMIC_INC(&s->sess->fe->fe_counters.shared.tg[tgid - 1]->intercepted_req);
}
return ACT_RET_ABRT;

@ -233,7 +233,7 @@ int http_wait_for_request(struct stream *s, struct channel *req, int an_bit)
struct acl_cond *cond;
s->flags |= SF_MONITOR;
_HA_ATOMIC_INC(&sess->fe->fe_counters.shared->tg[tgid - 1]->intercepted_req);
_HA_ATOMIC_INC(&sess->fe->fe_counters.shared.tg[tgid - 1]->intercepted_req);
/* Check if we want to fail this monitor request or not */
list_for_each_entry(cond, &sess->fe->mon_fail_cond, list) {
@ -342,17 +342,17 @@ int http_wait_for_request(struct stream *s, struct channel *req, int an_bit)
txn->status = 500;
if (!(s->flags & SF_ERR_MASK))
s->flags |= SF_ERR_INTERNAL;
_HA_ATOMIC_INC(&sess->fe->fe_counters.shared->tg[tgid - 1]->internal_errors);
_HA_ATOMIC_INC(&sess->fe->fe_counters.shared.tg[tgid - 1]->internal_errors);
if (sess->listener && sess->listener->counters)
_HA_ATOMIC_INC(&sess->listener->counters->shared->tg[tgid - 1]->internal_errors);
_HA_ATOMIC_INC(&sess->listener->counters->shared.tg[tgid - 1]->internal_errors);
stream_report_term_evt(s->scb, strm_tevt_type_internal_err);
goto return_prx_cond;
return_bad_req:
txn->status = 400;
_HA_ATOMIC_INC(&sess->fe->fe_counters.shared->tg[tgid - 1]->failed_req);
_HA_ATOMIC_INC(&sess->fe->fe_counters.shared.tg[tgid - 1]->failed_req);
if (sess->listener && sess->listener->counters)
_HA_ATOMIC_INC(&sess->listener->counters->shared->tg[tgid - 1]->failed_req);
_HA_ATOMIC_INC(&sess->listener->counters->shared.tg[tgid - 1]->failed_req);
stream_report_term_evt(s->scb, strm_tevt_type_proto_err);
/* fall through */
@ -486,7 +486,7 @@ int http_process_req_common(struct stream *s, struct channel *req, int an_bit, s
/* Proceed with the applets now. */
if (unlikely(objt_applet(s->target))) {
if (sess->fe == s->be) /* report it if the request was intercepted by the frontend */
_HA_ATOMIC_INC(&sess->fe->fe_counters.shared->tg[tgid - 1]->intercepted_req);
_HA_ATOMIC_INC(&sess->fe->fe_counters.shared.tg[tgid - 1]->intercepted_req);
if (http_handle_expect_hdr(s, htx, msg) == -1)
goto return_int_err;
@ -562,11 +562,11 @@ int http_process_req_common(struct stream *s, struct channel *req, int an_bit, s
if (!req->analyse_exp)
req->analyse_exp = tick_add(now_ms, 0);
stream_inc_http_err_ctr(s);
_HA_ATOMIC_INC(&sess->fe->fe_counters.shared->tg[tgid - 1]->denied_req);
_HA_ATOMIC_INC(&sess->fe->fe_counters.shared.tg[tgid - 1]->denied_req);
if (s->flags & SF_BE_ASSIGNED)
_HA_ATOMIC_INC(&s->be->be_counters.shared->tg[tgid - 1]->denied_req);
_HA_ATOMIC_INC(&s->be->be_counters.shared.tg[tgid - 1]->denied_req);
if (sess->listener && sess->listener->counters)
_HA_ATOMIC_INC(&sess->listener->counters->shared->tg[tgid - 1]->denied_req);
_HA_ATOMIC_INC(&sess->listener->counters->shared.tg[tgid - 1]->denied_req);
stream_report_term_evt(s->scf, strm_tevt_type_intercepted);
goto done_without_exp;
@ -579,43 +579,43 @@ int http_process_req_common(struct stream *s, struct channel *req, int an_bit, s
s->logs.request_ts = now_ns;
stream_inc_http_err_ctr(s);
_HA_ATOMIC_INC(&sess->fe->fe_counters.shared->tg[tgid - 1]->denied_req);
_HA_ATOMIC_INC(&sess->fe->fe_counters.shared.tg[tgid - 1]->denied_req);
if (s->flags & SF_BE_ASSIGNED)
_HA_ATOMIC_INC(&s->be->be_counters.shared->tg[tgid - 1]->denied_req);
_HA_ATOMIC_INC(&s->be->be_counters.shared.tg[tgid - 1]->denied_req);
if (sess->listener && sess->listener->counters)
_HA_ATOMIC_INC(&sess->listener->counters->shared->tg[tgid - 1]->denied_req);
_HA_ATOMIC_INC(&sess->listener->counters->shared.tg[tgid - 1]->denied_req);
stream_report_term_evt(s->scf, strm_tevt_type_intercepted);
goto return_prx_err;
return_fail_rewrite:
if (!(s->flags & SF_ERR_MASK))
s->flags |= SF_ERR_PRXCOND;
_HA_ATOMIC_INC(&sess->fe->fe_counters.shared->tg[tgid - 1]->failed_rewrites);
_HA_ATOMIC_INC(&sess->fe->fe_counters.shared.tg[tgid - 1]->failed_rewrites);
if (s->flags & SF_BE_ASSIGNED)
_HA_ATOMIC_INC(&s->be->be_counters.shared->tg[tgid - 1]->failed_rewrites);
_HA_ATOMIC_INC(&s->be->be_counters.shared.tg[tgid - 1]->failed_rewrites);
if (sess->listener && sess->listener->counters)
_HA_ATOMIC_INC(&sess->listener->counters->shared->tg[tgid - 1]->failed_rewrites);
_HA_ATOMIC_INC(&sess->listener->counters->shared.tg[tgid - 1]->failed_rewrites);
if (objt_server(s->target))
_HA_ATOMIC_INC(&__objt_server(s->target)->counters.shared->tg[tgid - 1]->failed_rewrites);
_HA_ATOMIC_INC(&__objt_server(s->target)->counters.shared.tg[tgid - 1]->failed_rewrites);
/* fall through */
return_int_err:
txn->status = 500;
if (!(s->flags & SF_ERR_MASK))
s->flags |= SF_ERR_INTERNAL;
_HA_ATOMIC_INC(&sess->fe->fe_counters.shared->tg[tgid - 1]->internal_errors);
_HA_ATOMIC_INC(&sess->fe->fe_counters.shared.tg[tgid - 1]->internal_errors);
if (s->flags & SF_BE_ASSIGNED)
_HA_ATOMIC_INC(&s->be->be_counters.shared->tg[tgid - 1]->internal_errors);
_HA_ATOMIC_INC(&s->be->be_counters.shared.tg[tgid - 1]->internal_errors);
if (sess->listener && sess->listener->counters)
_HA_ATOMIC_INC(&sess->listener->counters->shared->tg[tgid - 1]->internal_errors);
_HA_ATOMIC_INC(&sess->listener->counters->shared.tg[tgid - 1]->internal_errors);
stream_report_term_evt(s->scf, strm_tevt_type_internal_err);
goto return_prx_err;
return_bad_req:
txn->status = 400;
_HA_ATOMIC_INC(&sess->fe->fe_counters.shared->tg[tgid - 1]->failed_req);
_HA_ATOMIC_INC(&sess->fe->fe_counters.shared.tg[tgid - 1]->failed_req);
if (sess->listener && sess->listener->counters)
_HA_ATOMIC_INC(&sess->listener->counters->shared->tg[tgid - 1]->failed_req);
_HA_ATOMIC_INC(&sess->listener->counters->shared.tg[tgid - 1]->failed_req);
stream_report_term_evt(s->scf, strm_tevt_type_proto_err);
/* fall through */
@ -748,24 +748,24 @@ int http_process_request(struct stream *s, struct channel *req, int an_bit)
return_fail_rewrite:
if (!(s->flags & SF_ERR_MASK))
s->flags |= SF_ERR_PRXCOND;
_HA_ATOMIC_INC(&sess->fe->fe_counters.shared->tg[tgid - 1]->failed_rewrites);
_HA_ATOMIC_INC(&sess->fe->fe_counters.shared.tg[tgid - 1]->failed_rewrites);
if (s->flags & SF_BE_ASSIGNED)
_HA_ATOMIC_INC(&s->be->be_counters.shared->tg[tgid - 1]->failed_rewrites);
_HA_ATOMIC_INC(&s->be->be_counters.shared.tg[tgid - 1]->failed_rewrites);
if (sess->listener && sess->listener->counters)
_HA_ATOMIC_INC(&sess->listener->counters->shared->tg[tgid - 1]->failed_rewrites);
_HA_ATOMIC_INC(&sess->listener->counters->shared.tg[tgid - 1]->failed_rewrites);
if (objt_server(s->target))
_HA_ATOMIC_INC(&__objt_server(s->target)->counters.shared->tg[tgid - 1]->failed_rewrites);
_HA_ATOMIC_INC(&__objt_server(s->target)->counters.shared.tg[tgid - 1]->failed_rewrites);
/* fall through */
return_int_err:
txn->status = 500;
if (!(s->flags & SF_ERR_MASK))
s->flags |= SF_ERR_INTERNAL;
_HA_ATOMIC_INC(&sess->fe->fe_counters.shared->tg[tgid - 1]->internal_errors);
_HA_ATOMIC_INC(&sess->fe->fe_counters.shared.tg[tgid - 1]->internal_errors);
if (s->flags & SF_BE_ASSIGNED)
_HA_ATOMIC_INC(&s->be->be_counters.shared->tg[tgid - 1]->internal_errors);
_HA_ATOMIC_INC(&s->be->be_counters.shared.tg[tgid - 1]->internal_errors);
if (sess->listener && sess->listener->counters)
_HA_ATOMIC_INC(&sess->listener->counters->shared->tg[tgid - 1]->internal_errors);
_HA_ATOMIC_INC(&sess->listener->counters->shared.tg[tgid - 1]->internal_errors);
stream_report_term_evt(s->scf, strm_tevt_type_internal_err);
http_set_term_flags(s);
@ -871,19 +871,19 @@ int http_wait_for_request_body(struct stream *s, struct channel *req, int an_bit
txn->status = 500;
if (!(s->flags & SF_ERR_MASK))
s->flags |= SF_ERR_INTERNAL;
_HA_ATOMIC_INC(&sess->fe->fe_counters.shared->tg[tgid - 1]->internal_errors);
_HA_ATOMIC_INC(&sess->fe->fe_counters.shared.tg[tgid - 1]->internal_errors);
if (s->flags & SF_BE_ASSIGNED)
_HA_ATOMIC_INC(&s->be->be_counters.shared->tg[tgid - 1]->internal_errors);
_HA_ATOMIC_INC(&s->be->be_counters.shared.tg[tgid - 1]->internal_errors);
if (sess->listener && sess->listener->counters)
_HA_ATOMIC_INC(&sess->listener->counters->shared->tg[tgid - 1]->internal_errors);
_HA_ATOMIC_INC(&sess->listener->counters->shared.tg[tgid - 1]->internal_errors);
stream_report_term_evt(s->scf, strm_tevt_type_internal_err);
goto return_prx_err;
return_bad_req: /* let's centralize all bad requests */
txn->status = 400;
_HA_ATOMIC_INC(&sess->fe->fe_counters.shared->tg[tgid - 1]->failed_req);
_HA_ATOMIC_INC(&sess->fe->fe_counters.shared.tg[tgid - 1]->failed_req);
if (sess->listener && sess->listener->counters)
_HA_ATOMIC_INC(&sess->listener->counters->shared->tg[tgid - 1]->failed_req);
_HA_ATOMIC_INC(&sess->listener->counters->shared.tg[tgid - 1]->failed_req);
stream_report_term_evt(s->scf, strm_tevt_type_proto_err);
/* fall through */
@ -1100,24 +1100,24 @@ int http_request_forward_body(struct stream *s, struct channel *req, int an_bit)
return 0;
return_cli_abort:
_HA_ATOMIC_INC(&sess->fe->fe_counters.shared->tg[tgid - 1]->cli_aborts);
_HA_ATOMIC_INC(&s->be->be_counters.shared->tg[tgid - 1]->cli_aborts);
_HA_ATOMIC_INC(&sess->fe->fe_counters.shared.tg[tgid - 1]->cli_aborts);
_HA_ATOMIC_INC(&s->be->be_counters.shared.tg[tgid - 1]->cli_aborts);
if (sess->listener && sess->listener->counters)
_HA_ATOMIC_INC(&sess->listener->counters->shared->tg[tgid - 1]->cli_aborts);
_HA_ATOMIC_INC(&sess->listener->counters->shared.tg[tgid - 1]->cli_aborts);
if (objt_server(s->target))
_HA_ATOMIC_INC(&__objt_server(s->target)->counters.shared->tg[tgid - 1]->cli_aborts);
_HA_ATOMIC_INC(&__objt_server(s->target)->counters.shared.tg[tgid - 1]->cli_aborts);
if (!(s->flags & SF_ERR_MASK))
s->flags |= ((req->flags & CF_READ_TIMEOUT) ? SF_ERR_CLITO : SF_ERR_CLICL);
status = 400;
goto return_prx_cond;
return_srv_abort:
_HA_ATOMIC_INC(&sess->fe->fe_counters.shared->tg[tgid - 1]->srv_aborts);
_HA_ATOMIC_INC(&s->be->be_counters.shared->tg[tgid - 1]->srv_aborts);
_HA_ATOMIC_INC(&sess->fe->fe_counters.shared.tg[tgid - 1]->srv_aborts);
_HA_ATOMIC_INC(&s->be->be_counters.shared.tg[tgid - 1]->srv_aborts);
if (sess->listener && sess->listener->counters)
_HA_ATOMIC_INC(&sess->listener->counters->shared->tg[tgid - 1]->srv_aborts);
_HA_ATOMIC_INC(&sess->listener->counters->shared.tg[tgid - 1]->srv_aborts);
if (objt_server(s->target))
_HA_ATOMIC_INC(&__objt_server(s->target)->counters.shared->tg[tgid - 1]->srv_aborts);
_HA_ATOMIC_INC(&__objt_server(s->target)->counters.shared.tg[tgid - 1]->srv_aborts);
if (!(s->flags & SF_ERR_MASK))
s->flags |= ((req->flags & CF_WRITE_TIMEOUT) ? SF_ERR_SRVTO : SF_ERR_SRVCL);
status = 502;
@ -1126,20 +1126,20 @@ int http_request_forward_body(struct stream *s, struct channel *req, int an_bit)
return_int_err:
if (!(s->flags & SF_ERR_MASK))
s->flags |= SF_ERR_INTERNAL;
_HA_ATOMIC_INC(&sess->fe->fe_counters.shared->tg[tgid - 1]->internal_errors);
_HA_ATOMIC_INC(&s->be->be_counters.shared->tg[tgid - 1]->internal_errors);
_HA_ATOMIC_INC(&sess->fe->fe_counters.shared.tg[tgid - 1]->internal_errors);
_HA_ATOMIC_INC(&s->be->be_counters.shared.tg[tgid - 1]->internal_errors);
if (sess->listener && sess->listener->counters)
_HA_ATOMIC_INC(&sess->listener->counters->shared->tg[tgid - 1]->internal_errors);
_HA_ATOMIC_INC(&sess->listener->counters->shared.tg[tgid - 1]->internal_errors);
if (objt_server(s->target))
_HA_ATOMIC_INC(&__objt_server(s->target)->counters.shared->tg[tgid - 1]->internal_errors);
_HA_ATOMIC_INC(&__objt_server(s->target)->counters.shared.tg[tgid - 1]->internal_errors);
stream_report_term_evt(s->scf, strm_tevt_type_internal_err);
status = 500;
goto return_prx_cond;
return_bad_req:
_HA_ATOMIC_INC(&sess->fe->fe_counters.shared->tg[tgid - 1]->failed_req);
_HA_ATOMIC_INC(&sess->fe->fe_counters.shared.tg[tgid - 1]->failed_req);
if (sess->listener && sess->listener->counters)
_HA_ATOMIC_INC(&sess->listener->counters->shared->tg[tgid - 1]->failed_req);
_HA_ATOMIC_INC(&sess->listener->counters->shared.tg[tgid - 1]->failed_req);
stream_report_term_evt(s->scf, strm_tevt_type_proto_err);
status = 400;
/* fall through */
@ -1173,9 +1173,9 @@ static __inline int do_l7_retry(struct stream *s, struct stconn *sc)
s->flags &= ~SF_CURR_SESS;
_HA_ATOMIC_DEC(&__objt_server(s->target)->cur_sess);
}
_HA_ATOMIC_INC(&__objt_server(s->target)->counters.shared->tg[tgid - 1]->retries);
_HA_ATOMIC_INC(&__objt_server(s->target)->counters.shared.tg[tgid - 1]->retries);
}
_HA_ATOMIC_INC(&s->be->be_counters.shared->tg[tgid - 1]->retries);
_HA_ATOMIC_INC(&s->be->be_counters.shared.tg[tgid - 1]->retries);
req = &s->req;
res = &s->res;
@ -1292,9 +1292,9 @@ int http_wait_for_response(struct stream *s, struct channel *rep, int an_bit)
if (s->flags & SF_SRV_REUSED)
goto abort_keep_alive;
_HA_ATOMIC_INC(&s->be->be_counters.shared->tg[tgid - 1]->failed_resp);
_HA_ATOMIC_INC(&s->be->be_counters.shared.tg[tgid - 1]->failed_resp);
if (objt_server(s->target))
_HA_ATOMIC_INC(&__objt_server(s->target)->counters.shared->tg[tgid - 1]->failed_resp);
_HA_ATOMIC_INC(&__objt_server(s->target)->counters.shared.tg[tgid - 1]->failed_resp);
/* if the server refused the early data, just send a 425 */
if (conn && conn->err_code == CO_ER_SSL_EARLY_FAILED)
@ -1329,9 +1329,9 @@ int http_wait_for_response(struct stream *s, struct channel *rep, int an_bit)
return 0;
}
}
_HA_ATOMIC_INC(&s->be->be_counters.shared->tg[tgid - 1]->failed_resp);
_HA_ATOMIC_INC(&s->be->be_counters.shared.tg[tgid - 1]->failed_resp);
if (objt_server(s->target))
_HA_ATOMIC_INC(&__objt_server(s->target)->counters.shared->tg[tgid - 1]->failed_resp);
_HA_ATOMIC_INC(&__objt_server(s->target)->counters.shared.tg[tgid - 1]->failed_resp);
txn->status = 504;
stream_inc_http_fail_ctr(s);
@ -1350,12 +1350,12 @@ int http_wait_for_response(struct stream *s, struct channel *rep, int an_bit)
/* 3: client abort with an abortonclose */
else if ((s->scb->flags & (SC_FL_EOS|SC_FL_ABRT_DONE)) && (s->scb->flags & SC_FL_SHUT_DONE) &&
(s->scf->flags & (SC_FL_EOS|SC_FL_ABRT_DONE))) {
_HA_ATOMIC_INC(&sess->fe->fe_counters.shared->tg[tgid - 1]->cli_aborts);
_HA_ATOMIC_INC(&s->be->be_counters.shared->tg[tgid - 1]->cli_aborts);
_HA_ATOMIC_INC(&sess->fe->fe_counters.shared.tg[tgid - 1]->cli_aborts);
_HA_ATOMIC_INC(&s->be->be_counters.shared.tg[tgid - 1]->cli_aborts);
if (sess->listener && sess->listener->counters)
_HA_ATOMIC_INC(&sess->listener->counters->shared->tg[tgid - 1]->cli_aborts);
_HA_ATOMIC_INC(&sess->listener->counters->shared.tg[tgid - 1]->cli_aborts);
if (objt_server(s->target))
_HA_ATOMIC_INC(&__objt_server(s->target)->counters.shared->tg[tgid - 1]->cli_aborts);
_HA_ATOMIC_INC(&__objt_server(s->target)->counters.shared.tg[tgid - 1]->cli_aborts);
txn->status = 400;
@ -1388,9 +1388,9 @@ int http_wait_for_response(struct stream *s, struct channel *rep, int an_bit)
if (s->flags & SF_SRV_REUSED)
goto abort_keep_alive;
_HA_ATOMIC_INC(&s->be->be_counters.shared->tg[tgid - 1]->failed_resp);
_HA_ATOMIC_INC(&s->be->be_counters.shared.tg[tgid - 1]->failed_resp);
if (objt_server(s->target))
_HA_ATOMIC_INC(&__objt_server(s->target)->counters.shared->tg[tgid - 1]->failed_resp);
_HA_ATOMIC_INC(&__objt_server(s->target)->counters.shared.tg[tgid - 1]->failed_resp);
txn->status = 502;
stream_inc_http_fail_ctr(s);
@ -1411,9 +1411,9 @@ int http_wait_for_response(struct stream *s, struct channel *rep, int an_bit)
if (s->flags & SF_SRV_REUSED)
goto abort_keep_alive;
_HA_ATOMIC_INC(&s->be->be_counters.shared->tg[tgid - 1]->failed_resp);
_HA_ATOMIC_INC(&s->be->be_counters.shared.tg[tgid - 1]->failed_resp);
if (objt_server(s->target))
_HA_ATOMIC_INC(&__objt_server(s->target)->counters.shared->tg[tgid - 1]->failed_resp);
_HA_ATOMIC_INC(&__objt_server(s->target)->counters.shared.tg[tgid - 1]->failed_resp);
rep->analysers &= AN_RES_FLT_END;
if (!(s->flags & SF_ERR_MASK))
@ -1517,8 +1517,8 @@ int http_wait_for_response(struct stream *s, struct channel *rep, int an_bit)
if (n < 1 || n > 5)
n = 0;
_HA_ATOMIC_INC(&__objt_server(s->target)->counters.shared->tg[tgid - 1]->p.http.rsp[n]);
_HA_ATOMIC_INC(&__objt_server(s->target)->counters.shared->tg[tgid - 1]->p.http.cum_req);
_HA_ATOMIC_INC(&__objt_server(s->target)->counters.shared.tg[tgid - 1]->p.http.rsp[n]);
_HA_ATOMIC_INC(&__objt_server(s->target)->counters.shared.tg[tgid - 1]->p.http.cum_req);
}
/*
@ -1662,12 +1662,12 @@ int http_wait_for_response(struct stream *s, struct channel *rep, int an_bit)
return 1;
return_int_err:
_HA_ATOMIC_INC(&sess->fe->fe_counters.shared->tg[tgid - 1]->internal_errors);
_HA_ATOMIC_INC(&s->be->be_counters.shared->tg[tgid - 1]->internal_errors);
_HA_ATOMIC_INC(&sess->fe->fe_counters.shared.tg[tgid - 1]->internal_errors);
_HA_ATOMIC_INC(&s->be->be_counters.shared.tg[tgid - 1]->internal_errors);
if (sess->listener && sess->listener->counters)
_HA_ATOMIC_INC(&sess->listener->counters->shared->tg[tgid - 1]->internal_errors);
_HA_ATOMIC_INC(&sess->listener->counters->shared.tg[tgid - 1]->internal_errors);
if (objt_server(s->target))
_HA_ATOMIC_INC(&__objt_server(s->target)->counters.shared->tg[tgid - 1]->internal_errors);
_HA_ATOMIC_INC(&__objt_server(s->target)->counters.shared.tg[tgid - 1]->internal_errors);
txn->status = 500;
if (!(s->flags & SF_ERR_MASK))
s->flags |= SF_ERR_INTERNAL;
@ -1683,9 +1683,9 @@ int http_wait_for_response(struct stream *s, struct channel *rep, int an_bit)
return 0;
}
_HA_ATOMIC_INC(&s->be->be_counters.shared->tg[tgid - 1]->failed_resp);
_HA_ATOMIC_INC(&s->be->be_counters.shared.tg[tgid - 1]->failed_resp);
if (objt_server(s->target))
_HA_ATOMIC_INC(&__objt_server(s->target)->counters.shared->tg[tgid - 1]->failed_resp);
_HA_ATOMIC_INC(&__objt_server(s->target)->counters.shared.tg[tgid - 1]->failed_resp);
txn->status = 502;
stream_inc_http_fail_ctr(s);
@ -1982,36 +1982,36 @@ int http_process_res_common(struct stream *s, struct channel *rep, int an_bit, s
return 1;
deny:
_HA_ATOMIC_INC(&sess->fe->fe_counters.shared->tg[tgid - 1]->denied_resp);
_HA_ATOMIC_INC(&s->be->be_counters.shared->tg[tgid - 1]->denied_resp);
_HA_ATOMIC_INC(&sess->fe->fe_counters.shared.tg[tgid - 1]->denied_resp);
_HA_ATOMIC_INC(&s->be->be_counters.shared.tg[tgid - 1]->denied_resp);
if (sess->listener && sess->listener->counters)
_HA_ATOMIC_INC(&sess->listener->counters->shared->tg[tgid - 1]->denied_resp);
_HA_ATOMIC_INC(&sess->listener->counters->shared.tg[tgid - 1]->denied_resp);
if (objt_server(s->target))
_HA_ATOMIC_INC(&__objt_server(s->target)->counters.shared->tg[tgid - 1]->denied_resp);
_HA_ATOMIC_INC(&__objt_server(s->target)->counters.shared.tg[tgid - 1]->denied_resp);
stream_report_term_evt(s->scb, strm_tevt_type_intercepted);
goto return_prx_err;
return_fail_rewrite:
if (!(s->flags & SF_ERR_MASK))
s->flags |= SF_ERR_PRXCOND;
_HA_ATOMIC_INC(&sess->fe->fe_counters.shared->tg[tgid - 1]->failed_rewrites);
_HA_ATOMIC_INC(&s->be->be_counters.shared->tg[tgid - 1]->failed_rewrites);
_HA_ATOMIC_INC(&sess->fe->fe_counters.shared.tg[tgid - 1]->failed_rewrites);
_HA_ATOMIC_INC(&s->be->be_counters.shared.tg[tgid - 1]->failed_rewrites);
if (sess->listener && sess->listener->counters)
_HA_ATOMIC_INC(&sess->listener->counters->shared->tg[tgid - 1]->failed_rewrites);
_HA_ATOMIC_INC(&sess->listener->counters->shared.tg[tgid - 1]->failed_rewrites);
if (objt_server(s->target))
_HA_ATOMIC_INC(&__objt_server(s->target)->counters.shared->tg[tgid - 1]->failed_rewrites);
_HA_ATOMIC_INC(&__objt_server(s->target)->counters.shared.tg[tgid - 1]->failed_rewrites);
/* fall through */
return_int_err:
txn->status = 500;
if (!(s->flags & SF_ERR_MASK))
s->flags |= SF_ERR_INTERNAL;
_HA_ATOMIC_INC(&sess->fe->fe_counters.shared->tg[tgid - 1]->internal_errors);
_HA_ATOMIC_INC(&s->be->be_counters.shared->tg[tgid - 1]->internal_errors);
_HA_ATOMIC_INC(&sess->fe->fe_counters.shared.tg[tgid - 1]->internal_errors);
_HA_ATOMIC_INC(&s->be->be_counters.shared.tg[tgid - 1]->internal_errors);
if (sess->listener && sess->listener->counters)
_HA_ATOMIC_INC(&sess->listener->counters->shared->tg[tgid - 1]->internal_errors);
_HA_ATOMIC_INC(&sess->listener->counters->shared.tg[tgid - 1]->internal_errors);
if (objt_server(s->target))
_HA_ATOMIC_INC(&__objt_server(s->target)->counters.shared->tg[tgid - 1]->internal_errors);
_HA_ATOMIC_INC(&__objt_server(s->target)->counters.shared.tg[tgid - 1]->internal_errors);
stream_report_term_evt(s->scb, strm_tevt_type_internal_err);
goto return_prx_err;
@ -2019,9 +2019,9 @@ int http_process_res_common(struct stream *s, struct channel *rep, int an_bit, s
s->logs.t_data = -1; /* was not a valid response */
txn->status = 502;
stream_inc_http_fail_ctr(s);
_HA_ATOMIC_INC(&s->be->be_counters.shared->tg[tgid - 1]->failed_resp);
_HA_ATOMIC_INC(&s->be->be_counters.shared.tg[tgid - 1]->failed_resp);
if (objt_server(s->target)) {
_HA_ATOMIC_INC(&__objt_server(s->target)->counters.shared->tg[tgid - 1]->failed_resp);
_HA_ATOMIC_INC(&__objt_server(s->target)->counters.shared.tg[tgid - 1]->failed_resp);
health_adjust(__objt_server(s->target), HANA_STATUS_HTTP_RSP);
}
stream_report_term_evt(s->scb, strm_tevt_type_proto_err);
@ -2251,44 +2251,44 @@ int http_response_forward_body(struct stream *s, struct channel *res, int an_bit
return 0;
return_srv_abort:
_HA_ATOMIC_INC(&sess->fe->fe_counters.shared->tg[tgid - 1]->srv_aborts);
_HA_ATOMIC_INC(&s->be->be_counters.shared->tg[tgid - 1]->srv_aborts);
_HA_ATOMIC_INC(&sess->fe->fe_counters.shared.tg[tgid - 1]->srv_aborts);
_HA_ATOMIC_INC(&s->be->be_counters.shared.tg[tgid - 1]->srv_aborts);
if (sess->listener && sess->listener->counters)
_HA_ATOMIC_INC(&sess->listener->counters->shared->tg[tgid - 1]->srv_aborts);
_HA_ATOMIC_INC(&sess->listener->counters->shared.tg[tgid - 1]->srv_aborts);
if (objt_server(s->target))
_HA_ATOMIC_INC(&__objt_server(s->target)->counters.shared->tg[tgid - 1]->srv_aborts);
_HA_ATOMIC_INC(&__objt_server(s->target)->counters.shared.tg[tgid - 1]->srv_aborts);
stream_inc_http_fail_ctr(s);
if (!(s->flags & SF_ERR_MASK))
s->flags |= ((res->flags & CF_READ_TIMEOUT) ? SF_ERR_SRVTO : SF_ERR_SRVCL);
goto return_error;
return_cli_abort:
_HA_ATOMIC_INC(&sess->fe->fe_counters.shared->tg[tgid - 1]->cli_aborts);
_HA_ATOMIC_INC(&s->be->be_counters.shared->tg[tgid - 1]->cli_aborts);
_HA_ATOMIC_INC(&sess->fe->fe_counters.shared.tg[tgid - 1]->cli_aborts);
_HA_ATOMIC_INC(&s->be->be_counters.shared.tg[tgid - 1]->cli_aborts);
if (sess->listener && sess->listener->counters)
_HA_ATOMIC_INC(&sess->listener->counters->shared->tg[tgid - 1]->cli_aborts);
_HA_ATOMIC_INC(&sess->listener->counters->shared.tg[tgid - 1]->cli_aborts);
if (objt_server(s->target))
_HA_ATOMIC_INC(&__objt_server(s->target)->counters.shared->tg[tgid - 1]->cli_aborts);
_HA_ATOMIC_INC(&__objt_server(s->target)->counters.shared.tg[tgid - 1]->cli_aborts);
if (!(s->flags & SF_ERR_MASK))
s->flags |= ((res->flags & CF_WRITE_TIMEOUT) ? SF_ERR_CLITO : SF_ERR_CLICL);
goto return_error;
return_int_err:
_HA_ATOMIC_INC(&sess->fe->fe_counters.shared->tg[tgid - 1]->internal_errors);
_HA_ATOMIC_INC(&s->be->be_counters.shared->tg[tgid - 1]->internal_errors);
_HA_ATOMIC_INC(&sess->fe->fe_counters.shared.tg[tgid - 1]->internal_errors);
_HA_ATOMIC_INC(&s->be->be_counters.shared.tg[tgid - 1]->internal_errors);
if (sess->listener && sess->listener->counters)
_HA_ATOMIC_INC(&sess->listener->counters->shared->tg[tgid - 1]->internal_errors);
_HA_ATOMIC_INC(&sess->listener->counters->shared.tg[tgid - 1]->internal_errors);
if (objt_server(s->target))
_HA_ATOMIC_INC(&__objt_server(s->target)->counters.shared->tg[tgid - 1]->internal_errors);
_HA_ATOMIC_INC(&__objt_server(s->target)->counters.shared.tg[tgid - 1]->internal_errors);
if (!(s->flags & SF_ERR_MASK))
s->flags |= SF_ERR_INTERNAL;
stream_report_term_evt(s->scb, strm_tevt_type_internal_err);
goto return_error;
return_bad_res:
_HA_ATOMIC_INC(&s->be->be_counters.shared->tg[tgid - 1]->failed_resp);
_HA_ATOMIC_INC(&s->be->be_counters.shared.tg[tgid - 1]->failed_resp);
if (objt_server(s->target)) {
_HA_ATOMIC_INC(&__objt_server(s->target)->counters.shared->tg[tgid - 1]->failed_resp);
_HA_ATOMIC_INC(&__objt_server(s->target)->counters.shared.tg[tgid - 1]->failed_resp);
health_adjust(__objt_server(s->target), HANA_STATUS_HTTP_RSP);
}
stream_inc_http_fail_ctr(s);
@ -2571,7 +2571,7 @@ int http_apply_redirect_rule(struct redirect_rule *rule, struct stream *s, struc
req->analysers &= AN_REQ_FLT_END;
if (s->sess->fe == s->be) /* report it if the request was intercepted by the frontend */
_HA_ATOMIC_INC(&s->sess->fe->fe_counters.shared->tg[tgid - 1]->intercepted_req);
_HA_ATOMIC_INC(&s->sess->fe->fe_counters.shared.tg[tgid - 1]->intercepted_req);
}
out:
@ -4282,9 +4282,9 @@ enum rule_result http_wait_for_msg_body(struct stream *s, struct channel *chn,
txn->status = 408;
if (!(s->flags & SF_ERR_MASK))
s->flags |= SF_ERR_CLITO;
_HA_ATOMIC_INC(&sess->fe->fe_counters.shared->tg[tgid - 1]->failed_req);
_HA_ATOMIC_INC(&sess->fe->fe_counters.shared.tg[tgid - 1]->failed_req);
if (sess->listener && sess->listener->counters)
_HA_ATOMIC_INC(&sess->listener->counters->shared->tg[tgid - 1]->failed_req);
_HA_ATOMIC_INC(&sess->listener->counters->shared.tg[tgid - 1]->failed_req);
goto abort;
abort_res:


@ -1094,13 +1094,13 @@ void listener_accept(struct listener *l)
int it;
for (it = 0; it < global.nbtgroups; it++)
max += freq_ctr_remain(&p->fe_counters.shared->tg[it]->sess_per_sec, p->fe_sps_lim, 0);
max += freq_ctr_remain(&p->fe_counters.shared.tg[it]->sess_per_sec, p->fe_sps_lim, 0);
if (unlikely(!max)) {
unsigned int min_wait = 0;
for (it = 0; it < global.nbtgroups; it++) {
unsigned int cur_wait = next_event_delay(&p->fe_counters.shared->tg[it]->sess_per_sec, p->fe_sps_lim, 0);
unsigned int cur_wait = next_event_delay(&p->fe_counters.shared.tg[it]->sess_per_sec, p->fe_sps_lim, 0);
if (!it || cur_wait < min_wait)
min_wait = cur_wait;
}
@ -1586,7 +1586,7 @@ void listener_accept(struct listener *l)
dequeue_all_listeners();
if (p && !MT_LIST_ISEMPTY(&p->listener_queue) &&
(!p->fe_sps_lim || COUNTERS_SHARED_TOTAL_ARG2(p->fe_counters.shared->tg, sess_per_sec, freq_ctr_remain, p->fe_sps_lim, 0) > 0))
(!p->fe_sps_lim || COUNTERS_SHARED_TOTAL_ARG2(p->fe_counters.shared.tg, sess_per_sec, freq_ctr_remain, p->fe_sps_lim, 0) > 0))
dequeue_proxy_listeners(p, 0);
}
return;
@ -1645,14 +1645,14 @@ void listener_release(struct listener *l)
dequeue_all_listeners();
if (fe && !MT_LIST_ISEMPTY(&fe->listener_queue) &&
(!fe->fe_sps_lim || COUNTERS_SHARED_TOTAL_ARG2(fe->fe_counters.shared->tg, sess_per_sec, freq_ctr_remain, fe->fe_sps_lim, 0) > 0))
(!fe->fe_sps_lim || COUNTERS_SHARED_TOTAL_ARG2(fe->fe_counters.shared.tg, sess_per_sec, freq_ctr_remain, fe->fe_sps_lim, 0) > 0))
dequeue_proxy_listeners(fe, 0);
else if (fe) {
unsigned int wait;
int expire = TICK_ETERNITY;
if (fe->task && fe->fe_sps_lim &&
(wait = COUNTERS_SHARED_TOTAL_ARG2(fe->fe_counters.shared->tg, sess_per_sec, next_event_delay, fe->fe_sps_lim, 0))) {
(wait = COUNTERS_SHARED_TOTAL_ARG2(fe->fe_counters.shared.tg, sess_per_sec, next_event_delay, fe->fe_sps_lim, 0))) {
/* we're blocking because a limit was reached on the number of
* requests/s on the frontend. We want to re-check ASAP, which
* means in 1 ms before estimated expiration date, because the

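The listener hunks above aggregate the per-thread-group session-rate counters that now sit directly inside fe_counters: once as an open-coded loop, once through COUNTERS_SHARED_TOTAL_ARG2(). A minimal hand-rolled sketch of the same aggregation is shown below; the helper name is invented for illustration, and it assumes the HAProxy internal headers and types (struct proxy, global.nbtgroups, freq_ctr_remain()) already used by the patch.

/* Hypothetical helper, not part of the patch: sum, over every thread group,
 * how many more sessions the frontend may still accept this second. It
 * mirrors the loop in listener_accept() above, but reads the tg[] array
 * embedded in fe_counters instead of going through a separate shared block.
 */
static unsigned int fe_sess_rate_remain(struct proxy *p)
{
	unsigned int max = 0;
	int it;

	for (it = 0; it < global.nbtgroups; it++)
		max += freq_ctr_remain(&p->fe_counters.shared.tg[it]->sess_per_sec,
		                       p->fe_sps_lim, 0);
	return max;
}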

@ -5947,15 +5947,15 @@ static void syslog_io_handler(struct appctx *appctx)
parse_error:
if (l->counters)
_HA_ATOMIC_INC(&l->counters->shared->tg[tgid - 1]->failed_req);
_HA_ATOMIC_INC(&frontend->fe_counters.shared->tg[tgid - 1]->failed_req);
_HA_ATOMIC_INC(&l->counters->shared.tg[tgid - 1]->failed_req);
_HA_ATOMIC_INC(&frontend->fe_counters.shared.tg[tgid - 1]->failed_req);
goto error;
cli_abort:
if (l->counters)
_HA_ATOMIC_INC(&l->counters->shared->tg[tgid - 1]->cli_aborts);
_HA_ATOMIC_INC(&frontend->fe_counters.shared->tg[tgid - 1]->cli_aborts);
_HA_ATOMIC_INC(&l->counters->shared.tg[tgid - 1]->cli_aborts);
_HA_ATOMIC_INC(&frontend->fe_counters.shared.tg[tgid - 1]->cli_aborts);
error:
applet_set_eos(appctx);


@ -3735,10 +3735,10 @@ static int h1_handle_internal_err(struct h1c *h1c)
}
session_inc_http_req_ctr(sess);
proxy_inc_fe_req_ctr(sess->listener, sess->fe, 1);
_HA_ATOMIC_INC(&sess->fe->fe_counters.shared->tg[tgid - 1]->p.http.rsp[5]);
_HA_ATOMIC_INC(&sess->fe->fe_counters.shared->tg[tgid - 1]->internal_errors);
_HA_ATOMIC_INC(&sess->fe->fe_counters.shared.tg[tgid - 1]->p.http.rsp[5]);
_HA_ATOMIC_INC(&sess->fe->fe_counters.shared.tg[tgid - 1]->internal_errors);
if (sess->listener && sess->listener->counters)
_HA_ATOMIC_INC(&sess->listener->counters->shared->tg[tgid - 1]->internal_errors);
_HA_ATOMIC_INC(&sess->listener->counters->shared.tg[tgid - 1]->internal_errors);
h1c->errcode = 500;
ret = h1_send_error(h1c);
@ -3771,10 +3771,10 @@ static int h1_handle_parsing_error(struct h1c *h1c)
session_inc_http_req_ctr(sess);
session_inc_http_err_ctr(sess);
proxy_inc_fe_req_ctr(sess->listener, sess->fe, 1);
_HA_ATOMIC_INC(&sess->fe->fe_counters.shared->tg[tgid - 1]->p.http.rsp[4]);
_HA_ATOMIC_INC(&sess->fe->fe_counters.shared->tg[tgid - 1]->failed_req);
_HA_ATOMIC_INC(&sess->fe->fe_counters.shared.tg[tgid - 1]->p.http.rsp[4]);
_HA_ATOMIC_INC(&sess->fe->fe_counters.shared.tg[tgid - 1]->failed_req);
if (sess->listener && sess->listener->counters)
_HA_ATOMIC_INC(&sess->listener->counters->shared->tg[tgid - 1]->failed_req);
_HA_ATOMIC_INC(&sess->listener->counters->shared.tg[tgid - 1]->failed_req);
if (!h1c->errcode)
h1c->errcode = 400;
@ -3808,10 +3808,10 @@ static int h1_handle_not_impl_err(struct h1c *h1c)
session_inc_http_req_ctr(sess);
proxy_inc_fe_req_ctr(sess->listener, sess->fe, 1);
_HA_ATOMIC_INC(&sess->fe->fe_counters.shared->tg[tgid - 1]->p.http.rsp[4]);
_HA_ATOMIC_INC(&sess->fe->fe_counters.shared->tg[tgid - 1]->failed_req);
_HA_ATOMIC_INC(&sess->fe->fe_counters.shared.tg[tgid - 1]->p.http.rsp[4]);
_HA_ATOMIC_INC(&sess->fe->fe_counters.shared.tg[tgid - 1]->failed_req);
if (sess->listener && sess->listener->counters)
_HA_ATOMIC_INC(&sess->listener->counters->shared->tg[tgid - 1]->failed_req);
_HA_ATOMIC_INC(&sess->listener->counters->shared.tg[tgid - 1]->failed_req);
h1c->errcode = 501;
ret = h1_send_error(h1c);
@ -3843,10 +3843,10 @@ static int h1_handle_req_tout(struct h1c *h1c)
session_inc_http_req_ctr(sess);
proxy_inc_fe_req_ctr(sess->listener, sess->fe, 1);
_HA_ATOMIC_INC(&sess->fe->fe_counters.shared->tg[tgid - 1]->p.http.rsp[4]);
_HA_ATOMIC_INC(&sess->fe->fe_counters.shared->tg[tgid - 1]->failed_req);
_HA_ATOMIC_INC(&sess->fe->fe_counters.shared.tg[tgid - 1]->p.http.rsp[4]);
_HA_ATOMIC_INC(&sess->fe->fe_counters.shared.tg[tgid - 1]->failed_req);
if (sess->listener && sess->listener->counters)
_HA_ATOMIC_INC(&sess->listener->counters->shared->tg[tgid - 1]->failed_req);
_HA_ATOMIC_INC(&sess->listener->counters->shared.tg[tgid - 1]->failed_req);
h1c->errcode = 408;
ret = h1_send_error(h1c);

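The mux-h1 error paths above bump the frontend's per-class HTTP response counters (p.http.rsp[4], p.http.rsp[5]) in the slot of the calling thread's group. A small sketch of that write pattern follows; the helper name is invented, and the status-to-index rule (status divided by 100, with out-of-range classes folded into slot 0) is assumed from HAProxy's usual convention rather than spelled out in this patch.

/* Hypothetical helper: account one HTTP response of class status/100 on the
 * frontend, in the current thread group's embedded shared slot.
 */
static inline void fe_count_http_rsp(struct session *sess, int status)
{
	int n = status / 100;

	if (n < 1 || n > 5)
		n = 0; /* assumed convention: unknown classes are counted in slot 0 */
	_HA_ATOMIC_INC(&sess->fe->fe_counters.shared.tg[tgid - 1]->p.http.rsp[n]);
}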

@ -222,8 +222,8 @@ static inline void proxy_free_common(struct proxy *px)
ha_free(&px->id);
LIST_DEL_INIT(&px->global_list);
drop_file_name(&px->conf.file);
counters_fe_shared_drop(px->fe_counters.shared);
counters_be_shared_drop(px->be_counters.shared);
counters_fe_shared_drop(&px->fe_counters.shared);
counters_be_shared_drop(&px->be_counters.shared);
ha_free(&px->check_command);
ha_free(&px->check_path);
ha_free(&px->cookie_name);
@ -402,7 +402,7 @@ void deinit_proxy(struct proxy *p)
free(l->label);
free(l->per_thr);
if (l->counters) {
counters_fe_shared_drop(l->counters->shared);
counters_fe_shared_drop(&l->counters->shared);
free(l->counters);
}
task_destroy(l->rx.rhttp.task);
@ -1710,8 +1710,8 @@ int setup_new_proxy(struct proxy *px, const char *name, unsigned int cap, char *
ha_free(&px->defsrv);
ha_free(&px->id);
counters_fe_shared_drop(px->fe_counters.shared);
counters_be_shared_drop(px->be_counters.shared);
counters_fe_shared_drop(&px->fe_counters.shared);
counters_be_shared_drop(&px->be_counters.shared);
return 0;
}
@ -1756,8 +1756,7 @@ static int proxy_postcheck(struct proxy *px)
* proxy postparsing, see proxy_postparse()
*/
if (px->cap & PR_CAP_FE) {
px->fe_counters.shared = counters_fe_shared_get(&px->guid);
if (!px->fe_counters.shared) {
if (!counters_fe_shared_prepare(&px->fe_counters.shared, &px->guid)) {
ha_alert("out of memory while setting up shared counters for %s %s\n",
proxy_type_str(px), px->id);
err_code |= ERR_ALERT | ERR_FATAL;
@ -1769,8 +1768,7 @@ static int proxy_postcheck(struct proxy *px)
* be_counters may be used even if the proxy lacks the backend
* capability
*/
px->be_counters.shared = counters_be_shared_get(&px->guid);
if (!px->be_counters.shared) {
if (!counters_be_shared_init(&px->be_counters.shared, &px->guid)) {
ha_alert("out of memory while setting up shared counters for %s %s\n",
proxy_type_str(px), px->id);
err_code |= ERR_ALERT | ERR_FATAL;
@ -2124,9 +2122,9 @@ void proxy_cond_disable(struct proxy *p)
* the data plane but on the control plane.
*/
if (p->cap & PR_CAP_FE)
cum_conn = COUNTERS_SHARED_TOTAL(p->fe_counters.shared->tg, cum_conn, HA_ATOMIC_LOAD);
cum_conn = COUNTERS_SHARED_TOTAL(p->fe_counters.shared.tg, cum_conn, HA_ATOMIC_LOAD);
if (p->cap & PR_CAP_BE)
cum_sess = COUNTERS_SHARED_TOTAL(p->be_counters.shared->tg, cum_sess, HA_ATOMIC_LOAD);
cum_sess = COUNTERS_SHARED_TOTAL(p->be_counters.shared.tg, cum_sess, HA_ATOMIC_LOAD);
if ((p->mode == PR_MODE_TCP || p->mode == PR_MODE_HTTP || p->mode == PR_MODE_SYSLOG || p->mode == PR_MODE_SPOP) && !(p->cap & PR_CAP_INT))
ha_warning("Proxy %s stopped (cumulated conns: FE: %lld, BE: %lld).\n",
@ -2221,7 +2219,7 @@ struct task *manage_proxy(struct task *t, void *context, unsigned int state)
goto out;
if (p->fe_sps_lim &&
(wait = COUNTERS_SHARED_TOTAL_ARG2(p->fe_counters.shared->tg, sess_per_sec, next_event_delay, p->fe_sps_lim, 0))) {
(wait = COUNTERS_SHARED_TOTAL_ARG2(p->fe_counters.shared.tg, sess_per_sec, next_event_delay, p->fe_sps_lim, 0))) {
/* we're blocking because a limit was reached on the number of
* requests/s on the frontend. We want to re-check ASAP, which

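proxy_cond_disable() above totals cumulated connections with COUNTERS_SHARED_TOTAL(p->fe_counters.shared.tg, cum_conn, HA_ATOMIC_LOAD). A rough hand-rolled equivalent is sketched below; the helper name is invented, and the long long accumulator is only suggested by the %lld format of the warning message, not confirmed by the patch.

/* Hypothetical stand-in for the COUNTERS_SHARED_TOTAL() read above: sum the
 * per-thread-group cum_conn slots of the shared counters now embedded in the
 * proxy's fe_counters.
 */
static long long fe_total_cum_conn(struct proxy *p)
{
	long long total = 0;
	int it;

	for (it = 0; it < global.nbtgroups; it++)
		total += HA_ATOMIC_LOAD(&p->fe_counters.shared.tg[it]->cum_conn);
	return total;
}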

@ -3131,7 +3131,7 @@ void srv_free_params(struct server *srv)
free(srv->tcp_md5sig);
free(srv->addr_node.key);
free(srv->lb_nodes);
counters_be_shared_drop(srv->counters.shared);
counters_be_shared_drop(&srv->counters.shared);
if (srv->log_target) {
deinit_log_target(srv->log_target);
free(srv->log_target);
@ -3450,8 +3450,7 @@ int srv_init(struct server *srv)
if (err_code & ERR_CODE)
goto out;
srv->counters.shared = counters_be_shared_get(&srv->guid);
if (!srv->counters.shared) {
if (!counters_be_shared_init(&srv->counters.shared, &srv->guid)) {
ha_alert("memory error while setting up shared counters for %s/%s server\n", srv->proxy->id, srv->id);
err_code |= ERR_ALERT | ERR_FATAL;
goto out;
@ -7115,7 +7114,7 @@ static void srv_update_status(struct server *s, int type, int cause)
}
else if (s->cur_state == SRV_ST_STOPPED) {
/* server was up and is currently down */
HA_ATOMIC_INC(&s->counters.shared->tg[tgid - 1]->down_trans);
HA_ATOMIC_INC(&s->counters.shared.tg[tgid - 1]->down_trans);
_srv_event_hdl_publish(EVENT_HDL_SUB_SERVER_DOWN, cb_data.common, s);
}
@ -7127,7 +7126,7 @@ static void srv_update_status(struct server *s, int type, int cause)
HA_ATOMIC_STORE(&s->proxy->ready_srv, NULL);
s->last_change = ns_to_sec(now_ns);
HA_ATOMIC_STORE(&s->counters.shared->tg[tgid - 1]->last_state_change, s->last_change);
HA_ATOMIC_STORE(&s->counters.shared.tg[tgid - 1]->last_state_change, s->last_change);
/* publish the state change */
_srv_event_hdl_prepare_state(&cb_data.state,
@ -7147,7 +7146,7 @@ static void srv_update_status(struct server *s, int type, int cause)
if (last_change < ns_to_sec(now_ns)) // ignore negative times
s->proxy->down_time += ns_to_sec(now_ns) - last_change;
s->proxy->last_change = ns_to_sec(now_ns);
HA_ATOMIC_STORE(&s->proxy->be_counters.shared->tg[tgid - 1]->last_state_change, s->proxy->last_change);
HA_ATOMIC_STORE(&s->proxy->be_counters.shared.tg[tgid - 1]->last_state_change, s->proxy->last_change);
}
}


@ -322,7 +322,7 @@ static void srv_state_srv_update(struct server *srv, int version, char **params)
}
srv->last_change = ns_to_sec(now_ns) - srv_last_time_change;
HA_ATOMIC_STORE(&srv->counters.shared->tg[0]->last_state_change, srv->last_change);
HA_ATOMIC_STORE(&srv->counters.shared.tg[0]->last_state_change, srv->last_change);
srv->check.status = srv_check_status;
srv->check.result = srv_check_result;


@ -281,7 +281,7 @@ static int parse_stat_line(struct ist line,
if (!(px->cap & PR_CAP_FE))
return 0; /* silently ignored fe/be mismatch */
base_off_shared = (char *)px->fe_counters.shared->tg[0];
base_off_shared = (char *)px->fe_counters.shared.tg[0];
base_off = (char *)&px->fe_counters;
off = 0;
@ -290,7 +290,7 @@ static int parse_stat_line(struct ist line,
if (!(px->cap & PR_CAP_BE))
return 0; /* silently ignored fe/be mismatch */
base_off_shared = (char *)px->be_counters.shared->tg[0];
base_off_shared = (char *)px->be_counters.shared.tg[0];
base_off = (char *)&px->be_counters;
off = 1;
@ -310,7 +310,7 @@ static int parse_stat_line(struct ist line,
if (!li->counters)
return 0;
base_off_shared = (char *)li->counters->shared->tg[0];
base_off_shared = (char *)li->counters->shared.tg[0];
base_off = (char *)li->counters;
off = 0;
@ -321,7 +321,7 @@ static int parse_stat_line(struct ist line,
goto err;
srv = __objt_server(node->obj_type);
base_off_shared = (char *)srv->counters.shared->tg[0];
base_off_shared = (char *)srv->counters.shared.tg[0];
base_off = (char *)&srv->counters;
off = 1;


@ -246,9 +246,9 @@ static int stcol_hide(enum stat_idx_px idx, enum obj_type *objt)
case ST_I_PX_LASTSESS:
if (srv)
return !COUNTERS_SHARED_LAST(srv->counters.shared->tg, last_sess);
return !COUNTERS_SHARED_LAST(srv->counters.shared.tg, last_sess);
else if (px)
return !COUNTERS_SHARED_LAST(px->be_counters.shared->tg, last_sess);
return !COUNTERS_SHARED_LAST(px->be_counters.shared.tg, last_sess);
else
return 0;
@ -284,7 +284,7 @@ static struct field me_generate_field(const struct stat_col *col,
case STATS_PX_CAP_FE:
case STATS_PX_CAP_LI:
if (col->flags & STAT_COL_FL_SHARED) {
counter = (char *)&((struct fe_counters *)counters)->shared->tg;
counter = (char *)&((struct fe_counters *)counters)->shared.tg;
offset = col->metric.offset[0];
}
else
@ -295,7 +295,7 @@ static struct field me_generate_field(const struct stat_col *col,
case STATS_PX_CAP_BE:
case STATS_PX_CAP_SRV:
if (col->flags & STAT_COL_FL_SHARED) {
counter = (char *)&((struct be_counters *)counters)->shared->tg;
counter = (char *)&((struct be_counters *)counters)->shared.tg;
offset = col->metric.offset[1];
}
else
@ -315,13 +315,13 @@ static struct field me_generate_field(const struct stat_col *col,
if (idx == ST_I_PX_REQ_TOT && cap == STATS_PX_CAP_FE && !stat_file) {
struct proxy *px = __objt_proxy(objt);
const size_t nb_reqs =
sizeof(px->fe_counters.shared->tg[0]->p.http.cum_req) /
sizeof(*px->fe_counters.shared->tg[0]->p.http.cum_req);
sizeof(px->fe_counters.shared.tg[0]->p.http.cum_req) /
sizeof(*px->fe_counters.shared.tg[0]->p.http.cum_req);
uint64_t total_req = 0;
int i;
for (i = 0; i < nb_reqs; i++)
total_req += COUNTERS_SHARED_TOTAL(px->fe_counters.shared->tg, p.http.cum_req[i], HA_ATOMIC_LOAD);
total_req += COUNTERS_SHARED_TOTAL(px->fe_counters.shared.tg, p.http.cum_req[i], HA_ATOMIC_LOAD);
return mkf_u64(FN_COUNTER, total_req);
}
@ -488,11 +488,11 @@ int stats_fill_fe_line(struct proxy *px, int flags, struct field *line, int len,
int i;
uint64_t total_sess;
size_t nb_sess =
sizeof(px->fe_counters.shared->tg[0]->cum_sess_ver) / sizeof(*px->fe_counters.shared->tg[0]->cum_sess_ver);
sizeof(px->fe_counters.shared.tg[0]->cum_sess_ver) / sizeof(*px->fe_counters.shared.tg[0]->cum_sess_ver);
total_sess = COUNTERS_SHARED_TOTAL(px->fe_counters.shared->tg, cum_sess, HA_ATOMIC_LOAD);
total_sess = COUNTERS_SHARED_TOTAL(px->fe_counters.shared.tg, cum_sess, HA_ATOMIC_LOAD);
for (i = 0; i < nb_sess; i++)
total_sess -= COUNTERS_SHARED_TOTAL(px->fe_counters.shared->tg, cum_sess_ver[i], HA_ATOMIC_LOAD);
total_sess -= COUNTERS_SHARED_TOTAL(px->fe_counters.shared.tg, cum_sess_ver[i], HA_ATOMIC_LOAD);
total_sess = (int64_t)total_sess < 0 ? 0 : total_sess;
field = mkf_u64(FN_COUNTER, total_sess);
@ -828,7 +828,7 @@ int stats_fill_sv_line(struct proxy *px, struct server *sv, int flags,
if (index == NULL || *index == ST_I_PX_QTIME ||
*index == ST_I_PX_CTIME || *index == ST_I_PX_RTIME ||
*index == ST_I_PX_TTIME) {
srv_samples_counter = (px->mode == PR_MODE_HTTP) ? COUNTERS_SHARED_TOTAL(sv->counters.shared->tg, p.http.cum_req, HA_ATOMIC_LOAD) : COUNTERS_SHARED_TOTAL(sv->counters.shared->tg, cum_lbconn, HA_ATOMIC_LOAD);
srv_samples_counter = (px->mode == PR_MODE_HTTP) ? COUNTERS_SHARED_TOTAL(sv->counters.shared.tg, p.http.cum_req, HA_ATOMIC_LOAD) : COUNTERS_SHARED_TOTAL(sv->counters.shared.tg, cum_lbconn, HA_ATOMIC_LOAD);
if (srv_samples_counter < TIME_STATS_SAMPLES && srv_samples_counter > 0)
srv_samples_window = srv_samples_counter;
}
@ -1207,7 +1207,7 @@ int stats_fill_be_line(struct proxy *px, int flags, struct field *line, int len,
if (!index || *index == ST_I_PX_QTIME ||
*index == ST_I_PX_CTIME || *index == ST_I_PX_RTIME ||
*index == ST_I_PX_TTIME) {
be_samples_counter = (px->mode == PR_MODE_HTTP) ? COUNTERS_SHARED_TOTAL(px->be_counters.shared->tg, p.http.cum_req, HA_ATOMIC_LOAD) : COUNTERS_SHARED_TOTAL(px->be_counters.shared->tg, cum_lbconn, HA_ATOMIC_LOAD);
be_samples_counter = (px->mode == PR_MODE_HTTP) ? COUNTERS_SHARED_TOTAL(px->be_counters.shared.tg, p.http.cum_req, HA_ATOMIC_LOAD) : COUNTERS_SHARED_TOTAL(px->be_counters.shared.tg, cum_lbconn, HA_ATOMIC_LOAD);
if (be_samples_counter < TIME_STATS_SAMPLES && be_samples_counter > 0)
be_samples_window = be_samples_counter;
}

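Earlier in this file's hunks, stcol_hide() checks COUNTERS_SHARED_LAST(..., last_sess) to decide whether a "last session" timestamp exists at all. One possible reading of that macro is sketched below, under the assumption that "last" means the most recent, i.e. largest, per-group value; the helper name and the unsigned long type are assumptions of this sketch, not taken from the patch.

/* Hypothetical equivalent of COUNTERS_SHARED_LAST() for last_sess: keep the
 * most recent value found across all thread groups' embedded slots.
 */
static unsigned long be_last_sess_ts(struct proxy *px)
{
	unsigned long last = 0;
	int it;

	for (it = 0; it < global.nbtgroups; it++) {
		unsigned long v = HA_ATOMIC_LOAD(&px->be_counters.shared.tg[it]->last_sess);

		if (v > last)
			last = v;
	}
	return last;
}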

@ -823,14 +823,14 @@ void stream_process_counters(struct stream *s)
bytes = s->req.total - s->logs.bytes_in;
s->logs.bytes_in = s->req.total;
if (bytes) {
_HA_ATOMIC_ADD(&sess->fe->fe_counters.shared->tg[tgid - 1]->bytes_in, bytes);
_HA_ATOMIC_ADD(&s->be->be_counters.shared->tg[tgid - 1]->bytes_in, bytes);
_HA_ATOMIC_ADD(&sess->fe->fe_counters.shared.tg[tgid - 1]->bytes_in, bytes);
_HA_ATOMIC_ADD(&s->be->be_counters.shared.tg[tgid - 1]->bytes_in, bytes);
if (objt_server(s->target))
_HA_ATOMIC_ADD(&__objt_server(s->target)->counters.shared->tg[tgid - 1]->bytes_in, bytes);
_HA_ATOMIC_ADD(&__objt_server(s->target)->counters.shared.tg[tgid - 1]->bytes_in, bytes);
if (sess->listener && sess->listener->counters)
_HA_ATOMIC_ADD(&sess->listener->counters->shared->tg[tgid - 1]->bytes_in, bytes);
_HA_ATOMIC_ADD(&sess->listener->counters->shared.tg[tgid - 1]->bytes_in, bytes);
for (i = 0; i < global.tune.nb_stk_ctr; i++) {
if (!stkctr_inc_bytes_in_ctr(&s->stkctr[i], bytes))
@ -841,14 +841,14 @@ void stream_process_counters(struct stream *s)
bytes = s->res.total - s->logs.bytes_out;
s->logs.bytes_out = s->res.total;
if (bytes) {
_HA_ATOMIC_ADD(&sess->fe->fe_counters.shared->tg[tgid - 1]->bytes_out, bytes);
_HA_ATOMIC_ADD(&s->be->be_counters.shared->tg[tgid - 1]->bytes_out, bytes);
_HA_ATOMIC_ADD(&sess->fe->fe_counters.shared.tg[tgid - 1]->bytes_out, bytes);
_HA_ATOMIC_ADD(&s->be->be_counters.shared.tg[tgid - 1]->bytes_out, bytes);
if (objt_server(s->target))
_HA_ATOMIC_ADD(&__objt_server(s->target)->counters.shared->tg[tgid - 1]->bytes_out, bytes);
_HA_ATOMIC_ADD(&__objt_server(s->target)->counters.shared.tg[tgid - 1]->bytes_out, bytes);
if (sess->listener && sess->listener->counters)
_HA_ATOMIC_ADD(&sess->listener->counters->shared->tg[tgid - 1]->bytes_out, bytes);
_HA_ATOMIC_ADD(&sess->listener->counters->shared.tg[tgid - 1]->bytes_out, bytes);
for (i = 0; i < global.tune.nb_stk_ctr; i++) {
if (!stkctr_inc_bytes_out_ctr(&s->stkctr[i], bytes))
@ -1012,9 +1012,9 @@ void sess_set_term_flags(struct stream *s)
if (!(s->flags & SF_FINST_MASK)) {
if (s->scb->state == SC_ST_INI) {
/* anything before REQ in fact */
_HA_ATOMIC_INC(&strm_fe(s)->fe_counters.shared->tg[tgid - 1]->failed_req);
_HA_ATOMIC_INC(&strm_fe(s)->fe_counters.shared.tg[tgid - 1]->failed_req);
if (strm_li(s) && strm_li(s)->counters)
_HA_ATOMIC_INC(&strm_li(s)->counters->shared->tg[tgid - 1]->failed_req);
_HA_ATOMIC_INC(&strm_li(s)->counters->shared.tg[tgid - 1]->failed_req);
s->flags |= SF_FINST_R;
}
@ -1061,7 +1061,7 @@ enum act_return process_use_service(struct act_rule *rule, struct proxy *px,
if (rule->from != ACT_F_HTTP_REQ) {
if (sess->fe == s->be) /* report it if the request was intercepted by the frontend */
_HA_ATOMIC_INC(&sess->fe->fe_counters.shared->tg[tgid - 1]->intercepted_req);
_HA_ATOMIC_INC(&sess->fe->fe_counters.shared.tg[tgid - 1]->intercepted_req);
/* The flag SF_ASSIGNED prevent from server assignment. */
s->flags |= SF_ASSIGNED;
@ -1846,12 +1846,12 @@ struct task *process_stream(struct task *t, void *context, unsigned int state)
sc_shutdown(scf);
if (!(req->analysers) && !(res->analysers)) {
COUNT_IF(1, "Report a client abort (no analysers)");
_HA_ATOMIC_INC(&s->be->be_counters.shared->tg[tgid - 1]->cli_aborts);
_HA_ATOMIC_INC(&sess->fe->fe_counters.shared->tg[tgid - 1]->cli_aborts);
_HA_ATOMIC_INC(&s->be->be_counters.shared.tg[tgid - 1]->cli_aborts);
_HA_ATOMIC_INC(&sess->fe->fe_counters.shared.tg[tgid - 1]->cli_aborts);
if (sess->listener && sess->listener->counters)
_HA_ATOMIC_INC(&sess->listener->counters->shared->tg[tgid - 1]->cli_aborts);
_HA_ATOMIC_INC(&sess->listener->counters->shared.tg[tgid - 1]->cli_aborts);
if (srv)
_HA_ATOMIC_INC(&srv->counters.shared->tg[tgid - 1]->cli_aborts);
_HA_ATOMIC_INC(&srv->counters.shared.tg[tgid - 1]->cli_aborts);
if (!(s->flags & SF_ERR_MASK))
s->flags |= SF_ERR_CLICL;
if (!(s->flags & SF_FINST_MASK))
@ -1864,17 +1864,17 @@ struct task *process_stream(struct task *t, void *context, unsigned int state)
if (sc_state_in(scb->state, SC_SB_EST|SC_SB_DIS)) {
sc_abort(scb);
sc_shutdown(scb);
_HA_ATOMIC_INC(&s->be->be_counters.shared->tg[tgid - 1]->failed_resp);
_HA_ATOMIC_INC(&s->be->be_counters.shared.tg[tgid - 1]->failed_resp);
if (srv)
_HA_ATOMIC_INC(&srv->counters.shared->tg[tgid - 1]->failed_resp);
_HA_ATOMIC_INC(&srv->counters.shared.tg[tgid - 1]->failed_resp);
if (!(req->analysers) && !(res->analysers)) {
COUNT_IF(1, "Report a client abort (no analysers)");
_HA_ATOMIC_INC(&s->be->be_counters.shared->tg[tgid - 1]->srv_aborts);
_HA_ATOMIC_INC(&sess->fe->fe_counters.shared->tg[tgid - 1]->srv_aborts);
_HA_ATOMIC_INC(&s->be->be_counters.shared.tg[tgid - 1]->srv_aborts);
_HA_ATOMIC_INC(&sess->fe->fe_counters.shared.tg[tgid - 1]->srv_aborts);
if (sess->listener && sess->listener->counters)
_HA_ATOMIC_INC(&sess->listener->counters->shared->tg[tgid - 1]->srv_aborts);
_HA_ATOMIC_INC(&sess->listener->counters->shared.tg[tgid - 1]->srv_aborts);
if (srv)
_HA_ATOMIC_INC(&srv->counters.shared->tg[tgid - 1]->srv_aborts);
_HA_ATOMIC_INC(&srv->counters.shared.tg[tgid - 1]->srv_aborts);
if (!(s->flags & SF_ERR_MASK))
s->flags |= SF_ERR_SRVCL;
if (!(s->flags & SF_FINST_MASK))
@ -2178,32 +2178,32 @@ struct task *process_stream(struct task *t, void *context, unsigned int state)
req->analysers &= AN_REQ_FLT_END;
channel_auto_close(req);
if (scf->flags & SC_FL_ERROR) {
_HA_ATOMIC_INC(&s->be->be_counters.shared->tg[tgid - 1]->cli_aborts);
_HA_ATOMIC_INC(&sess->fe->fe_counters.shared->tg[tgid - 1]->cli_aborts);
_HA_ATOMIC_INC(&s->be->be_counters.shared.tg[tgid - 1]->cli_aborts);
_HA_ATOMIC_INC(&sess->fe->fe_counters.shared.tg[tgid - 1]->cli_aborts);
if (sess->listener && sess->listener->counters)
_HA_ATOMIC_INC(&sess->listener->counters->shared->tg[tgid - 1]->cli_aborts);
_HA_ATOMIC_INC(&sess->listener->counters->shared.tg[tgid - 1]->cli_aborts);
if (srv)
_HA_ATOMIC_INC(&srv->counters.shared->tg[tgid - 1]->cli_aborts);
_HA_ATOMIC_INC(&srv->counters.shared.tg[tgid - 1]->cli_aborts);
s->flags |= SF_ERR_CLICL;
COUNT_IF(1, "Report unhandled client error");
}
else if (req->flags & CF_READ_TIMEOUT) {
_HA_ATOMIC_INC(&s->be->be_counters.shared->tg[tgid - 1]->cli_aborts);
_HA_ATOMIC_INC(&sess->fe->fe_counters.shared->tg[tgid - 1]->cli_aborts);
_HA_ATOMIC_INC(&s->be->be_counters.shared.tg[tgid - 1]->cli_aborts);
_HA_ATOMIC_INC(&sess->fe->fe_counters.shared.tg[tgid - 1]->cli_aborts);
if (sess->listener && sess->listener->counters)
_HA_ATOMIC_INC(&sess->listener->counters->shared->tg[tgid - 1]->cli_aborts);
_HA_ATOMIC_INC(&sess->listener->counters->shared.tg[tgid - 1]->cli_aborts);
if (srv)
_HA_ATOMIC_INC(&srv->counters.shared->tg[tgid - 1]->cli_aborts);
_HA_ATOMIC_INC(&srv->counters.shared.tg[tgid - 1]->cli_aborts);
s->flags |= SF_ERR_CLITO;
COUNT_IF(1, "Report unhandled client timeout (RD)");
}
else {
_HA_ATOMIC_INC(&s->be->be_counters.shared->tg[tgid - 1]->srv_aborts);
_HA_ATOMIC_INC(&sess->fe->fe_counters.shared->tg[tgid - 1]->srv_aborts);
_HA_ATOMIC_INC(&s->be->be_counters.shared.tg[tgid - 1]->srv_aborts);
_HA_ATOMIC_INC(&sess->fe->fe_counters.shared.tg[tgid - 1]->srv_aborts);
if (sess->listener && sess->listener->counters)
_HA_ATOMIC_INC(&sess->listener->counters->shared->tg[tgid - 1]->srv_aborts);
_HA_ATOMIC_INC(&sess->listener->counters->shared.tg[tgid - 1]->srv_aborts);
if (srv)
_HA_ATOMIC_INC(&srv->counters.shared->tg[tgid - 1]->srv_aborts);
_HA_ATOMIC_INC(&srv->counters.shared.tg[tgid - 1]->srv_aborts);
s->flags |= SF_ERR_SRVTO;
COUNT_IF(1, "Report unhandled server timeout (WR)");
}
@ -2227,32 +2227,32 @@ struct task *process_stream(struct task *t, void *context, unsigned int state)
res->analysers &= AN_RES_FLT_END;
channel_auto_close(res);
if (scb->flags & SC_FL_ERROR) {
_HA_ATOMIC_INC(&s->be->be_counters.shared->tg[tgid - 1]->srv_aborts);
_HA_ATOMIC_INC(&sess->fe->fe_counters.shared->tg[tgid - 1]->srv_aborts);
_HA_ATOMIC_INC(&s->be->be_counters.shared.tg[tgid - 1]->srv_aborts);
_HA_ATOMIC_INC(&sess->fe->fe_counters.shared.tg[tgid - 1]->srv_aborts);
if (sess->listener && sess->listener->counters)
_HA_ATOMIC_INC(&sess->listener->counters->shared->tg[tgid - 1]->srv_aborts);
_HA_ATOMIC_INC(&sess->listener->counters->shared.tg[tgid - 1]->srv_aborts);
if (srv)
_HA_ATOMIC_INC(&srv->counters.shared->tg[tgid - 1]->srv_aborts);
_HA_ATOMIC_INC(&srv->counters.shared.tg[tgid - 1]->srv_aborts);
s->flags |= SF_ERR_SRVCL;
COUNT_IF(1, "Report unhandled server error");
}
else if (res->flags & CF_READ_TIMEOUT) {
_HA_ATOMIC_INC(&s->be->be_counters.shared->tg[tgid - 1]->srv_aborts);
_HA_ATOMIC_INC(&sess->fe->fe_counters.shared->tg[tgid - 1]->srv_aborts);
_HA_ATOMIC_INC(&s->be->be_counters.shared.tg[tgid - 1]->srv_aborts);
_HA_ATOMIC_INC(&sess->fe->fe_counters.shared.tg[tgid - 1]->srv_aborts);
if (sess->listener && sess->listener->counters)
_HA_ATOMIC_INC(&sess->listener->counters->shared->tg[tgid - 1]->srv_aborts);
_HA_ATOMIC_INC(&sess->listener->counters->shared.tg[tgid - 1]->srv_aborts);
if (srv)
_HA_ATOMIC_INC(&srv->counters.shared->tg[tgid - 1]->srv_aborts);
_HA_ATOMIC_INC(&srv->counters.shared.tg[tgid - 1]->srv_aborts);
s->flags |= SF_ERR_SRVTO;
COUNT_IF(1, "Report unhandled server timeout (RD)");
}
else {
_HA_ATOMIC_INC(&s->be->be_counters.shared->tg[tgid - 1]->cli_aborts);
_HA_ATOMIC_INC(&sess->fe->fe_counters.shared->tg[tgid - 1]->cli_aborts);
_HA_ATOMIC_INC(&s->be->be_counters.shared.tg[tgid - 1]->cli_aborts);
_HA_ATOMIC_INC(&sess->fe->fe_counters.shared.tg[tgid - 1]->cli_aborts);
if (sess->listener && sess->listener->counters)
_HA_ATOMIC_INC(&sess->listener->counters->shared->tg[tgid - 1]->cli_aborts);
_HA_ATOMIC_INC(&sess->listener->counters->shared.tg[tgid - 1]->cli_aborts);
if (srv)
_HA_ATOMIC_INC(&srv->counters.shared->tg[tgid - 1]->cli_aborts);
_HA_ATOMIC_INC(&srv->counters.shared.tg[tgid - 1]->cli_aborts);
s->flags |= SF_ERR_CLITO;
COUNT_IF(1, "Report unhandled client timeout (WR)");
}
@ -2623,12 +2623,12 @@ struct task *process_stream(struct task *t, void *context, unsigned int state)
n = 0;
if (sess->fe->mode == PR_MODE_HTTP) {
_HA_ATOMIC_INC(&sess->fe->fe_counters.shared->tg[tgid - 1]->p.http.rsp[n]);
_HA_ATOMIC_INC(&sess->fe->fe_counters.shared.tg[tgid - 1]->p.http.rsp[n]);
}
if ((s->flags & SF_BE_ASSIGNED) &&
(s->be->mode == PR_MODE_HTTP)) {
_HA_ATOMIC_INC(&s->be->be_counters.shared->tg[tgid - 1]->p.http.rsp[n]);
_HA_ATOMIC_INC(&s->be->be_counters.shared->tg[tgid - 1]->p.http.cum_req);
_HA_ATOMIC_INC(&s->be->be_counters.shared.tg[tgid - 1]->p.http.rsp[n]);
_HA_ATOMIC_INC(&s->be->be_counters.shared.tg[tgid - 1]->p.http.cum_req);
}
}
@ -2693,7 +2693,7 @@ void stream_update_time_stats(struct stream *s)
srv = objt_server(s->target);
if (srv) {
samples_window = (((s->be->mode == PR_MODE_HTTP) ?
HA_ATOMIC_LOAD(&srv->counters.shared->tg[tgid - 1]->p.http.cum_req) : HA_ATOMIC_LOAD(&srv->counters.shared->tg[tgid - 1]->cum_lbconn)) > TIME_STATS_SAMPLES) ? TIME_STATS_SAMPLES : 0;
HA_ATOMIC_LOAD(&srv->counters.shared.tg[tgid - 1]->p.http.cum_req) : HA_ATOMIC_LOAD(&srv->counters.shared.tg[tgid - 1]->cum_lbconn)) > TIME_STATS_SAMPLES) ? TIME_STATS_SAMPLES : 0;
swrate_add_dynamic(&srv->counters.q_time, samples_window, t_queue);
swrate_add_dynamic(&srv->counters.c_time, samples_window, t_connect);
swrate_add_dynamic(&srv->counters.d_time, samples_window, t_data);
@ -2704,7 +2704,7 @@ void stream_update_time_stats(struct stream *s)
HA_ATOMIC_UPDATE_MAX(&srv->counters.ttime_max, t_close);
}
samples_window = (((s->be->mode == PR_MODE_HTTP) ?
HA_ATOMIC_LOAD(&s->be->be_counters.shared->tg[tgid - 1]->p.http.cum_req) : HA_ATOMIC_LOAD(&s->be->be_counters.shared->tg[tgid - 1]->cum_lbconn)) > TIME_STATS_SAMPLES) ? TIME_STATS_SAMPLES : 0;
HA_ATOMIC_LOAD(&s->be->be_counters.shared.tg[tgid - 1]->p.http.cum_req) : HA_ATOMIC_LOAD(&s->be->be_counters.shared.tg[tgid - 1]->cum_lbconn)) > TIME_STATS_SAMPLES) ? TIME_STATS_SAMPLES : 0;
swrate_add_dynamic(&s->be->be_counters.q_time, samples_window, t_queue);
swrate_add_dynamic(&s->be->be_counters.c_time, samples_window, t_connect);
swrate_add_dynamic(&s->be->be_counters.d_time, samples_window, t_data);


@ -397,16 +397,16 @@ static enum act_return tcp_exec_action_silent_drop(struct act_rule *rule, struct
strm->req.analysers &= AN_REQ_FLT_END;
strm->res.analysers &= AN_RES_FLT_END;
if (strm->flags & SF_BE_ASSIGNED)
_HA_ATOMIC_INC(&strm->be->be_counters.shared->tg[tgid - 1]->denied_req);
_HA_ATOMIC_INC(&strm->be->be_counters.shared.tg[tgid - 1]->denied_req);
if (!(strm->flags & SF_ERR_MASK))
strm->flags |= SF_ERR_PRXCOND;
if (!(strm->flags & SF_FINST_MASK))
strm->flags |= SF_FINST_R;
}
_HA_ATOMIC_INC(&sess->fe->fe_counters.shared->tg[tgid - 1]->denied_req);
_HA_ATOMIC_INC(&sess->fe->fe_counters.shared.tg[tgid - 1]->denied_req);
if (sess->listener && sess->listener->counters)
_HA_ATOMIC_INC(&sess->listener->counters->shared->tg[tgid - 1]->denied_req);
_HA_ATOMIC_INC(&sess->listener->counters->shared.tg[tgid - 1]->denied_req);
return ACT_RET_ABRT;
}


@ -264,25 +264,25 @@ int tcp_inspect_request(struct stream *s, struct channel *req, int an_bit)
return 0;
deny:
_HA_ATOMIC_INC(&sess->fe->fe_counters.shared->tg[tgid - 1]->denied_req);
_HA_ATOMIC_INC(&sess->fe->fe_counters.shared.tg[tgid - 1]->denied_req);
if (sess->listener && sess->listener->counters)
_HA_ATOMIC_INC(&sess->listener->counters->shared->tg[tgid - 1]->denied_req);
_HA_ATOMIC_INC(&sess->listener->counters->shared.tg[tgid - 1]->denied_req);
stream_report_term_evt(s->scf, strm_tevt_type_intercepted);
goto reject;
internal:
_HA_ATOMIC_INC(&sess->fe->fe_counters.shared->tg[tgid - 1]->internal_errors);
_HA_ATOMIC_INC(&sess->fe->fe_counters.shared.tg[tgid - 1]->internal_errors);
if (sess->listener && sess->listener->counters)
_HA_ATOMIC_INC(&sess->listener->counters->shared->tg[tgid - 1]->internal_errors);
_HA_ATOMIC_INC(&sess->listener->counters->shared.tg[tgid - 1]->internal_errors);
if (!(s->flags & SF_ERR_MASK))
s->flags |= SF_ERR_INTERNAL;
stream_report_term_evt(s->scf, strm_tevt_type_internal_err);
goto reject;
invalid:
_HA_ATOMIC_INC(&sess->fe->fe_counters.shared->tg[tgid - 1]->failed_req);
_HA_ATOMIC_INC(&sess->fe->fe_counters.shared.tg[tgid - 1]->failed_req);
if (sess->listener && sess->listener->counters)
_HA_ATOMIC_INC(&sess->listener->counters->shared->tg[tgid - 1]->failed_req);
_HA_ATOMIC_INC(&sess->listener->counters->shared.tg[tgid - 1]->failed_req);
stream_report_term_evt(s->scf, strm_tevt_type_proto_err);
reject:
@ -486,31 +486,31 @@ int tcp_inspect_response(struct stream *s, struct channel *rep, int an_bit)
return 0;
deny:
_HA_ATOMIC_INC(&s->sess->fe->fe_counters.shared->tg[tgid - 1]->denied_resp);
_HA_ATOMIC_INC(&s->be->be_counters.shared->tg[tgid - 1]->denied_resp);
_HA_ATOMIC_INC(&s->sess->fe->fe_counters.shared.tg[tgid - 1]->denied_resp);
_HA_ATOMIC_INC(&s->be->be_counters.shared.tg[tgid - 1]->denied_resp);
if (s->sess->listener && s->sess->listener->counters)
_HA_ATOMIC_INC(&s->sess->listener->counters->shared->tg[tgid - 1]->denied_resp);
_HA_ATOMIC_INC(&s->sess->listener->counters->shared.tg[tgid - 1]->denied_resp);
if (objt_server(s->target))
_HA_ATOMIC_INC(&__objt_server(s->target)->counters.shared->tg[tgid - 1]->denied_resp);
_HA_ATOMIC_INC(&__objt_server(s->target)->counters.shared.tg[tgid - 1]->denied_resp);
stream_report_term_evt(s->scb, strm_tevt_type_intercepted);
goto reject;
internal:
_HA_ATOMIC_INC(&s->sess->fe->fe_counters.shared->tg[tgid - 1]->internal_errors);
_HA_ATOMIC_INC(&s->be->be_counters.shared->tg[tgid - 1]->internal_errors);
_HA_ATOMIC_INC(&s->sess->fe->fe_counters.shared.tg[tgid - 1]->internal_errors);
_HA_ATOMIC_INC(&s->be->be_counters.shared.tg[tgid - 1]->internal_errors);
if (s->sess->listener && s->sess->listener->counters)
_HA_ATOMIC_INC(&s->sess->listener->counters->shared->tg[tgid - 1]->internal_errors);
_HA_ATOMIC_INC(&s->sess->listener->counters->shared.tg[tgid - 1]->internal_errors);
if (objt_server(s->target))
_HA_ATOMIC_INC(&__objt_server(s->target)->counters.shared->tg[tgid - 1]->internal_errors);
_HA_ATOMIC_INC(&__objt_server(s->target)->counters.shared.tg[tgid - 1]->internal_errors);
if (!(s->flags & SF_ERR_MASK))
s->flags |= SF_ERR_INTERNAL;
stream_report_term_evt(s->scf, strm_tevt_type_internal_err);
goto reject;
invalid:
_HA_ATOMIC_INC(&s->be->be_counters.shared->tg[tgid - 1]->failed_resp);
_HA_ATOMIC_INC(&s->be->be_counters.shared.tg[tgid - 1]->failed_resp);
if (objt_server(s->target))
_HA_ATOMIC_INC(&__objt_server(s->target)->counters.shared->tg[tgid - 1]->failed_resp);
_HA_ATOMIC_INC(&__objt_server(s->target)->counters.shared.tg[tgid - 1]->failed_resp);
stream_report_term_evt(s->scf, strm_tevt_type_proto_err);
reject:
@ -585,9 +585,9 @@ int tcp_exec_l4_rules(struct session *sess)
goto end;
}
else if (rule->action == ACT_ACTION_DENY) {
_HA_ATOMIC_INC(&sess->fe->fe_counters.shared->tg[tgid - 1]->denied_conn);
_HA_ATOMIC_INC(&sess->fe->fe_counters.shared.tg[tgid - 1]->denied_conn);
if (sess->listener && sess->listener->counters)
_HA_ATOMIC_INC(&sess->listener->counters->shared->tg[tgid - 1]->denied_conn);
_HA_ATOMIC_INC(&sess->listener->counters->shared.tg[tgid - 1]->denied_conn);
result = 0;
goto end;
@ -673,9 +673,9 @@ int tcp_exec_l5_rules(struct session *sess)
goto end;
}
else if (rule->action == ACT_ACTION_DENY) {
_HA_ATOMIC_INC(&sess->fe->fe_counters.shared->tg[tgid - 1]->denied_sess);
_HA_ATOMIC_INC(&sess->fe->fe_counters.shared.tg[tgid - 1]->denied_sess);
if (sess->listener && sess->listener->counters)
_HA_ATOMIC_INC(&sess->listener->counters->shared->tg[tgid - 1]->denied_sess);
_HA_ATOMIC_INC(&sess->listener->counters->shared.tg[tgid - 1]->denied_sess);
result = 0;
goto end;