MEDIUM: http: Use the new _HA_ATOMIC_* macros.

Use the new _HA_ATOMIC_* macros and add barriers where needed.
Olivier Houchard, 2019-03-08 18:52:00 +01:00 (committed by Olivier Houchard)
parent b23a61f78a
commit a798bf56e2
3 changed files with 147 additions and 147 deletions
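
Background for readers unfamiliar with the macro split (not part of the commit itself): per the commit message, the new _HA_ATOMIC_* variants leave memory ordering to the caller, while the HA_ATOMIC_* forms keep the stronger, barrier-including semantics; pure statistics counters such as fe_counters.failed_req only need the increment to be atomic, not ordered against surrounding accesses, so the cheaper form is enough and explicit barriers are added only where ordering matters. The sketch below illustrates that idea with the GCC/Clang __atomic builtins; the DEMO_* macro names and definitions are assumptions made for this example and are not HAProxy's actual hathreads.h definitions.

/* Illustrative sketch only -- NOT HAProxy's real hathreads.h definitions;
 * the DEMO_* names are invented for this example. Assumes the GCC/Clang
 * __atomic builtins. Build with: cc -O2 -pthread demo.c
 */
#include <pthread.h>
#include <stdio.h>

/* Relaxed increment: atomic, but without an implied memory barrier.
 * This is the kind of operation a pure statistics counter needs. */
#define _DEMO_ATOMIC_ADD(val, i) __atomic_add_fetch((val), (i), __ATOMIC_RELAXED)

/* Fully ordered increment: roughly what a barrier-including atomic add
 * would correspond to (an assumption made for this illustration). */
#define DEMO_ATOMIC_ADD(val, i)  __atomic_add_fetch((val), (i), __ATOMIC_SEQ_CST)

static unsigned int failed_req;

static void *worker(void *arg)
{
	(void)arg;
	for (int i = 0; i < 100000; i++)
		_DEMO_ATOMIC_ADD(&failed_req, 1); /* counter bump: relaxed ordering is enough */
	return NULL;
}

int main(void)
{
	pthread_t t[4];

	for (int i = 0; i < 4; i++)
		pthread_create(&t[i], NULL, worker, NULL);
	for (int i = 0; i < 4; i++)
		pthread_join(t[i], NULL);

	/* pthread_join() already orders this read after the workers' writes,
	 * so no extra barrier is needed to print the final value. */
	printf("failed_req = %u\n", failed_req);
	return 0;
}

With relaxed increments the counter still ends up exactly at 400000; what is given up is only ordering relative to neighbouring memory accesses, which the counter bumps touched by this commit do not rely on.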

View File

@@ -192,10 +192,10 @@ static enum act_return http_action_reject(struct act_rule *rule, struct proxy *p
 	s->req.analysers = 0;
 	s->res.analysers = 0;
-	HA_ATOMIC_ADD(&s->be->be_counters.denied_req, 1);
-	HA_ATOMIC_ADD(&sess->fe->fe_counters.denied_req, 1);
+	_HA_ATOMIC_ADD(&s->be->be_counters.denied_req, 1);
+	_HA_ATOMIC_ADD(&sess->fe->fe_counters.denied_req, 1);
 	if (sess->listener && sess->listener->counters)
-		HA_ATOMIC_ADD(&sess->listener->counters->denied_req, 1);
+		_HA_ATOMIC_ADD(&sess->listener->counters->denied_req, 1);
 	if (!(s->flags & SF_ERR_MASK))
 		s->flags |= SF_ERR_PRXCOND;

View File

@@ -721,9 +721,9 @@ int http_wait_for_request(struct stream *s, struct channel *req, int an_bit)
 	req->analysers &= AN_REQ_FLT_END;
 	stream_inc_http_req_ctr(s);
 	proxy_inc_fe_req_ctr(sess->fe);
-	HA_ATOMIC_ADD(&sess->fe->fe_counters.failed_req, 1);
+	_HA_ATOMIC_ADD(&sess->fe->fe_counters.failed_req, 1);
 	if (sess->listener->counters)
-		HA_ATOMIC_ADD(&sess->listener->counters->failed_req, 1);
+		_HA_ATOMIC_ADD(&sess->listener->counters->failed_req, 1);
 	if (!(s->flags & SF_FINST_MASK))
 		s->flags |= SF_FINST_R;
@@ -754,9 +754,9 @@ int http_wait_for_request(struct stream *s, struct channel *req, int an_bit)
 	stream_inc_http_req_ctr(s);
 	proxy_inc_fe_req_ctr(sess->fe);
-	HA_ATOMIC_ADD(&sess->fe->fe_counters.failed_req, 1);
+	_HA_ATOMIC_ADD(&sess->fe->fe_counters.failed_req, 1);
 	if (sess->listener->counters)
-		HA_ATOMIC_ADD(&sess->listener->counters->failed_req, 1);
+		_HA_ATOMIC_ADD(&sess->listener->counters->failed_req, 1);
 	if (!(s->flags & SF_FINST_MASK))
 		s->flags |= SF_FINST_R;
@@ -784,9 +784,9 @@ int http_wait_for_request(struct stream *s, struct channel *req, int an_bit)
 	stream_inc_http_err_ctr(s);
 	stream_inc_http_req_ctr(s);
 	proxy_inc_fe_req_ctr(sess->fe);
-	HA_ATOMIC_ADD(&sess->fe->fe_counters.failed_req, 1);
+	_HA_ATOMIC_ADD(&sess->fe->fe_counters.failed_req, 1);
 	if (sess->listener->counters)
-		HA_ATOMIC_ADD(&sess->listener->counters->failed_req, 1);
+		_HA_ATOMIC_ADD(&sess->listener->counters->failed_req, 1);
 	if (!(s->flags & SF_FINST_MASK))
 		s->flags |= SF_FINST_R;
@@ -899,7 +899,7 @@ int http_wait_for_request(struct stream *s, struct channel *req, int an_bit)
 	struct acl_cond *cond;
 	s->flags |= SF_MONITOR;
-	HA_ATOMIC_ADD(&sess->fe->fe_counters.intercepted_req, 1);
+	_HA_ATOMIC_ADD(&sess->fe->fe_counters.intercepted_req, 1);
 	/* Check if we want to fail this monitor request or not */
 	list_for_each_entry(cond, &sess->fe->mon_fail_cond, list) {
@@ -1178,9 +1178,9 @@ int http_wait_for_request(struct stream *s, struct channel *req, int an_bit)
 	txn->status = 400;
 	http_reply_and_close(s, txn->status, http_error_message(s));
-	HA_ATOMIC_ADD(&sess->fe->fe_counters.failed_req, 1);
+	_HA_ATOMIC_ADD(&sess->fe->fe_counters.failed_req, 1);
 	if (sess->listener->counters)
-		HA_ATOMIC_ADD(&sess->listener->counters->failed_req, 1);
+		_HA_ATOMIC_ADD(&sess->listener->counters->failed_req, 1);
 return_prx_cond:
 	if (!(s->flags & SF_ERR_MASK))
@@ -1631,11 +1631,11 @@ resume_execution:
 		replace->area, s->uniq_id);
 	}
-	HA_ATOMIC_ADD(&sess->fe->fe_counters.failed_rewrites, 1);
+	_HA_ATOMIC_ADD(&sess->fe->fe_counters.failed_rewrites, 1);
 	if (sess->fe != s->be)
-		HA_ATOMIC_ADD(&s->be->be_counters.failed_rewrites, 1);
+		_HA_ATOMIC_ADD(&s->be->be_counters.failed_rewrites, 1);
 	if (sess->listener->counters)
-		HA_ATOMIC_ADD(&sess->listener->counters->failed_rewrites, 1);
+		_HA_ATOMIC_ADD(&sess->listener->counters->failed_rewrites, 1);
 	}
 	free_trash_chunk(replace);
@@ -1983,13 +1983,13 @@ resume_execution:
 		replace->area, s->uniq_id);
 	}
-	HA_ATOMIC_ADD(&sess->fe->fe_counters.failed_rewrites, 1);
+	_HA_ATOMIC_ADD(&sess->fe->fe_counters.failed_rewrites, 1);
 	if (sess->fe != s->be)
-		HA_ATOMIC_ADD(&s->be->be_counters.failed_rewrites, 1);
+		_HA_ATOMIC_ADD(&s->be->be_counters.failed_rewrites, 1);
 	if (sess->listener->counters)
-		HA_ATOMIC_ADD(&sess->listener->counters->failed_rewrites, 1);
+		_HA_ATOMIC_ADD(&sess->listener->counters->failed_rewrites, 1);
 	if (objt_server(s->target))
-		HA_ATOMIC_ADD(&objt_server(s->target)->counters.failed_rewrites, 1);
+		_HA_ATOMIC_ADD(&objt_server(s->target)->counters.failed_rewrites, 1);
 	}
 	free_trash_chunk(replace);
@@ -2634,7 +2634,7 @@ int http_process_req_common(struct stream *s, struct channel *req, int an_bit, s
 	    unlikely(objt_applet(s->target) == &http_cache_applet)) {
 		/* process the stats request now */
 		if (sess->fe == s->be) /* report it if the request was intercepted by the frontend */
-			HA_ATOMIC_ADD(&sess->fe->fe_counters.intercepted_req, 1);
+			_HA_ATOMIC_ADD(&sess->fe->fe_counters.intercepted_req, 1);
 		if (!(s->flags & SF_ERR_MASK)) // this is not really an error but it is
 			s->flags |= SF_ERR_LOCAL;  // to mark that it comes from the proxy
@@ -2708,11 +2708,11 @@ int http_process_req_common(struct stream *s, struct channel *req, int an_bit, s
 	if (!req->analyse_exp)
 		req->analyse_exp = tick_add(now_ms, 0);
 	stream_inc_http_err_ctr(s);
-	HA_ATOMIC_ADD(&sess->fe->fe_counters.denied_req, 1);
+	_HA_ATOMIC_ADD(&sess->fe->fe_counters.denied_req, 1);
 	if (sess->fe != s->be)
-		HA_ATOMIC_ADD(&s->be->be_counters.denied_req, 1);
+		_HA_ATOMIC_ADD(&s->be->be_counters.denied_req, 1);
 	if (sess->listener->counters)
-		HA_ATOMIC_ADD(&sess->listener->counters->denied_req, 1);
+		_HA_ATOMIC_ADD(&sess->listener->counters->denied_req, 1);
 	goto done_without_exp;
 deny: /* this request was blocked (denied) */
@@ -2727,11 +2727,11 @@ int http_process_req_common(struct stream *s, struct channel *req, int an_bit, s
 	s->logs.tv_request = now;
 	http_reply_and_close(s, txn->status, http_error_message(s));
 	stream_inc_http_err_ctr(s);
-	HA_ATOMIC_ADD(&sess->fe->fe_counters.denied_req, 1);
+	_HA_ATOMIC_ADD(&sess->fe->fe_counters.denied_req, 1);
 	if (sess->fe != s->be)
-		HA_ATOMIC_ADD(&s->be->be_counters.denied_req, 1);
+		_HA_ATOMIC_ADD(&s->be->be_counters.denied_req, 1);
 	if (sess->listener->counters)
-		HA_ATOMIC_ADD(&sess->listener->counters->denied_req, 1);
+		_HA_ATOMIC_ADD(&sess->listener->counters->denied_req, 1);
 	goto return_prx_cond;
 return_bad_req:
@@ -2748,9 +2748,9 @@ int http_process_req_common(struct stream *s, struct channel *req, int an_bit, s
 	txn->status = 400;
 	http_reply_and_close(s, txn->status, http_error_message(s));
-	HA_ATOMIC_ADD(&sess->fe->fe_counters.failed_req, 1);
+	_HA_ATOMIC_ADD(&sess->fe->fe_counters.failed_req, 1);
 	if (sess->listener->counters)
-		HA_ATOMIC_ADD(&sess->listener->counters->failed_req, 1);
+		_HA_ATOMIC_ADD(&sess->listener->counters->failed_req, 1);
 return_prx_cond:
 	if (!(s->flags & SF_ERR_MASK))
@@ -3095,9 +3095,9 @@ int http_process_request(struct stream *s, struct channel *req, int an_bit)
 	req->analysers &= AN_REQ_FLT_END;
 	http_reply_and_close(s, txn->status, http_error_message(s));
-	HA_ATOMIC_ADD(&sess->fe->fe_counters.failed_req, 1);
+	_HA_ATOMIC_ADD(&sess->fe->fe_counters.failed_req, 1);
 	if (sess->listener->counters)
-		HA_ATOMIC_ADD(&sess->listener->counters->failed_req, 1);
+		_HA_ATOMIC_ADD(&sess->listener->counters->failed_req, 1);
 	if (!(s->flags & SF_ERR_MASK))
 		s->flags |= SF_ERR_PRXCOND;
@@ -3307,9 +3307,9 @@ int http_wait_for_request_body(struct stream *s, struct channel *req, int an_bit
 return_err_msg:
 	req->analysers &= AN_REQ_FLT_END;
-	HA_ATOMIC_ADD(&sess->fe->fe_counters.failed_req, 1);
+	_HA_ATOMIC_ADD(&sess->fe->fe_counters.failed_req, 1);
 	if (sess->listener->counters)
-		HA_ATOMIC_ADD(&sess->listener->counters->failed_req, 1);
+		_HA_ATOMIC_ADD(&sess->listener->counters->failed_req, 1);
 	return 0;
 }
@@ -3404,7 +3404,7 @@ void http_end_txn_clean_session(struct stream *s)
 	}
 	if (s->flags & SF_BE_ASSIGNED) {
-		HA_ATOMIC_SUB(&be->beconn, 1);
+		_HA_ATOMIC_SUB(&be->beconn, 1);
 		if (unlikely(s->srv_conn))
 			sess_change_server(s, NULL);
 	}
@@ -3420,12 +3420,12 @@ void http_end_txn_clean_session(struct stream *s)
 	n = 0;
 	if (fe->mode == PR_MODE_HTTP) {
-		HA_ATOMIC_ADD(&fe->fe_counters.p.http.rsp[n], 1);
+		_HA_ATOMIC_ADD(&fe->fe_counters.p.http.rsp[n], 1);
 	}
 	if ((s->flags & SF_BE_ASSIGNED) &&
 	    (be->mode == PR_MODE_HTTP)) {
-		HA_ATOMIC_ADD(&be->be_counters.p.http.rsp[n], 1);
-		HA_ATOMIC_ADD(&be->be_counters.p.http.cum_req, 1);
+		_HA_ATOMIC_ADD(&be->be_counters.p.http.rsp[n], 1);
+		_HA_ATOMIC_ADD(&be->be_counters.p.http.cum_req, 1);
 	}
 }
@@ -3471,7 +3471,7 @@ void http_end_txn_clean_session(struct stream *s)
 	if (objt_server(s->target)) {
 		if (s->flags & SF_CURR_SESS) {
 			s->flags &= ~SF_CURR_SESS;
-			HA_ATOMIC_SUB(&__objt_server(s->target)->cur_sess, 1);
+			_HA_ATOMIC_SUB(&__objt_server(s->target)->cur_sess, 1);
 		}
 		if (may_dequeue_tasks(objt_server(s->target), be))
 			process_srv_queue(objt_server(s->target));
@@ -3822,9 +3822,9 @@ int http_sync_res_state(struct stream *s)
 	else if (chn->flags & CF_SHUTW) {
 		txn->rsp.err_state = txn->rsp.msg_state;
 		txn->rsp.msg_state = HTTP_MSG_ERROR;
-		HA_ATOMIC_ADD(&s->be->be_counters.cli_aborts, 1);
+		_HA_ATOMIC_ADD(&s->be->be_counters.cli_aborts, 1);
 		if (objt_server(s->target))
-			HA_ATOMIC_ADD(&objt_server(s->target)->counters.cli_aborts, 1);
+			_HA_ATOMIC_ADD(&objt_server(s->target)->counters.cli_aborts, 1);
 	}
 	goto wait_other_side;
 }
@@ -4101,10 +4101,10 @@ int http_request_forward_body(struct stream *s, struct channel *req, int an_bit)
 		s->flags |= SF_FINST_D;
 	}
-	HA_ATOMIC_ADD(&sess->fe->fe_counters.cli_aborts, 1);
-	HA_ATOMIC_ADD(&s->be->be_counters.cli_aborts, 1);
+	_HA_ATOMIC_ADD(&sess->fe->fe_counters.cli_aborts, 1);
+	_HA_ATOMIC_ADD(&s->be->be_counters.cli_aborts, 1);
 	if (objt_server(s->target))
-		HA_ATOMIC_ADD(&objt_server(s->target)->counters.cli_aborts, 1);
+		_HA_ATOMIC_ADD(&objt_server(s->target)->counters.cli_aborts, 1);
 	goto return_bad_req_stats_ok;
 }
@@ -4139,9 +4139,9 @@ int http_request_forward_body(struct stream *s, struct channel *req, int an_bit)
 	return 0;
 return_bad_req: /* let's centralize all bad requests */
-	HA_ATOMIC_ADD(&sess->fe->fe_counters.failed_req, 1);
+	_HA_ATOMIC_ADD(&sess->fe->fe_counters.failed_req, 1);
 	if (sess->listener->counters)
-		HA_ATOMIC_ADD(&sess->listener->counters->failed_req, 1);
+		_HA_ATOMIC_ADD(&sess->listener->counters->failed_req, 1);
 return_bad_req_stats_ok:
 	txn->req.err_state = txn->req.msg_state;
@@ -4179,10 +4179,10 @@ int http_request_forward_body(struct stream *s, struct channel *req, int an_bit)
 	req->analysers &= AN_REQ_FLT_END;
 	s->res.analysers &= AN_RES_FLT_END; /* we're in data phase, we want to abort both directions */
-	HA_ATOMIC_ADD(&sess->fe->fe_counters.srv_aborts, 1);
-	HA_ATOMIC_ADD(&s->be->be_counters.srv_aborts, 1);
+	_HA_ATOMIC_ADD(&sess->fe->fe_counters.srv_aborts, 1);
+	_HA_ATOMIC_ADD(&s->be->be_counters.srv_aborts, 1);
 	if (objt_server(s->target))
-		HA_ATOMIC_ADD(&objt_server(s->target)->counters.srv_aborts, 1);
+		_HA_ATOMIC_ADD(&objt_server(s->target)->counters.srv_aborts, 1);
 	if (!(s->flags & SF_ERR_MASK))
 		s->flags |= SF_ERR_SRVCL;
@@ -4312,9 +4312,9 @@ int http_wait_for_response(struct stream *s, struct channel *rep, int an_bit)
 	if (msg->msg_state == HTTP_MSG_ERROR || msg->err_pos >= 0)
 		http_capture_bad_message(s->be, s, msg, msg->err_state, sess->fe);
-	HA_ATOMIC_ADD(&s->be->be_counters.failed_resp, 1);
+	_HA_ATOMIC_ADD(&s->be->be_counters.failed_resp, 1);
 	if (objt_server(s->target)) {
-		HA_ATOMIC_ADD(&__objt_server(s->target)->counters.failed_resp, 1);
+		_HA_ATOMIC_ADD(&__objt_server(s->target)->counters.failed_resp, 1);
 		health_adjust(__objt_server(s->target), HANA_STATUS_HTTP_HDRRSP);
 	}
 abort_response:
@@ -4347,9 +4347,9 @@ int http_wait_for_response(struct stream *s, struct channel *rep, int an_bit)
 	else if (txn->flags & TX_NOT_FIRST)
 		goto abort_keep_alive;
-	HA_ATOMIC_ADD(&s->be->be_counters.failed_resp, 1);
+	_HA_ATOMIC_ADD(&s->be->be_counters.failed_resp, 1);
 	if (objt_server(s->target)) {
-		HA_ATOMIC_ADD(&__objt_server(s->target)->counters.failed_resp, 1);
+		_HA_ATOMIC_ADD(&__objt_server(s->target)->counters.failed_resp, 1);
 		health_adjust(__objt_server(s->target), HANA_STATUS_HTTP_READ_ERROR);
 	}
@@ -4383,9 +4383,9 @@ int http_wait_for_response(struct stream *s, struct channel *rep, int an_bit)
 	if (msg->err_pos >= 0)
 		http_capture_bad_message(s->be, s, msg, msg->err_state, sess->fe);
-	HA_ATOMIC_ADD(&s->be->be_counters.failed_resp, 1);
+	_HA_ATOMIC_ADD(&s->be->be_counters.failed_resp, 1);
 	if (objt_server(s->target)) {
-		HA_ATOMIC_ADD(&__objt_server(s->target)->counters.failed_resp, 1);
+		_HA_ATOMIC_ADD(&__objt_server(s->target)->counters.failed_resp, 1);
 		health_adjust(__objt_server(s->target), HANA_STATUS_HTTP_READ_TIMEOUT);
 	}
@@ -4405,10 +4405,10 @@ int http_wait_for_response(struct stream *s, struct channel *rep, int an_bit)
 	/* client abort with an abortonclose */
 	else if ((rep->flags & CF_SHUTR) && ((s->req.flags & (CF_SHUTR|CF_SHUTW)) == (CF_SHUTR|CF_SHUTW))) {
-		HA_ATOMIC_ADD(&sess->fe->fe_counters.cli_aborts, 1);
-		HA_ATOMIC_ADD(&s->be->be_counters.cli_aborts, 1);
+		_HA_ATOMIC_ADD(&sess->fe->fe_counters.cli_aborts, 1);
+		_HA_ATOMIC_ADD(&s->be->be_counters.cli_aborts, 1);
 		if (objt_server(s->target))
-			HA_ATOMIC_ADD(&objt_server(s->target)->counters.cli_aborts, 1);
+			_HA_ATOMIC_ADD(&objt_server(s->target)->counters.cli_aborts, 1);
 		rep->analysers &= AN_RES_FLT_END;
 		channel_auto_close(rep);
@@ -4433,9 +4433,9 @@ int http_wait_for_response(struct stream *s, struct channel *rep, int an_bit)
 	else if (txn->flags & TX_NOT_FIRST)
 		goto abort_keep_alive;
-	HA_ATOMIC_ADD(&s->be->be_counters.failed_resp, 1);
+	_HA_ATOMIC_ADD(&s->be->be_counters.failed_resp, 1);
 	if (objt_server(s->target)) {
-		HA_ATOMIC_ADD(&__objt_server(s->target)->counters.failed_resp, 1);
+		_HA_ATOMIC_ADD(&__objt_server(s->target)->counters.failed_resp, 1);
 		health_adjust(__objt_server(s->target), HANA_STATUS_HTTP_BROKEN_PIPE);
 	}
@@ -4460,7 +4460,7 @@ int http_wait_for_response(struct stream *s, struct channel *rep, int an_bit)
 	else if (txn->flags & TX_NOT_FIRST)
 		goto abort_keep_alive;
-	HA_ATOMIC_ADD(&s->be->be_counters.failed_resp, 1);
+	_HA_ATOMIC_ADD(&s->be->be_counters.failed_resp, 1);
 	rep->analysers &= AN_RES_FLT_END;
 	channel_auto_close(rep);
@@ -4501,7 +4501,7 @@ int http_wait_for_response(struct stream *s, struct channel *rep, int an_bit)
 	stream_inc_http_err_ctr(s);
 	if (objt_server(s->target))
-		HA_ATOMIC_ADD(&objt_server(s->target)->counters.p.http.rsp[n], 1);
+		_HA_ATOMIC_ADD(&objt_server(s->target)->counters.p.http.rsp[n], 1);
 	/* RFC7230#2.6 has enforced the format of the HTTP version string to be
 	 * exactly one digit "." one digit. This check may be disabled using
@@ -4935,10 +4935,10 @@ int http_process_res_common(struct stream *s, struct channel *rep, int an_bit, s
 	if (apply_filters_to_response(s, rep, rule_set) < 0) {
 return_bad_resp:
 		if (objt_server(s->target)) {
-			HA_ATOMIC_ADD(&__objt_server(s->target)->counters.failed_resp, 1);
+			_HA_ATOMIC_ADD(&__objt_server(s->target)->counters.failed_resp, 1);
 			health_adjust(__objt_server(s->target), HANA_STATUS_HTTP_RSP);
 		}
-		HA_ATOMIC_ADD(&s->be->be_counters.failed_resp, 1);
+		_HA_ATOMIC_ADD(&s->be->be_counters.failed_resp, 1);
 return_srv_prx_502:
 		rep->analysers &= AN_RES_FLT_END;
 		txn->status = 502;
@@ -4957,12 +4957,12 @@ int http_process_res_common(struct stream *s, struct channel *rep, int an_bit, s
 	/* has the response been denied ? */
 	if (txn->flags & TX_SVDENY) {
 		if (objt_server(s->target))
-			HA_ATOMIC_ADD(&objt_server(s->target)->counters.failed_secu, 1);
-		HA_ATOMIC_ADD(&s->be->be_counters.denied_resp, 1);
-		HA_ATOMIC_ADD(&sess->fe->fe_counters.denied_resp, 1);
+			_HA_ATOMIC_ADD(&objt_server(s->target)->counters.failed_secu, 1);
+		_HA_ATOMIC_ADD(&s->be->be_counters.denied_resp, 1);
+		_HA_ATOMIC_ADD(&sess->fe->fe_counters.denied_resp, 1);
 		if (sess->listener->counters)
-			HA_ATOMIC_ADD(&sess->listener->counters->denied_resp, 1);
+			_HA_ATOMIC_ADD(&sess->listener->counters->denied_resp, 1);
 		goto return_srv_prx_502;
 	}
@@ -5109,12 +5109,12 @@ int http_process_res_common(struct stream *s, struct channel *rep, int an_bit, s
 	 * the 'checkcache' option, and send an alert.
 	 */
 	if (objt_server(s->target))
-		HA_ATOMIC_ADD(&objt_server(s->target)->counters.failed_secu, 1);
-	HA_ATOMIC_ADD(&s->be->be_counters.denied_resp, 1);
-	HA_ATOMIC_ADD(&sess->fe->fe_counters.denied_resp, 1);
+		_HA_ATOMIC_ADD(&objt_server(s->target)->counters.failed_secu, 1);
+	_HA_ATOMIC_ADD(&s->be->be_counters.denied_resp, 1);
+	_HA_ATOMIC_ADD(&sess->fe->fe_counters.denied_resp, 1);
 	if (sess->listener->counters)
-		HA_ATOMIC_ADD(&sess->listener->counters->denied_resp, 1);
+		_HA_ATOMIC_ADD(&sess->listener->counters->denied_resp, 1);
 	ha_alert("Blocking cacheable cookie in response from instance %s, server %s.\n",
 		 s->be->id, objt_server(s->target) ? objt_server(s->target)->id : "<dispatch>");
@@ -5301,9 +5301,9 @@ int http_response_forward_body(struct stream *s, struct channel *res, int an_bit
 	if (!ci_data(res)) {
 		if (!(s->flags & SF_ERR_MASK))
 			s->flags |= SF_ERR_SRVCL;
-		HA_ATOMIC_ADD(&s->be->be_counters.srv_aborts, 1);
+		_HA_ATOMIC_ADD(&s->be->be_counters.srv_aborts, 1);
 		if (objt_server(s->target))
-			HA_ATOMIC_ADD(&objt_server(s->target)->counters.srv_aborts, 1);
+			_HA_ATOMIC_ADD(&objt_server(s->target)->counters.srv_aborts, 1);
 		goto return_bad_res_stats_ok;
 	}
 }
@@ -5339,9 +5339,9 @@ int http_response_forward_body(struct stream *s, struct channel *res, int an_bit
 	return 0;
 return_bad_res: /* let's centralize all bad responses */
-	HA_ATOMIC_ADD(&s->be->be_counters.failed_resp, 1);
+	_HA_ATOMIC_ADD(&s->be->be_counters.failed_resp, 1);
 	if (objt_server(s->target))
-		HA_ATOMIC_ADD(&objt_server(s->target)->counters.failed_resp, 1);
+		_HA_ATOMIC_ADD(&objt_server(s->target)->counters.failed_resp, 1);
 return_bad_res_stats_ok:
 	txn->rsp.err_state = txn->rsp.msg_state;
@@ -5367,10 +5367,10 @@ int http_response_forward_body(struct stream *s, struct channel *res, int an_bit
 	res->analysers &= AN_RES_FLT_END;
 	s->req.analysers &= AN_REQ_FLT_END; /* we're in data phase, we want to abort both directions */
-	HA_ATOMIC_ADD(&sess->fe->fe_counters.cli_aborts, 1);
-	HA_ATOMIC_ADD(&s->be->be_counters.cli_aborts, 1);
+	_HA_ATOMIC_ADD(&sess->fe->fe_counters.cli_aborts, 1);
+	_HA_ATOMIC_ADD(&s->be->be_counters.cli_aborts, 1);
 	if (objt_server(s->target))
-		HA_ATOMIC_ADD(&objt_server(s->target)->counters.cli_aborts, 1);
+		_HA_ATOMIC_ADD(&objt_server(s->target)->counters.cli_aborts, 1);
 	if (!(s->flags & SF_ERR_MASK))
 		s->flags |= SF_ERR_CLICL;
@@ -7306,7 +7306,7 @@ void http_reset_txn(struct stream *s)
 	s->target = NULL;
 	/* re-init store persistence */
 	s->store_count = 0;
-	s->uniq_id = HA_ATOMIC_XADD(&global.req_count, 1);
+	s->uniq_id = _HA_ATOMIC_XADD(&global.req_count, 1);
 	s->req.flags |= CF_READ_DONTWAIT; /* one read is usually enough */

View File

@@ -159,9 +159,9 @@ int htx_wait_for_request(struct stream *s, struct channel *req, int an_bit)
 	stream_inc_http_err_ctr(s);
 	stream_inc_http_req_ctr(s);
 	proxy_inc_fe_req_ctr(sess->fe);
-	HA_ATOMIC_ADD(&sess->fe->fe_counters.failed_req, 1);
+	_HA_ATOMIC_ADD(&sess->fe->fe_counters.failed_req, 1);
 	if (sess->listener->counters)
-		HA_ATOMIC_ADD(&sess->listener->counters->failed_req, 1);
+		_HA_ATOMIC_ADD(&sess->listener->counters->failed_req, 1);
 	txn->status = 400;
 	msg->err_state = msg->msg_state;
@@ -188,9 +188,9 @@ int htx_wait_for_request(struct stream *s, struct channel *req, int an_bit)
 	stream_inc_http_err_ctr(s);
 	stream_inc_http_req_ctr(s);
 	proxy_inc_fe_req_ctr(sess->fe);
-	HA_ATOMIC_ADD(&sess->fe->fe_counters.failed_req, 1);
+	_HA_ATOMIC_ADD(&sess->fe->fe_counters.failed_req, 1);
 	if (sess->listener->counters)
-		HA_ATOMIC_ADD(&sess->listener->counters->failed_req, 1);
+		_HA_ATOMIC_ADD(&sess->listener->counters->failed_req, 1);
 	txn->status = 408;
 	msg->err_state = msg->msg_state;
@@ -217,9 +217,9 @@ int htx_wait_for_request(struct stream *s, struct channel *req, int an_bit)
 	stream_inc_http_err_ctr(s);
 	stream_inc_http_req_ctr(s);
 	proxy_inc_fe_req_ctr(sess->fe);
-	HA_ATOMIC_ADD(&sess->fe->fe_counters.failed_req, 1);
+	_HA_ATOMIC_ADD(&sess->fe->fe_counters.failed_req, 1);
 	if (sess->listener->counters)
-		HA_ATOMIC_ADD(&sess->listener->counters->failed_req, 1);
+		_HA_ATOMIC_ADD(&sess->listener->counters->failed_req, 1);
 	txn->status = 400;
 	msg->err_state = msg->msg_state;
@@ -344,7 +344,7 @@ int htx_wait_for_request(struct stream *s, struct channel *req, int an_bit)
 	struct acl_cond *cond;
 	s->flags |= SF_MONITOR;
-	HA_ATOMIC_ADD(&sess->fe->fe_counters.intercepted_req, 1);
+	_HA_ATOMIC_ADD(&sess->fe->fe_counters.intercepted_req, 1);
 	/* Check if we want to fail this monitor request or not */
 	list_for_each_entry(cond, &sess->fe->mon_fail_cond, list) {
@@ -457,9 +457,9 @@ int htx_wait_for_request(struct stream *s, struct channel *req, int an_bit)
 	txn->req.err_state = txn->req.msg_state;
 	txn->req.msg_state = HTTP_MSG_ERROR;
 	htx_reply_and_close(s, txn->status, htx_error_message(s));
-	HA_ATOMIC_ADD(&sess->fe->fe_counters.failed_req, 1);
+	_HA_ATOMIC_ADD(&sess->fe->fe_counters.failed_req, 1);
 	if (sess->listener->counters)
-		HA_ATOMIC_ADD(&sess->listener->counters->failed_req, 1);
+		_HA_ATOMIC_ADD(&sess->listener->counters->failed_req, 1);
 return_prx_cond:
 	if (!(s->flags & SF_ERR_MASK))
@@ -616,7 +616,7 @@ int htx_process_req_common(struct stream *s, struct channel *req, int an_bit, st
 	    unlikely(objt_applet(s->target) == &http_cache_applet)) {
 		/* process the stats request now */
 		if (sess->fe == s->be) /* report it if the request was intercepted by the frontend */
-			HA_ATOMIC_ADD(&sess->fe->fe_counters.intercepted_req, 1);
+			_HA_ATOMIC_ADD(&sess->fe->fe_counters.intercepted_req, 1);
 		if (!(s->flags & SF_ERR_MASK)) // this is not really an error but it is
 			s->flags |= SF_ERR_LOCAL;  // to mark that it comes from the proxy
@@ -690,11 +690,11 @@ int htx_process_req_common(struct stream *s, struct channel *req, int an_bit, st
 	if (!req->analyse_exp)
 		req->analyse_exp = tick_add(now_ms, 0);
 	stream_inc_http_err_ctr(s);
-	HA_ATOMIC_ADD(&sess->fe->fe_counters.denied_req, 1);
+	_HA_ATOMIC_ADD(&sess->fe->fe_counters.denied_req, 1);
 	if (sess->fe != s->be)
-		HA_ATOMIC_ADD(&s->be->be_counters.denied_req, 1);
+		_HA_ATOMIC_ADD(&s->be->be_counters.denied_req, 1);
 	if (sess->listener->counters)
-		HA_ATOMIC_ADD(&sess->listener->counters->denied_req, 1);
+		_HA_ATOMIC_ADD(&sess->listener->counters->denied_req, 1);
 	goto done_without_exp;
 deny: /* this request was blocked (denied) */
@@ -709,11 +709,11 @@ int htx_process_req_common(struct stream *s, struct channel *req, int an_bit, st
 	s->logs.tv_request = now;
 	htx_reply_and_close(s, txn->status, htx_error_message(s));
 	stream_inc_http_err_ctr(s);
-	HA_ATOMIC_ADD(&sess->fe->fe_counters.denied_req, 1);
+	_HA_ATOMIC_ADD(&sess->fe->fe_counters.denied_req, 1);
 	if (sess->fe != s->be)
-		HA_ATOMIC_ADD(&s->be->be_counters.denied_req, 1);
+		_HA_ATOMIC_ADD(&s->be->be_counters.denied_req, 1);
 	if (sess->listener->counters)
-		HA_ATOMIC_ADD(&sess->listener->counters->denied_req, 1);
+		_HA_ATOMIC_ADD(&sess->listener->counters->denied_req, 1);
 	goto return_prx_cond;
 return_bad_req:
@@ -722,9 +722,9 @@ int htx_process_req_common(struct stream *s, struct channel *req, int an_bit, st
 	txn->status = 400;
 	htx_reply_and_close(s, txn->status, htx_error_message(s));
-	HA_ATOMIC_ADD(&sess->fe->fe_counters.failed_req, 1);
+	_HA_ATOMIC_ADD(&sess->fe->fe_counters.failed_req, 1);
 	if (sess->listener->counters)
-		HA_ATOMIC_ADD(&sess->listener->counters->failed_req, 1);
+		_HA_ATOMIC_ADD(&sess->listener->counters->failed_req, 1);
 return_prx_cond:
 	if (!(s->flags & SF_ERR_MASK))
@@ -984,9 +984,9 @@ int htx_process_request(struct stream *s, struct channel *req, int an_bit)
 	req->analysers &= AN_REQ_FLT_END;
 	htx_reply_and_close(s, txn->status, htx_error_message(s));
-	HA_ATOMIC_ADD(&sess->fe->fe_counters.failed_req, 1);
+	_HA_ATOMIC_ADD(&sess->fe->fe_counters.failed_req, 1);
 	if (sess->listener->counters)
-		HA_ATOMIC_ADD(&sess->listener->counters->failed_req, 1);
+		_HA_ATOMIC_ADD(&sess->listener->counters->failed_req, 1);
 	if (!(s->flags & SF_ERR_MASK))
 		s->flags |= SF_ERR_PRXCOND;
@@ -1148,9 +1148,9 @@ int htx_wait_for_request_body(struct stream *s, struct channel *req, int an_bit)
 return_err_msg:
 	req->analysers &= AN_REQ_FLT_END;
-	HA_ATOMIC_ADD(&sess->fe->fe_counters.failed_req, 1);
+	_HA_ATOMIC_ADD(&sess->fe->fe_counters.failed_req, 1);
 	if (sess->listener->counters)
-		HA_ATOMIC_ADD(&sess->listener->counters->failed_req, 1);
+		_HA_ATOMIC_ADD(&sess->listener->counters->failed_req, 1);
 	return 0;
 }
@@ -1321,10 +1321,10 @@ int htx_request_forward_body(struct stream *s, struct channel *req, int an_bit)
 		s->flags |= SF_FINST_D;
 	}
-	HA_ATOMIC_ADD(&sess->fe->fe_counters.cli_aborts, 1);
-	HA_ATOMIC_ADD(&s->be->be_counters.cli_aborts, 1);
+	_HA_ATOMIC_ADD(&sess->fe->fe_counters.cli_aborts, 1);
+	_HA_ATOMIC_ADD(&s->be->be_counters.cli_aborts, 1);
 	if (objt_server(s->target))
-		HA_ATOMIC_ADD(&objt_server(s->target)->counters.cli_aborts, 1);
+		_HA_ATOMIC_ADD(&objt_server(s->target)->counters.cli_aborts, 1);
 	goto return_bad_req_stats_ok;
 }
@@ -1362,9 +1362,9 @@ int htx_request_forward_body(struct stream *s, struct channel *req, int an_bit)
 	return 0;
 return_bad_req: /* let's centralize all bad requests */
-	HA_ATOMIC_ADD(&sess->fe->fe_counters.failed_req, 1);
+	_HA_ATOMIC_ADD(&sess->fe->fe_counters.failed_req, 1);
 	if (sess->listener->counters)
-		HA_ATOMIC_ADD(&sess->listener->counters->failed_req, 1);
+		_HA_ATOMIC_ADD(&sess->listener->counters->failed_req, 1);
 return_bad_req_stats_ok:
 	txn->req.err_state = txn->req.msg_state;
@@ -1402,10 +1402,10 @@ int htx_request_forward_body(struct stream *s, struct channel *req, int an_bit)
 	req->analysers &= AN_REQ_FLT_END;
 	s->res.analysers &= AN_RES_FLT_END; /* we're in data phase, we want to abort both directions */
-	HA_ATOMIC_ADD(&sess->fe->fe_counters.srv_aborts, 1);
-	HA_ATOMIC_ADD(&s->be->be_counters.srv_aborts, 1);
+	_HA_ATOMIC_ADD(&sess->fe->fe_counters.srv_aborts, 1);
+	_HA_ATOMIC_ADD(&s->be->be_counters.srv_aborts, 1);
 	if (objt_server(s->target))
-		HA_ATOMIC_ADD(&objt_server(s->target)->counters.srv_aborts, 1);
+		_HA_ATOMIC_ADD(&objt_server(s->target)->counters.srv_aborts, 1);
 	if (!(s->flags & SF_ERR_MASK))
 		s->flags |= SF_ERR_SRVCL;
@@ -1485,9 +1485,9 @@ int htx_wait_for_response(struct stream *s, struct channel *rep, int an_bit)
 	if (txn->flags & TX_NOT_FIRST)
 		goto abort_keep_alive;
-	HA_ATOMIC_ADD(&s->be->be_counters.failed_resp, 1);
+	_HA_ATOMIC_ADD(&s->be->be_counters.failed_resp, 1);
 	if (objt_server(s->target)) {
-		HA_ATOMIC_ADD(&__objt_server(s->target)->counters.failed_resp, 1);
+		_HA_ATOMIC_ADD(&__objt_server(s->target)->counters.failed_resp, 1);
 		health_adjust(__objt_server(s->target), HANA_STATUS_HTTP_READ_ERROR);
 	}
@@ -1516,9 +1516,9 @@ int htx_wait_for_response(struct stream *s, struct channel *rep, int an_bit)
 	/* 2: read timeout : return a 504 to the client. */
 	else if (rep->flags & CF_READ_TIMEOUT) {
-		HA_ATOMIC_ADD(&s->be->be_counters.failed_resp, 1);
+		_HA_ATOMIC_ADD(&s->be->be_counters.failed_resp, 1);
 		if (objt_server(s->target)) {
-			HA_ATOMIC_ADD(&__objt_server(s->target)->counters.failed_resp, 1);
+			_HA_ATOMIC_ADD(&__objt_server(s->target)->counters.failed_resp, 1);
 			health_adjust(__objt_server(s->target), HANA_STATUS_HTTP_READ_TIMEOUT);
 		}
@@ -1536,10 +1536,10 @@ int htx_wait_for_response(struct stream *s, struct channel *rep, int an_bit)
 	/* 3: client abort with an abortonclose */
 	else if ((rep->flags & CF_SHUTR) && ((s->req.flags & (CF_SHUTR|CF_SHUTW)) == (CF_SHUTR|CF_SHUTW))) {
-		HA_ATOMIC_ADD(&sess->fe->fe_counters.cli_aborts, 1);
-		HA_ATOMIC_ADD(&s->be->be_counters.cli_aborts, 1);
+		_HA_ATOMIC_ADD(&sess->fe->fe_counters.cli_aborts, 1);
+		_HA_ATOMIC_ADD(&s->be->be_counters.cli_aborts, 1);
 		if (objt_server(s->target))
-			HA_ATOMIC_ADD(&__objt_server(s->target)->counters.cli_aborts, 1);
+			_HA_ATOMIC_ADD(&__objt_server(s->target)->counters.cli_aborts, 1);
 		rep->analysers &= AN_RES_FLT_END;
 		txn->status = 400;
@@ -1559,9 +1559,9 @@ int htx_wait_for_response(struct stream *s, struct channel *rep, int an_bit)
 	if (txn->flags & TX_NOT_FIRST)
 		goto abort_keep_alive;
-	HA_ATOMIC_ADD(&s->be->be_counters.failed_resp, 1);
+	_HA_ATOMIC_ADD(&s->be->be_counters.failed_resp, 1);
 	if (objt_server(s->target)) {
-		HA_ATOMIC_ADD(&__objt_server(s->target)->counters.failed_resp, 1);
+		_HA_ATOMIC_ADD(&__objt_server(s->target)->counters.failed_resp, 1);
 		health_adjust(__objt_server(s->target), HANA_STATUS_HTTP_BROKEN_PIPE);
 	}
@@ -1582,7 +1582,7 @@ int htx_wait_for_response(struct stream *s, struct channel *rep, int an_bit)
 	if (txn->flags & TX_NOT_FIRST)
 		goto abort_keep_alive;
-	HA_ATOMIC_ADD(&s->be->be_counters.failed_resp, 1);
+	_HA_ATOMIC_ADD(&s->be->be_counters.failed_resp, 1);
 	rep->analysers &= AN_RES_FLT_END;
 	if (!(s->flags & SF_ERR_MASK))
@@ -1653,7 +1653,7 @@ int htx_wait_for_response(struct stream *s, struct channel *rep, int an_bit)
 	stream_inc_http_err_ctr(s);
 	if (objt_server(s->target))
-		HA_ATOMIC_ADD(&__objt_server(s->target)->counters.p.http.rsp[n], 1);
+		_HA_ATOMIC_ADD(&__objt_server(s->target)->counters.p.http.rsp[n], 1);
 	/* Adjust server's health based on status code. Note: status codes 501
 	 * and 505 are triggered on demand by client request, so we must not
@@ -1771,9 +1771,9 @@ int htx_wait_for_response(struct stream *s, struct channel *rep, int an_bit)
 	return 1;
 return_bad_res:
-	HA_ATOMIC_ADD(&s->be->be_counters.failed_resp, 1);
+	_HA_ATOMIC_ADD(&s->be->be_counters.failed_resp, 1);
 	if (objt_server(s->target)) {
-		HA_ATOMIC_ADD(&__objt_server(s->target)->counters.failed_resp, 1);
+		_HA_ATOMIC_ADD(&__objt_server(s->target)->counters.failed_resp, 1);
 		health_adjust(__objt_server(s->target), HANA_STATUS_HTTP_HDRRSP);
 	}
 	txn->status = 502;
@@ -1894,12 +1894,12 @@ int htx_process_res_common(struct stream *s, struct channel *rep, int an_bit, st
 	/* has the response been denied ? */
 	if (txn->flags & TX_SVDENY) {
 		if (objt_server(s->target))
-			HA_ATOMIC_ADD(&__objt_server(s->target)->counters.failed_secu, 1);
-		HA_ATOMIC_ADD(&s->be->be_counters.denied_resp, 1);
-		HA_ATOMIC_ADD(&sess->fe->fe_counters.denied_resp, 1);
+			_HA_ATOMIC_ADD(&__objt_server(s->target)->counters.failed_secu, 1);
+		_HA_ATOMIC_ADD(&s->be->be_counters.denied_resp, 1);
+		_HA_ATOMIC_ADD(&sess->fe->fe_counters.denied_resp, 1);
 		if (sess->listener->counters)
-			HA_ATOMIC_ADD(&sess->listener->counters->denied_resp, 1);
+			_HA_ATOMIC_ADD(&sess->listener->counters->denied_resp, 1);
 		goto return_srv_prx_502;
 	}
@@ -2047,12 +2047,12 @@ int htx_process_res_common(struct stream *s, struct channel *rep, int an_bit, st
 	 * the 'checkcache' option, and send an alert.
 	 */
 	if (objt_server(s->target))
-		HA_ATOMIC_ADD(&objt_server(s->target)->counters.failed_secu, 1);
-	HA_ATOMIC_ADD(&s->be->be_counters.denied_resp, 1);
-	HA_ATOMIC_ADD(&sess->fe->fe_counters.denied_resp, 1);
+		_HA_ATOMIC_ADD(&objt_server(s->target)->counters.failed_secu, 1);
+	_HA_ATOMIC_ADD(&s->be->be_counters.denied_resp, 1);
+	_HA_ATOMIC_ADD(&sess->fe->fe_counters.denied_resp, 1);
 	if (sess->listener->counters)
-		HA_ATOMIC_ADD(&sess->listener->counters->denied_resp, 1);
+		_HA_ATOMIC_ADD(&sess->listener->counters->denied_resp, 1);
 	ha_alert("Blocking cacheable cookie in response from instance %s, server %s.\n",
 		 s->be->id, objt_server(s->target) ? objt_server(s->target)->id : "<dispatch>");
@@ -2081,10 +2081,10 @@ int htx_process_res_common(struct stream *s, struct channel *rep, int an_bit, st
 return_bad_resp:
 	if (objt_server(s->target)) {
-		HA_ATOMIC_ADD(&__objt_server(s->target)->counters.failed_resp, 1);
+		_HA_ATOMIC_ADD(&__objt_server(s->target)->counters.failed_resp, 1);
 		health_adjust(__objt_server(s->target), HANA_STATUS_HTTP_RSP);
 	}
-	HA_ATOMIC_ADD(&s->be->be_counters.failed_resp, 1);
+	_HA_ATOMIC_ADD(&s->be->be_counters.failed_resp, 1);
 return_srv_prx_502:
 	rep->analysers &= AN_RES_FLT_END;
@@ -2261,9 +2261,9 @@ int htx_response_forward_body(struct stream *s, struct channel *res, int an_bit)
 	if (htx_is_empty(htx)) {
 		if (!(s->flags & SF_ERR_MASK))
 			s->flags |= SF_ERR_SRVCL;
-		HA_ATOMIC_ADD(&s->be->be_counters.srv_aborts, 1);
+		_HA_ATOMIC_ADD(&s->be->be_counters.srv_aborts, 1);
 		if (objt_server(s->target))
-			HA_ATOMIC_ADD(&objt_server(s->target)->counters.srv_aborts, 1);
+			_HA_ATOMIC_ADD(&objt_server(s->target)->counters.srv_aborts, 1);
 		goto return_bad_res_stats_ok;
 	}
 }
@@ -2292,9 +2292,9 @@ int htx_response_forward_body(struct stream *s, struct channel *res, int an_bit)
 	return 0;
 return_bad_res: /* let's centralize all bad responses */
-	HA_ATOMIC_ADD(&s->be->be_counters.failed_resp, 1);
+	_HA_ATOMIC_ADD(&s->be->be_counters.failed_resp, 1);
 	if (objt_server(s->target))
-		HA_ATOMIC_ADD(&objt_server(s->target)->counters.failed_resp, 1);
+		_HA_ATOMIC_ADD(&objt_server(s->target)->counters.failed_resp, 1);
 return_bad_res_stats_ok:
 	txn->rsp.err_state = txn->rsp.msg_state;
@@ -2320,10 +2320,10 @@ int htx_response_forward_body(struct stream *s, struct channel *res, int an_bit)
 	res->analysers &= AN_RES_FLT_END;
 	s->req.analysers &= AN_REQ_FLT_END; /* we're in data phase, we want to abort both directions */
-	HA_ATOMIC_ADD(&sess->fe->fe_counters.cli_aborts, 1);
-	HA_ATOMIC_ADD(&s->be->be_counters.cli_aborts, 1);
+	_HA_ATOMIC_ADD(&sess->fe->fe_counters.cli_aborts, 1);
+	_HA_ATOMIC_ADD(&s->be->be_counters.cli_aborts, 1);
 	if (objt_server(s->target))
-		HA_ATOMIC_ADD(&objt_server(s->target)->counters.cli_aborts, 1);
+		_HA_ATOMIC_ADD(&objt_server(s->target)->counters.cli_aborts, 1);
 	if (!(s->flags & SF_ERR_MASK))
 		s->flags |= SF_ERR_CLICL;
@@ -2924,11 +2924,11 @@ static enum rule_result htx_req_get_intercept_rule(struct proxy *px, struct list
 	send_log(px, LOG_WARNING, "Proxy %s failed to add or set the request header '%.*s' for request #%u. You might need to increase tune.maxrewrite.", px->id, (int)n.len, n.ptr, s->uniq_id);
 	}
-	HA_ATOMIC_ADD(&sess->fe->fe_counters.failed_rewrites, 1);
+	_HA_ATOMIC_ADD(&sess->fe->fe_counters.failed_rewrites, 1);
 	if (sess->fe != s->be)
-		HA_ATOMIC_ADD(&s->be->be_counters.failed_rewrites, 1);
+		_HA_ATOMIC_ADD(&s->be->be_counters.failed_rewrites, 1);
 	if (sess->listener->counters)
-		HA_ATOMIC_ADD(&sess->listener->counters->failed_rewrites, 1);
+		_HA_ATOMIC_ADD(&sess->listener->counters->failed_rewrites, 1);
 	}
 	free_trash_chunk(replace);
 	break;
@@ -3260,13 +3260,13 @@ resume_execution:
 	send_log(px, LOG_WARNING, "Proxy %s failed to add or set the response header '%.*s' for request #%u. You might need to increase tune.maxrewrite.", px->id, (int)n.len, n.ptr, s->uniq_id);
 	}
-	HA_ATOMIC_ADD(&sess->fe->fe_counters.failed_rewrites, 1);
+	_HA_ATOMIC_ADD(&sess->fe->fe_counters.failed_rewrites, 1);
 	if (sess->fe != s->be)
-		HA_ATOMIC_ADD(&s->be->be_counters.failed_rewrites, 1);
+		_HA_ATOMIC_ADD(&s->be->be_counters.failed_rewrites, 1);
 	if (sess->listener->counters)
-		HA_ATOMIC_ADD(&sess->listener->counters->failed_rewrites, 1);
+		_HA_ATOMIC_ADD(&sess->listener->counters->failed_rewrites, 1);
 	if (objt_server(s->target))
-		HA_ATOMIC_ADD(&objt_server(s->target)->counters.failed_rewrites, 1);
+		_HA_ATOMIC_ADD(&objt_server(s->target)->counters.failed_rewrites, 1);
 	}
 	free_trash_chunk(replace);
 	break;
@@ -5278,9 +5278,9 @@ static void htx_end_response(struct stream *s)
 	else if (chn->flags & CF_SHUTW) {
 		txn->rsp.err_state = txn->rsp.msg_state;
 		txn->rsp.msg_state = HTTP_MSG_ERROR;
-		HA_ATOMIC_ADD(&s->be->be_counters.cli_aborts, 1);
+		_HA_ATOMIC_ADD(&s->be->be_counters.cli_aborts, 1);
 		if (objt_server(s->target))
-			HA_ATOMIC_ADD(&objt_server(s->target)->counters.cli_aborts, 1);
+			_HA_ATOMIC_ADD(&objt_server(s->target)->counters.cli_aborts, 1);
 		goto end;
 	}
 	return;