MEDIUM: quic: limit handshake per listener

Implement a per-listener limit on the number of concurrent QUIC
connections in the handshake phase. When the limit is reached, INITIAL
packets for new connections are automatically dropped until the number
of in-progress handshakes decreases.

The limit value is derived automatically from the listener backlog, which
itself defaults to maxconn.

This feature is important to ensure CPU and memory resources are not
consumed when too many handshake attempts are started in parallel.

Special care is taken when a connection is released before handshake
completion: in this case the counter must also be decremented. This
requires that the <qc.state> member is set early in qc_new_conn(),
before any possible quic_conn_release() invocation.
Amaury Denoyelle 2023-11-06 16:34:38 +01:00
parent 278808915b
commit 3df6a60113
8 changed files with 97 additions and 24 deletions
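
Overview (not part of the patch): a minimal sketch of the accounting lifecycle
described in the commit message, using invented names and no atomics (the real
code updates the counter atomically, as the quic_conn.c hunks below show). The
key invariant is that a reserved handshake slot is returned exactly once,
either when the handshake completes or when the connection is released early,
which is why <qc.state> must be initialized before any release path can run.

    /* Stand-ins for the per-listener counter (rx.quic_curr_handshake) and its
     * backlog-derived limit; all names here are invented for illustration. */
    struct hs_budget {
        unsigned int curr;   /* handshakes currently in progress */
        unsigned int max;    /* derived from the listener backlog */
    };

    enum hs_state { HS_IN_PROGRESS, HS_COMPLETE };

    struct sketch_conn {
        struct hs_budget *budget;
        enum hs_state state;         /* plays the role of qc->state */
    };

    /* An INITIAL packet creating a new connection must first reserve a slot;
     * when none is available the packet is simply dropped. */
    static int conn_try_start(struct sketch_conn *c, struct hs_budget *b)
    {
        if (b->curr >= b->max)
            return 0;                /* limit reached: drop the INITIAL */
        b->curr++;
        c->budget = b;
        c->state = HS_IN_PROGRESS;   /* set before any release path can run */
        return 1;
    }

    /* Handshake completion returns the slot... */
    static void conn_handshake_done(struct sketch_conn *c)
    {
        c->state = HS_COMPLETE;
        c->budget->curr--;
    }

    /* ...and a connection torn down earlier returns it here instead. */
    static void conn_release(struct sketch_conn *c)
    {
        if (c->state != HS_COMPLETE)
            c->budget->curr--;
    }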


@@ -4659,19 +4659,26 @@ backlog <conns>
   system, it may represent the number of already acknowledged
   connections, of non-acknowledged ones, or both.
 
-  In order to protect against SYN flood attacks, one solution is to increase
-  the system's SYN backlog size. Depending on the system, sometimes it is just
-  tunable via a system parameter, sometimes it is not adjustable at all, and
-  sometimes the system relies on hints given by the application at the time of
-  the listen() syscall. By default, HAProxy passes the frontend's maxconn value
-  to the listen() syscall. On systems which can make use of this value, it can
-  sometimes be useful to be able to specify a different value, hence this
-  backlog parameter.
+  This option is both used for stream and datagram listeners.
+
+  In order to protect against SYN flood attacks on a stream-based listener, one
+  solution is to increase the system's SYN backlog size. Depending on the
+  system, sometimes it is just tunable via a system parameter, sometimes it is
+  not adjustable at all, and sometimes the system relies on hints given by the
+  application at the time of the listen() syscall. By default, HAProxy passes
+  the frontend's maxconn value to the listen() syscall. On systems which can
+  make use of this value, it can sometimes be useful to be able to specify a
+  different value, hence this backlog parameter.
 
   On Linux 2.4, the parameter is ignored by the system. On Linux 2.6, it is
   used as a hint and the system accepts up to the smallest greater power of
   two, and never more than some limits (usually 32768).
 
+  When using a QUIC listener, this option has a similar albeit not quite
+  equivalent meaning. It will set the maximum number of connections waiting for
+  handshake completion. When this limit is reached, INITIAL packets are dropped
+  to prevent creation of a new QUIC connection.
+
   See also : "maxconn" and the target operating system's tuning guide.
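
Condensing the documented behaviour for QUIC: the drop decision roughly boils
down to the sketch below (illustrative only, invented names; in the patch the
cap is obtained through listener_backlog(), and per the commit message it falls
back to maxconn when no backlog is configured).

    /* curr_handshakes is the per-listener count of handshakes in progress;
     * backlog and maxconn stand for the configured values. */
    static int should_drop_initial(unsigned int curr_handshakes,
                                   unsigned int backlog, unsigned int maxconn)
    {
        unsigned int cap = backlog ? backlog : maxconn;

        return curr_handshakes >= cap;
    }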


@@ -69,6 +69,8 @@ void qc_want_recv(struct quic_conn *qc);
 void quic_accept_push_qc(struct quic_conn *qc);
 
+int quic_listener_max_handshake(const struct listener *l);
+
 #endif /* USE_QUIC */
 #endif /* _HAPROXY_QUIC_SOCK_H */


@@ -81,6 +81,7 @@ struct receiver {
 #ifdef USE_QUIC
 	struct mt_list rxbuf_list;        /* list of buffers to receive and dispatch QUIC datagrams. */
 	enum quic_sock_mode quic_mode;    /* QUIC socket allocation strategy */
+	unsigned int quic_curr_handshake; /* count of active QUIC handshakes */
 #endif
 	struct {
 		struct task *task;        /* Task used to open connection for reverse. */


@@ -4187,6 +4187,7 @@ init_proxies_list_stage2:
 			if (listener->bind_conf->xprt == xprt_get(XPRT_QUIC)) {
 				/* quic_conn are counted against maxconn. */
 				listener->bind_conf->options |= BC_O_XPRT_MAXCONN;
+				listener->rx.quic_curr_handshake = 0;
 # ifdef USE_QUIC_OPENSSL_COMPAT
 				/* store the last checked bind_conf in bind_conf */


@@ -1137,6 +1137,30 @@ struct task *qc_process_timer(struct task *task, void *ctx, unsigned int state)
 	return task;
 }
 
+/* Try to increment <l> handshake current counter. If listener limit is
+ * reached, incrementation is rejected and 0 is returned.
+ */
+static int quic_increment_curr_handshake(struct listener *l)
+{
+	unsigned int count, next;
+	const int max = quic_listener_max_handshake(l);
+
+	do {
+		count = l->rx.quic_curr_handshake;
+		if (count >= max) {
+			/* maxconn reached */
+			next = 0;
+			goto end;
+		}
+
+		/* try to increment quic_curr_handshake */
+		next = count + 1;
+	} while (!_HA_ATOMIC_CAS(&l->rx.quic_curr_handshake, &count, next) && __ha_cpu_relax());
+
+ end:
+	return next;
+}
+
 /* Allocate a new QUIC connection with <version> as QUIC version. <ipv4>
  * boolean is set to 1 for IPv4 connection, 0 for IPv6. <server> is set to 1
  * for QUIC servers (or haproxy listeners).
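
For readers not familiar with HAProxy's atomic helpers: the loop above is a
bounded compare-and-swap increment, and returning the new count rather than a
boolean is what lets callers treat 0 as "limit reached". A rough C11 equivalent,
for illustration only (_HA_ATOMIC_CAS() and __ha_cpu_relax() are HAProxy-specific
wrappers):

    #include <stdatomic.h>

    static unsigned int bounded_increment(atomic_uint *counter, unsigned int max)
    {
        unsigned int count = atomic_load(counter);
        unsigned int next;

        do {
            if (count >= max)
                return 0;            /* limit reached: reject the handshake */
            next = count + 1;
            /* on CAS failure, count is refreshed with the current value */
        } while (!atomic_compare_exchange_weak(counter, &count, next));

        return next;                 /* >= 1 on success */
    }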
@@ -1161,7 +1185,7 @@ struct quic_conn *qc_new_conn(const struct quic_version *qv, int ipv4,
 	struct quic_conn *qc = NULL;
 	struct listener *l = NULL;
 	struct quic_cc_algo *cc_algo = NULL;
-	unsigned int next_actconn = 0, next_sslconn = 0;
+	unsigned int next_actconn = 0, next_sslconn = 0, next_handshake = 0;
 
 	TRACE_ENTER(QUIC_EV_CONN_INIT);
@@ -1178,6 +1202,14 @@ struct quic_conn *qc_new_conn(const struct quic_version *qv, int ipv4,
 		goto err;
 	}
 
+	if (server) {
+		next_handshake = quic_increment_curr_handshake(owner);
+		if (!next_handshake) {
+			TRACE_STATE("max handshake reached", QUIC_EV_CONN_INIT);
+			goto err;
+		}
+	}
+
 	qc = pool_alloc(pool_head_quic_conn);
 	if (!qc) {
 		TRACE_ERROR("Could not allocate a new connection", QUIC_EV_CONN_INIT);
@@ -1187,7 +1219,7 @@ struct quic_conn *qc_new_conn(const struct quic_version *qv, int ipv4,
 	/* Now that quic_conn instance is allocated, quic_conn_release() will
 	 * ensure global accounting is decremented.
 	 */
-	next_sslconn = next_actconn = 0;
+	next_handshake = next_sslconn = next_actconn = 0;
 
 	/* Initialize in priority qc members required for a safe dealloc. */
 	qc->nictx = NULL;
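
The zeroing above is the hand-off point for error accounting: before the
quic_conn exists, the err: label must roll back whatever the local next_* flags
recorded; once it exists, quic_conn_release() performs the same decrements, so
the flags are cleared to avoid a double decrement. A compressed sketch of that
pattern (invented helper names, not HAProxy's API):

    struct conn;

    extern unsigned int reserve_handshake_slot(void);  /* 0 when the limit is hit */
    extern void release_handshake_slot(void);
    extern struct conn *alloc_conn(void);
    extern int finish_init(struct conn *c);            /* may fail */
    extern void release_conn(struct conn *c);          /* also returns the slot */

    static struct conn *new_conn_sketch(void)
    {
        unsigned int slot_held_locally = reserve_handshake_slot();
        struct conn *c = NULL;

        if (!slot_held_locally)
            goto err;                /* too many handshakes: reject */

        if (!(c = alloc_conn()))
            goto err;                /* roll back the slot ourselves */

        /* From here on, releasing the connection returns the slot, so the
         * local flag must not trigger a second decrement. */
        slot_held_locally = 0;

        if (finish_init(c) < 0)
            goto err;

        return c;

     err:
        if (c)
            release_conn(c);
        else if (slot_held_locally)
            release_handshake_slot();
        return NULL;
    }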
@@ -1237,20 +1269,6 @@ struct quic_conn *qc_new_conn(const struct quic_version *qv, int ipv4,
 	/* Required to safely call quic_conn_prx_cntrs_update() from quic_conn_release(). */
 	qc->prx_counters = NULL;
 
-	/* Now proceeds to allocation of qc members. */
-	qc->rx.buf.area = pool_alloc(pool_head_quic_conn_rxbuf);
-	if (!qc->rx.buf.area) {
-		TRACE_ERROR("Could not allocate a new RX buffer", QUIC_EV_CONN_INIT, qc);
-		goto err;
-	}
-
-	qc->cids = pool_alloc(pool_head_quic_cids);
-	if (!qc->cids) {
-		TRACE_ERROR("Could not allocate a new CID tree", QUIC_EV_CONN_INIT, qc);
-		goto err;
-	}
-	*qc->cids = EB_ROOT;
-
 	/* QUIC Server (or listener). */
 	if (server) {
 		struct proxy *prx;
@@ -1281,6 +1299,20 @@ struct quic_conn *qc_new_conn(const struct quic_version *qv, int ipv4,
 	qc->mux_state = QC_MUX_NULL;
 	qc->err = quic_err_transport(QC_ERR_NO_ERROR);
 
+	/* Now proceeds to allocation of qc members. */
+	qc->rx.buf.area = pool_alloc(pool_head_quic_conn_rxbuf);
+	if (!qc->rx.buf.area) {
+		TRACE_ERROR("Could not allocate a new RX buffer", QUIC_EV_CONN_INIT, qc);
+		goto err;
+	}
+
+	qc->cids = pool_alloc(pool_head_quic_cids);
+	if (!qc->cids) {
+		TRACE_ERROR("Could not allocate a new CID tree", QUIC_EV_CONN_INIT, qc);
+		goto err;
+	}
+	*qc->cids = EB_ROOT;
+
 	conn_id->qc = qc;
 
 	if (HA_ATOMIC_LOAD(&l->rx.quic_mode) == QUIC_SOCK_MODE_CONN &&
@@ -1401,6 +1433,8 @@ struct quic_conn *qc_new_conn(const struct quic_version *qv, int ipv4,
 		_HA_ATOMIC_DEC(&actconn);
 	if (next_sslconn)
 		_HA_ATOMIC_DEC(&global.sslconns);
+	if (next_handshake)
+		_HA_ATOMIC_DEC(&l->rx.quic_curr_handshake);
 
 	TRACE_LEAVE(QUIC_EV_CONN_INIT);
 	return NULL;
@@ -1537,6 +1571,14 @@ void quic_conn_release(struct quic_conn *qc)
 		HA_ATOMIC_DEC(&qc->prx_counters->half_open_conn);
 	}
 
+	/* Connection released before handshake completion. */
+	if (unlikely(qc->state < QUIC_HS_ST_COMPLETE)) {
+		if (qc_is_listener(qc)) {
+			BUG_ON(qc->li->rx.quic_curr_handshake == 0);
+			HA_ATOMIC_DEC(&qc->li->rx.quic_curr_handshake);
+		}
+	}
+
 	pool_free(pool_head_quic_conn, qc);
 	qc = NULL;


@@ -1958,6 +1958,14 @@ static struct quic_conn *quic_rx_pkt_retrieve_conn(struct quic_rx_packet *pkt,
 			struct quic_connection_id *conn_id;
 			int ipv4;
 
+			/* Reject INITIAL early if listener limits reached. */
+			if (unlikely(HA_ATOMIC_LOAD(&l->rx.quic_curr_handshake) >=
+			             quic_listener_max_handshake(l))) {
+				TRACE_DATA("Drop INITIAL on max handshake",
+				           QUIC_EV_CONN_LPKT, NULL, NULL, NULL, pkt->version);
+				goto out;
+			}
+
 			if (!pkt->token_len && !(l->bind_conf->options & BC_O_QUIC_FORCE_RETRY) &&
 			    HA_ATOMIC_LOAD(&prx_counters->half_open_conn) >= global.tune.quic_retry_threshold) {
 				TRACE_PROTO("Initial without token, sending retry",


@@ -966,6 +966,15 @@ struct task *quic_accept_run(struct task *t, void *ctx, unsigned int i)
 	return NULL;
 }
 
+/* Returns the maximum number of QUIC connections waiting for handshake to
+ * complete in parallel on listener <l> instance. This reuses the listener
+ * backlog value.
+ */
+int quic_listener_max_handshake(const struct listener *l)
+{
+	return listener_backlog(l);
+}
+
 static int quic_alloc_accept_queues(void)
 {
 	int i;


@@ -572,6 +572,9 @@ int qc_ssl_provide_quic_data(struct ncbuf *ncbuf,
 			qc->state = QUIC_HS_ST_CONFIRMED;
 			/* The connection is ready to be accepted. */
 			quic_accept_push_qc(qc);
+
+			BUG_ON(qc->li->rx.quic_curr_handshake == 0);
+			HA_ATOMIC_DEC(&qc->li->rx.quic_curr_handshake);
 		}
 		else {
 			qc->state = QUIC_HS_ST_COMPLETE;