MINOR: quic: Remove pool_zalloc() from qc_new_conn()

qc_new_conn() is used to initialize QUIC connections with quic_conn struct objects.
This function calls quic_conn_release() when it fails to initialize a connection.
quic_conn_release() is also called to release the memory allocated by a QUIC
connection.

Replace pool_zalloc() by pool_alloc() in this function and initialize
all quic_conn struct members which are referenced by quic_conn_release() to
prevent use of uninitialized variables in this function.
The ebtrees and the lists attached to the quic_conn struct must be initialized.
The tasks must be reset to their NULL default values to be safely destroyed
by task_destroy(). The same applies to all the TLS cipher contexts of the
encryption levels (struct quic_enc_level) and to those used for the key update.
The packet number spaces (struct quic_pktns) must also be initialized.
The ->prx_counters pointer must be initialized to prevent
quic_conn_prx_cntrs_update() from dereferencing an invalid pointer.
The ->latest_rtt member of the quic_loss struct must also be initialized. This
is done by quic_loss_init(), called by quic_path_init().
Author: Frédéric Lécaille
Date:   2023-06-15 07:53:42 +02:00
Parent: 4ae29be18c
Commit: ddc616933c

3 changed files with 110 additions and 40 deletions
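
The pattern this commit applies, as a minimal standalone sketch (plain
malloc()/free() stand in for HAProxy's pool_alloc()/pool_free(), and
conn_new()/conn_release() are hypothetical names): once the allocator no
longer zeroes the object, every member the release path reads must be set
to a safe default before the first allocation that can fail.

#include <stdlib.h>

struct conn {
	char *rxbuf;   /* owned; freed by conn_release() */
	char *txbuf;   /* owned; freed by conn_release() */
};

/* Release path: must tolerate a partially initialized object. */
static void conn_release(struct conn *c)
{
	if (!c)
		return;

	free(c->rxbuf); /* free(NULL) is a no-op, so NULL defaults are safe */
	free(c->txbuf);
	free(c);
}

static struct conn *conn_new(void)
{
	/* Like pool_alloc(): the returned memory is NOT zeroed. */
	struct conn *c = malloc(sizeof *c);

	if (!c)
		return NULL;

	/* First, set every member read by conn_release() to a safe default. */
	c->rxbuf = NULL;
	c->txbuf = NULL;

	/* Only then perform the allocations which may fail. */
	c->rxbuf = malloc(4096);
	if (!c->rxbuf)
		goto err;

	c->txbuf = malloc(4096);
	if (!c->txbuf)
		goto err;

	return c;

 err:
	conn_release(c); /* safe even on partial initialization */
	return NULL;
}

This is why the commit adds quic_tls_ctx_reset()/quic_tls_ku_reset() and moves
the ebtree, list and task initializations ahead of the first failable
allocation: quic_conn_release() can then be called unconditionally from the
err: label.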

include/haproxy/quic_loss.h

@@ -34,6 +34,7 @@
 static inline void quic_loss_init(struct quic_loss *ql)
 {
+	ql->latest_rtt = 0;
 	ql->srtt = QUIC_LOSS_INITIAL_RTT << 3;
 	ql->rtt_var = (QUIC_LOSS_INITIAL_RTT >> 1) << 2;
 	ql->rtt_min = 0;

include/haproxy/quic_tls.h

@@ -357,6 +357,20 @@ static inline enum quic_tls_pktns quic_tls_pktns(enum quic_tls_enc_level level)
 	}
 }
 
+/* Reset all members of <ctx> to default values. */
+static inline void quic_tls_ctx_reset(struct quic_tls_ctx *ctx)
+{
+	ctx->rx.ctx = NULL;
+	ctx->rx.hp_ctx = NULL;
+	ctx->rx.iv = NULL;
+	ctx->rx.key = NULL;
+
+	ctx->tx.ctx = NULL;
+	ctx->tx.hp_ctx = NULL;
+	ctx->tx.iv = NULL;
+	ctx->tx.key = NULL;
+}
+
 /* Erase and free the secrets for a QUIC encryption level with <ctx> as
  * context.
  * Always succeeds.

@@ -394,8 +408,7 @@ static inline void quic_tls_ctx_secs_free(struct quic_tls_ctx *ctx)
 	pool_free(pool_head_quic_tls_iv, ctx->tx.iv);
 	pool_free(pool_head_quic_tls_key, ctx->tx.key);
-	ctx->rx.iv = ctx->tx.iv = NULL;
-	ctx->rx.key = ctx->tx.key = NULL;
+	quic_tls_ctx_reset(ctx);
 }
 
 /* Allocate the secrete keys for a QUIC encryption level with <ctx> as context.

@@ -612,6 +625,15 @@ static inline int qc_new_isecs(struct quic_conn *qc,
 	return 0;
 }
 
+/* Reset all members of <tls_kp> to default values. */
+static inline void quic_tls_ku_reset(struct quic_tls_kp *tls_kp)
+{
+	tls_kp->ctx = NULL;
+	tls_kp->secret = NULL;
+	tls_kp->iv = NULL;
+	tls_kp->key = NULL;
+}
+
 /* Release the memory allocated for all the key update key phase
  * structures for <qc> QUIC connection.
  * Always succeeds.

@@ -622,14 +644,17 @@ static inline void quic_tls_ku_free(struct quic_conn *qc)
 	pool_free(pool_head_quic_tls_secret, qc->ku.prv_rx.secret);
 	pool_free(pool_head_quic_tls_iv, qc->ku.prv_rx.iv);
 	pool_free(pool_head_quic_tls_key, qc->ku.prv_rx.key);
+	quic_tls_ku_reset(&qc->ku.prv_rx);
 	EVP_CIPHER_CTX_free(qc->ku.nxt_rx.ctx);
 	pool_free(pool_head_quic_tls_secret, qc->ku.nxt_rx.secret);
 	pool_free(pool_head_quic_tls_iv, qc->ku.nxt_rx.iv);
 	pool_free(pool_head_quic_tls_key, qc->ku.nxt_rx.key);
+	quic_tls_ku_reset(&qc->ku.nxt_rx);
 	EVP_CIPHER_CTX_free(qc->ku.nxt_tx.ctx);
 	pool_free(pool_head_quic_tls_secret, qc->ku.nxt_tx.secret);
 	pool_free(pool_head_quic_tls_iv, qc->ku.nxt_tx.iv);
 	pool_free(pool_head_quic_tls_key, qc->ku.nxt_tx.key);
+	quic_tls_ku_reset(&qc->ku.nxt_tx);
 }
 
 /* Initialize <kp> key update secrets, allocating the required memory.
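
A side effect worth noting: because quic_tls_ctx_secs_free() and
quic_tls_ku_free() now reset the freed pointers through the new helpers, the
free paths become idempotent. A minimal sketch of that idea (hypothetical
names; plain free() stands in for pool_free()):

#include <stdlib.h>

struct keys {
	unsigned char *secret;
	unsigned char *key;
};

static void keys_reset(struct keys *k)
{
	k->secret = NULL;
	k->key = NULL;
}

/* Safe to call any number of times: free(NULL) is a no-op, and
 * keys_reset() prevents a double free on a later call.
 */
static void keys_free(struct keys *k)
{
	free(k->secret);
	free(k->key);
	keys_reset(k);
}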

src/quic_conn.c

@@ -5528,17 +5528,13 @@ static struct quic_conn *qc_new_conn(const struct quic_version *qv, int ipv4,
 {
 	int i;
 	struct quic_conn *qc;
-	/* Initial CID. */
-	char *buf_area = NULL;
 	struct listener *l = NULL;
 	struct quic_cc_algo *cc_algo = NULL;
-	struct quic_tls_ctx *ictx;
+	struct quic_tls_ctx *ictx, *app_ctx;
 
 	TRACE_ENTER(QUIC_EV_CONN_INIT);
-	/* TODO replace pool_zalloc by pool_alloc(). This requires special care
-	 * to properly initialized internal quic_conn members to safely use
-	 * quic_conn_release() on alloc failure.
-	 */
-	qc = pool_zalloc(pool_head_quic_conn);
+	qc = pool_alloc(pool_head_quic_conn);
 	if (!qc) {
 		TRACE_ERROR("Could not allocate a new connection", QUIC_EV_CONN_INIT);
 		goto err;
@@ -5551,20 +5547,74 @@ static struct quic_conn *qc_new_conn(const struct quic_version *qv, int ipv4,
 	LIST_INIT(&qc->rx.pkt_list);
+	qc->streams_by_id = EB_ROOT_UNIQUE;
+
+	/* Required to call free_quic_conn_cids() from quic_conn_release() */
+	qc->cids = EB_ROOT;
 
 	qc_init_fd(qc);
 	LIST_INIT(&qc->back_refs);
 	LIST_INIT(&qc->el_th_ctx);
-	/* Now proceeds to allocation of qc members. */
-	buf_area = pool_alloc(pool_head_quic_conn_rxbuf);
-	if (!buf_area) {
+	qc->wait_event.tasklet = NULL;
+
+	/* Required to destroy <qc> tasks from quic_conn_release() */
+	qc->timer_task = NULL;
+	qc->idle_timer_task = NULL;
+
+	qc->xprt_ctx = NULL;
+	qc->conn = NULL;
+	qc->qcc = NULL;
+	qc->app_ops = NULL;
+
+	/* Keyupdate: required to safely call quic_tls_ku_free() from
+	 * quic_conn_release().
+	 */
+	quic_tls_ku_reset(&qc->ku.prv_rx);
+	quic_tls_ku_reset(&qc->ku.nxt_rx);
+	quic_tls_ku_reset(&qc->ku.nxt_tx);
+
+	/* Encryption level: required to safely call quic_conn_enc_level_uninit()
+	 * from quic_conn_release().
+	 */
+	for (i = 0; i < QUIC_TLS_ENC_LEVEL_MAX; i++) {
+		struct quic_enc_level *qel = &qc->els[i];
+		struct quic_tls_ctx *ctx = &qel->tls_ctx;
+
+		quic_tls_ctx_reset(ctx);
+		qel->tx.crypto.nb_buf = 0;
+		qel->tx.crypto.bufs = NULL;
+		qel->cstream = NULL;
+		qel->pktns = NULL;
+	}
+
+	quic_tls_ctx_reset(&qc->negotiated_ictx);
+
+	app_ctx = &qc->els[QUIC_TLS_ENC_LEVEL_APP].tls_ctx;
+	app_ctx->rx.secret = NULL;
+	app_ctx->tx.secret = NULL;
+
+	/* Packet number spaces: required to safely call quic_pktns_tx_pkts_release()
+	 * from quic_conn_release().
+	 */
+	for (i = 0; i < QUIC_TLS_PKTNS_MAX; i++) {
+		struct quic_pktns *pktns = &qc->pktns[i];
+
+		LIST_INIT(&pktns->tx.frms);
+		pktns->tx.pkts = EB_ROOT_UNIQUE;
+		pktns->rx.arngs.root = EB_ROOT_UNIQUE;
+	}
+
+	/* Required to safely call quic_conn_prx_cntrs_update() from quic_conn_release(). */
+	qc->prx_counters = NULL;
 
+	/* Now proceeds to allocation of qc members. */
+	qc->rx.buf.area = pool_alloc(pool_head_quic_conn_rxbuf);
+	if (!qc->rx.buf.area) {
 		TRACE_ERROR("Could not allocate a new RX buffer", QUIC_EV_CONN_INIT, qc);
 		goto err;
 	}
-	qc->cids = EB_ROOT;
 
 	/* QUIC Server (or listener). */
 	if (server) {
 		struct proxy *prx;
@@ -5575,16 +5625,12 @@ static struct quic_conn *qc_new_conn(const struct quic_version *qv, int ipv4,
 		qc->prx_counters = EXTRA_COUNTERS_GET(prx->extra_counters_fe,
 		                                      &quic_stats_module);
 
-		qc->flags |= QUIC_FL_CONN_LISTENER;
+		qc->flags = QUIC_FL_CONN_LISTENER;
 		qc->state = QUIC_HS_ST_SERVER_INITIAL;
 
 		/* Copy the client original DCID. */
-		qc->odcid.len = dcid->len;
-		memcpy(qc->odcid.data, dcid->data, dcid->len);
-
-		/* copy the packet SCID to reuse it as DCID for sending */
-		if (scid->len)
-			memcpy(qc->dcid.data, scid->data, scid->len);
-		qc->dcid.len = scid->len;
+		qc->odcid = *dcid;
+
+		/* Copy the packet SCID to reuse it as DCID for sending */
+		qc->dcid = *scid;
 		qc->tx.buf = BUF_NULL;
 		qc->li = l;
 	}
@@ -5594,6 +5640,7 @@ static struct quic_conn *qc_new_conn(const struct quic_version *qv, int ipv4,
 		if (dcid->len)
 			memcpy(qc->dcid.data, dcid->data, dcid->len);
 		qc->dcid.len = dcid->len;
+		qc->li = NULL;
 	}
 
 	qc->mux_state = QC_MUX_NULL;
 	qc->err = quic_err_transport(QC_ERR_NO_ERROR);
@@ -5632,6 +5679,7 @@ static struct quic_conn *qc_new_conn(const struct quic_version *qv, int ipv4,
 	}
 
 	qc->original_version = qv;
+	qc->negotiated_version = NULL;
 	qc->tps_tls_ext = (qc->original_version->num & 0xff000000) == 0xff000000 ?
 	                  TLS_EXTENSION_QUIC_TRANSPORT_PARAMETERS_DRAFT:
 	                  TLS_EXTENSION_QUIC_TRANSPORT_PARAMETERS;
@@ -5639,11 +5687,11 @@ static struct quic_conn *qc_new_conn(const struct quic_version *qv, int ipv4,
 	LIST_INIT(&qc->tx.frms_to_send);
 	qc->tx.nb_buf = QUIC_CONN_TX_BUFS_NB;
 	qc->tx.wbuf = qc->tx.rbuf = 0;
-	qc->tx.bytes = 0;
+	qc->tx.bytes = qc->tx.prep_bytes = 0;
 	qc->tx.buf = BUF_NULL;
 	/* RX part. */
 	qc->rx.bytes = 0;
-	qc->rx.buf = b_make(buf_area, QUIC_CONN_RX_BUFSZ, 0, 0);
+	qc->rx.buf = b_make(qc->rx.buf.area, QUIC_CONN_RX_BUFSZ, 0, 0);
 	for (i = 0; i < QCS_MAX_TYPES; i++)
 		qc->rx.strms[i].nb_streams = 0;
@@ -5655,11 +5703,11 @@ static struct quic_conn *qc_new_conn(const struct quic_version *qv, int ipv4,
 		goto err;
 	}
 
-	/* XXX TO DO: Only one path at this time. */
+	qc->max_ack_delay = 0;
+	/* Only one path at this time (multipath not supported) */
 	qc->path = &qc->paths[0];
 	quic_path_init(qc->path, ipv4, cc_algo ? cc_algo : default_quic_cc_algo, qc);
 
-	qc->streams_by_id = EB_ROOT_UNIQUE;
 	qc->stream_buf_count = 0;
 	memcpy(&qc->local_addr, local_addr, sizeof(qc->local_addr));
 	memcpy(&qc->peer_addr, peer_addr, sizeof qc->peer_addr);
@@ -5704,11 +5752,7 @@ static struct quic_conn *qc_new_conn(const struct quic_version *qv, int ipv4,
 	return qc;
 
  err:
-	pool_free(pool_head_quic_conn_rxbuf, buf_area);
-	if (qc) {
-		qc->rx.buf.area = NULL;
-		quic_conn_release(qc);
-	}
+	quic_conn_release(qc);
 	TRACE_LEAVE(QUIC_EV_CONN_INIT);
 	return NULL;
 }
@@ -5751,6 +5795,9 @@ void quic_conn_release(struct quic_conn *qc)
 	TRACE_ENTER(QUIC_EV_CONN_CLOSE, qc);
 
+	if (!qc)
+		goto leave;
+
 	/* We must not free the quic-conn if the MUX is still allocated. */
 	BUG_ON(qc->mux_state == QC_MUX_READY);

@@ -5786,15 +5833,11 @@ void quic_conn_release(struct quic_conn *qc)
 		pool_free(pool_head_quic_rx_packet, pkt);
 	}
 
-	if (qc->idle_timer_task) {
-		task_destroy(qc->idle_timer_task);
-		qc->idle_timer_task = NULL;
-	}
+	task_destroy(qc->idle_timer_task);
+	qc->idle_timer_task = NULL;
 
-	if (qc->timer_task) {
-		task_destroy(qc->timer_task);
-		qc->timer_task = NULL;
-	}
+	task_destroy(qc->timer_task);
+	qc->timer_task = NULL;
 
 	tasklet_free(qc->wait_event.tasklet);

@@ -5827,11 +5870,12 @@ void quic_conn_release(struct quic_conn *qc)
 	quic_conn_prx_cntrs_update(qc);
 
 	pool_free(pool_head_quic_conn_rxbuf, qc->rx.buf.area);
+	qc->rx.buf.area = NULL;
 	pool_free(pool_head_quic_conn, qc);
 	qc = NULL;
 
 	TRACE_PROTO("QUIC conn. freed", QUIC_EV_CONN_FREED, qc);
 
+ leave:
 	TRACE_LEAVE(QUIC_EV_CONN_CLOSE, qc);
 }