BUG/MINOR: quic: fix FD usage for quic_conn_closed on backend side

On the frontend side, QUIC transfer can be performed either via a
connection owned FD or multiplex on the listener one. When a quic_conn
is freed and converted to quic_conn_closed instance, its FD if open is
closed and all exchanges are now multiplexed via the listener FD.

This is different for the backend as connections only have the choice to
use their owned FD. Thus, special care must be taken when freeing a
connection and converting it to a quic_conn_closed instance. In this
case, qc_release_fd() is delayed to the quic_conn_closed release.

Furthermore, when the FD is transferred, its iocb and owner fields are
updated to the new quic_conn_closed instance. Without it, a crash will
occur when accessing the freed quic_conn tasklet. A newly dedicated
handler quic_conn_closed_sock_fd_iocb is used to ensure access to
quic_conn_closed members only.
This commit is contained in:
Amaury Denoyelle 2025-11-19 11:41:05 +01:00
parent 46c5c232d7
commit d54d78fe9a
3 changed files with 40 additions and 4 deletions

View File

@ -49,6 +49,7 @@ int qc_snd_buf(struct quic_conn *qc, const struct buffer *buf, size_t count,
int flags, uint16_t gso_size);
int qc_rcv_buf(struct quic_conn *qc);
void quic_conn_sock_fd_iocb(int fd);
void quic_conn_closed_sock_fd_iocb(int fd);
void qc_alloc_fd(struct quic_conn *qc, const struct sockaddr_storage *src,
const struct sockaddr_storage *dst);

View File

@ -683,6 +683,9 @@ static void quic_release_cc_conn(struct quic_conn_closed *cc_qc)
TRACE_ENTER(QUIC_EV_CONN_IO_CB, cc_qc);
if (qc_is_back(qc))
qc_release_fd(qc, 0);
task_destroy(cc_qc->idle_timer_task);
cc_qc->idle_timer_task = NULL;
tasklet_free(qc->wait_event.tasklet);
@ -763,10 +766,14 @@ static struct quic_conn_closed *qc_new_cc_conn(struct quic_conn *qc)
quic_conn_mv_cids_to_cc_conn(cc_qc, qc);
if (qc_is_back(qc))
if (qc_is_back(qc)) {
cc_qc->fd = qc->fd;
else
fdtab[cc_qc->fd].iocb = quic_conn_closed_sock_fd_iocb;
fdtab[cc_qc->fd].owner = cc_qc;
}
else {
qc_init_fd((struct quic_conn *)cc_qc);
}
cc_qc->flags = qc->flags;
cc_qc->err = qc->err;
@ -1557,8 +1564,14 @@ int quic_conn_release(struct quic_conn *qc)
if (!qc_is_back(qc) && qc_test_fd(qc))
_HA_ATOMIC_DEC(&jobs);
/* Close quic-conn socket FD on the frontend side. Remaining exchanges
* will be multiplexed on the listener socket. On backend side the FD
* is reinitialized to cc_qc instance via qc_new_cc_conn().
*/
if (!qc_is_back(qc) || !cc_qc) {
/* Close quic-conn socket fd. */
qc_release_fd(qc, 0);
}
/* in the unlikely (but possible) case the connection was just added to
* the accept_list we must delete it from there.

View File

@ -580,6 +580,28 @@ void quic_conn_sock_fd_iocb(int fd)
TRACE_LEAVE(QUIC_EV_CONN_RCV, qc);
}
/* quic_conn_closed FD handler (only used on backend side).
 *
 * I/O callback installed on the connection-owned socket FD after the
 * quic_conn has been converted to a quic_conn_closed instance: fdtab
 * owner is the quic_conn_closed, so only its members may be accessed
 * here (the original quic_conn and its tasklet are already freed).
 *
 * <fd> is the socket FD whose readiness triggered the callback.
 */
void quic_conn_closed_sock_fd_iocb(int fd)
{
	struct quic_conn_closed *cc_qc = fdtab[fd].owner;
	TRACE_ENTER(QUIC_EV_CONN_RCV, cc_qc);
	/* Send direction: only react if sending was both subscribed and
	 * now possible; disarm send polling before waking the tasklet.
	 */
	if (fd_send_active(fd) && fd_send_ready(fd)) {
		TRACE_DEVEL("send ready", QUIC_EV_CONN_RCV, cc_qc);
		fd_stop_send(fd);
		tasklet_wakeup_after(NULL, cc_qc->wait_event.tasklet);
	}
	/* Receive direction: wake the tasklet then stop recv polling.
	 * NOTE(review): ordering is the reverse of the send branch
	 * (wakeup before fd_stop_recv) — presumably intentional, mirroring
	 * quic_conn_sock_fd_iocb; confirm against that handler.
	 */
	if (fd_recv_ready(fd)) {
		TRACE_DEVEL("recv ready", QUIC_EV_CONN_RCV, cc_qc);
		tasklet_wakeup_after(NULL, cc_qc->wait_event.tasklet);
		fd_stop_recv(fd);
	}
	TRACE_LEAVE(QUIC_EV_CONN_RCV, cc_qc);
}
static void cmsg_set_saddr(struct msghdr *msg, struct cmsghdr **cmsg,
struct sockaddr_storage *saddr)
{