BUG/MINOR: quic: ensure Tx buf is always purged
The quic_conn API for sending was recently refactored. The main objective was to regroup the different functions used for both handshake and application emission.

After this refactoring, an optimization was introduced to avoid calling qc_send() if there was nothing new to emit. However, this prevented the Tx buffer from being purged when a previous send had been interrupted, until new frames finally became available.

To fix this, simply remove the optimization. qc_send() is thus now always called in the quic_conn IO handlers.

The impact of this bug should be minimal, as it only occurs after a temporary sending error. In that case, however, it could cause extra latency or even a complete sending freeze in the worst scenario.

This must be backported up to 3.0.
commit 0ef94e2dff
parent 50470a5181
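To illustrate the failure mode described in the message, here is a minimal, self-contained sketch in C. The names (toy_txbuf, toy_send, toy_io_cb_*) are hypothetical stand-ins, not the actual HAProxy API: the point is only that a "nothing new to emit" shortcut placed before the send call leaves data from an interrupted previous send stuck in the Tx buffer until unrelated new frames show up.

#include <stddef.h>

/* Toy stand-in for the connection Tx buffer. */
struct toy_txbuf {
	char data[2048];
	size_t len;   /* bytes left over from an interrupted previous send */
};

/* Stand-in for qel_need_sending(): are there new frames to emit? */
static int toy_need_sending(int new_frames)
{
	return new_frames;
}

/* Stand-in for qc_send(): always purges leftovers first, then would emit
 * whatever new frames exist (emission itself is not modelled here). */
static void toy_send(struct toy_txbuf *txb, int new_frames)
{
	txb->len = 0;      /* purge data stuck from the previous attempt */
	(void)new_frames;  /* building/sending new packets omitted */
}

/* Buggy IO handler: the "nothing new to emit" shortcut returns before
 * toy_send(), so txb->len stays non-zero indefinitely. */
static void toy_io_cb_buggy(struct toy_txbuf *txb, int new_frames)
{
	if (!toy_need_sending(new_frames))
		return;    /* leftover bytes never get flushed */
	toy_send(txb, new_frames);
}

/* Fixed IO handler, mirroring the patch: the send path is always entered,
 * so leftover bytes are flushed even when there is nothing new to emit. */
static void toy_io_cb_fixed(struct toy_txbuf *txb, int new_frames)
{
	toy_send(txb, new_frames);
}

The accompanying change to qc_send() in the diff below (the do/while loop rewritten as a while loop and ret initialized to 0) is what makes it safe for the IO handlers to call it unconditionally, even when the send list turns out to be empty.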
@@ -551,11 +551,8 @@ struct task *quic_conn_app_io_cb(struct task *t, void *context, unsigned int state)
 {
 	struct list send_list = LIST_HEAD_INIT(send_list);
 	struct quic_conn *qc = context;
-	struct quic_enc_level *qel;
 
 	TRACE_ENTER(QUIC_EV_CONN_IO_CB, qc);
-
-	qel = qc->ael;
 	TRACE_STATE("connection handshake state", QUIC_EV_CONN_IO_CB, qc, &qc->state);
 
 	if (qc_test_fd(qc))
@@ -594,11 +591,10 @@ struct task *quic_conn_app_io_cb(struct task *t, void *context, unsigned int state)
 		goto out;
 	}
 
-	if (!qel_need_sending(qel, qc))
-		goto out;
-
 	/* XXX TODO: how to limit the list frames to send */
-	qel_register_send(&send_list, qel, &qel->pktns->tx.frms);
+	if (qel_need_sending(qc->ael, qc))
+		qel_register_send(&send_list, qc->ael, &qc->ael->pktns->tx.frms);
+
 	if (!qc_send(qc, 0, &send_list)) {
 		TRACE_DEVEL("qc_send() failed", QUIC_EV_CONN_IO_CB, qc);
 		goto out;
@@ -804,10 +800,6 @@ struct task *quic_conn_io_cb(struct task *t, void *context, unsigned int state)
 		qel_register_send(&send_list, qel, &qel->pktns->tx.frms);
 	}
 
-	/* Skip sending if no QEL with frames to sent. */
-	if (LIST_ISEMPTY(&send_list))
-		goto out;
-
 	if (!qc_send(qc, 0, &send_list)) {
 		TRACE_DEVEL("qc_send() failed", QUIC_EV_CONN_IO_CB, qc);
 		goto out;
@@ -691,7 +691,7 @@ static int qc_prep_pkts(struct quic_conn *qc, struct buffer *buf,
 int qc_send(struct quic_conn *qc, int old_data, struct list *send_list)
 {
 	struct quic_enc_level *qel, *tmp_qel;
-	int ret, status = 0;
+	int ret = 0, status = 0;
 	struct buffer *buf;
 
 	TRACE_ENTER(QUIC_EV_CONN_TXPKT, qc);
@@ -713,7 +713,7 @@ int qc_send(struct quic_conn *qc, int old_data, struct list *send_list)
 	}
 
 	/* Prepare and send packets until we could not further prepare packets. */
-	do {
+	while (!LIST_ISEMPTY(send_list)) {
 		/* Buffer must always be empty before qc_prep_pkts() usage.
 		 * qc_send_ppkts() ensures it is cleared on success.
 		 */
@@ -727,7 +727,12 @@ int qc_send(struct quic_conn *qc, int old_data, struct list *send_list)
 				qc_txb_release(qc);
 			goto out;
 		}
-	} while (ret > 0 && !LIST_ISEMPTY(send_list));
+
+		if (ret <= 0) {
+			TRACE_DEVEL("stopping on qc_prep_pkts() return", QUIC_EV_CONN_TXPKT, qc);
+			break;
+		}
+	}
 
 	qc_txb_release(qc);
 	if (ret < 0)