diff --git a/include/haproxy/quic_cc-t.h b/include/haproxy/quic_cc-t.h index e66fc726c..f869dda0b 100644 --- a/include/haproxy/quic_cc-t.h +++ b/include/haproxy/quic_cc-t.h @@ -63,7 +63,7 @@ struct quic_cc_event { unsigned int now_ms; unsigned int max_ack_delay; size_t lost_bytes; - unsigned int newest_time_sent; + unsigned int time_sent; unsigned int period; } loss; }; diff --git a/include/haproxy/quic_cc.h b/include/haproxy/quic_cc.h index dbf0284ea..048a03a62 100644 --- a/include/haproxy/quic_cc.h +++ b/include/haproxy/quic_cc.h @@ -61,7 +61,7 @@ static inline void quic_cc_event_trace(struct buffer *buf, const struct quic_cc_ " time_sent=%u period=%u", ev->loss.now_ms, ev->loss.max_ack_delay, (unsigned long long)ev->loss.lost_bytes, - ev->loss.newest_time_sent, ev->loss.period); + ev->loss.time_sent, ev->loss.period); break; case QUIC_CC_EVT_ECN_CE: chunk_appendf(buf, "ecn_ce"); diff --git a/src/quic_cc_newreno.c b/src/quic_cc_newreno.c index 6ed5f06fb..e15b302b2 100644 --- a/src/quic_cc_newreno.c +++ b/src/quic_cc_newreno.c @@ -108,20 +108,13 @@ static void quic_cc_nr_ca_cb(struct quic_cc *cc, struct quic_cc_event *ev) break; case QUIC_CC_EVT_LOSS: - if (ev->loss.newest_time_sent > cc->algo_state.nr.recovery_start_time) { - cc->algo_state.nr.recovery_start_time = ev->loss.now_ms; - cc->algo_state.nr.cwnd = QUIC_MAX(cc->algo_state.nr.cwnd >> 1, path->min_cwnd); - cc->algo_state.nr.ssthresh = cc->algo_state.nr.cwnd; - } - if (quic_loss_persistent_congestion(&path->loss, - ev->loss.period, - ev->loss.now_ms, - ev->loss.max_ack_delay)) { - cc->algo_state.nr.cwnd = path->min_cwnd; - /* Re-entering slow start state. */ - cc->algo_state.nr.state = QUIC_CC_ST_SS; - cc->algo_state.nr.recovery_start_time = 0; - } + /* Do not decrease the congestion window when already in recovery period. 
*/ + if (ev->loss.time_sent <= cc->algo_state.nr.recovery_start_time) + goto out; + + cc->algo_state.nr.recovery_start_time = now_ms; + cc->algo_state.nr.ssthresh = cc->algo_state.nr.cwnd; + cc->algo_state.nr.cwnd = QUIC_MAX(cc->algo_state.nr.cwnd >> 1, path->min_cwnd); path->cwnd = cc->algo_state.nr.cwnd; break; diff --git a/src/xprt_quic.c b/src/xprt_quic.c index 555041089..797324045 100644 --- a/src/xprt_quic.c +++ b/src/xprt_quic.c @@ -1575,7 +1575,7 @@ static inline void qc_cc_loss_event(struct quic_conn *qc, .loss.now_ms = now_ms, .loss.max_ack_delay = qc->max_ack_delay, .loss.lost_bytes = lost_bytes, - .loss.newest_time_sent = newest_time_sent, + .loss.time_sent = newest_time_sent, .loss.period = period, }; @@ -1658,10 +1658,30 @@ static inline void qc_release_lost_pkts(struct quic_conn *qc, } } + if (newest_lost) { + /* Send a congestion event to the controller */ + struct quic_cc_event ev = { + .type = QUIC_CC_EVT_LOSS, + .loss.time_sent = newest_lost->time_sent, + }; + + quic_cc_event(&qc->path->cc, &ev); + } + + /* If an RTT has already been sampled, rtt_min has been set. + * We must check if we are experiencing a persistent congestion. + * If this is the case, the congestion controller must re-enter + * slow start state. + */ + if (qc->path->loss.rtt_min && newest_lost != oldest_lost) { + unsigned int period = newest_lost->time_sent - oldest_lost->time_sent; + + if (quic_loss_persistent_congestion(&qc->path->loss, period, + now_ms, qc->max_ack_delay)) + qc->path->cc.algo->slow_start(&qc->path->cc); + } + if (lost_bytes) { - /* Sent a packet loss event to the congestion controller. */ - qc_cc_loss_event(qc, lost_bytes, newest_lost->time_sent, - newest_lost->time_sent - oldest_lost->time_sent, now_us); quic_tx_packet_refdec(oldest_lost); if (newest_lost != oldest_lost) quic_tx_packet_refdec(newest_lost);