mirror of https://git.haproxy.org/git/haproxy.git/ (synced 2025-08-07 07:37:02 +02:00)
BUG/MINOR: quic: fix subscribe operation
Subscribing was not properly designed between the quic-conn and QUIC MUX layers. Align this with other haproxy components: the <subs> field is moved from the MUX to the quic-conn structure. All mention of the qcc MUX is cleaned up in quic_conn_subscribe()/quic_conn_unsubscribe().

Thanks to this change, ACK reception notification has been simplified. It is now unnecessary to check for the MUX's existence before waking it. Instead, if the <subs> quic-conn field is set, just wake up the upper layer tasklet without referring to the MUX. This should probably be extended to other parts of the quic-conn code.

This should be backported up to 2.6.
parent 0aba11e9e7
commit bbb1c68508
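The core of the patch is the wake-up pattern that now lives directly in the quic-conn code: when an event the upper layer subscribed to occurs, the transport checks the connection's own <subs> pointer, wakes the registered tasklet and clears the event. A minimal sketch of that pattern, using only the field and flag names that appear in the diff below:

    /* Wake the subscriber registered on <qc> for send events, if any.
     * <subs> now belongs to the quic_conn itself, so no MUX lookup is
     * needed before notifying the upper layer.
     */
    if (qc->subs && qc->subs->events & SUB_RETRY_SEND) {
        tasklet_wakeup(qc->subs->tasklet);
        qc->subs->events &= ~SUB_RETRY_SEND;
        if (!qc->subs->events)
            qc->subs = NULL;
    }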
@@ -97,7 +97,6 @@ struct qcc {
     struct list send_retry_list; /* list of qcs eligible to send retry */
 
     struct wait_event wait_event; /* To be used if we're waiting for I/Os */
-    struct wait_event *subs;
 
     struct proxy *proxy;
@@ -706,6 +706,7 @@ struct quic_conn {
     int stream_buf_count; /* total count of allocated stream buffers for this connection */
 
     struct wait_event wait_event;
+    struct wait_event *subs;
 
     /* MUX */
     struct qcc *qcc;
@@ -1990,7 +1990,6 @@ static int qc_init(struct connection *conn, struct proxy *prx,
 
     LIST_INIT(&qcc->send_retry_list);
 
-    qcc->subs = NULL;
     qcc->wait_event.tasklet->process = qc_io_cb;
     qcc->wait_event.tasklet->context = qcc;
     qcc->wait_event.events = 0;
@@ -1716,14 +1716,12 @@ static inline void qc_treat_acked_tx_frm(struct quic_conn *qc,
         qc_release_frm(qc, frm);
     }
 
-    if (stream_acked && qc->mux_state == QC_MUX_READY) {
-        struct qcc *qcc = qc->qcc;
-
-        if (qcc->subs && qcc->subs->events & SUB_RETRY_SEND) {
-            tasklet_wakeup(qcc->subs->tasklet);
-            qcc->subs->events &= ~SUB_RETRY_SEND;
-            if (!qcc->subs->events)
-                qcc->subs = NULL;
+    if (stream_acked) {
+        if (qc->subs && qc->subs->events & SUB_RETRY_SEND) {
+            tasklet_wakeup(qc->subs->tasklet);
+            qc->subs->events &= ~SUB_RETRY_SEND;
+            if (!qc->subs->events)
+                qc->subs = NULL;
         }
     }
  leave:
@@ -4614,15 +4612,12 @@ struct task *qc_process_timer(struct task *task, void *ctx, unsigned int state)
 
     if (qc->path->in_flight) {
         pktns = quic_pto_pktns(qc, qc->state >= QUIC_HS_ST_COMPLETE, NULL);
-        if (qc->mux_state == QC_MUX_READY && qc->qcc->subs &&
-            qc->qcc->subs->events & SUB_RETRY_SEND) {
-            struct qcc *qcc = qc->qcc;
-
+        if (qc->subs && qc->subs->events & SUB_RETRY_SEND) {
             pktns->tx.pto_probe = QUIC_MAX_NB_PTO_DGRAMS;
-            tasklet_wakeup(qcc->subs->tasklet);
-            qcc->subs->events &= ~SUB_RETRY_SEND;
-            if (!qcc->subs->events)
-                qcc->subs = NULL;
+            tasklet_wakeup(qc->subs->tasklet);
+            qc->subs->events &= ~SUB_RETRY_SEND;
+            if (!qc->subs->events)
+                qc->subs = NULL;
         }
         else {
             qc->flags |= QUIC_FL_CONN_RETRANS_NEEDED;
@@ -4865,6 +4860,7 @@ static struct quic_conn *qc_new_conn(const struct quic_version *qv, int ipv4,
      */
     qc->tid = quic_get_cid_tid(qc->scid.data, l->bind_conf);
     qc->wait_event.tasklet->tid = qc->tid;
+    qc->subs = NULL;
 
     if (qc_conn_alloc_ssl_ctx(qc) ||
         !quic_conn_init_timer(qc) ||
@@ -47,21 +47,24 @@ static void quic_close(struct connection *conn, void *xprt_ctx)
 static int quic_conn_subscribe(struct connection *conn, void *xprt_ctx, int event_type, struct wait_event *es)
 {
     struct quic_conn *qc = conn->handle.qc;
-    struct qcc *qcc = qc->qcc;
 
     TRACE_ENTER(QUIC_EV_CONN_SUB, qc);
 
     BUG_ON(event_type & ~(SUB_RETRY_SEND|SUB_RETRY_RECV));
-    BUG_ON(qcc->subs && qcc->subs != es);
+    BUG_ON(qc->subs && qc->subs != es);
 
     es->events |= event_type;
-    qcc->subs = es;
+    qc->subs = es;
 
+    /* TODO implement a check_events to detect if subscriber should be
+     * woken up immediately ?
+     */
+
     if (event_type & SUB_RETRY_RECV)
-        TRACE_DEVEL("subscribe(recv)", QUIC_EV_CONN_XPRTRECV, qc, qcc);
+        TRACE_DEVEL("subscribe(recv)", QUIC_EV_CONN_XPRTRECV, qc);
 
     if (event_type & SUB_RETRY_SEND)
-        TRACE_DEVEL("subscribe(send)", QUIC_EV_CONN_XPRTSEND, qc, qcc);
+        TRACE_DEVEL("subscribe(send)", QUIC_EV_CONN_XPRTSEND, qc);
 
     TRACE_LEAVE(QUIC_EV_CONN_SUB, qc);
@@ -74,22 +77,24 @@ static int quic_conn_subscribe(struct connection *conn, void *xprt_ctx, int even
  */
 static int quic_conn_unsubscribe(struct connection *conn, void *xprt_ctx, int event_type, struct wait_event *es)
 {
-    int ret;
     struct quic_conn *qc = conn->handle.qc;
-    struct qcc *qcc = qc->qcc;
 
     TRACE_ENTER(QUIC_EV_CONN_SUB, qc);
 
     if (event_type & SUB_RETRY_RECV)
-        TRACE_DEVEL("unsubscribe(recv)", QUIC_EV_CONN_XPRTRECV, qc, qcc);
+        TRACE_DEVEL("unsubscribe(recv)", QUIC_EV_CONN_XPRTRECV, qc);
     if (event_type & SUB_RETRY_SEND)
-        TRACE_DEVEL("unsubscribe(send)", QUIC_EV_CONN_XPRTSEND, qc, qcc);
+        TRACE_DEVEL("unsubscribe(send)", QUIC_EV_CONN_XPRTSEND, qc);
 
-    ret = conn_unsubscribe(conn, xprt_ctx, event_type, es);
+    es->events &= ~event_type;
+    if (!es->events)
+        qc->subs = NULL;
+
+    /* TODO implement ignore_events similar to conn_unsubscribe() ? */
 
     TRACE_LEAVE(QUIC_EV_CONN_SUB, qc);
 
-    return ret;
+    return 0;
 }
 
 /* Store in <xprt_ctx> the context attached to <conn>.
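The commit message notes that this wake-up logic should probably be extended to other parts of the quic-conn code. A natural follow-up (not part of this patch) would be to factor the sequence into a small helper so every site notifying the upper layer stays consistent; the helper below is purely illustrative and its name is hypothetical:

    /* Illustrative only, not part of this commit: wake the upper layer
     * subscribed to <qc> for the events in <event_type>, clear those
     * events and release <subs> once no event remains.
     */
    static inline void qc_wake_subscriber(struct quic_conn *qc, int event_type)
    {
        if (qc->subs && qc->subs->events & event_type) {
            tasklet_wakeup(qc->subs->tasklet);
            qc->subs->events &= ~event_type;
            if (!qc->subs->events)
                qc->subs = NULL;
        }
    }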