BUG/MINOR: quic: fix subscribe operation

Subscribing was not properly designed between the quic-conn and quic MUX
layers. Align it with the other haproxy components: the <subs> field is
moved from the MUX to the quic-conn structure. All mentions of the qcc
MUX are cleaned up in quic_conn_subscribe()/quic_conn_unsubscribe().

Thanks to this change, ACK reception notification has been simplified.
It is now unnecessary to check for the MUX existence before waking it.
Instead, if the quic-conn <subs> field is set, the upper layer tasklet
is simply woken up without any reference to the MUX. This should
probably be extended to other parts of the quic-conn code.

This should be backported up to 2.6.
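
To make the new wiring concrete, here is a minimal, self-contained sketch of
the subscribe/wake-up pattern described above. The types and names
(lower_layer, layer_subscribe, layer_notify, mux_io_cb) are simplified
stand-ins for illustration only, not HAProxy code: the lower layer keeps a
single <subs> pointer, and on an event it wakes the subscriber's tasklet and
drops the subscription once no subscribed event remains.

    /* Illustrative sketch only: simplified stand-ins, not real HAProxy types. */
    #include <stdio.h>
    #include <stddef.h>

    #define SUB_RETRY_RECV 0x1
    #define SUB_RETRY_SEND 0x2

    struct tasklet {
    	void (*process)(void *ctx); /* callback run when the tasklet is woken up */
    	void *ctx;
    };

    struct wait_event {
    	struct tasklet *tasklet;
    	int events;                 /* SUB_RETRY_* bits currently subscribed */
    };

    struct lower_layer {
    	struct wait_event *subs;    /* single subscriber, like qc->subs */
    };

    /* upper layer registers interest in <event_type> */
    static void layer_subscribe(struct lower_layer *ll, struct wait_event *es,
                                int event_type)
    {
    	es->events |= event_type;
    	ll->subs = es;
    }

    /* lower layer notifies: wake the tasklet, drop the subscription when empty */
    static void layer_notify(struct lower_layer *ll, int event_type)
    {
    	if (ll->subs && (ll->subs->events & event_type)) {
    		ll->subs->tasklet->process(ll->subs->tasklet->ctx);
    		ll->subs->events &= ~event_type;
    		if (!ll->subs->events)
    			ll->subs = NULL;
    	}
    }

    static void mux_io_cb(void *ctx)
    {
    	(void)ctx;
    	puts("upper layer woken up for send retry");
    }

    int main(void)
    {
    	struct tasklet t = { .process = mux_io_cb, .ctx = NULL };
    	struct wait_event we = { .tasklet = &t, .events = 0 };
    	struct lower_layer qc = { .subs = NULL };

    	layer_subscribe(&qc, &we, SUB_RETRY_SEND);
    	layer_notify(&qc, SUB_RETRY_SEND); /* wakes mux_io_cb, clears qc.subs */
    	return 0;
    }

In the real code only one subscriber is allowed at a time, which is what the
BUG_ON(qc->subs && qc->subs != es) check in quic_conn_subscribe() enforces.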
Amaury Denoyelle 2022-09-28 15:15:51 +02:00
parent 0aba11e9e7
commit bbb1c68508
5 changed files with 29 additions and 29 deletions


@@ -97,7 +97,6 @@ struct qcc {
 	struct list send_retry_list; /* list of qcs eligible to send retry */
 
 	struct wait_event wait_event;  /* To be used if we're waiting for I/Os */
-	struct wait_event *subs;
 
 	struct proxy *proxy;


@@ -706,6 +706,7 @@ struct quic_conn {
 	int stream_buf_count; /* total count of allocated stream buffers for this connection */
 
 	struct wait_event wait_event;
+	struct wait_event *subs;
 
 	/* MUX */
 	struct qcc *qcc;


@@ -1990,7 +1990,6 @@ static int qc_init(struct connection *conn, struct proxy *prx,
 	LIST_INIT(&qcc->send_retry_list);
 
-	qcc->subs = NULL;
 	qcc->wait_event.tasklet->process = qc_io_cb;
 	qcc->wait_event.tasklet->context = qcc;
 	qcc->wait_event.events = 0;


@@ -1716,14 +1716,12 @@ static inline void qc_treat_acked_tx_frm(struct quic_conn *qc,
 		qc_release_frm(qc, frm);
 	}
 
-	if (stream_acked && qc->mux_state == QC_MUX_READY) {
-		struct qcc *qcc = qc->qcc;
-
-		if (qcc->subs && qcc->subs->events & SUB_RETRY_SEND) {
-			tasklet_wakeup(qcc->subs->tasklet);
-			qcc->subs->events &= ~SUB_RETRY_SEND;
-			if (!qcc->subs->events)
-				qcc->subs = NULL;
+	if (stream_acked) {
+		if (qc->subs && qc->subs->events & SUB_RETRY_SEND) {
+			tasklet_wakeup(qc->subs->tasklet);
+			qc->subs->events &= ~SUB_RETRY_SEND;
+			if (!qc->subs->events)
+				qc->subs = NULL;
 		}
 	}
  leave:
@@ -4614,15 +4612,12 @@ struct task *qc_process_timer(struct task *task, void *ctx, unsigned int state)
 	if (qc->path->in_flight) {
 		pktns = quic_pto_pktns(qc, qc->state >= QUIC_HS_ST_COMPLETE, NULL);
-		if (qc->mux_state == QC_MUX_READY && qc->qcc->subs &&
-		    qc->qcc->subs->events & SUB_RETRY_SEND) {
-			struct qcc *qcc = qc->qcc;
-
+		if (qc->subs && qc->subs->events & SUB_RETRY_SEND) {
 			pktns->tx.pto_probe = QUIC_MAX_NB_PTO_DGRAMS;
-			tasklet_wakeup(qcc->subs->tasklet);
-			qcc->subs->events &= ~SUB_RETRY_SEND;
-			if (!qcc->subs->events)
-				qcc->subs = NULL;
+			tasklet_wakeup(qc->subs->tasklet);
+			qc->subs->events &= ~SUB_RETRY_SEND;
+			if (!qc->subs->events)
+				qc->subs = NULL;
 		}
 		else {
 			qc->flags |= QUIC_FL_CONN_RETRANS_NEEDED;
@@ -4865,6 +4860,7 @@ static struct quic_conn *qc_new_conn(const struct quic_version *qv, int ipv4,
 	 */
 	qc->tid = quic_get_cid_tid(qc->scid.data, l->bind_conf);
 	qc->wait_event.tasklet->tid = qc->tid;
+	qc->subs = NULL;
 
 	if (qc_conn_alloc_ssl_ctx(qc) ||
 	    !quic_conn_init_timer(qc) ||


@@ -47,21 +47,24 @@ static void quic_close(struct connection *conn, void *xprt_ctx)
 static int quic_conn_subscribe(struct connection *conn, void *xprt_ctx, int event_type, struct wait_event *es)
 {
 	struct quic_conn *qc = conn->handle.qc;
-	struct qcc *qcc = qc->qcc;
 
 	TRACE_ENTER(QUIC_EV_CONN_SUB, qc);
 
 	BUG_ON(event_type & ~(SUB_RETRY_SEND|SUB_RETRY_RECV));
-	BUG_ON(qcc->subs && qcc->subs != es);
+	BUG_ON(qc->subs && qc->subs != es);
 
 	es->events |= event_type;
-	qcc->subs = es;
+	qc->subs = es;
+
+	/* TODO implement a check_events to detect if subscriber should be
+	 * woken up immediately ?
+	 */
 
 	if (event_type & SUB_RETRY_RECV)
-		TRACE_DEVEL("subscribe(recv)", QUIC_EV_CONN_XPRTRECV, qc, qcc);
+		TRACE_DEVEL("subscribe(recv)", QUIC_EV_CONN_XPRTRECV, qc);
 
 	if (event_type & SUB_RETRY_SEND)
-		TRACE_DEVEL("subscribe(send)", QUIC_EV_CONN_XPRTSEND, qc, qcc);
+		TRACE_DEVEL("subscribe(send)", QUIC_EV_CONN_XPRTSEND, qc);
 
 	TRACE_LEAVE(QUIC_EV_CONN_SUB, qc);
@@ -74,22 +77,24 @@ static int quic_conn_subscribe(struct connection *conn, void *xprt_ctx, int event_type, struct wait_event *es)
  */
 static int quic_conn_unsubscribe(struct connection *conn, void *xprt_ctx, int event_type, struct wait_event *es)
 {
-	int ret;
 	struct quic_conn *qc = conn->handle.qc;
-	struct qcc *qcc = qc->qcc;
 
 	TRACE_ENTER(QUIC_EV_CONN_SUB, qc);
 
 	if (event_type & SUB_RETRY_RECV)
-		TRACE_DEVEL("unsubscribe(recv)", QUIC_EV_CONN_XPRTRECV, qc, qcc);
+		TRACE_DEVEL("unsubscribe(recv)", QUIC_EV_CONN_XPRTRECV, qc);
 
 	if (event_type & SUB_RETRY_SEND)
-		TRACE_DEVEL("unsubscribe(send)", QUIC_EV_CONN_XPRTSEND, qc, qcc);
+		TRACE_DEVEL("unsubscribe(send)", QUIC_EV_CONN_XPRTSEND, qc);
 
-	ret = conn_unsubscribe(conn, xprt_ctx, event_type, es);
+	es->events &= ~event_type;
+	if (!es->events)
+		qc->subs = NULL;
+
+	/* TODO implement ignore_events similar to conn_unsubscribe() ? */
 
 	TRACE_LEAVE(QUIC_EV_CONN_SUB, qc);
 
-	return ret;
+	return 0;
 }
 
 /* Store in <xprt_ctx> the context attached to <conn>.