MAJOR: stream: Use SE descriptor date to detect read/write timeouts

We stop using the channel's expiration dates to detect read and write
timeouts on the channels. We now rely on the stream-endpoint descriptor to
do so. All of this is handled in process_stream().

The stream relies on two helper functions to know whether receives or sends
may expire: sc_rcv_may_expire() and sc_snd_may_expire().
This commit is contained in:
Christopher Faulet 2023-02-16 11:18:15 +01:00
parent 2ca4cc1936
commit b374ba563a
3 changed files with 48 additions and 25 deletions

View File

@ -523,26 +523,14 @@ static inline int channel_output_closed(struct channel *chn)
return ((chn->flags & CF_SHUTW) != 0);
}
/* Check channel timeouts, and set the corresponding flags. The likely/unlikely
* have been optimized for fastest normal path. The read/write timeouts are not
* set if there was activity on the channel. That way, we don't have to update
* the timeout on every I/O. Note that the analyser timeout is always checked.
*/
static inline void channel_check_timeouts(struct channel *chn)
/* Check channel timeouts, and set the corresponding flags. */
static inline void channel_check_timeout(struct channel *chn)
{
if (likely(!(chn->flags & (CF_SHUTR|CF_READ_TIMEOUT|CF_READ_EVENT))) &&
unlikely(tick_is_expired(sc_ep_rex(chn_prod(chn)), now_ms)))
chn->flags |= CF_READ_TIMEOUT;
if (likely(!(chn->flags & (CF_SHUTW|CF_WRITE_TIMEOUT|CF_WRITE_EVENT))) &&
unlikely(tick_is_expired(sc_ep_wex(chn_cons(chn)), now_ms)))
chn->flags |= CF_WRITE_TIMEOUT;
if (likely(!(chn->flags & CF_READ_EVENT)) &&
unlikely(tick_is_expired(chn->analyse_exp, now_ms)))
if (likely(!(chn->flags & CF_READ_EVENT)) && unlikely(tick_is_expired(chn->analyse_exp, now_ms)))
chn->flags |= CF_READ_EVENT;
}
/* Erase any content from channel <buf> and adjusts flags accordingly. Note
* that any spliced data is not affected since we may not have any access to
* it.

View File

@ -373,4 +373,32 @@ static inline int sc_is_send_allowed(const struct stconn *sc)
return !sc_ep_test(sc, SE_FL_WAIT_DATA | SE_FL_WONT_CONSUME);
}
/* Report whether a read timeout is still meaningful on stream connector
 * <sc>. Returns 0 when receives cannot expire: the input channel already
 * reached an end condition (EOI, shut, timeout or a recent read event),
 * reads are voluntarily or temporarily blocked (won't-read, waiting for a
 * buffer or for room), the applet side is still waiting for its connection,
 * or the opposite endpoint declared it will not deliver data. Returns 1
 * otherwise.
 */
static inline int sc_rcv_may_expire(const struct stconn *sc)
{
	return !(sc_ic(sc)->flags & (CF_EOI|CF_SHUTR|CF_READ_TIMEOUT|CF_READ_EVENT)) &&
	       !(sc->flags & (SC_FL_WONT_READ|SC_FL_NEED_BUFF|SC_FL_NEED_ROOM)) &&
	       !sc_ep_test(sc, SE_FL_APPLET_NEED_CONN) &&
	       !sc_ep_test(sc_opposite(sc), SE_FL_EXP_NO_DATA);
}
/* Report whether a write timeout is still meaningful on stream connector
 * <sc>. Returns 0 when sends cannot expire: the output channel was already
 * shut, timed out or saw a recent write event, or the endpoint declared it
 * will not consume data. Returns 1 otherwise.
 */
static inline int sc_snd_may_expire(const struct stconn *sc)
{
	if ((sc_oc(sc)->flags & (CF_SHUTW|CF_WRITE_TIMEOUT|CF_WRITE_EVENT)) ||
	    sc_ep_test(sc, SE_FL_WONT_CONSUME))
		return 0;
	return 1;
}
/* Check the receive and send expiration dates of the stream-endpoint
 * descriptor attached to stream connector <sc>, and set CF_READ_TIMEOUT /
 * CF_WRITE_TIMEOUT on the input/output channel accordingly. Each side is
 * only checked when it may actually expire, as reported by
 * sc_rcv_may_expire() and sc_snd_may_expire(); the likely/unlikely hints
 * favor the common no-timeout path.
 */
static inline void sc_check_timeouts(const struct stconn *sc)
{
if (likely(sc_rcv_may_expire(sc)) && unlikely(tick_is_expired(sc_ep_rcv_ex(sc), now_ms)))
sc_ic(sc)->flags |= CF_READ_TIMEOUT;
if (likely(sc_snd_may_expire(sc)) && unlikely(tick_is_expired(sc_ep_snd_ex(sc), now_ms)))
sc_oc(sc)->flags |= CF_WRITE_TIMEOUT;
}
#endif /* _HAPROXY_SC_STRM_H */

View File

@ -1729,14 +1729,13 @@ struct task *process_stream(struct task *t, void *context, unsigned int state)
if (unlikely(s->pending_events & TASK_WOKEN_TIMER)) {
stream_check_conn_timeout(s);
/* check channel timeouts, and close the corresponding stream connectors
/* check SC and channel timeouts, and close the corresponding stream connectors
* for future reads or writes. Note: this will also concern upper layers
* but we do not touch any other flag. We must be careful and correctly
* detect state changes when calling them.
*/
channel_check_timeouts(req);
sc_check_timeouts(scf);
channel_check_timeout(req);
if (unlikely((req->flags & (CF_SHUTW|CF_WRITE_TIMEOUT)) == CF_WRITE_TIMEOUT)) {
scb->flags |= SC_FL_NOLINGER;
sc_shutw(scb);
@ -1748,8 +1747,8 @@ struct task *process_stream(struct task *t, void *context, unsigned int state)
sc_shutr(scf);
}
channel_check_timeouts(res);
sc_check_timeouts(scb);
channel_check_timeout(res);
if (unlikely((res->flags & (CF_SHUTW|CF_WRITE_TIMEOUT)) == CF_WRITE_TIMEOUT)) {
scf->flags |= SC_FL_NOLINGER;
sc_shutw(scf);
@ -2515,9 +2514,17 @@ struct task *process_stream(struct task *t, void *context, unsigned int state)
update_exp_and_leave:
/* Note: please ensure that if you branch here you disable SC_FL_DONT_WAKE */
t->expire = tick_first((tick_is_expired(t->expire, now_ms) ? 0 : t->expire),
tick_first(tick_first(sc_ep_rex(scf), sc_ep_wex(scf)),
tick_first(sc_ep_rex(scb), sc_ep_wex(scb))));
t->expire = (tick_is_expired(t->expire, now_ms) ? 0 : t->expire);
if (likely(sc_rcv_may_expire(scf)))
t->expire = tick_first(t->expire, sc_ep_rcv_ex(scf));
if (likely(sc_snd_may_expire(scf)))
t->expire = tick_first(t->expire, sc_ep_snd_ex(scf));
if (likely(sc_rcv_may_expire(scb)))
t->expire = tick_first(t->expire, sc_ep_rcv_ex(scb));
if (likely(sc_snd_may_expire(scb)))
t->expire = tick_first(t->expire, sc_ep_snd_ex(scb));
if (!req->analysers)
req->analyse_exp = TICK_ETERNITY;