MINOR: stconn: Add a flag to report EOS at the stream-connector level

The SC_FL_EOS flag is added to report the end of stream at the SC level. It
will be used to distinguish the end of stream reported by the endpoint, via
the SE_FL_EOS flag, from the abort triggered by the stream, via the
SC_FL_ABRT_DONE flag.

In this patch, the flag is defined and is systematically tested everywhere
SC_FL_ABRT_DONE is tested. This should be safe because the flag is never set
yet.
Christopher Faulet 2023-04-17 16:17:32 +02:00
parent 285aa40d35
commit ca5309a9a3
13 changed files with 78 additions and 75 deletions
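
To make the intent concrete before diving into the hunks, here is a minimal,
self-contained sketch. It is not part of the patch: the helper names
sc_input_closed_ex() and sc_input_aborted_ex() are invented for illustration,
and the flag values simply mirror the enum hunk shown below. It illustrates
how a clean end of stream and a stream-side abort are meant to be told apart
once SC_FL_EOS is actually set by the lower layers.

#include <stdio.h>

/* Only the two flags of interest are reproduced here, with the values from
 * the enum sc_flags hunk of this commit. */
enum sc_flags_sketch {
	SC_FL_ABRT_DONE = 0x00008000, /* abort performed by the stream (up side) */
	SC_FL_EOS       = 0x00020000, /* end of stream reported by the endpoint (down side) */
};

/* Input is closed for any reason: a regular end of stream coming from the
 * endpoint, or an abort decided by the stream itself. This is the
 * (SC_FL_EOS|SC_FL_ABRT_DONE) test that this patch spreads everywhere
 * SC_FL_ABRT_DONE used to be tested alone. */
static inline int sc_input_closed_ex(unsigned int flags)
{
	return (flags & (SC_FL_EOS | SC_FL_ABRT_DONE)) != 0;
}

/* Input was aborted by the stream, which is not the same thing as a clean
 * end of stream reported by the endpoint. */
static inline int sc_input_aborted_ex(unsigned int flags)
{
	return (flags & SC_FL_ABRT_DONE) != 0;
}

int main(void)
{
	printf("EOS only  : closed=%d aborted=%d\n",
	       sc_input_closed_ex(SC_FL_EOS), sc_input_aborted_ex(SC_FL_EOS));
	printf("abort done: closed=%d aborted=%d\n",
	       sc_input_closed_ex(SC_FL_ABRT_DONE), sc_input_aborted_ex(SC_FL_ABRT_DONE));
	return 0;
}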


@ -514,7 +514,7 @@ static inline int channel_may_recv(const struct channel *chn)
/* Returns true if the channel's input is already closed */
static inline int channel_input_closed(struct channel *chn)
{
return ((chn_prod(chn)->flags & SC_FL_ABRT_DONE) != 0);
return ((chn_prod(chn)->flags & (SC_FL_ABRT_DONE|SC_FL_EOS)) != 0);
}
/* Returns true if the channel's output is already closed */


@ -272,7 +272,7 @@ static inline void sc_must_kill_conn(struct stconn *sc)
__attribute__((warn_unused_result))
static inline int sc_is_recv_allowed(const struct stconn *sc)
{
if (sc->flags & SC_FL_ABRT_DONE)
if (sc->flags & (SC_FL_ABRT_DONE|SC_FL_EOS))
return 0;
if (sc_ep_test(sc, SE_FL_APPLET_NEED_CONN))
@ -357,7 +357,7 @@ static inline int sc_is_send_allowed(const struct stconn *sc)
static inline int sc_rcv_may_expire(const struct stconn *sc)
{
if ((sc->flags & SC_FL_ABRT_DONE) ||
if ((sc->flags & (SC_FL_ABRT_DONE|SC_FL_EOS)) ||
(sc_ic(sc)->flags & (CF_READ_TIMEOUT|CF_READ_EVENT)))
return 0;
if (sc->flags & (SC_FL_EOI|SC_FL_WONT_READ|SC_FL_NEED_BUFF|SC_FL_NEED_ROOM))


@ -158,10 +158,12 @@ enum sc_flags {
SC_FL_SND_NEVERWAIT = 0x00000800, /* Never wait for sending (permanent) */
SC_FL_SND_EXP_MORE = 0x00001000, /* More data expected to be sent very soon. cleared when all data were sent */
SC_FL_ABRT_WANTED = 0x00002000, /* An abort was requested and must be performed ASAP */
SC_FL_SHUT_WANTED = 0x00004000, /* A shutdown was requested and mux be performed ASAP */
SC_FL_ABRT_WANTED = 0x00002000, /* An abort was requested and must be performed ASAP (up side to down side) */
SC_FL_SHUT_WANTED = 0x00004000, /* A shutdown was requested and mux be performed ASAP (up side to down side) */
SC_FL_ABRT_DONE = 0x00008000, /* An abort was performed for the SC */
SC_FL_SHUT_DONE = 0x00010000, /* A shutdown was performed for the SC */
SC_FL_EOS = 0x00020000, /* End of stream was reached (from down side to up side) */
};
/* This function is used to report flags in debugging tools. Please reflect
@ -178,7 +180,8 @@ static forceinline char *sc_show_flags(char *buf, size_t len, const char *delim,
_(SC_FL_DONT_WAKE, _(SC_FL_INDEP_STR, _(SC_FL_WONT_READ,
_(SC_FL_NEED_BUFF, _(SC_FL_NEED_ROOM,
_(SC_FL_RCV_ONCE, _(SC_FL_SND_ASAP, _(SC_FL_SND_NEVERWAIT, _(SC_FL_SND_EXP_MORE,
_(SC_FL_ABRT_WANTED, _(SC_FL_SHUT_WANTED, _(SC_FL_ABRT_DONE, _(SC_FL_SHUT_DONE))))))))))))))))));
_(SC_FL_ABRT_WANTED, _(SC_FL_SHUT_WANTED, _(SC_FL_ABRT_DONE, _(SC_FL_SHUT_DONE,
_(SC_FL_EOS)))))))))))))))))));
/* epilogue */
_(~0U);
return buf;


@ -484,7 +484,7 @@ int ci_getblk_nc(const struct channel *chn,
char **blk2, size_t *len2)
{
if (unlikely(ci_data(chn) == 0)) {
if (chn_prod(chn)->flags & SC_FL_ABRT_DONE)
if (chn_prod(chn)->flags & (SC_FL_EOS|SC_FL_ABRT_DONE))
return -1;
return 0;
}


@ -2692,7 +2692,7 @@ int pcli_wait_for_request(struct stream *s, struct channel *req, int an_bit)
goto read_again;
missing_data:
if (s->scf->flags & SC_FL_ABRT_DONE) {
if (s->scf->flags & (SC_FL_ABRT_DONE|SC_FL_EOS)) {
/* There is no more request or a only a partial one and we
* receive a close from the client, we can leave */
sc_schedule_shutdown(s->scf);
@ -2741,7 +2741,7 @@ int pcli_wait_for_response(struct stream *s, struct channel *rep, int an_bit)
return 0;
}
if (s->scb->flags & SC_FL_ABRT_DONE) {
if (s->scb->flags & (SC_FL_ABRT_DONE|SC_FL_EOS)) {
/* stream cleanup */
pcli_write_prompt(s);
@ -2855,7 +2855,7 @@ int pcli_wait_for_response(struct stream *s, struct channel *rep, int an_bit)
s->store_count = 0;
s->uniq_id = global.req_count++;
s->scf->flags &= ~(SC_FL_ERROR|SC_FL_ABRT_DONE|SC_FL_ABRT_WANTED);
s->scf->flags &= ~(SC_FL_EOS|SC_FL_ERROR|SC_FL_ABRT_DONE|SC_FL_ABRT_WANTED);
s->scf->flags &= ~SC_FL_SND_NEVERWAIT;
s->scf->flags |= SC_FL_RCV_ONCE; /* one read is usually enough */


@ -1020,7 +1020,7 @@ flt_xfer_data(struct stream *s, struct channel *chn, unsigned int an_bit)
ret = 1;
goto end;
}
if (chn_prod(chn)->flags & SC_FL_ABRT_DONE) {
if (chn_prod(chn)->flags & (SC_FL_ABRT_DONE|SC_FL_EOS)) {
if (((s->flags & SF_HTX) && htx_is_empty(htxbuf(&chn->buf))) || c_empty(chn)) {
ret = 1;
goto end;


@ -151,7 +151,7 @@ static int bwlim_apply_limit(struct filter *filter, struct channel *chn, unsigne
*/
ret = tokens;
if (tokens < conf->min_size) {
ret = (chn_prod(chn)->flags & (SC_FL_EOI|SC_FL_ABRT_DONE))
ret = (chn_prod(chn)->flags & (SC_FL_EOI|SC_FL_EOS|SC_FL_ABRT_DONE))
? MIN(len, conf->min_size)
: conf->min_size;


@ -114,7 +114,7 @@ int http_wait_for_request(struct stream *s, struct channel *req, int an_bit)
/* An abort at this stage means we are performing a "destructive"
* HTTP upgrade (TCP>H2). In this case, we can leave.
*/
if (s->scf->flags & SC_FL_ABRT_DONE) {
if (s->scf->flags & (SC_FL_ABRT_DONE|SC_FL_EOS)) {
s->logs.logwait = 0;
s->logs.level = 0;
stream_abort(s);
@ -766,7 +766,7 @@ int http_process_tarpit(struct stream *s, struct channel *req, int an_bit)
* there and that the timeout has not expired.
*/
channel_dont_connect(req);
if (!(s->scf->flags & SC_FL_ABRT_DONE) &&
if (!(s->scf->flags & (SC_FL_ABRT_DONE|SC_FL_EOS)) &&
!tick_is_expired(req->analyse_exp, now_ms)) {
/* Be sure to drain all data from the request channel */
channel_htx_erase(req, htxbuf(&req->buf));
@ -1002,7 +1002,7 @@ int http_request_forward_body(struct stream *s, struct channel *req, int an_bit)
* it can be abused to exhaust source ports. */
if (s->be->options & PR_O_ABRT_CLOSE) {
channel_auto_read(req);
if ((s->scf->flags & SC_FL_ABRT_DONE) && !(txn->flags & TX_CON_WANT_TUN))
if ((s->scf->flags & (SC_FL_ABRT_DONE|SC_FL_EOS)) && !(txn->flags & TX_CON_WANT_TUN))
s->scb->flags |= SC_FL_NOLINGER;
channel_auto_close(req);
}
@ -1018,7 +1018,7 @@ int http_request_forward_body(struct stream *s, struct channel *req, int an_bit)
missing_data_or_waiting:
/* stop waiting for data if the input is closed before the end */
if (msg->msg_state < HTTP_MSG_ENDING && (s->scf->flags & SC_FL_ABRT_DONE))
if (msg->msg_state < HTTP_MSG_ENDING && (s->scf->flags & (SC_FL_ABRT_DONE|SC_FL_EOS)))
goto return_cli_abort;
waiting:
@ -1132,7 +1132,7 @@ static __inline int do_l7_retry(struct stream *s, struct stconn *sc)
res = &s->res;
/* Remove any write error from the request, and read error from the response */
s->scf->flags &= ~(SC_FL_ABRT_DONE|SC_FL_ABRT_WANTED);
s->scf->flags &= ~(SC_FL_EOS|SC_FL_ABRT_DONE|SC_FL_ABRT_WANTED);
req->flags &= ~CF_WRITE_TIMEOUT;
res->flags &= ~(CF_READ_TIMEOUT | CF_READ_EVENT);
res->analysers &= AN_RES_FLT_END;
@ -1294,8 +1294,8 @@ int http_wait_for_response(struct stream *s, struct channel *rep, int an_bit)
}
/* 3: client abort with an abortonclose */
else if ((s->scb->flags & (SC_FL_ABRT_DONE|SC_FL_SHUT_DONE)) == (SC_FL_ABRT_DONE|SC_FL_SHUT_DONE) &&
(s->scf->flags & SC_FL_ABRT_DONE)) {
else if ((s->scb->flags & (SC_FL_EOS|SC_FL_ABRT_DONE)) && (s->scb->flags & SC_FL_SHUT_DONE) &&
(s->scf->flags & (SC_FL_EOS|SC_FL_ABRT_DONE))) {
_HA_ATOMIC_INC(&sess->fe->fe_counters.cli_aborts);
_HA_ATOMIC_INC(&s->be->be_counters.cli_aborts);
if (sess->listener && sess->listener->counters)
@ -1317,7 +1317,7 @@ int http_wait_for_response(struct stream *s, struct channel *rep, int an_bit)
}
/* 4: close from server, capture the response if the server has started to respond */
else if (s->scb->flags & SC_FL_ABRT_DONE) {
else if (s->scb->flags & (SC_FL_EOS|SC_FL_ABRT_DONE)) {
if ((txn->flags & TX_L7_RETRY) &&
(s->be->retry_type & PR_RE_DISCONNECTED)) {
if (co_data(rep) || do_l7_retry(s, s->scb) == 0) {
@ -2059,7 +2059,7 @@ int http_response_forward_body(struct stream *s, struct channel *res, int an_bit
if (htx->data != co_data(res))
goto missing_data_or_waiting;
if (!(msg->flags & HTTP_MSGF_XFER_LEN) && (s->scb->flags & SC_FL_ABRT_DONE)) {
if (!(msg->flags & HTTP_MSGF_XFER_LEN) && (s->scb->flags & (SC_FL_EOS|SC_FL_ABRT_DONE))) {
msg->msg_state = HTTP_MSG_ENDING;
goto ending;
}
@ -2126,8 +2126,8 @@ int http_response_forward_body(struct stream *s, struct channel *res, int an_bit
* so we don't want to count this as a server abort. Otherwise it's a
* server abort.
*/
if (msg->msg_state < HTTP_MSG_ENDING && (s->scb->flags & SC_FL_ABRT_DONE)) {
if ((s->scf->flags & SC_FL_ABRT_DONE) &&
if (msg->msg_state < HTTP_MSG_ENDING && (s->scb->flags & (SC_FL_EOS|SC_FL_ABRT_DONE))) {
if ((s->scf->flags & (SC_FL_EOS|SC_FL_ABRT_DONE)) &&
(s->scb->flags & SC_FL_SHUT_DONE))
goto return_cli_abort;
/* If we have some pending data, we continue the processing */
@ -2672,7 +2672,7 @@ static enum rule_result http_req_get_intercept_rule(struct proxy *px, struct lis
/* Always call the action function if defined */
if (rule->action_ptr) {
if ((s->scf->flags & SC_FL_ERROR) ||
((s->scf->flags & SC_FL_ABRT_DONE) &&
((s->scf->flags & (SC_FL_EOS|SC_FL_ABRT_DONE)) &&
(px->options & PR_O_ABRT_CLOSE)))
act_opts |= ACT_OPT_FINAL;
@ -2835,7 +2835,7 @@ static enum rule_result http_res_get_intercept_rule(struct proxy *px, struct lis
/* Always call the action function if defined */
if (rule->action_ptr) {
if ((s->scf->flags & SC_FL_ERROR) ||
((s->scf->flags & SC_FL_ABRT_DONE) &&
((s->scf->flags & (SC_FL_EOS|SC_FL_ABRT_DONE)) &&
(px->options & PR_O_ABRT_CLOSE)))
act_opts |= ACT_OPT_FINAL;
@ -4081,7 +4081,7 @@ enum rule_result http_wait_for_msg_body(struct stream *s, struct channel *chn,
}
/* we get here if we need to wait for more data */
if (!(chn_prod(chn)->flags & SC_FL_ABRT_DONE)) {
if (!(chn_prod(chn)->flags & (SC_FL_EOS|SC_FL_ABRT_DONE))) {
if (!tick_isset(chn->analyse_exp))
chn->analyse_exp = tick_add_ifset(now_ms, time);
ret = HTTP_RULE_RES_YIELD;


@ -615,7 +615,7 @@ static int smp_fetch_body(const struct arg *args, struct sample *smp, const char
smp->flags = SMP_F_VOL_TEST;
if (!finished && (check || (chn && !channel_full(chn, global.tune.maxrewrite) &&
!(chn_prod(chn)->flags & (SC_FL_EOI|SC_FL_ABRT_DONE)))))
!(chn_prod(chn)->flags & (SC_FL_EOI|SC_FL_EOS|SC_FL_ABRT_DONE)))))
smp->flags |= SMP_F_MAY_CHANGE;
return 1;


@ -4493,7 +4493,7 @@ static void http_stats_io_handler(struct appctx *appctx)
if (appctx->st0 == STAT_HTTP_POST) {
if (stats_process_http_post(sc))
appctx->st0 = STAT_HTTP_LAST;
else if (s->scf->flags & SC_FL_ABRT_DONE)
else if (s->scf->flags & (SC_FL_EOS|SC_FL_ABRT_DONE))
appctx->st0 = STAT_HTTP_DONE;
}


@ -501,7 +501,7 @@ struct appctx *sc_applet_create(struct stconn *sc, struct applet *app)
static inline int sc_cond_forward_shut(struct stconn *sc)
{
/* The close must not be forwarded */
if (!(sc->flags & SC_FL_ABRT_DONE) || !(sc->flags & SC_FL_NOHALF))
if (!(sc->flags & (SC_FL_EOS|SC_FL_ABRT_DONE)) || !(sc->flags & SC_FL_NOHALF))
return 0;
if (!channel_is_empty(sc_ic(sc))) {
@ -528,7 +528,7 @@ static void sc_app_abort(struct stconn *sc)
{
struct channel *ic = sc_ic(sc);
if (sc->flags & SC_FL_ABRT_DONE)
if (sc->flags & (SC_FL_EOS|SC_FL_ABRT_DONE))
return;
sc->flags |= SC_FL_ABRT_DONE;
@ -579,7 +579,7 @@ static void sc_app_shut(struct stconn *sc)
* However, if SC_FL_NOLINGER is explicitly set, we know there is
* no risk so we close both sides immediately.
*/
if (!(sc->flags & (SC_FL_ERROR|SC_FL_NOLINGER|SC_FL_ABRT_DONE)) &&
if (!(sc->flags & (SC_FL_ERROR|SC_FL_NOLINGER|SC_FL_EOS|SC_FL_ABRT_DONE)) &&
!(ic->flags & CF_DONT_READ))
return;
@ -655,7 +655,7 @@ static void sc_app_abort_conn(struct stconn *sc)
BUG_ON(!sc_conn(sc));
if (sc->flags & SC_FL_ABRT_DONE)
if (sc->flags & (SC_FL_EOS|SC_FL_ABRT_DONE))
return;
sc->flags |= SC_FL_ABRT_DONE;
ic->flags |= CF_READ_EVENT;
@ -725,7 +725,7 @@ static void sc_app_shut_conn(struct stconn *sc)
*/
sc_conn_shutw(sc, CO_SHW_NORMAL);
if (!(sc->flags & SC_FL_ABRT_DONE) && !(ic->flags & CF_DONT_READ))
if (!(sc->flags & (SC_FL_EOS|SC_FL_ABRT_DONE)) && !(ic->flags & CF_DONT_READ))
return;
}
@ -850,7 +850,7 @@ static void sc_app_abort_applet(struct stconn *sc)
BUG_ON(!sc_appctx(sc));
if (sc->flags & SC_FL_ABRT_DONE)
if (sc->flags & (SC_FL_EOS|SC_FL_ABRT_DONE))
return;
sc->flags |= SC_FL_ABRT_DONE;
ic->flags |= CF_READ_EVENT;
@ -903,7 +903,7 @@ static void sc_app_shut_applet(struct stconn *sc)
* However, if SC_FL_NOLINGER is explicitly set, we know there is
* no risk so we close both sides immediately.
*/
if (!(sc->flags & (SC_FL_ERROR|SC_FL_NOLINGER|SC_FL_ABRT_DONE)) &&
if (!(sc->flags & (SC_FL_ERROR|SC_FL_NOLINGER|SC_FL_EOS|SC_FL_ABRT_DONE)) &&
!(ic->flags & CF_DONT_READ))
return;
@ -971,7 +971,7 @@ void sc_update_rx(struct stconn *sc)
{
struct channel *ic = sc_ic(sc);
if (sc->flags & SC_FL_ABRT_DONE)
if (sc->flags & (SC_FL_EOS|SC_FL_ABRT_DONE))
return;
/* Read not closed, update FD status and timeout for reads */
@ -1095,12 +1095,12 @@ static void sc_notify(struct stconn *sc)
/* wake the task up only when needed */
if (/* changes on the production side that must be handled:
* - An error on receipt: SC_FL_ERROR
* - A read event: shutdown for reads (CF_READ_EVENT + ABRT_DONE)
* - A read event: shutdown for reads (CF_READ_EVENT + EOS/ABRT_DONE)
* end of input (CF_READ_EVENT + SC_FL_EOI)
* data received and no fast-forwarding (CF_READ_EVENT + !to_forward)
* read event while consumer side is not established (CF_READ_EVENT + sco->state != SC_ST_EST)
*/
((ic->flags & CF_READ_EVENT) && ((sc->flags & SC_FL_EOI) || (sc->flags & SC_FL_ABRT_DONE) || !ic->to_forward || sco->state != SC_ST_EST)) ||
((ic->flags & CF_READ_EVENT) && ((sc->flags & SC_FL_EOI) || (sc->flags & (SC_FL_EOS|SC_FL_ABRT_DONE)) || !ic->to_forward || sco->state != SC_ST_EST)) ||
(sc->flags & SC_FL_ERROR) ||
/* changes on the consumption side */
@ -1131,7 +1131,7 @@ static void sc_conn_read0(struct stconn *sc)
BUG_ON(!sc_conn(sc));
if (sc->flags & SC_FL_ABRT_DONE)
if (sc->flags & (SC_FL_EOS|SC_FL_ABRT_DONE))
return;
sc->flags |= SC_FL_ABRT_DONE;
ic->flags |= CF_READ_EVENT;
@ -1190,7 +1190,7 @@ static int sc_conn_recv(struct stconn *sc)
return 0;
/* maybe we were called immediately after an asynchronous abort */
if (sc->flags & SC_FL_ABRT_DONE)
if (sc->flags & (SC_FL_EOS|SC_FL_ABRT_DONE))
return 1;
/* we must wait because the mux is not installed yet */
@ -1320,7 +1320,7 @@ static int sc_conn_recv(struct stconn *sc)
*/
while (sc_ep_test(sc, SE_FL_RCV_MORE) ||
(!(conn->flags & CO_FL_HANDSHAKE) &&
(!sc_ep_test(sc, SE_FL_ERROR | SE_FL_EOS)) && !(sc->flags & SC_FL_ABRT_DONE))) {
(!sc_ep_test(sc, SE_FL_ERROR | SE_FL_EOS)) && !(sc->flags & (SC_FL_EOS|SC_FL_ABRT_DONE)))) {
int cur_flags = flags;
/* Compute transient CO_RFL_* flags */
@ -1486,7 +1486,7 @@ static int sc_conn_recv(struct stconn *sc)
ret = 1;
}
else if (!(sc->flags & (SC_FL_WONT_READ|SC_FL_NEED_BUFF|SC_FL_NEED_ROOM)) &&
!(sc->flags & SC_FL_ABRT_DONE)) {
!(sc->flags & (SC_FL_EOS|SC_FL_ABRT_DONE))) {
/* Subscribe to receive events if we're blocking on I/O */
conn->mux->subscribe(sc, SUB_RETRY_RECV, &sc->wait_event);
se_have_no_more_data(sc->sedesc);
@ -1602,7 +1602,7 @@ static int sc_conn_send(struct stconn *sc)
((oc->to_forward && oc->to_forward != CHN_INFINITE_FORWARD) ||
(sc->flags & SC_FL_SND_EXP_MORE) ||
(IS_HTX_STRM(s) &&
(!(sco->flags & (SC_FL_EOI|SC_FL_ABRT_DONE)) && htx_expect_more(htxbuf(&oc->buf)))))) ||
(!(sco->flags & (SC_FL_EOI|SC_FL_EOS|SC_FL_ABRT_DONE)) && htx_expect_more(htxbuf(&oc->buf)))))) ||
((oc->flags & CF_ISRESP) &&
(oc->flags & CF_AUTO_CLOSE) &&
(sc->flags & SC_FL_SHUT_WANTED)))
@ -1766,7 +1766,7 @@ static int sc_conn_process(struct stconn *sc)
* wake callback. Otherwise sc_conn_recv()/sc_conn_send() already take
* care of it.
*/
if (sc_ep_test(sc, SE_FL_EOS) && !(sc->flags & SC_FL_ABRT_DONE)) {
if (sc_ep_test(sc, SE_FL_EOS) && !(sc->flags & (SC_FL_EOS|SC_FL_ABRT_DONE))) {
/* we received a shutdown */
if (ic->flags & CF_AUTO_CLOSE)
sc_schedule_shutdown(sc_opposite(sc));
@ -1853,7 +1853,7 @@ static int sc_applet_process(struct stconn *sc)
/* If the applet wants to write and the channel is closed, it's a
* broken pipe and it must be reported.
*/
if (!sc_ep_test(sc, SE_FL_HAVE_NO_DATA) && (sc->flags & SC_FL_ABRT_DONE))
if (!sc_ep_test(sc, SE_FL_HAVE_NO_DATA) && (sc->flags & (SC_FL_EOS|SC_FL_ABRT_DONE)))
sc_ep_set(sc, SE_FL_ERROR);
/* automatically mark the applet having data available if it reported


@ -954,7 +954,7 @@ static void back_establish(struct stream *s)
}
/* If we managed to get the whole response, and we don't have anything
* left to send, or can't, switch to SC_ST_DIS now. */
if ((s->scb->flags & SC_FL_ABRT_DONE) || (s->scf->flags & SC_FL_SHUT_DONE)) {
if ((s->scb->flags & (SC_FL_EOS|SC_FL_ABRT_DONE)) || (s->scf->flags & SC_FL_SHUT_DONE)) {
s->scb->state = SC_ST_DIS;
DBG_TRACE_STATE("response channel shutdwn for read/write", STRM_EV_STRM_PROC|STRM_EV_CS_ST|STRM_EV_STRM_ERR, s);
}
@ -1590,7 +1590,7 @@ static void stream_handle_timeouts(struct stream *s)
sc_shutdown(s->scb);
}
if (unlikely(!(s->scf->flags & SC_FL_ABRT_DONE) && (s->req.flags & CF_READ_TIMEOUT))) {
if (unlikely(!(s->scf->flags & (SC_FL_EOS|SC_FL_ABRT_DONE)) && (s->req.flags & CF_READ_TIMEOUT))) {
if (s->scf->flags & SC_FL_NOHALF)
s->scf->flags |= SC_FL_NOLINGER;
sc_abort(s->scf);
@ -1600,7 +1600,7 @@ static void stream_handle_timeouts(struct stream *s)
sc_shutdown(s->scf);
}
if (unlikely(!(s->scb->flags & SC_FL_ABRT_DONE) && (s->res.flags & CF_READ_TIMEOUT))) {
if (unlikely(!(s->scb->flags & (SC_FL_EOS|SC_FL_ABRT_DONE)) && (s->res.flags & CF_READ_TIMEOUT))) {
if (s->scb->flags & SC_FL_NOHALF)
s->scb->flags |= SC_FL_NOLINGER;
sc_abort(s->scb);
@ -1783,7 +1783,7 @@ struct task *process_stream(struct task *t, void *context, unsigned int state)
* So let's not run a whole stream processing if only an expiration
* timeout needs to be refreshed.
*/
if (!((scf->flags | scb->flags) & (SC_FL_ERROR|SC_FL_ABRT_DONE|SC_FL_SHUT_DONE)) &&
if (!((scf->flags | scb->flags) & (SC_FL_ERROR|SC_FL_EOS|SC_FL_ABRT_DONE|SC_FL_SHUT_DONE)) &&
!((req->flags | res->flags) & (CF_READ_EVENT|CF_READ_TIMEOUT|CF_WRITE_EVENT|CF_WRITE_TIMEOUT)) &&
!(s->flags & SF_CONN_EXP) &&
((s->pending_events & TASK_WOKEN_ANY) == TASK_WOKEN_TIMER)) {
@ -1953,7 +1953,7 @@ struct task *process_stream(struct task *t, void *context, unsigned int state)
resync_request:
/* Analyse request */
if (((req->flags & ~rqf_last) & CF_MASK_ANALYSER) ||
((scf->flags ^ scf_flags) & (SC_FL_ABRT_DONE|SC_FL_ABRT_WANTED)) ||
((scf->flags ^ scf_flags) & (SC_FL_EOS|SC_FL_ABRT_DONE|SC_FL_ABRT_WANTED)) ||
((scb->flags ^ scb_flags) & (SC_FL_SHUT_DONE|SC_FL_SHUT_WANTED)) ||
(req->analysers && (scb->flags & SC_FL_SHUT_DONE)) ||
scf->state != rq_prod_last ||
@ -2041,10 +2041,10 @@ struct task *process_stream(struct task *t, void *context, unsigned int state)
rq_cons_last = scb->state;
req->flags &= ~CF_WAKE_ONCE;
rqf_last = req->flags;
scf_flags = (scf_flags & ~(SC_FL_ABRT_DONE|SC_FL_ABRT_WANTED)) | (scf->flags & (SC_FL_ABRT_DONE|SC_FL_ABRT_WANTED));
scf_flags = (scf_flags & ~(SC_FL_EOS|SC_FL_ABRT_DONE|SC_FL_ABRT_WANTED)) | (scf->flags & (SC_FL_EOS|SC_FL_ABRT_DONE|SC_FL_ABRT_WANTED));
scb_flags = (scb_flags & ~(SC_FL_SHUT_DONE|SC_FL_SHUT_WANTED)) | (scb->flags & (SC_FL_SHUT_DONE|SC_FL_SHUT_WANTED));
if (((scf->flags ^ scf_flags_ana) & SC_FL_ABRT_DONE) || ((scb->flags ^ scb_flags_ana) & SC_FL_SHUT_DONE))
if (((scf->flags ^ scf_flags_ana) & (SC_FL_EOS|SC_FL_ABRT_DONE)) || ((scb->flags ^ scb_flags_ana) & SC_FL_SHUT_DONE))
goto resync_request;
}
@ -2058,7 +2058,7 @@ struct task *process_stream(struct task *t, void *context, unsigned int state)
/* Analyse response */
if (((res->flags & ~rpf_last) & CF_MASK_ANALYSER) ||
((scb->flags ^ scb_flags) & (SC_FL_ABRT_DONE|SC_FL_ABRT_WANTED)) ||
((scb->flags ^ scb_flags) & (SC_FL_EOS|SC_FL_ABRT_DONE|SC_FL_ABRT_WANTED)) ||
((scf->flags ^ scf_flags) & (SC_FL_SHUT_DONE|SC_FL_SHUT_WANTED)) ||
(res->analysers && (scf->flags & SC_FL_SHUT_DONE)) ||
scf->state != rp_cons_last ||
@ -2114,10 +2114,10 @@ struct task *process_stream(struct task *t, void *context, unsigned int state)
rp_prod_last = scb->state;
res->flags &= ~CF_WAKE_ONCE;
rpf_last = res->flags;
scb_flags = (scb_flags & ~(SC_FL_ABRT_DONE|SC_FL_ABRT_WANTED)) | (scb->flags & (SC_FL_ABRT_DONE|SC_FL_ABRT_WANTED));
scb_flags = (scb_flags & ~(SC_FL_EOS|SC_FL_ABRT_DONE|SC_FL_ABRT_WANTED)) | (scb->flags & (SC_FL_EOS|SC_FL_ABRT_DONE|SC_FL_ABRT_WANTED));
scf_flags = (scf_flags & ~(SC_FL_SHUT_DONE|SC_FL_SHUT_WANTED)) | (scf->flags & (SC_FL_SHUT_DONE|SC_FL_SHUT_WANTED));
if (((scb->flags ^ scb_flags_ana) & SC_FL_ABRT_DONE) || ((scf->flags ^ scf_flags_ana) & SC_FL_SHUT_DONE))
if (((scb->flags ^ scb_flags_ana) & (SC_FL_EOS|SC_FL_ABRT_DONE)) || ((scf->flags ^ scf_flags_ana) & SC_FL_SHUT_DONE))
goto resync_response;
}
@ -2250,7 +2250,7 @@ struct task *process_stream(struct task *t, void *context, unsigned int state)
*/
co_set_data(req, htx->data);
if ((global.tune.options & GTUNE_USE_FAST_FWD) &&
!(scf->flags & SC_FL_ABRT_DONE) && !(scb->flags & SC_FL_SHUT_WANTED))
!(scf->flags & (SC_FL_EOS|SC_FL_ABRT_DONE)) && !(scb->flags & SC_FL_SHUT_WANTED))
channel_htx_forward_forever(req, htx);
}
else {
@ -2259,14 +2259,14 @@ struct task *process_stream(struct task *t, void *context, unsigned int state)
*/
c_adv(req, ci_data(req));
if ((global.tune.options & GTUNE_USE_FAST_FWD) &&
!(scf->flags & SC_FL_ABRT_DONE) && !(scb->flags & SC_FL_SHUT_WANTED))
!(scf->flags & (SC_FL_EOS|SC_FL_ABRT_DONE)) && !(scb->flags & SC_FL_SHUT_WANTED))
channel_forward_forever(req);
}
}
/* check if it is wise to enable kernel splicing to forward request data */
if (!(req->flags & CF_KERN_SPLICING) &&
!(scf->flags & SC_FL_ABRT_DONE) &&
!(scf->flags & (SC_FL_EOS|SC_FL_ABRT_DONE)) &&
req->to_forward &&
(global.tune.options & GTUNE_USE_SPLICE) &&
(sc_conn(scf) && __sc_conn(scf)->xprt && __sc_conn(scf)->xprt->rcv_pipe &&
@ -2282,7 +2282,7 @@ struct task *process_stream(struct task *t, void *context, unsigned int state)
/* reflect what the L7 analysers have seen last */
rqf_last = req->flags;
scf_flags = (scf_flags & ~(SC_FL_ABRT_DONE|SC_FL_ABRT_WANTED)) | (scf->flags & (SC_FL_ABRT_DONE|SC_FL_ABRT_WANTED));
scf_flags = (scf_flags & ~(SC_FL_EOS|SC_FL_ABRT_DONE|SC_FL_ABRT_WANTED)) | (scf->flags & (SC_FL_EOS|SC_FL_ABRT_DONE|SC_FL_ABRT_WANTED));
scb_flags = (scb_flags & ~(SC_FL_SHUT_DONE|SC_FL_SHUT_WANTED)) | (scb->flags & (SC_FL_SHUT_DONE|SC_FL_SHUT_WANTED));
/* it's possible that an upper layer has requested a connection setup or abort.
@ -2363,7 +2363,7 @@ struct task *process_stream(struct task *t, void *context, unsigned int state)
* the other side's timeout as well. However this doesn't have effect during the
* connection setup unless the backend has abortonclose set.
*/
if (unlikely((req->flags & CF_AUTO_CLOSE) && (scf->flags & SC_FL_ABRT_DONE) &&
if (unlikely((req->flags & CF_AUTO_CLOSE) && (scf->flags & (SC_FL_EOS|SC_FL_ABRT_DONE)) &&
!(scb->flags & (SC_FL_SHUT_DONE|SC_FL_SHUT_WANTED)) &&
(scb->state != SC_ST_CON || (s->be->options & PR_O_ABRT_CLOSE)))) {
sc_schedule_shutdown(scb);
@ -2378,12 +2378,12 @@ struct task *process_stream(struct task *t, void *context, unsigned int state)
}
/* shutdown(write) done on server side, we must stop the client too */
if (unlikely((scb->flags & SC_FL_SHUT_DONE) && !(scf->flags & (SC_FL_ABRT_DONE|SC_FL_ABRT_WANTED))) &&
if (unlikely((scb->flags & SC_FL_SHUT_DONE) && !(scf->flags & (SC_FL_EOS|SC_FL_ABRT_DONE|SC_FL_ABRT_WANTED))) &&
!req->analysers)
sc_schedule_abort(scf);
/* shutdown(read) pending */
if (unlikely((scf->flags & (SC_FL_ABRT_DONE|SC_FL_ABRT_WANTED)) == SC_FL_ABRT_WANTED)) {
if (unlikely((scf->flags & (SC_FL_EOS|SC_FL_ABRT_DONE|SC_FL_ABRT_WANTED)) == SC_FL_ABRT_WANTED)) {
if (scf->flags & SC_FL_NOHALF)
scf->flags |= SC_FL_NOLINGER;
sc_abort(scf);
@ -2397,7 +2397,7 @@ struct task *process_stream(struct task *t, void *context, unsigned int state)
goto resync_stconns;
/* otherwise we want to check if we need to resync the req buffer or not */
if (((scf->flags ^ scf_flags) & SC_FL_ABRT_DONE) || ((scb->flags ^ scb_flags) & SC_FL_SHUT_DONE))
if (((scf->flags ^ scf_flags) & (SC_FL_EOS|SC_FL_ABRT_DONE)) || ((scb->flags ^ scb_flags) & SC_FL_SHUT_DONE))
goto resync_request;
/* perform output updates to the response buffer */
@ -2426,7 +2426,7 @@ struct task *process_stream(struct task *t, void *context, unsigned int state)
*/
co_set_data(res, htx->data);
if ((global.tune.options & GTUNE_USE_FAST_FWD) &&
!(scf->flags & SC_FL_ABRT_DONE) && !(scb->flags & SC_FL_SHUT_WANTED))
!(scf->flags & (SC_FL_EOS|SC_FL_ABRT_DONE)) && !(scb->flags & SC_FL_SHUT_WANTED))
channel_htx_forward_forever(res, htx);
}
else {
@ -2435,7 +2435,7 @@ struct task *process_stream(struct task *t, void *context, unsigned int state)
*/
c_adv(res, ci_data(res));
if ((global.tune.options & GTUNE_USE_FAST_FWD) &&
!(scf->flags & SC_FL_ABRT_DONE) && !(scb->flags & SC_FL_SHUT_WANTED))
!(scf->flags & (SC_FL_EOS|SC_FL_ABRT_DONE)) && !(scb->flags & SC_FL_SHUT_WANTED))
channel_forward_forever(res);
}
@ -2446,16 +2446,16 @@ struct task *process_stream(struct task *t, void *context, unsigned int state)
if (!req->analysers && s->tunnel_timeout) {
scf->ioto = scb->ioto = s->tunnel_timeout;
if ((scf->flags & (SC_FL_ABRT_DONE|SC_FL_SHUT_DONE)) && tick_isset(sess->fe->timeout.clientfin))
if ((scf->flags & (SC_FL_EOS|SC_FL_ABRT_DONE|SC_FL_SHUT_DONE)) && tick_isset(sess->fe->timeout.clientfin))
scf->ioto = sess->fe->timeout.clientfin;
if ((scb->flags & (SC_FL_ABRT_DONE|SC_FL_SHUT_DONE)) && tick_isset(s->be->timeout.serverfin))
if ((scb->flags & (SC_FL_EOS|SC_FL_ABRT_DONE|SC_FL_SHUT_DONE)) && tick_isset(s->be->timeout.serverfin))
scb->ioto = s->be->timeout.serverfin;
}
}
/* check if it is wise to enable kernel splicing to forward response data */
if (!(res->flags & CF_KERN_SPLICING) &&
!(scb->flags & SC_FL_ABRT_DONE) &&
!(scb->flags & (SC_FL_EOS|SC_FL_ABRT_DONE)) &&
res->to_forward &&
(global.tune.options & GTUNE_USE_SPLICE) &&
(sc_conn(scf) && __sc_conn(scf)->xprt && __sc_conn(scf)->xprt->snd_pipe &&
@ -2471,7 +2471,7 @@ struct task *process_stream(struct task *t, void *context, unsigned int state)
/* reflect what the L7 analysers have seen last */
rpf_last = res->flags;
scb_flags = (scb_flags & ~(SC_FL_ABRT_DONE|SC_FL_ABRT_WANTED)) | (scb->flags & (SC_FL_ABRT_DONE|SC_FL_ABRT_WANTED));
scb_flags = (scb_flags & ~(SC_FL_EOS|SC_FL_ABRT_DONE|SC_FL_ABRT_WANTED)) | (scb->flags & (SC_FL_EOS|SC_FL_ABRT_DONE|SC_FL_ABRT_WANTED));
scf_flags = (scf_flags & ~(SC_FL_SHUT_DONE|SC_FL_SHUT_WANTED)) | (scf->flags & (SC_FL_SHUT_DONE|SC_FL_SHUT_WANTED));
/* Let's see if we can send the pending response now */
@ -2486,7 +2486,7 @@ struct task *process_stream(struct task *t, void *context, unsigned int state)
*/
/* first, let's check if the response buffer needs to shutdown(write) */
if (unlikely((res->flags & CF_AUTO_CLOSE) && (scb->flags & SC_FL_ABRT_DONE) &&
if (unlikely((res->flags & CF_AUTO_CLOSE) && (scb->flags & (SC_FL_EOS|SC_FL_ABRT_DONE)) &&
!(scf->flags & (SC_FL_SHUT_DONE|SC_FL_SHUT_WANTED)))) {
sc_schedule_shutdown(scf);
}
@ -2498,12 +2498,12 @@ struct task *process_stream(struct task *t, void *context, unsigned int state)
}
/* shutdown(write) done on the client side, we must stop the server too */
if (unlikely((scf->flags & SC_FL_SHUT_DONE) && !(scb->flags & (SC_FL_ABRT_DONE|SC_FL_ABRT_WANTED))) &&
if (unlikely((scf->flags & SC_FL_SHUT_DONE) && !(scb->flags & (SC_FL_EOS|SC_FL_ABRT_DONE|SC_FL_ABRT_WANTED))) &&
!res->analysers)
sc_schedule_abort(scb);
/* shutdown(read) pending */
if (unlikely((scb->flags & (SC_FL_ABRT_DONE|SC_FL_ABRT_WANTED)) == SC_FL_ABRT_WANTED)) {
if (unlikely((scb->flags & (SC_FL_EOS|SC_FL_ABRT_DONE|SC_FL_ABRT_WANTED)) == SC_FL_ABRT_WANTED)) {
if (scb->flags & SC_FL_NOHALF)
scb->flags |= SC_FL_NOLINGER;
sc_abort(scb);
@ -2518,7 +2518,7 @@ struct task *process_stream(struct task *t, void *context, unsigned int state)
if ((req->flags & ~rqf_last) & CF_MASK_ANALYSER)
goto resync_request;
if (((scb->flags ^ scb_flags) & (SC_FL_ABRT_DONE|SC_FL_ABRT_WANTED)) ||
if (((scb->flags ^ scb_flags) & (SC_FL_EOS|SC_FL_ABRT_DONE|SC_FL_ABRT_WANTED)) ||
((scf->flags ^ scf_flags) & (SC_FL_SHUT_DONE|SC_FL_SHUT_WANTED)))
goto resync_response;


@ -116,7 +116,7 @@ int tcp_inspect_request(struct stream *s, struct channel *req, int an_bit)
* - if one rule returns KO, then return KO
*/
if ((s->scf->flags & (SC_FL_EOI|SC_FL_ABRT_DONE)) || channel_full(req, global.tune.maxrewrite) ||
if ((s->scf->flags & (SC_FL_EOI|SC_FL_EOS|SC_FL_ABRT_DONE)) || channel_full(req, global.tune.maxrewrite) ||
sc_waiting_room(s->scf) ||
!s->be->tcp_req.inspect_delay || tick_is_expired(s->rules_exp, now_ms)) {
partial = SMP_OPT_FINAL;
@ -298,7 +298,7 @@ int tcp_inspect_response(struct stream *s, struct channel *rep, int an_bit)
* - if one rule returns OK, then return OK
* - if one rule returns KO, then return KO
*/
if ((s->scb->flags & (SC_FL_EOI|SC_FL_ABRT_DONE)) || channel_full(rep, global.tune.maxrewrite) ||
if ((s->scb->flags & (SC_FL_EOI|SC_FL_EOS|SC_FL_ABRT_DONE)) || channel_full(rep, global.tune.maxrewrite) ||
sc_waiting_room(s->scb) ||
!s->be->tcp_rep.inspect_delay || tick_is_expired(s->rules_exp, now_ms)) {
partial = SMP_OPT_FINAL;