MEDIUM: conn-stream: Move remaining flags from CS to endpoint

All the old CS_FL_* flags are now moved into the endpoint scope and renamed
CS_EP_* accordingly. It is a systematic replacement. There is no real change
except for the health checks and the endpoint reset. These are a bit special
because the same conn-stream is reused, so endpoint allocation errors must now
be handled. To do so, cs_reset_endp() has been adapted.

Thanks to this last change, it will now be possible to simplify the
multiplexers and probably the applets too. A review must also be performed to
remove some flags from the channel or the stream-interface. The HTX will
probably be simplified too. Finally, there is now some room in the
conn-stream to move information from the stream-interface.
Christopher Faulet 2022-03-24 10:27:02 +01:00
parent 9ec2f4dc7c
commit b041b23ae4
19 changed files with 188 additions and 170 deletions
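
For readers skimming the diff below, the essence of the change is that every cs->flags / CS_FL_* access becomes cs->endp->flags / CS_EP_*, and that cs_reset_endp() now allocates the replacement endpoint before detaching the old one, so an allocation failure can be reported on the endpoint itself. The following is a minimal, self-contained C sketch of that reset pattern; the types, flag values and helpers are simplified stand-ins for illustration only, not HAProxy's real structures.

#include <stdio.h>
#include <stdlib.h>

/* Simplified stand-ins for the real CS_EP_* flags; values are arbitrary here,
 * see conn_stream-t.h in the diff for the actual definitions. */
enum {
	CS_EP_ERROR    = 0x00010000, /* a fatal error was reported on the endpoint */
	CS_EP_DETACHED = 0x00000001, /* endpoint is not attached to a mux or applet */
};

struct cs_endpoint {
	unsigned int flags;          /* CS_EP_* flags now live here, not on the CS */
};

struct conn_stream {
	struct cs_endpoint *endp;    /* always accessed as cs->endp->flags */
};

static struct cs_endpoint *cs_endpoint_new(void)
{
	return calloc(1, sizeof(struct cs_endpoint));
}

static void cs_detach_endp(struct conn_stream *cs)
{
	/* simplified: just release the current endpoint */
	free(cs->endp);
	cs->endp = NULL;
}

/* Allocate the replacement endpoint before detaching the old one, so an
 * allocation failure can still be reported via CS_EP_ERROR on the endpoint
 * that remains attached. This mirrors the new cs_reset_endp() logic. */
static int cs_reset_endp(struct conn_stream *cs)
{
	struct cs_endpoint *new_endp = cs_endpoint_new();

	if (!new_endp) {
		cs->endp->flags |= CS_EP_ERROR;
		return -1;
	}
	cs_detach_endp(cs);
	cs->endp = new_endp;
	cs->endp->flags |= CS_EP_DETACHED;
	return 0;
}

int main(void)
{
	struct conn_stream cs = { .endp = cs_endpoint_new() };

	if (!cs.endp)
		return 1;
	if (cs_reset_endp(&cs) < 0)
		printf("reset failed, CS_EP_ERROR set on the old endpoint\n");
	else
		printf("endpoint replaced, flags=0x%08x\n", cs.endp->flags);
	cs_detach_endp(&cs);
	return 0;
}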


@@ -1523,7 +1523,7 @@ static void promex_appctx_handle_io(struct appctx *appctx)
 channel_add_input(res, 1);
 }
 res_htx->flags |= HTX_FL_EOM;
-si->cs->flags |= CS_FL_EOI;
+si->cs->endp->flags |= CS_EP_EOI;
 res->flags |= CF_EOI;
 appctx->st0 = PROMEX_ST_END;
 /* fall through */


@@ -189,6 +189,12 @@ void show_endp_flags(unsigned int f)
 }
 SHOW_FLAG(f, CS_EP_KILL_CONN);
 SHOW_FLAG(f, CS_EP_WAIT_FOR_HS);
+SHOW_FLAG(f, CS_EP_WANT_ROOM);
+SHOW_FLAG(f, CS_EP_ERROR);
+SHOW_FLAG(f, CS_EP_ERR_PENDING);
+SHOW_FLAG(f, CS_EP_EOS);
+SHOW_FLAG(f, CS_EP_EOI);
+SHOW_FLAG(f, CS_EP_RCV_MORE);
 SHOW_FLAG(f, CS_EP_MAY_SPLICE);
 SHOW_FLAG(f, CS_EP_WEBSOCKET);
 SHOW_FLAG(f, CS_EP_NOT_FIRST);
@@ -213,12 +219,6 @@ void show_cs_flags(unsigned int f)
 printf("0\n");
 return;
 }
-SHOW_FLAG(f, CS_FL_EOI);
-SHOW_FLAG(f, CS_FL_EOS);
-SHOW_FLAG(f, CS_FL_ERR_PENDING);
-SHOW_FLAG(f, CS_FL_WANT_ROOM);
-SHOW_FLAG(f, CS_FL_RCV_MORE);
-SHOW_FLAG(f, CS_FL_ERROR);
 if (f) {
 printf("EXTRA(0x%08x)", f);


@@ -54,30 +54,30 @@ struct stream_interface;
 /* following flags are supposed to be set by the endpoint and read by
  * the app layer :
  */
+/* Permanent flags */
 CS_EP_NOT_FIRST = 0x00001000, /* This conn-stream is not the first one for the endpoint */
 CS_EP_WEBSOCKET = 0x00002000, /* The endpoint uses the websocket proto */
-CS_EP_MAY_SPLICE = 0x00004000, /* The endpoint may use the kernel splicing to forward data to the other side (implies CS_EP_CAN_SPLICE) */
-/* unused: 0x00008000 */
+CS_EP_EOI = 0x00004000, /* end-of-input reached */
+CS_EP_EOS = 0x00008000, /* End of stream delivered to data layer */
+CS_EP_ERROR = 0x00010000, /* a fatal error was reported */
+/* Transient flags */
+CS_EP_ERR_PENDING= 0x00020000, /* An error is pending, but there's still data to be read */
+CS_EP_MAY_SPLICE = 0x00040000, /* The endpoint may use the kernel splicing to forward data to the other side (implies CS_EP_CAN_SPLICE) */
+CS_EP_RCV_MORE = 0x00080000, /* Endpoint may have more bytes to transfer */
+CS_EP_WANT_ROOM = 0x00100000, /* More bytes to transfer, but not enough room */
+/* unused: 0x00200000 .. 0x00800000 */
 /* following flags are supposed to be set by the app layer and read by
  * the endpoint :
  */
-CS_EP_WAIT_FOR_HS = 0x00010000, /* This stream is waiting for handhskae */
-CS_EP_KILL_CONN = 0x00020000, /* must kill the connection when the CS closes */
+CS_EP_WAIT_FOR_HS = 0x01000000, /* This stream is waiting for handhskae */
+CS_EP_KILL_CONN = 0x02000000, /* must kill the connection when the CS closes */
 };
 /* conn_stream flags */
 enum {
 CS_FL_NONE = 0x00000000, /* Just for initialization purposes */
-CS_FL_ERROR = 0x00000100, /* a fatal error was reported */
-CS_FL_RCV_MORE = 0x00000200, /* We may have more bytes to transfer */
-CS_FL_WANT_ROOM = 0x00000400, /* More bytes to transfer, but not enough room */
-CS_FL_ERR_PENDING = 0x00000800, /* An error is pending, but there's still data to be read */
-CS_FL_EOS = 0x00001000, /* End of stream delivered to data layer */
-/* unused: 0x00002000 */
-CS_FL_EOI = 0x00004000, /* end-of-input reached */
 };
 /* cs_shutr() modes */


@@ -210,13 +210,13 @@ static inline void cs_drain_and_close(struct conn_stream *cs)
 cs_shutr(cs, CS_SHR_DRAIN);
 }
-/* sets CS_FL_ERROR or CS_FL_ERR_PENDING on the cs */
+/* sets CS_EP_ERROR or CS_EP_ERR_PENDING on the cs */
 static inline void cs_set_error(struct conn_stream *cs)
 {
-if (cs->flags & CS_FL_EOS)
-cs->flags |= CS_FL_ERROR;
+if (cs->endp->flags & CS_EP_EOS)
+cs->endp->flags |= CS_EP_ERROR;
 else
-cs->flags |= CS_FL_ERR_PENDING;
+cs->endp->flags |= CS_EP_ERR_PENDING;
 }
 /* Retrieves any valid conn_stream from this connection, preferably the first


@@ -1743,7 +1743,7 @@ static int connect_server(struct stream *s)
  * sockets, socket pairs, and occasionally TCP connections on the
  * loopback on a heavily loaded system.
  */
-if ((srv_conn->flags & CO_FL_ERROR || s->csb->flags & CS_FL_ERROR))
+if ((srv_conn->flags & CO_FL_ERROR || s->csb->endp->flags & CS_EP_ERROR))
 cs_si(s->csb)->flags |= SI_FL_ERR;
 /* If we had early data, and the handshake ended, then
@@ -1769,7 +1769,7 @@ static int connect_server(struct stream *s)
  * wake callback. Otherwise si_cs_recv()/si_cs_send() already take
  * care of it.
  */
-if ((s->csb->flags & CS_FL_EOI) && !(si_ic(cs_si(s->csb))->flags & CF_EOI))
+if ((s->csb->endp->flags & CS_EP_EOI) && !(si_ic(cs_si(s->csb))->flags & CF_EOI))
 si_ic(cs_si(s->csb))->flags |= (CF_EOI|CF_READ_PARTIAL);
 /* catch all sync connect while the mux is not already installed */


@@ -1500,7 +1500,7 @@ static void http_cache_io_handler(struct appctx *appctx)
 if (appctx->st0 == HTX_CACHE_EOM) {
 /* no more data are expected. */
 res_htx->flags |= HTX_FL_EOM;
-si->cs->flags |= CS_FL_EOI;
+si->cs->endp->flags |= CS_EP_EOI;
 res->flags |= CF_EOI;
 appctx->st0 = HTX_CACHE_END;
 }


@@ -795,7 +795,7 @@ void chk_report_conn_err(struct check *check, int errno_bck, int expired)
 retrieve_errno_from_socket(conn);
 if (conn && !(conn->flags & CO_FL_ERROR) &&
-cs && !(cs->flags & CS_FL_ERROR) && !expired)
+cs && !(cs->endp->flags & CS_EP_ERROR) && !expired)
 return;
 TRACE_ENTER(CHK_EV_HCHK_END|CHK_EV_HCHK_ERR, check, 0, 0, (size_t[]){expired});
@@ -914,7 +914,7 @@ void chk_report_conn_err(struct check *check, int errno_bck, int expired)
 }
 else if (conn->flags & CO_FL_WAIT_L4_CONN) {
 /* L4 not established (yet) */
-if (conn->flags & CO_FL_ERROR || cs->flags & CS_FL_ERROR)
+if (conn->flags & CO_FL_ERROR || cs->endp->flags & CS_EP_ERROR)
 set_server_check_status(check, HCHK_STATUS_L4CON, err_msg);
 else if (expired)
 set_server_check_status(check, HCHK_STATUS_L4TOUT, err_msg);
@@ -929,12 +929,12 @@ void chk_report_conn_err(struct check *check, int errno_bck, int expired)
 }
 else if (conn->flags & CO_FL_WAIT_L6_CONN) {
 /* L6 not established (yet) */
-if (conn->flags & CO_FL_ERROR || cs->flags & CS_FL_ERROR)
+if (conn->flags & CO_FL_ERROR || cs->endp->flags & CS_EP_ERROR)
 set_server_check_status(check, HCHK_STATUS_L6RSP, err_msg);
 else if (expired)
 set_server_check_status(check, HCHK_STATUS_L6TOUT, err_msg);
 }
-else if (conn->flags & CO_FL_ERROR || cs->flags & CS_FL_ERROR) {
+else if (conn->flags & CO_FL_ERROR || cs->endp->flags & CS_EP_ERROR) {
 /* I/O error after connection was established and before we could diagnose */
 set_server_check_status(check, HCHK_STATUS_SOCKERR, err_msg);
 }
@@ -1037,7 +1037,7 @@ static int wake_srv_chk(struct conn_stream *cs)
 cs = check->cs;
 conn = cs_conn(cs);
-if (unlikely(!conn || !cs || conn->flags & CO_FL_ERROR || cs->flags & CS_FL_ERROR)) {
+if (unlikely(!conn || !cs || conn->flags & CO_FL_ERROR || cs->endp->flags & CS_EP_ERROR)) {
 /* We may get error reports bypassing the I/O handlers, typically
  * the case when sending a pure TCP check which fails, then the I/O
  * handlers above are not called. This is completely handled by the
@@ -1135,13 +1135,17 @@ struct task *process_chk_conn(struct task *t, void *context, unsigned int state)
 check->current_step = NULL;
-if (check->cs->flags & CS_FL_ERROR) {
-check->cs->flags &= ~CS_FL_ERROR;
+if (!check->cs->endp) {
+/* CS endpoint may be NULL if a previous reset
+ * failed. Try to allocate a new one and report a
+ * SOCKERR if it fails.
+ */
 check->cs->endp = cs_endpoint_new();
-if (!check->cs->endp)
-check->cs->flags |= CS_FL_ERROR;
-else
-check->cs->endp->flags |= CS_EP_DETACHED;
+if (!check->cs->endp) {
+set_server_check_status(check, HCHK_STATUS_SOCKERR, NULL);
+goto end;
+}
+check->cs->endp->flags |= CS_EP_DETACHED;
 }
 tcpcheck_main(check);
 expired = 0;
@@ -1157,7 +1161,7 @@ struct task *process_chk_conn(struct task *t, void *context, unsigned int state)
 /* Here the connection must be defined. Otherwise the
  * error would have already been detected
  */
-if ((conn && ((conn->flags & CO_FL_ERROR) || (check->cs->flags & CS_FL_ERROR))) || expired) {
+if ((conn && ((conn->flags & CO_FL_ERROR) || (check->cs->endp->flags & CS_EP_ERROR))) || expired) {
 TRACE_ERROR("report connection error", CHK_EV_TASK_WAKE|CHK_EV_HCHK_END|CHK_EV_HCHK_ERR, check);
 chk_report_conn_err(check, 0, expired);
 }
@@ -1165,9 +1169,8 @@ struct task *process_chk_conn(struct task *t, void *context, unsigned int state)
 if (check->state & CHK_ST_CLOSE_CONN) {
 TRACE_DEVEL("closing current connection", CHK_EV_TASK_WAKE|CHK_EV_HCHK_RUN, check);
 check->state &= ~CHK_ST_CLOSE_CONN;
-if (cs_reset_endp(check->cs) < 0)
-check->cs->flags |= CS_FL_ERROR;
 conn = NULL;
+cs_reset_endp(check->cs); /* error will be handled by tcpcheck_main() */
 tcpcheck_main(check);
 }
 if (check->result == CHK_RES_UNKNOWN) {
@@ -1201,12 +1204,9 @@ struct task *process_chk_conn(struct task *t, void *context, unsigned int state)
  */
 tasklet_remove_from_tasklet_list(check->wait_list.tasklet);
-if (cs_reset_endp(check->cs) < 0) {
-/* If an error occurred at this stage, it will be fixed by the
- * next check
- */
-check->cs->flags |= CS_FL_ERROR;
-}
+/* Force detach on error. the endpoint will be recreated on the next start */
+if (cs_reset_endp(check->cs) < 0)
+cs_detach_endp(check->cs);
 if (check->sess != NULL) {
 vars_prune(&check->vars, check->sess, NULL);
@@ -1214,6 +1214,7 @@ struct task *process_chk_conn(struct task *t, void *context, unsigned int state)
 check->sess = NULL;
 }
+end:
 if (check->server && likely(!(check->state & CHK_ST_PURGE))) {
 if (check->result == CHK_RES_FAILED) {
 /* a failure or timeout detected */


@@ -222,6 +222,9 @@ int cs_attach_strm(struct conn_stream *cs, struct stream *strm)
  */
 void cs_detach_endp(struct conn_stream *cs)
 {
+if (!cs->endp)
+goto reset_cs;
 if (cs->endp->flags & CS_EP_T_MUX) {
 struct connection *conn = cs_conn(cs);
@@ -260,6 +263,7 @@ void cs_detach_endp(struct conn_stream *cs)
 cs->endp->flags |= CS_EP_DETACHED;
 }
+reset_cs:
 /* FIXME: Rest CS for now but must be reviewed. CS flags are only
  * connection related for now but this will evolved
  */
@@ -285,15 +289,29 @@ void cs_detach_app(struct conn_stream *cs)
 int cs_reset_endp(struct conn_stream *cs)
 {
+struct cs_endpoint *new_endp;
 BUG_ON(!cs->app);
-cs_detach_endp(cs);
-if (!cs->endp) {
-cs->endp = cs_endpoint_new();
-if (!cs->endp) {
-cs->flags |= CS_FL_ERROR;
-return -1;
-}
-cs->endp->flags |= CS_EP_DETACHED;
+if (!__cs_endp_target(cs)) {
+/* endpoint not attached or attached to a mux with no
+ * target. Thus the endpoint will not be release but just
+ * reset
+ */
+cs_detach_endp(cs);
+return 0;
 }
+/* allocate the new endpoint first to be able to set error if it
+ * fails */
+new_endp = cs_endpoint_new();
+if (!unlikely(new_endp)) {
+cs->endp->flags |= CS_EP_ERROR;
+return -1;
+}
+cs_detach_endp(cs);
+BUG_ON(cs->endp);
+cs->endp = new_endp;
+cs->endp->flags |= CS_EP_DETACHED;
 return 0;
 }


@@ -9576,7 +9576,7 @@ void hlua_applet_http_fct(struct appctx *ctx)
 }
 res_htx->flags |= HTX_FL_EOM;
-si->cs->flags |= CS_FL_EOI;
+si->cs->endp->flags |= CS_EP_EOI;
 res->flags |= CF_EOI;
 strm->txn->status = ctx->ctx.hlua_apphttp.status;
 ctx->ctx.hlua_apphttp.flags |= APPLET_RSP_SENT;


@@ -1256,7 +1256,6 @@ static __inline int do_l7_retry(struct stream *s, struct stream_interface *si)
 res->total = 0;
 if (cs_reset_endp(s->csb) < 0) {
-s->csb->flags |= CS_FL_ERROR;
 if (!(s->flags & SF_ERR_MASK))
 s->flags |= SF_ERR_INTERNAL;
 return -1;


@@ -735,7 +735,7 @@ static void httpclient_applet_io_handler(struct appctx *appctx)
 /* if the request contains the HTX_FL_EOM, we finished the request part. */
 if (htx->flags & HTX_FL_EOM) {
-si->cs->flags |= CS_FL_EOI;
+si->cs->endp->flags |= CS_EP_EOI;
 req->flags |= CF_EOI;
 appctx->st0 = HTTPCLIENT_S_RES_STLINE;
 }


@@ -1011,7 +1011,7 @@ static inline void fcgi_strm_close(struct fcgi_strm *fstrm)
 if (!fstrm->id)
 fstrm->fconn->nb_reserved--;
 if (fstrm->cs) {
-if (!(fstrm->cs->flags & CS_FL_EOS) && !b_data(&fstrm->rxbuf))
+if (!(fstrm->endp->flags & CS_EP_EOS) && !b_data(&fstrm->rxbuf))
 fcgi_strm_notify_recv(fstrm);
 }
 fstrm->state = FCGI_SS_CLOSED;
@@ -1148,8 +1148,8 @@ static struct fcgi_strm *fcgi_conn_stream_new(struct fcgi_conn *fconn, struct co
 return NULL;
 }
-/* Wakes a specific stream and assign its conn_stream some CS_FL_* flags among
- * CS_FL_ERR_PENDING and CS_FL_ERROR if needed. The stream's state is
+/* Wakes a specific stream and assign its conn_stream some CS_EP_* flags among
+ * CS_EP_ERR_PENDING and CS_EP_ERROR if needed. The stream's state is
  * automatically updated accordingly. If the stream is orphaned, it is
  * destroyed.
  */
@@ -1176,9 +1176,9 @@ static void fcgi_strm_wake_one_stream(struct fcgi_strm *fstrm)
 }
 if ((fconn->state == FCGI_CS_CLOSED || fconn->conn->flags & CO_FL_ERROR)) {
-fstrm->cs->flags |= CS_FL_ERR_PENDING;
-if (fstrm->cs->flags & CS_FL_EOS)
-fstrm->cs->flags |= CS_FL_ERROR;
+fstrm->endp->flags |= CS_EP_ERR_PENDING;
+if (fstrm->endp->flags & CS_EP_EOS)
+fstrm->endp->flags |= CS_EP_ERROR;
 if (fstrm->state < FCGI_SS_ERROR) {
 fstrm->state = FCGI_SS_ERROR;
@@ -2626,10 +2626,10 @@ static void fcgi_process_demux(struct fcgi_conn *fconn)
 fcgi_conn_read0_pending(fconn) ||
 fstrm->state == FCGI_SS_CLOSED ||
 (fstrm->flags & FCGI_SF_ES_RCVD) ||
-(fstrm->cs->flags & (CS_FL_ERROR|CS_FL_ERR_PENDING|CS_FL_EOS)))) {
+(fstrm->endp->flags & (CS_EP_ERROR|CS_EP_ERR_PENDING|CS_EP_EOS)))) {
 /* we may have to signal the upper layers */
 TRACE_DEVEL("notifying stream before switching SID", FCGI_EV_RX_RECORD|FCGI_EV_STRM_WAKE, fconn->conn, fstrm);
-fstrm->cs->flags |= CS_FL_RCV_MORE;
+fstrm->endp->flags |= CS_EP_RCV_MORE;
 fcgi_strm_notify_recv(fstrm);
 }
 fstrm = tmp_fstrm;
@@ -2707,10 +2707,10 @@ static void fcgi_process_demux(struct fcgi_conn *fconn)
 fcgi_conn_read0_pending(fconn) ||
 fstrm->state == FCGI_SS_CLOSED ||
 (fstrm->flags & FCGI_SF_ES_RCVD) ||
-(fstrm->cs->flags & (CS_FL_ERROR|CS_FL_ERR_PENDING|CS_FL_EOS)))) {
+(fstrm->endp->flags & (CS_EP_ERROR|CS_EP_ERR_PENDING|CS_EP_EOS)))) {
 /* we may have to signal the upper layers */
 TRACE_DEVEL("notifying stream before switching SID", FCGI_EV_RX_RECORD|FCGI_EV_STRM_WAKE, fconn->conn, fstrm);
-fstrm->cs->flags |= CS_FL_RCV_MORE;
+fstrm->endp->flags |= CS_EP_RCV_MORE;
 fcgi_strm_notify_recv(fstrm);
 }
@@ -3961,18 +3961,18 @@ static size_t fcgi_rcv_buf(struct conn_stream *cs, struct buffer *buf, size_t co
 TRACE_STATE("fstrm rxbuf not allocated", FCGI_EV_STRM_RECV|FCGI_EV_FSTRM_BLK, fconn->conn, fstrm);
 if (b_data(&fstrm->rxbuf))
-cs->flags |= (CS_FL_RCV_MORE | CS_FL_WANT_ROOM);
+cs->endp->flags |= (CS_EP_RCV_MORE | CS_EP_WANT_ROOM);
 else {
-cs->flags &= ~(CS_FL_RCV_MORE | CS_FL_WANT_ROOM);
+cs->endp->flags &= ~(CS_EP_RCV_MORE | CS_EP_WANT_ROOM);
 if (fstrm->state == FCGI_SS_ERROR || (fstrm->h1m.state == H1_MSG_DONE)) {
-cs->flags |= CS_FL_EOI;
+cs->endp->flags |= CS_EP_EOI;
 if (!(fstrm->h1m.flags & (H1_MF_VER_11|H1_MF_XFER_LEN)))
-cs->flags |= CS_FL_EOS;
+cs->endp->flags |= CS_EP_EOS;
 }
 if (fcgi_conn_read0_pending(fconn))
-cs->flags |= CS_FL_EOS;
-if (cs->flags & CS_FL_ERR_PENDING)
-cs->flags |= CS_FL_ERROR;
+cs->endp->flags |= CS_EP_EOS;
+if (cs->endp->flags & CS_EP_ERR_PENDING)
+cs->endp->flags |= CS_EP_ERROR;
 fcgi_release_buf(fconn, &fstrm->rxbuf);
 }
@@ -4025,7 +4025,7 @@ static size_t fcgi_snd_buf(struct conn_stream *cs, struct buffer *buf, size_t co
 if (id < 0) {
 fcgi_strm_close(fstrm);
-cs->flags |= CS_FL_ERROR;
+cs->endp->flags |= CS_EP_ERROR;
 TRACE_DEVEL("couldn't get a stream ID, leaving in error", FCGI_EV_STRM_SEND|FCGI_EV_FSTRM_ERR|FCGI_EV_STRM_ERR, fconn->conn, fstrm);
 return 0;
 }


@@ -1900,7 +1900,7 @@ static size_t h1_process_demux(struct h1c *h1c, struct buffer *buf, size_t count
  */
 if (((h1m->state == H1_MSG_DONE) && (h1m->flags & H1_MF_RESP)) ||
 ((h1m->state == H1_MSG_DONE) && (h1s->meth != HTTP_METH_CONNECT) && !(h1m->flags & H1_MF_CONN_UPG)))
-h1s->cs->flags |= CS_FL_EOI;
+h1s->endp->flags |= CS_EP_EOI;
 out:
 /* When Input data are pending for this message, notify upper layer that
@@ -1910,20 +1910,20 @@ static size_t h1_process_demux(struct h1c *h1c, struct buffer *buf, size_t count
  * - Headers or trailers are pending to be copied.
  */
 if (h1s->flags & (H1S_F_RX_CONGESTED)) {
-h1s->cs->flags |= CS_FL_RCV_MORE | CS_FL_WANT_ROOM;
+h1s->endp->flags |= CS_EP_RCV_MORE | CS_EP_WANT_ROOM;
 TRACE_STATE("waiting for more room", H1_EV_RX_DATA|H1_EV_H1S_BLK, h1c->conn, h1s);
 }
 else {
-h1s->cs->flags &= ~(CS_FL_RCV_MORE | CS_FL_WANT_ROOM);
+h1s->endp->flags &= ~(CS_EP_RCV_MORE | CS_EP_WANT_ROOM);
 if (h1s->flags & H1S_F_REOS) {
-h1s->cs->flags |= CS_FL_EOS;
+h1s->endp->flags |= CS_EP_EOS;
 if (h1m->state >= H1_MSG_DONE || !(h1m->flags & H1_MF_XFER_LEN)) {
 /* DONE or TUNNEL or SHUTR without XFER_LEN, set
  * EOI on the conn-stream */
-h1s->cs->flags |= CS_FL_EOI;
+h1s->endp->flags |= CS_EP_EOI;
 }
 else if (h1m->state > H1_MSG_LAST_LF && h1m->state < H1_MSG_DONE) {
-h1s->cs->flags |= CS_FL_ERROR;
+h1s->endp->flags |= CS_EP_ERROR;
 TRACE_ERROR("message aborted, set error on CS", H1_EV_RX_DATA|H1_EV_H1S_ERR, h1c->conn, h1s);
 }
@@ -1942,7 +1942,7 @@ static size_t h1_process_demux(struct h1c *h1c, struct buffer *buf, size_t count
 err:
 htx_to_buf(htx, buf);
 if (h1s->cs)
-h1s->cs->flags |= CS_FL_EOI;
+h1s->endp->flags |= CS_EP_EOI;
 TRACE_DEVEL("leaving on error", H1_EV_RX_DATA|H1_EV_STRM_ERR, h1c->conn, h1s);
 return 0;
 }
@@ -2556,7 +2556,7 @@ static size_t h1_process_mux(struct h1c *h1c, struct buffer *buf, size_t count)
 h1c->flags |= H1C_F_ST_ERROR;
 TRACE_ERROR("txn done but data waiting to be sent, set error on h1c", H1_EV_H1C_ERR, h1c->conn, h1s);
 }
-h1s->cs->flags |= CS_FL_EOI;
+h1s->endp->flags |= CS_EP_EOI;
 }
 TRACE_LEAVE(H1_EV_TX_DATA, h1c->conn, h1s, chn_htx, (size_t[]){total});
@@ -3037,7 +3037,7 @@ static int h1_process(struct h1c * h1c)
 TRACE_STATE("read0 on connection", H1_EV_H1C_RECV, conn, h1s);
 }
 if ((h1c->flags & H1C_F_ST_ERROR) || ((conn->flags & CO_FL_ERROR) && !b_data(&h1c->ibuf)))
-h1s->cs->flags |= CS_FL_ERROR;
+h1s->endp->flags |= CS_EP_ERROR;
 TRACE_POINT(H1_EV_STRM_WAKE, h1c->conn, h1s);
 h1_alert(h1s);
 }
@@ -3091,9 +3091,9 @@ static int h1_process(struct h1c * h1c)
 BUG_ON(!h1s || h1c->flags & H1C_F_ST_READY);
 if (conn_xprt_read0_pending(conn) || (h1s->flags & H1S_F_REOS))
-h1s->cs->flags |= CS_FL_EOS;
+h1s->endp->flags |= CS_EP_EOS;
 if ((h1c->flags & H1C_F_ST_ERROR) || (conn->flags & CO_FL_ERROR))
-h1s->cs->flags |= CS_FL_ERROR;
+h1s->endp->flags |= CS_EP_ERROR;
 h1_alert(h1s);
 TRACE_DEVEL("waiting to release the CS before releasing the connection", H1_EV_H1C_WAKE);
 }
@@ -3245,7 +3245,7 @@ struct task *h1_timeout_task(struct task *t, void *context, unsigned int state)
 if (h1c->flags & H1C_F_ST_ATTACHED) {
 /* Don't release the H1 connection right now, we must destroy the
  * attached CS first. Here, the H1C must not be READY */
-h1c->h1s->cs->flags |= (CS_FL_EOS|CS_FL_ERROR);
+h1c->h1s->endp->flags |= (CS_EP_EOS|CS_EP_ERROR);
 h1_alert(h1c->h1s);
 h1_refresh_timeout(h1c);
 HA_SPIN_UNLOCK(OTHER_LOCK, &idle_conns[tid].idle_conns_lock);
@@ -3685,7 +3685,7 @@ static size_t h1_snd_buf(struct conn_stream *cs, struct buffer *buf, size_t coun
 }
 if (h1c->flags & H1C_F_ST_ERROR) {
-cs->flags |= CS_FL_ERROR;
+cs->endp->flags |= CS_EP_ERROR;
 TRACE_ERROR("H1C on error, leaving in error", H1_EV_STRM_SEND|H1_EV_H1C_ERR|H1_EV_H1S_ERR|H1_EV_STRM_ERR, h1c->conn, h1s);
 return 0;
 }
@@ -3717,7 +3717,7 @@ static size_t h1_snd_buf(struct conn_stream *cs, struct buffer *buf, size_t coun
 }
 if (h1c->flags & H1C_F_ST_ERROR) {
-cs->flags |= CS_FL_ERROR;
+cs->endp->flags |= CS_EP_ERROR;
 TRACE_ERROR("reporting error to the app-layer stream", H1_EV_STRM_SEND|H1_EV_H1S_ERR|H1_EV_STRM_ERR, h1c->conn, h1s);
 }
@@ -3762,7 +3762,7 @@ static int h1_rcv_pipe(struct conn_stream *cs, struct pipe *pipe, unsigned int c
 if (ret > h1m->curr_len) {
 h1s->flags |= H1S_F_PARSING_ERROR;
 h1c->flags |= H1C_F_ST_ERROR;
-cs->flags |= CS_FL_ERROR;
+cs->endp->flags |= CS_EP_ERROR;
 TRACE_ERROR("too much payload, more than announced",
 H1_EV_RX_DATA|H1_EV_STRM_ERR|H1_EV_H1C_ERR|H1_EV_H1S_ERR, h1c->conn, h1s);
 goto end;
@@ -3820,7 +3820,7 @@ static int h1_snd_pipe(struct conn_stream *cs, struct pipe *pipe)
 if (ret > h1m->curr_len) {
 h1s->flags |= H1S_F_PROCESSING_ERROR;
 h1c->flags |= H1C_F_ST_ERROR;
-cs->flags |= CS_FL_ERROR;
+cs->endp->flags |= CS_EP_ERROR;
 TRACE_ERROR("too much payload, more than announced",
 H1_EV_TX_DATA|H1_EV_STRM_ERR|H1_EV_H1C_ERR|H1_EV_H1S_ERR, h1c->conn, h1s);
 goto end;


@@ -1482,7 +1482,7 @@ static inline void h2s_close(struct h2s *h2s)
 if (!h2s->id)
 h2s->h2c->nb_reserved--;
 if (h2s->cs) {
-if (!(h2s->cs->flags & CS_FL_EOS) && !b_data(&h2s->rxbuf))
+if (!(h2s->endp->flags & CS_EP_EOS) && !b_data(&h2s->rxbuf))
 h2s_notify_recv(h2s);
 }
 HA_ATOMIC_DEC(&h2s->h2c->px_counters->open_streams);
@@ -2170,8 +2170,8 @@ static int h2_send_empty_data_es(struct h2s *h2s)
 return ret;
 }
-/* wake a specific stream and assign its conn_stream some CS_FL_* flags among
- * CS_FL_ERR_PENDING and CS_FL_ERROR if needed. The stream's state
+/* wake a specific stream and assign its conn_stream some CS_EP_* flags among
+ * CS_EP_ERR_PENDING and CS_EP_ERROR if needed. The stream's state
  * is automatically updated accordingly. If the stream is orphaned, it is
  * destroyed.
  */
@@ -2197,9 +2197,9 @@ static void h2s_wake_one_stream(struct h2s *h2s)
 if ((h2s->h2c->st0 >= H2_CS_ERROR || h2s->h2c->conn->flags & CO_FL_ERROR) ||
 (h2s->h2c->last_sid > 0 && (!h2s->id || h2s->id > h2s->h2c->last_sid))) {
-h2s->cs->flags |= CS_FL_ERR_PENDING;
-if (h2s->cs->flags & CS_FL_EOS)
-h2s->cs->flags |= CS_FL_ERROR;
+h2s->endp->flags |= CS_EP_ERR_PENDING;
+if (h2s->endp->flags & CS_EP_EOS)
+h2s->endp->flags |= CS_EP_ERROR;
 if (h2s->st < H2_SS_ERROR)
 h2s->st = H2_SS_ERROR;
@@ -2962,7 +2962,7 @@ static struct h2s *h2c_bck_handle_headers(struct h2c *h2c, struct h2s *h2s)
 if (h2c->dff & H2_F_HEADERS_END_STREAM)
 h2s->flags |= H2_SF_ES_RCVD;
-if (h2s->cs && h2s->cs->flags & CS_FL_ERROR && h2s->st < H2_SS_ERROR)
+if (h2s->cs && (h2s->endp->flags & CS_EP_ERROR) && h2s->st < H2_SS_ERROR)
 h2s->st = H2_SS_ERROR;
 else if (h2s->flags & H2_SF_ES_RCVD) {
 if (h2s->st == H2_SS_OPEN)
@@ -3463,10 +3463,10 @@ static void h2_process_demux(struct h2c *h2c)
 h2c_read0_pending(h2c) ||
 h2s->st == H2_SS_CLOSED ||
 (h2s->flags & H2_SF_ES_RCVD) ||
-(h2s->cs->flags & (CS_FL_ERROR|CS_FL_ERR_PENDING|CS_FL_EOS)))) {
+(h2s->endp->flags & (CS_EP_ERROR|CS_EP_ERR_PENDING|CS_EP_EOS)))) {
 /* we may have to signal the upper layers */
 TRACE_DEVEL("notifying stream before switching SID", H2_EV_RX_FRAME|H2_EV_STRM_WAKE, h2c->conn, h2s);
-h2s->cs->flags |= CS_FL_RCV_MORE;
+h2s->endp->flags |= CS_EP_RCV_MORE;
 h2s_notify_recv(h2s);
 }
 h2s = tmp_h2s;
@@ -3634,10 +3634,10 @@ static void h2_process_demux(struct h2c *h2c)
 h2c_read0_pending(h2c) ||
 h2s->st == H2_SS_CLOSED ||
 (h2s->flags & H2_SF_ES_RCVD) ||
-(h2s->cs->flags & (CS_FL_ERROR|CS_FL_ERR_PENDING|CS_FL_EOS)))) {
+(h2s->endp->flags & (CS_EP_ERROR|CS_EP_ERR_PENDING|CS_EP_EOS)))) {
 /* we may have to signal the upper layers */
 TRACE_DEVEL("notifying stream before switching SID", H2_EV_RX_FRAME|H2_EV_H2S_WAKE, h2c->conn, h2s);
-h2s->cs->flags |= CS_FL_RCV_MORE;
+h2s->endp->flags |= CS_EP_RCV_MORE;
 h2s_notify_recv(h2s);
 }
@@ -5010,7 +5010,7 @@ static int h2c_decode_headers(struct h2c *h2c, struct buffer *rxbuf, uint32_t *f
 /* Transfer the payload of a DATA frame to the HTTP/1 side. The HTTP/2 frame
  * parser state is automatically updated. Returns > 0 if it could completely
  * send the current frame, 0 if it couldn't complete, in which case
- * CS_FL_RCV_MORE must be checked to know if some data remain pending (an empty
+ * CS_EP_RCV_MORE must be checked to know if some data remain pending (an empty
  * DATA frame can return 0 as a valid result). Stream errors are reported in
  * h2s->errcode and connection errors in h2c->errcode. The caller must already
  * have checked the frame header and ensured that the frame was complete or the
@@ -6470,7 +6470,7 @@ static size_t h2_rcv_buf(struct conn_stream *cs, struct buffer *buf, size_t coun
 if (h2s_htx->flags & HTX_FL_PARSING_ERROR) {
 buf_htx->flags |= HTX_FL_PARSING_ERROR;
 if (htx_is_empty(buf_htx))
-cs->flags |= CS_FL_EOI;
+cs->endp->flags |= CS_EP_EOI;
 }
 else if (htx_is_empty(h2s_htx))
 buf_htx->flags |= (h2s_htx->flags & HTX_FL_EOM);
@@ -6482,19 +6482,19 @@ static size_t h2_rcv_buf(struct conn_stream *cs, struct buffer *buf, size_t coun
 end:
 if (b_data(&h2s->rxbuf))
-cs->flags |= (CS_FL_RCV_MORE | CS_FL_WANT_ROOM);
+cs->endp->flags |= (CS_EP_RCV_MORE | CS_EP_WANT_ROOM);
 else {
-cs->flags &= ~(CS_FL_RCV_MORE | CS_FL_WANT_ROOM);
+cs->endp->flags &= ~(CS_EP_RCV_MORE | CS_EP_WANT_ROOM);
 if (h2s->flags & H2_SF_ES_RCVD) {
-cs->flags |= CS_FL_EOI;
+cs->endp->flags |= CS_EP_EOI;
 /* Add EOS flag for tunnel */
 if (h2s->flags & H2_SF_BODY_TUNNEL)
-cs->flags |= CS_FL_EOS;
+cs->endp->flags |= CS_EP_EOS;
 }
 if (h2c_read0_pending(h2c) || h2s->st == H2_SS_CLOSED)
-cs->flags |= CS_FL_EOS;
-if (cs->flags & CS_FL_ERR_PENDING)
-cs->flags |= CS_FL_ERROR;
+cs->endp->flags |= CS_EP_EOS;
+if (cs->endp->flags & CS_EP_ERR_PENDING)
+cs->endp->flags |= CS_EP_ERROR;
 if (b_size(&h2s->rxbuf)) {
 b_free(&h2s->rxbuf);
 offer_buffers(NULL, 1);
@@ -6546,7 +6546,7 @@ static size_t h2_snd_buf(struct conn_stream *cs, struct buffer *buf, size_t coun
 }
 if (h2s->h2c->st0 >= H2_CS_ERROR) {
-cs->flags |= CS_FL_ERROR;
+cs->endp->flags |= CS_EP_ERROR;
 TRACE_DEVEL("connection is in error, leaving in error", H2_EV_H2S_SEND|H2_EV_H2S_BLK|H2_EV_H2S_ERR|H2_EV_STRM_ERR, h2s->h2c->conn, h2s);
 return 0;
 }
@@ -6560,7 +6560,7 @@ static size_t h2_snd_buf(struct conn_stream *cs, struct buffer *buf, size_t coun
 int32_t id = h2c_get_next_sid(h2s->h2c);
 if (id < 0) {
-cs->flags |= CS_FL_ERROR;
+cs->endp->flags |= CS_EP_ERROR;
 TRACE_DEVEL("couldn't get a stream ID, leaving in error", H2_EV_H2S_SEND|H2_EV_H2S_BLK|H2_EV_H2S_ERR|H2_EV_STRM_ERR, h2s->h2c->conn, h2s);
 return 0;
 }
@@ -6672,10 +6672,10 @@ static size_t h2_snd_buf(struct conn_stream *cs, struct buffer *buf, size_t coun
 !b_data(&h2s->h2c->dbuf) &&
 (h2s->flags & (H2_SF_BLK_SFCTL | H2_SF_BLK_MFCTL))) {
 TRACE_DEVEL("fctl with shutr, reporting error to app-layer", H2_EV_H2S_SEND|H2_EV_STRM_SEND|H2_EV_STRM_ERR, h2s->h2c->conn, h2s);
-if (cs->flags & CS_FL_EOS)
-cs->flags |= CS_FL_ERROR;
+if (cs->endp->flags & CS_EP_EOS)
+cs->endp->flags |= CS_EP_ERROR;
 else
-cs->flags |= CS_FL_ERR_PENDING;
+cs->endp->flags |= CS_EP_ERR_PENDING;
 }
 if (total > 0 && !(h2s->flags & H2_SF_BLK_SFCTL) &&


@@ -317,7 +317,7 @@ static int mux_pt_init(struct connection *conn, struct proxy *prx, struct sessio
 }
 conn->ctx = ctx;
 ctx->cs = cs;
-cs->flags |= CS_FL_RCV_MORE;
+cs->endp->flags |= CS_EP_RCV_MORE;
 if (global.tune.options & GTUNE_USE_SPLICE)
 cs->endp->flags |= CS_EP_MAY_SPLICE;
@@ -385,7 +385,7 @@ static int mux_pt_attach(struct connection *conn, struct conn_stream *cs, struct
 conn->xprt->unsubscribe(ctx->conn, conn->xprt_ctx, SUB_RETRY_RECV, &ctx->wait_event);
 cs_attach_mux(cs, ctx, conn);
 ctx->cs = cs;
-cs->flags |= CS_FL_RCV_MORE;
+cs->endp->flags |= CS_EP_RCV_MORE;
 TRACE_LEAVE(PT_EV_STRM_NEW, conn, cs);
 return 0;
@@ -464,7 +464,7 @@ static void mux_pt_shutr(struct conn_stream *cs, enum cs_shr_mode mode)
 if (cs->endp->flags & CS_EP_SHR)
 return;
-cs->flags &= ~(CS_FL_RCV_MORE | CS_FL_WANT_ROOM);
+cs->endp->flags &= ~(CS_EP_RCV_MORE | CS_EP_WANT_ROOM);
 if (conn_xprt_ready(conn) && conn->xprt->shutr)
 conn->xprt->shutr(conn, conn->xprt_ctx,
 (mode == CS_SHR_DRAIN));
@@ -516,19 +516,19 @@ static size_t mux_pt_rcv_buf(struct conn_stream *cs, struct buffer *buf, size_t
 TRACE_ENTER(PT_EV_RX_DATA, conn, cs, buf, (size_t[]){count});
 if (!count) {
-cs->flags |= (CS_FL_RCV_MORE | CS_FL_WANT_ROOM);
+cs->endp->flags |= (CS_EP_RCV_MORE | CS_EP_WANT_ROOM);
 goto end;
 }
 b_realign_if_empty(buf);
 ret = conn->xprt->rcv_buf(conn, conn->xprt_ctx, buf, count, flags);
 if (conn_xprt_read0_pending(conn)) {
-cs->flags &= ~(CS_FL_RCV_MORE | CS_FL_WANT_ROOM);
-cs->flags |= CS_FL_EOS;
+cs->endp->flags &= ~(CS_EP_RCV_MORE | CS_EP_WANT_ROOM);
+cs->endp->flags |= CS_EP_EOS;
 TRACE_DEVEL("read0 on connection", PT_EV_RX_DATA, conn, cs);
 }
 if (conn->flags & CO_FL_ERROR) {
-cs->flags &= ~(CS_FL_RCV_MORE | CS_FL_WANT_ROOM);
-cs->flags |= CS_FL_ERROR;
+cs->endp->flags &= ~(CS_EP_RCV_MORE | CS_EP_WANT_ROOM);
+cs->endp->flags |= CS_EP_ERROR;
 TRACE_DEVEL("error on connection", PT_EV_RX_DATA|PT_EV_CONN_ERR, conn, cs);
 }
 end:
@@ -550,7 +550,7 @@ static size_t mux_pt_snd_buf(struct conn_stream *cs, struct buffer *buf, size_t
 b_del(buf, ret);
 if (conn->flags & CO_FL_ERROR) {
-cs->flags |= CS_FL_ERROR;
+cs->endp->flags |= CS_EP_ERROR;
 TRACE_DEVEL("error on connection", PT_EV_TX_DATA|PT_EV_CONN_ERR, conn, cs);
 }
@@ -594,11 +594,11 @@ static int mux_pt_rcv_pipe(struct conn_stream *cs, struct pipe *pipe, unsigned i
 ret = conn->xprt->rcv_pipe(conn, conn->xprt_ctx, pipe, count);
 if (conn_xprt_read0_pending(conn)) {
-cs->flags |= CS_FL_EOS;
+cs->endp->flags |= CS_EP_EOS;
 TRACE_DEVEL("read0 on connection", PT_EV_RX_DATA, conn, cs);
 }
 if (conn->flags & CO_FL_ERROR) {
-cs->flags |= CS_FL_ERROR;
+cs->endp->flags |= CS_EP_ERROR;
 TRACE_DEVEL("error on connection", PT_EV_RX_DATA|PT_EV_CONN_ERR, conn, cs);
 }
@@ -616,7 +616,7 @@ static int mux_pt_snd_pipe(struct conn_stream *cs, struct pipe *pipe)
 ret = conn->xprt->snd_pipe(conn, conn->xprt_ctx, pipe);
 if (conn->flags & CO_FL_ERROR) {
-cs->flags |= CS_FL_ERROR;
+cs->endp->flags |= CS_EP_ERROR;
 TRACE_DEVEL("error on connection", PT_EV_TX_DATA|PT_EV_CONN_ERR, conn, cs);
 }


@@ -1144,15 +1144,15 @@ static size_t qc_rcv_buf(struct conn_stream *cs, struct buffer *buf,
 end:
 if (b_data(&qcs->rx.app_buf)) {
-cs->flags |= (CS_FL_RCV_MORE | CS_FL_WANT_ROOM);
+cs->endp->flags |= (CS_EP_RCV_MORE | CS_EP_WANT_ROOM);
 }
 else {
-cs->flags &= ~(CS_FL_RCV_MORE | CS_FL_WANT_ROOM);
-if (cs->flags & CS_FL_ERR_PENDING)
-cs->flags |= CS_FL_ERROR;
+cs->endp->flags &= ~(CS_EP_RCV_MORE | CS_EP_WANT_ROOM);
+if (cs->endp->flags & CS_EP_ERR_PENDING)
+cs->endp->flags |= CS_EP_ERROR;
 if (fin)
-cs->flags |= (CS_FL_EOI|CS_FL_EOS);
+cs->endp->flags |= (CS_EP_EOI|CS_EP_EOS);
 if (b_size(&qcs->rx.app_buf)) {
 b_free(&qcs->rx.app_buf);
@@ -1213,7 +1213,7 @@ static int qc_unsubscribe(struct conn_stream *cs, int event_type, struct wait_ev
 }
 /* Loop through all qcs from <qcc>. If CO_FL_ERROR is set on the connection,
- * report CS_FL_ERR_PENDING|CS_FL_ERROR on the attached conn-streams and wake
+ * report CS_EP_ERR_PENDING|CS_EP_ERROR on the attached conn-streams and wake
  * them.
  */
 static int qc_wake_some_streams(struct qcc *qcc)
@@ -1231,9 +1231,9 @@ static int qc_wake_some_streams(struct qcc *qcc)
 continue;
 if (qcc->conn->flags & CO_FL_ERROR) {
-qcs->cs->flags |= CS_FL_ERR_PENDING;
-if (qcs->cs->flags & CS_FL_EOS)
-qcs->cs->flags |= CS_FL_ERROR;
+qcs->endp->flags |= CS_EP_ERR_PENDING;
+if (qcs->endp->flags & CS_EP_EOS)
+qcs->endp->flags |= CS_EP_ERROR;
 if (qcs->subs) {
 qcs_notify_recv(qcs);


@@ -4330,7 +4330,7 @@ static void http_stats_io_handler(struct appctx *appctx)
 channel_add_input(res, 1);
 }
 res_htx->flags |= HTX_FL_EOM;
-si->cs->flags |= CS_FL_EOI;
+si->cs->endp->flags |= CS_EP_EOI;
 res->flags |= CF_EOI;
 appctx->st0 = STAT_HTTP_END;
 }


@@ -636,7 +636,7 @@ static int si_cs_process(struct conn_stream *cs)
  */
 if (si->state >= SI_ST_CON) {
-if ((cs->flags & CS_FL_ERROR) || si_is_conn_error(si))
+if ((cs->endp->flags & CS_EP_ERROR) || si_is_conn_error(si))
 si->flags |= SI_FL_ERR;
 }
@@ -666,7 +666,7 @@ static int si_cs_process(struct conn_stream *cs)
  * wake callback. Otherwise si_cs_recv()/si_cs_send() already take
  * care of it.
  */
-if (cs->flags & CS_FL_EOS && !(ic->flags & CF_SHUTR)) {
+if (cs->endp->flags & CS_EP_EOS && !(ic->flags & CF_SHUTR)) {
 /* we received a shutdown */
 ic->flags |= CF_READ_NULL;
 if (ic->flags & CF_AUTO_CLOSE)
@@ -681,7 +681,7 @@ static int si_cs_process(struct conn_stream *cs)
  * wake callback. Otherwise si_cs_recv()/si_cs_send() already take
  * care of it.
  */
-if ((cs->flags & CS_FL_EOI) && !(ic->flags & CF_EOI))
+if ((cs->endp->flags & CS_EP_EOI) && !(ic->flags & CF_EOI))
 ic->flags |= (CF_EOI|CF_READ_PARTIAL);
 /* Second step : update the stream-int and channels, try to forward any
@@ -707,7 +707,7 @@ static int si_cs_send(struct conn_stream *cs)
 int ret;
 int did_send = 0;
-if (cs->flags & (CS_FL_ERROR|CS_FL_ERR_PENDING) || si_is_conn_error(si)) {
+if (cs->endp->flags & (CS_EP_ERROR|CS_EP_ERR_PENDING) || si_is_conn_error(si)) {
 /* We're probably there because the tasklet was woken up,
  * but process_stream() ran before, detected there were an
  * error and put the si back to SI_ST_TAR. There's still
@@ -830,7 +830,7 @@ static int si_cs_send(struct conn_stream *cs)
 si_rx_room_rdy(si_opposite(si));
 }
-if (cs->flags & (CS_FL_ERROR|CS_FL_ERR_PENDING)) {
+if (cs->endp->flags & (CS_EP_ERROR|CS_EP_ERR_PENDING)) {
 si->flags |= SI_FL_ERR;
 return 1;
 }
@@ -1213,7 +1213,7 @@ static void stream_int_chk_snd_conn(struct stream_interface *si)
 if (!(si->wait_event.events & SUB_RETRY_SEND) && !channel_is_empty(si_oc(si)))
 si_cs_send(cs);
-if (cs->flags & (CS_FL_ERROR|CS_FL_ERR_PENDING) || si_is_conn_error(si)) {
+if (cs->endp->flags & (CS_EP_ERROR|CS_EP_ERR_PENDING) || si_is_conn_error(si)) {
 /* Write error on the file descriptor */
 if (si->state >= SI_ST_CON)
 si->flags |= SI_FL_ERR;
@@ -1316,7 +1316,7 @@ static int si_cs_recv(struct conn_stream *cs)
 return 0;
 /* stop here if we reached the end of data */
-if (cs->flags & CS_FL_EOS)
+if (cs->endp->flags & CS_EP_EOS)
 goto end_recv;
 /* stop immediately on errors. Note that we DON'T want to stop on
@@ -1325,15 +1325,15 @@ static int si_cs_recv(struct conn_stream *cs)
  * happens when we send too large a request to a backend server
  * which rejects it before reading it all.
  */
-if (!(cs->flags & CS_FL_RCV_MORE)) {
+if (!(cs->endp->flags & CS_EP_RCV_MORE)) {
 if (!conn_xprt_ready(conn))
 return 0;
-if (cs->flags & CS_FL_ERROR)
+if (cs->endp->flags & CS_EP_ERROR)
 goto end_recv;
 }
 /* prepare to detect if the mux needs more room */
-cs->flags &= ~CS_FL_WANT_ROOM;
+cs->endp->flags &= ~CS_EP_WANT_ROOM;
 if ((ic->flags & (CF_STREAMER | CF_STREAMER_FAST)) && !co_data(ic) &&
 global.tune.idle_timer &&
@@ -1385,7 +1385,7 @@ static int si_cs_recv(struct conn_stream *cs)
 ic->flags |= CF_READ_PARTIAL;
 }
-if (cs->flags & (CS_FL_EOS|CS_FL_ERROR))
+if (cs->endp->flags & (CS_EP_EOS|CS_EP_ERROR))
 goto end_recv;
 if (conn->flags & CO_FL_WAIT_ROOM) {
@@ -1440,9 +1440,9 @@ static int si_cs_recv(struct conn_stream *cs)
  * that if such an event is not handled above in splice, it will be handled here by
  * recv().
 */
-while ((cs->flags & CS_FL_RCV_MORE) ||
+while ((cs->endp->flags & CS_EP_RCV_MORE) ||
 (!(conn->flags & CO_FL_HANDSHAKE) &&
-(!(cs->flags & (CS_FL_ERROR|CS_FL_EOS))) && !(ic->flags & CF_SHUTR))) {
+(!(cs->endp->flags & (CS_EP_ERROR|CS_EP_EOS))) && !(ic->flags & CF_SHUTR))) {
 int cur_flags = flags;
 /* Compute transient CO_RFL_* flags */
@@ -1451,13 +1451,13 @@ static int si_cs_recv(struct conn_stream *cs)
 }
 /* <max> may be null. This is the mux responsibility to set
- * CS_FL_RCV_MORE on the CS if more space is needed.
+ * CS_EP_RCV_MORE on the CS if more space is needed.
  */
 max = channel_recv_max(ic);
 ret = conn->mux->rcv_buf(cs, &ic->buf, max, cur_flags);
-if (cs->flags & CS_FL_WANT_ROOM) {
-/* CS_FL_WANT_ROOM must not be reported if the channel's
+if (cs->endp->flags & CS_EP_WANT_ROOM) {
+/* CS_EP_WANT_ROOM must not be reported if the channel's
  * buffer is empty.
  */
 BUG_ON(c_empty(ic));
@@ -1501,7 +1501,7 @@ static int si_cs_recv(struct conn_stream *cs)
 * the channel's policies.This way, we are still able to receive
 * shutdowns.
 */
-if (cs->flags & CS_FL_EOI)
+if (cs->endp->flags & CS_EP_EOI)
 break;
 if ((ic->flags & CF_READ_DONTWAIT) || --read_poll <= 0) {
@@ -1587,16 +1587,16 @@ static int si_cs_recv(struct conn_stream *cs)
 /* Report EOI on the channel if it was reached from the mux point of
  * view. */
-if ((cs->flags & CS_FL_EOI) && !(ic->flags & CF_EOI)) {
+if ((cs->endp->flags & CS_EP_EOI) && !(ic->flags & CF_EOI)) {
 ic->flags |= (CF_EOI|CF_READ_PARTIAL);
 ret = 1;
 }
-if (cs->flags & CS_FL_ERROR) {
+if (cs->endp->flags & CS_EP_ERROR) {
 si->flags |= SI_FL_ERR;
 ret = 1;
 }
-else if (cs->flags & CS_FL_EOS) {
+else if (cs->endp->flags & CS_EP_EOS) {
 /* we received a shutdown */
 ic->flags |= CF_READ_NULL;
 if (ic->flags & CF_AUTO_CLOSE)


@@ -1483,7 +1483,7 @@ enum tcpcheck_eval_ret tcpcheck_eval_send(struct check *check, struct tcpcheck_r
 TRACE_DATA("send data", CHK_EV_TCPCHK_SND|CHK_EV_TX_DATA, check);
 if (conn->mux->snd_buf(cs, &check->bo,
 (IS_HTX_CONN(conn) ? (htxbuf(&check->bo))->data: b_data(&check->bo)), 0) <= 0) {
-if ((conn->flags & CO_FL_ERROR) || (cs->flags & CS_FL_ERROR)) {
+if ((conn->flags & CO_FL_ERROR) || (cs->endp->flags & CS_EP_ERROR)) {
 ret = TCPCHK_EVAL_STOP;
 TRACE_DEVEL("connection error during send", CHK_EV_TCPCHK_SND|CHK_EV_TX_DATA|CHK_EV_TX_ERR, check);
 goto out;
@@ -1547,7 +1547,7 @@ enum tcpcheck_eval_ret tcpcheck_eval_recv(struct check *check, struct tcpcheck_r
 goto wait_more_data;
 }
-if (cs->flags & CS_FL_EOS)
+if (cs->endp->flags & CS_EP_EOS)
 goto end_recv;
 if (check->state & CHK_ST_IN_ALLOC) {
@@ -1564,15 +1564,15 @@ enum tcpcheck_eval_ret tcpcheck_eval_recv(struct check *check, struct tcpcheck_r
 /* errors on the connection and the conn-stream were already checked */
 /* prepare to detect if the mux needs more room */
-cs->flags &= ~CS_FL_WANT_ROOM;
+cs->endp->flags &= ~CS_EP_WANT_ROOM;
-while ((cs->flags & CS_FL_RCV_MORE) ||
-(!(conn->flags & CO_FL_ERROR) && !(cs->flags & (CS_FL_ERROR|CS_FL_EOS)))) {
+while ((cs->endp->flags & CS_EP_RCV_MORE) ||
+(!(conn->flags & CO_FL_ERROR) && !(cs->endp->flags & (CS_EP_ERROR|CS_EP_EOS)))) {
 max = (IS_HTX_CS(cs) ? htx_free_space(htxbuf(&check->bi)) : b_room(&check->bi));
 read = conn->mux->rcv_buf(cs, &check->bi, max, 0);
 cur_read += read;
 if (!read ||
-(cs->flags & CS_FL_WANT_ROOM) ||
+(cs->endp->flags & CS_EP_WANT_ROOM) ||
 (--read_poll <= 0) ||
 (read < max && read >= global.tune.recv_enough))
 break;
@@ -1580,7 +1580,7 @@ enum tcpcheck_eval_ret tcpcheck_eval_recv(struct check *check, struct tcpcheck_r
 end_recv:
 is_empty = (IS_HTX_CS(cs) ? htx_is_empty(htxbuf(&check->bi)) : !b_data(&check->bi));
-if (is_empty && ((conn->flags & CO_FL_ERROR) || (cs->flags & CS_FL_ERROR))) {
+if (is_empty && ((conn->flags & CO_FL_ERROR) || (cs->endp->flags & CS_EP_ERROR))) {
 /* Report network errors only if we got no other data. Otherwise
  * we'll let the upper layers decide whether the response is OK
  * or not. It is very common that an RST sent by the server is
@@ -1590,11 +1590,11 @@ enum tcpcheck_eval_ret tcpcheck_eval_recv(struct check *check, struct tcpcheck_r
 goto stop;
 }
 if (!cur_read) {
-if (cs->flags & CS_FL_EOI) {
+if (cs->endp->flags & CS_EP_EOI) {
 /* If EOI is set, it means there is a response or an error */
 goto out;
 }
-if (!(cs->flags & (CS_FL_WANT_ROOM|CS_FL_ERROR|CS_FL_EOS))) {
+if (!(cs->endp->flags & (CS_EP_WANT_ROOM|CS_EP_ERROR|CS_EP_EOS))) {
 conn->mux->subscribe(cs, SUB_RETRY_RECV, &check->wait_list);
 TRACE_DEVEL("waiting for response", CHK_EV_RX_DATA, check);
 goto wait_more_data;
@@ -2140,7 +2140,7 @@ int tcpcheck_main(struct check *check)
  */
 /* 1- check for connection error, if any */
-if ((conn && conn->flags & CO_FL_ERROR) || (cs->flags & CS_FL_ERROR))
+if ((conn && conn->flags & CO_FL_ERROR) || (cs->endp->flags & CS_EP_ERROR))
 goto out_end_tcpcheck;
 /* 2- check if a rule must be resume. It happens if check->current_step
@@ -2222,7 +2222,7 @@ int tcpcheck_main(struct check *check)
 goto out_end_tcpcheck;
 else if (eval_ret == TCPCHK_EVAL_WAIT)
 goto out;
-last_read = ((conn->flags & CO_FL_ERROR) || (cs->flags & (CS_FL_ERROR|CS_FL_EOS)));
+last_read = ((conn->flags & CO_FL_ERROR) || (cs->endp->flags & (CS_EP_ERROR|CS_EP_EOS)));
 must_read = 0;
 }
@@ -2303,7 +2303,7 @@ int tcpcheck_main(struct check *check)
 TRACE_PROTO("tcp-check passed", CHK_EV_TCPCHK_EVAL, check);
 out_end_tcpcheck:
-if ((conn && conn->flags & CO_FL_ERROR) || (cs->flags & CS_FL_ERROR)) {
+if ((conn && conn->flags & CO_FL_ERROR) || (cs->endp->flags & CS_EP_ERROR)) {
 TRACE_ERROR("report connection error", CHK_EV_TCPCHK_EVAL|CHK_EV_TCPCHK_ERR, check);
 chk_report_conn_err(check, errno, 0);
 }