MEDIUM: mux-h1: Properly handle state transitions of chunked outgoing messages

On the sending path, there are 3 states for chunked payload in H1:

  * H1_MSG_CHUNK_SIZE: the chunk size must be emitted
  * H1_MSG_CHUNK_CRLF: The end of the chunk must be emitted
  * H1_MSG_DATA: Chunked data must be emitted

However, some shortcuts were used on the sending path to avoid some
transitions. Especially, outgoing messages were never switched in
H1_MSG_CHUNK_SIZE state.

However, it will be necessary to properly handle all transitions on the payload
to implement mux-to-mux forwarding, to be sure to always know when the chunk
size or the end of the chunk must be emitted.
This commit is contained in:
Christopher Faulet 2023-09-26 18:00:49 +02:00
parent 117f9cc017
commit d57a66d63a

View File

@ -2485,7 +2485,7 @@ static size_t h1_make_eoh(struct h1s *h1s, struct h1m *h1m, struct htx *htx, siz
else { else {
if (!chunk_memcat(&outbuf, "\r\n", 2)) if (!chunk_memcat(&outbuf, "\r\n", 2))
goto full; goto full;
h1m->state = H1_MSG_DATA; h1m->state = ((h1m->flags & H1_MF_CHNK) ? H1_MSG_CHUNK_SIZE: H1_MSG_DATA);
} }
TRACE_PROTO((!(h1m->flags & H1_MF_RESP) ? "H1 request headers xferred" : "H1 response headers xferred"), TRACE_PROTO((!(h1m->flags & H1_MF_RESP) ? "H1 request headers xferred" : "H1 response headers xferred"),
@ -2588,21 +2588,26 @@ static size_t h1_make_data(struct h1s *h1s, struct h1m *h1m, struct buffer *buf,
*/ */
/* If is a new chunk, prepend the chunk size */ /* If is a new chunk, prepend the chunk size */
if (!h1m->curr_len) { if (h1m->state == H1_MSG_CHUNK_CRLF || h1m->state == H1_MSG_CHUNK_SIZE) {
if (h1m->curr_len) {
TRACE_ERROR("too much payload, more than announced",
H1_EV_TX_DATA|H1_EV_STRM_ERR|H1_EV_H1C_ERR|H1_EV_H1S_ERR, h1c->conn, h1s);
goto error;
}
h1m->curr_len = count + (htx->extra != HTX_UNKOWN_PAYLOAD_LENGTH ? htx->extra : 0); h1m->curr_len = count + (htx->extra != HTX_UNKOWN_PAYLOAD_LENGTH ? htx->extra : 0);
h1_prepend_chunk_size(&h1c->obuf, h1m->curr_len); h1_prepend_chunk_size(&h1c->obuf, h1m->curr_len);
} if (h1m->state == H1_MSG_CHUNK_CRLF)
h1m->curr_len -= count; h1_prepend_chunk_crlf(&h1c->obuf);
/* CRLF of the previous chunk is missing. Prepend it */
if (h1m->state == H1_MSG_CHUNK_CRLF) {
h1_prepend_chunk_crlf(&h1c->obuf);
h1m->state = H1_MSG_DATA; h1m->state = H1_MSG_DATA;
} }
h1m->curr_len -= count;
/* It is the end of the chunk, append the CRLF */ /* It is the end of the chunk, append the CRLF */
if (!h1m->curr_len) if (!h1m->curr_len) {
h1_append_chunk_crlf(&h1c->obuf); h1_append_chunk_crlf(&h1c->obuf);
h1m->state = H1_MSG_CHUNK_SIZE;
}
/* It is the end of the message, add the last chunk with the extra CRLF */ /* It is the end of the message, add the last chunk with the extra CRLF */
if (eom) { if (eom) {
@ -2624,10 +2629,17 @@ static size_t h1_make_data(struct h1s *h1s, struct h1m *h1m, struct buffer *buf,
b_slow_realign(&h1c->obuf, trash.area, b_data(&h1c->obuf)); b_slow_realign(&h1c->obuf, trash.area, b_data(&h1c->obuf));
outbuf = b_make(b_tail(&h1c->obuf), b_contig_space(&h1c->obuf), 0, 0); outbuf = b_make(b_tail(&h1c->obuf), b_contig_space(&h1c->obuf), 0, 0);
if (h1m->state == H1_MSG_CHUNK_CRLF) {
/* Handle now case of CRLF at the end of a chun. */
if ((h1m->flags & H1_MF_CHNK) && h1m->state == H1_MSG_CHUNK_CRLF) {
if (h1m->curr_len) {
TRACE_ERROR("too much payload, more than announced",
H1_EV_TX_DATA|H1_EV_STRM_ERR|H1_EV_H1C_ERR|H1_EV_H1S_ERR, h1c->conn, h1s);
goto error;
}
if (!chunk_memcat(&outbuf, "\r\n", 2)) if (!chunk_memcat(&outbuf, "\r\n", 2))
goto full; goto full;
h1m->state = H1_MSG_DATA; h1m->state = H1_MSG_CHUNK_SIZE;
} }
while (blk && count) { while (blk && count) {
@ -2661,12 +2673,13 @@ static size_t h1_make_data(struct h1s *h1s, struct h1m *h1m, struct buffer *buf,
chklen = 0; chklen = 0;
if (h1m->flags & H1_MF_CHNK) { if (h1m->flags & H1_MF_CHNK) {
/* If is a new chunk, prepend the chunk size */ /* If is a new chunk, prepend the chunk size */
if (!h1m->curr_len) { if (h1m->state == H1_MSG_CHUNK_SIZE) {
h1m->curr_len = (htx->extra && htx->extra != HTX_UNKOWN_PAYLOAD_LENGTH ? htx->data + htx->extra : vlen); h1m->curr_len = (htx->extra && htx->extra != HTX_UNKOWN_PAYLOAD_LENGTH ? htx->data + htx->extra : vlen);
if (!h1_append_chunk_size(&outbuf, h1m->curr_len)) { if (!h1_append_chunk_size(&outbuf, h1m->curr_len)) {
h1m->curr_len = 0; h1m->curr_len = 0;
goto full; goto full;
} }
h1m->state = H1_MSG_DATA;
} }
if (vlen > h1m->curr_len) { if (vlen > h1m->curr_len) {
@ -2696,8 +2709,11 @@ static size_t h1_make_data(struct h1s *h1s, struct h1m *h1m, struct buffer *buf,
if (h1m->flags & H1_MF_CHNK) { if (h1m->flags & H1_MF_CHNK) {
h1m->curr_len -= vlen; h1m->curr_len -= vlen;
/* Space already reserved, so it must succeed */ /* Space already reserved, so it must succeed */
if (!h1m->curr_len && !chunk_memcat(&outbuf, "\r\n", 2)) if (!h1m->curr_len) {
goto error; if (!chunk_memcat(&outbuf, "\r\n", 2))
goto error;
h1m->state = H1_MSG_CHUNK_SIZE;
}
if (last_data && !chunk_memcat(&outbuf, "0\r\n\r\n", 5)) if (last_data && !chunk_memcat(&outbuf, "0\r\n\r\n", 5))
goto error; goto error;
} }
@ -3014,6 +3030,7 @@ static size_t h1_process_mux(struct h1c *h1c, struct buffer *buf, size_t count)
ret = h1_make_eoh(h1s, h1m, htx, count); ret = h1_make_eoh(h1s, h1m, htx, count);
break; break;
case H1_MSG_CHUNK_SIZE:
case H1_MSG_CHUNK_CRLF: case H1_MSG_CHUNK_CRLF:
case H1_MSG_DATA: case H1_MSG_DATA:
ret = h1_make_data(h1s, h1m, buf, count); ret = h1_make_data(h1s, h1m, buf, count);