MINOR: compression: Introduce minimum size

This introduces the "minsize-req" and "minsize-res" options, which set the
minimum payload size required for compression to be applied.
This helps save CPU on both the server and client sides when the payload is
too small to be worth compressing.
Author:       Vincent Dechenaux
Date:         2025-02-21 22:37:57 +01:00
Committed by: Willy Tarreau
Commit:       9011b3621b (parent e7510d6230)
5 changed files with 229 additions and 2 deletions


@ -6782,6 +6782,8 @@ compression type <mime type> ...
* The response contains a "Content-Encoding" header, indicating that the * The response contains a "Content-Encoding" header, indicating that the
response is already compressed (see compression offload) response is already compressed (see compression offload)
* The response contains an invalid "ETag" header or multiple ETag headers * The response contains an invalid "ETag" header or multiple ETag headers
* The payload size is smaller than the minimum size
(see compression minsize-res)

  Note: The compression does not emit the Warning header.
@ -6789,7 +6791,22 @@ compression type <mime type> ...
      compression algo gzip
      compression type text/html text/plain

  See also : "compression offload", "compression direction",
             "compression minsize-req" and "compression minsize-res"

compression minsize-req <size>
compression minsize-res <size>
  Sets the minimum payload size in bytes for compression to be applied.

  May be used in the following contexts: http

  May be used in sections :   defaults | frontend | listen | backend
                                 yes   |    yes   |   yes  |   yes

  Payloads smaller than this size will not be compressed, avoiding unnecessary
  CPU overhead for data that would not significantly benefit from compression.
  "minsize-req" applies to requests and "minsize-res" to responses.

  The default value is 0.
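
  For illustration only, a backend enabling compression in both directions
  with a minimum size per direction could look like the following (the backend
  name, server address and size values are arbitrary examples taken from or
  modeled on the regression test below, not defaults) :

        backend be-app
            compression direction both
            compression algo-res gzip
            compression type-res text/plain
            compression minsize-res 1k
            compression algo-req gzip
            compression type-req text/plain
            compression minsize-req 800
            server app1 192.0.2.10:8080

  With such settings, a 1024-byte response (exactly "1k") is still compressed:
  only payloads strictly smaller than the configured minimum are skipped, as
  exercised by the regression test included in this commit.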

compression offload
  Makes HAProxy work as a compression offloader only.


@ -50,6 +50,8 @@ struct comp {
        struct comp_algo *algo_req;   /* Algo to use for request */
        struct comp_type *types_req;  /* Types to be compressed for requests */
        struct comp_type *types_res;  /* Types to be compressed for responses */
        unsigned int minsize_res;     /* Min response body size to be compressed */
        unsigned int minsize_req;     /* Min request body size to be compressed */
        unsigned int flags;
};


@ -0,0 +1,135 @@
varnishtest "Compression ignores small payloads"
#REQUIRE_OPTION=ZLIB|SLZ
feature ignore_unknown_macro
server s1 {
    rxreq
    expect req.url == "/response-lower"
    expect req.http.accept-encoding == "gzip"
    txresp \
      -hdr "Content-Type: text/plain" \
      -hdr "ETag: \"123\"" \
      -bodylen 50

    rxreq
    expect req.url == "/response-equal"
    expect req.http.accept-encoding == "gzip"
    txresp \
      -hdr "Content-Type: text/plain" \
      -hdr "ETag: \"123\"" \
      -bodylen 1024

    rxreq
    expect req.url == "/response-greater"
    expect req.http.accept-encoding == "gzip"
    txresp \
      -hdr "Content-Type: text/plain" \
      -hdr "ETag: \"123\"" \
      -bodylen 2000

    rxreq
    expect req.url == "/request-lower"
    expect req.http.content-encoding == "<undef>"
    expect req.method == "POST"
    expect req.bodylen == 50
    txresp

    rxreq
    expect req.url == "/request-equal"
    expect req.http.content-encoding == "gzip"
    expect req.method == "POST"
    gunzip
    expect req.bodylen == 800
    txresp

    rxreq
    expect req.url == "/request-greater"
    expect req.http.content-encoding == "gzip"
    expect req.method == "POST"
    gunzip
    expect req.bodylen == 2000
    txresp
} -start
haproxy h1 -conf {
    global
        # WT: limit false-positives causing "HTTP header incomplete" due to
        # idle server connections being randomly used and randomly expiring
        # under us.
        tune.idle-pool.shared off

    defaults
        mode http
        timeout connect "${HAPROXY_TEST_TIMEOUT-5s}"
        timeout client  "${HAPROXY_TEST_TIMEOUT-5s}"
        timeout server  "${HAPROXY_TEST_TIMEOUT-5s}"

    frontend fe-gzip
        bind "fd@${fe_gzip}"
        default_backend be-gzip

    backend be-gzip
        compression direction both
        compression algo-res gzip
        compression type-res text/plain
        compression minsize-res 1k
        compression algo-req gzip
        compression type-req text/plain
        compression minsize-req 800
        server www ${s1_addr}:${s1_port}
} -start
client c1 -connect ${h1_fe_gzip_sock} {
txreq -url "/response-lower" \
-hdr "Accept-Encoding: gzip"
rxresp
expect resp.status == 200
expect resp.http.content-encoding == "<undef>"
expect resp.http.etag == "\"123\""
expect resp.bodylen == 50
txreq -url "/response-equal" \
-hdr "Accept-Encoding: gzip"
rxresp
expect resp.status == 200
expect resp.http.content-encoding == "gzip"
expect resp.http.etag == "W/\"123\""
gunzip
expect resp.bodylen == 1024
txreq -url "/response-greater" \
-hdr "Accept-Encoding: gzip"
rxresp
expect resp.status == 200
expect resp.http.content-encoding == "gzip"
expect resp.http.etag == "W/\"123\""
gunzip
expect resp.bodylen == 2000
txreq -method POST \
-url "/request-lower" \
-hdr "Content-Type: text/plain" \
-bodylen 50
rxresp
expect resp.status == 200
txreq -method POST \
-url "/request-equal" \
-hdr "Content-Type: text/plain" \
-bodylen 800
rxresp
expect resp.status == 200
txreq -method POST \
-url "/request-greater" \
-hdr "Content-Type: text/plain" \
-bodylen 2000
rxresp
expect resp.status == 200
} -run


@ -137,6 +137,9 @@ comp_prepare_compress_request(struct comp_state *st, struct stream *s, struct ht
        struct http_txn *txn = s->txn;
        struct http_hdr_ctx ctx;
        struct comp_type *comp_type;
        unsigned int comp_minsize = 0;
        int32_t pos;
        unsigned long long len = 0;

        ctx.blk = NULL;
        /* Already compressed, don't bother */
@ -147,6 +150,25 @@ comp_prepare_compress_request(struct comp_state *st, struct stream *s, struct ht
                return;

        comp_type = NULL;

        /* compress only if the body size is >= the minimum size */
        if ((s->be->comp && (comp_minsize = s->be->comp->minsize_req)) ||
            (strm_fe(s)->comp && (comp_minsize = strm_fe(s)->comp->minsize_req))) {
                for (pos = htx_get_first(htx); pos != -1; pos = htx_get_next(htx, pos)) {
                        struct htx_blk *blk = htx_get_blk(htx, pos);
                        enum htx_blk_type type = htx_get_blk_type(blk);

                        if (type == HTX_BLK_TLR || type == HTX_BLK_EOT)
                                break;
                        if (type == HTX_BLK_DATA)
                                len += htx_get_blksz(blk);
                }

                if (htx->extra != HTX_UNKOWN_PAYLOAD_LENGTH)
                        len += htx->extra;

                /* small requests should not be compressed */
                if (len < comp_minsize)
                        goto fail;
        }
        /*
         * We don't want to compress content-types not listed in the
         * "compression type" directive if any. If no content-type was found
         * but configuration requires one, we don't compress either. Backend
         * has the priority.
@ -624,6 +646,9 @@ select_compression_response_header(struct comp_state *st, struct stream *s, stru
        struct http_txn *txn = s->txn;
        struct http_hdr_ctx ctx;
        struct comp_type *comp_type;
        unsigned int comp_minsize = 0;
        int32_t pos;
        unsigned long long len = 0;

        /* no common compression algorithm was found in request header */
        if (st->comp_algo[COMP_DIR_RES] == NULL)
@ -650,6 +675,25 @@ select_compression_response_header(struct comp_state *st, struct stream *s, stru
        if (!(msg->flags & HTTP_MSGF_XFER_LEN) || msg->flags & HTTP_MSGF_BODYLESS)
                goto fail;

        /* compress only if the body size is >= the minimum size */
        if ((s->be->comp && (comp_minsize = s->be->comp->minsize_res)) ||
            (strm_fe(s)->comp && (comp_minsize = strm_fe(s)->comp->minsize_res))) {
                for (pos = htx_get_first(htx); pos != -1; pos = htx_get_next(htx, pos)) {
                        struct htx_blk *blk = htx_get_blk(htx, pos);
                        enum htx_blk_type type = htx_get_blk_type(blk);

                        if (type == HTX_BLK_TLR || type == HTX_BLK_EOT)
                                break;
                        if (type == HTX_BLK_DATA)
                                len += htx_get_blksz(blk);
                }

                if (htx->extra != HTX_UNKOWN_PAYLOAD_LENGTH)
                        len += htx->extra;

                /* small responses should not be compressed */
                if (len < comp_minsize)
                        goto fail;
        }
        /* content is already compressed */
        ctx.blk = NULL;
        if (http_find_header(htx, ist("Content-Encoding"), &ctx, 1))
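
For readers following the logic outside the HAProxy tree, the check added in
the two hunks above can be sketched as a small self-contained C program: sum
the sizes of the data blocks already buffered, add the announced-but-not-yet
received length when it is known, and refuse to compress below the threshold.
The types and helpers below are simplified stand-ins invented for this sketch,
not the real HTX API, and a threshold of 0 is treated as "always compress" as
with the directive's default.

#include <stdio.h>

/* Simplified stand-ins for HTX block types: only DATA blocks carry payload. */
enum blk_type { BLK_DATA, BLK_TRAILER, BLK_EOT };

struct blk {
	enum blk_type type;
	size_t size;
};

/* Return 1 if the payload should be compressed, 0 otherwise. 'extra' models
 * the announced-but-not-yet-buffered length; pass extra_known = 0 when the
 * total length is unknown. */
static int should_compress(const struct blk *blks, size_t nblks,
                           unsigned long long extra, int extra_known,
                           unsigned int minsize)
{
	unsigned long long len = 0;
	size_t i;

	if (!minsize)
		return 1;                    /* threshold 0 disables the check */

	for (i = 0; i < nblks; i++) {
		if (blks[i].type == BLK_TRAILER || blks[i].type == BLK_EOT)
			break;               /* end of the body part */
		if (blks[i].type == BLK_DATA)
			len += blks[i].size; /* count buffered body bytes */
	}

	if (extra_known)
		len += extra;                /* count what is still announced */

	return len >= minsize;               /* strictly smaller => skip */
}

int main(void)
{
	struct blk small[] = { { BLK_DATA, 50 },  { BLK_EOT, 0 } };
	struct blk big[]   = { { BLK_DATA, 512 }, { BLK_DATA, 600 }, { BLK_EOT, 0 } };

	printf("50 bytes vs 1k   -> compress=%d\n", should_compress(small, 2, 0, 1, 1024));
	printf("1112 bytes vs 1k -> compress=%d\n", should_compress(big, 3, 0, 1, 1024));
	return 0;
}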
@ -781,6 +825,7 @@ parse_compression_options(char **args, int section, struct proxy *proxy,
{
        struct comp *comp;
        int ret = 0;
        const char *res;

        if (proxy->comp == NULL) {
                comp = calloc(1, sizeof(*comp));
@ -894,6 +939,32 @@ parse_compression_options(char **args, int section, struct proxy *proxy,
                        continue;
                }
        }
else if (strcmp(args[1], "minsize-req") == 0) {
if (*(args[2]) == 0) {
memprintf(err, "'%s' expects an integer argument.", args[1]);
ret = -1;
goto end;
}
res = parse_size_err(args[2], &comp->minsize_req);
if (res != NULL) {
memprintf(err, "unexpected '%s' after size passed to '%s'", res, args[1]);
ret = -1;
goto end;
}
}
else if (strcmp(args[1], "minsize-res") == 0) {
if (*(args[2]) == 0) {
memprintf(err, "'%s' expects an integer argument.", args[1]);
ret = -1;
goto end;
}
res = parse_size_err(args[2], &comp->minsize_res);
if (res != NULL) {
memprintf(err, "unexpected '%s' after size passed to '%s'", res, args[1]);
ret = -1;
goto end;
}
}
else if (strcmp(args[1], "direction") == 0) { else if (strcmp(args[1], "direction") == 0) {
if (!args[2]) { if (!args[2]) {
memprintf(err, "'%s' expects 'request', 'response', or 'both'.", args[0]); memprintf(err, "'%s' expects 'request', 'response', or 'both'.", args[0]);
@ -915,7 +986,7 @@ parse_compression_options(char **args, int section, struct proxy *proxy,
                }
        }
        else {
                memprintf(err, "'%s' expects 'algo', 'type', 'direction', 'offload', 'minsize-req' or 'minsize-res'.",
                          args[0]);
                ret = -1;
                goto end;
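
The minimum sizes are parsed with parse_size_err(), which, as the error
handling above shows, returns NULL on success or a pointer to the first
unexpected character otherwise; this is what lets the regression test write
"compression minsize-res 1k". As a rough self-contained illustration of that
convention, here is a simplified sketch written for this page, not HAProxy's
actual parser, and without its full unit and overflow handling.

#include <ctype.h>
#include <stdio.h>
#include <stdlib.h>

/* Parse "<number>[k|m|g]" into *val. Returns NULL on success or a pointer to
 * the first unexpected character, mimicking the error convention used in the
 * hunk above. Simplified sketch, not HAProxy's parse_size_err(). */
static const char *parse_size_simple(const char *text, unsigned int *val)
{
	char *end;
	unsigned long v = strtoul(text, &end, 10);

	if (end == text)
		return text;                   /* no digits at all */

	switch (tolower((unsigned char)*end)) {
	case '\0':                 break;
	case 'k': v <<= 10; end++; break;      /* kilobytes */
	case 'm': v <<= 20; end++; break;      /* megabytes */
	case 'g': v <<= 30; end++; break;      /* gigabytes */
	}

	if (*end)
		return end;                    /* trailing junk, e.g. "1kb" -> "b" */

	*val = (unsigned int)v;
	return NULL;
}

int main(void)
{
	unsigned int sz = 0;
	const char *err = parse_size_simple("1k", &sz);

	printf("1k -> %u bytes (err: %s)\n", sz, err ? err : "none");
	return 0;
}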


@ -1868,6 +1868,8 @@ static int proxy_defproxy_cpy(struct proxy *curproxy, const struct proxy *defpro
                curproxy->comp->algo_req = defproxy->comp->algo_req;
                curproxy->comp->types_res = defproxy->comp->types_res;
                curproxy->comp->types_req = defproxy->comp->types_req;
                curproxy->comp->minsize_res = defproxy->comp->minsize_res;
                curproxy->comp->minsize_req = defproxy->comp->minsize_req;
                curproxy->comp->flags = defproxy->comp->flags;
        }