CLEANUP: flt_http_comp: remove duplicate rate limit and CPU usage checks

In comp_prepare_compress_request(), the compression rate limit and CPU
usage checks were duplicated. The first set runs before selecting the
algorithm, and the second set runs after. That's definitely a copy-paste
issue or a patch being applied twice. Let's just drop one.
This commit is contained in:
Willy Tarreau 2026-05-11 15:31:16 +02:00
parent 4eb6e8daa3
commit e32cc2e805
2 changed files with 1 addition and 11 deletions

View File

@ -60,7 +60,7 @@ struct comp_ctx {
struct slz_stream strm;
const void *direct_ptr; /* NULL or pointer to beginning of data */
int direct_len; /* length of direct_ptr if not NULL */
struct buffer queued; /* if not NULL, data already queued */
struct buffer queued; /* if not null, data already queued */
#elif defined(USE_ZLIB)
z_stream strm; /* zlib stream */
void *zlib_deflate_state;

View File

@ -172,16 +172,6 @@ comp_prepare_compress_request(struct comp_state *st, struct stream *s, struct ht
else
goto fail; /* no algo selected: nothing to do */
/* limit compression rate */
if (global.comp_rate_lim > 0)
if (read_freq_ctr(&global.comp_bps_in) > global.comp_rate_lim)
goto fail;
/* limit cpu usage */
if (th_ctx->idle_pct < compress_min_idle)
goto fail;
/* initialize compression */
if (st->comp_algo->init(&st->comp_ctx, global.tune.comp_maxlevel) < 0)
goto fail;