[MEDIUM] add support for global.maxconnrate to limit the per-process conn rate.

This one enforces a per-process connection rate limit, regardless of what
may be set per frontend. It can be a way to limit the CPU usage of a process
being severely attacked.

The side effect is that the global process connection rate is now measured
for each incoming connection, so it will be possible to report it.
Willy Tarreau 2011-09-07 15:17:21 +02:00
parent 91886b692a
commit 81c25d0ee6
6 changed files with 47 additions and 0 deletions
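
As a quick illustration (not part of the commit; the values below are assumptions), the new keyword is set in the "global" section next to maxconn:

    global
        maxconn     20000
        maxconnrate 400     # accept at most 400 new connections per second
                            # for the whole process, whatever the frontends allow

Once the limit is hit, listeners are queued on the global listener queue and resume accepting as soon as the measured rate allows it again, as the stream_sock_accept() hunk below shows.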

@@ -448,6 +448,7 @@ The following keywords are supported in the "global" section :
 * Performance tuning
   - maxconn
   - maxconnrate
   - maxpipes
   - noepoll
   - nokqueue
@@ -649,6 +650,16 @@ maxconn <number>
  connections when this limit is reached. The "ulimit-n" parameter is
  automatically adjusted according to this value. See also "ulimit-n".

maxconnrate <number>
  Sets the maximum per-process number of connections per second to <number>.
  Proxies will stop accepting connections when this limit is reached. It can be
  used to limit the global capacity regardless of each frontend capacity. It is
  important to note that this can only be used as a service protection measure,
  as there will not necessarily be a fair share between frontends when the
  limit is reached, so it's a good idea to also limit each frontend to some
  value close to its expected share. Also, lowering tune.maxaccept can improve
  fairness.

maxpipes <number>
  Sets the maximum per-process number of pipes to <number>. Currently, pipes
  are only used by kernel-based tcp splicing. Since a pipe contains two file
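
To make the documented recommendation concrete, here is a hedged configuration sketch pairing the global cap with per-frontend limits; the frontend names, ports and numbers are illustrative assumptions, not values taken from this commit:

    global
        maxconnrate    500        # process-wide ceiling, service protection only
        tune.maxaccept 8          # smaller accept batches improve fairness

    frontend web
        bind :80
        rate-limit sessions 300   # keep each frontend close to its expected share

    frontend api
        bind :8080
        rate-limit sessions 150

The per-frontend "rate-limit sessions" values remain the capacity-planning knobs; "maxconnrate" only bounds the worst case when the whole process is under attack.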

@@ -25,6 +25,7 @@
#include <common/config.h>
#include <common/ticks.h>
#include <common/time.h>
#include <types/global.h>
#include <types/proxy.h>
#include <proto/freq_ctr.h>

@@ -66,6 +66,8 @@ struct global {
	int gid;
	int nbproc;
	int maxconn, hardmaxconn;
	struct freq_ctr conn_per_sec;	/* measured rate of connections accepted by the process */
	int cps_lim, cps_max;		/* conn/s limit (maxconnrate) and highest rate observed */
	int maxpipes;		/* max # of pipes */
	int maxsock;		/* max # of sockets */
	int rlimit_nofile;	/* default ulimit-n value : 0=unset */

@@ -682,6 +682,19 @@ int cfg_parse_global(const char *file, int linenum, char **args, int kwm)
		}
#endif /* SYSTEM_MAXCONN */
	}
	else if (!strcmp(args[0], "maxconnrate")) {
		if (global.cps_lim != 0) {
			Alert("parsing [%s:%d] : '%s' already specified. Continuing.\n", file, linenum, args[0]);
			err_code |= ERR_ALERT;
			goto out;
		}
		if (*(args[1]) == 0) {
			Alert("parsing [%s:%d] : '%s' expects an integer argument.\n", file, linenum, args[0]);
			err_code |= ERR_ALERT | ERR_FATAL;
			goto out;
		}
		global.cps_lim = atol(args[1]);
	}
	else if (!strcmp(args[0], "maxpipes")) {
		if (global.maxpipes != 0) {
			Alert("parsing [%s:%d] : '%s' already specified. Continuing.\n", file, linenum, args[0]);

@@ -808,6 +808,7 @@ static int stats_sock_parse_request(struct stream_interface *si, char *line)
				}
			}
			global.cps_max = 0;
			return 1;
		}
		else if (strcmp(args[1], "table") == 0) {

@@ -1156,6 +1156,20 @@ int stream_sock_accept(int fd)
		return 0;
	}

	if (global.cps_lim) {
		int max = freq_ctr_remain(&global.conn_per_sec, global.cps_lim, 0);

		if (unlikely(!max)) {
			/* global connection rate limit was reached */
			limit_listener(l, &global_listener_queue);
			task_schedule(global_listener_queue_task, tick_add(now_ms, next_event_delay(&global.conn_per_sec, global.cps_lim, 0)));
			return 0;
		}

		if (max_accept > max)
			max_accept = max;
	}

	if (p && p->fe_sps_lim) {
		int max = freq_ctr_remain(&p->fe_sess_per_sec, p->fe_sps_lim, 0);

@@ -1237,6 +1251,11 @@ int stream_sock_accept(int fd)
		return 0;
	}

	/* increase the per-process number of cumulated connections */
	update_freq_ctr(&global.conn_per_sec, 1);
	if (global.conn_per_sec.curr_ctr > global.cps_max)
		global.cps_max = global.conn_per_sec.curr_ctr;

	jobs++;
	actconn++;
	totalconn++;