diff --git a/doc/configuration.txt b/doc/configuration.txt index b33469721..918d76293 100644 --- a/doc/configuration.txt +++ b/doc/configuration.txt @@ -1790,9 +1790,14 @@ fd-hard-limit much RAM for regular usage. The fd-hard-limit setting is provided to enforce a possibly lower bound to this limit. This means that it will always respect the system-imposed limits when they are below but the specified - value will be used if system-imposed limits are higher. In the example below, - no other setting is specified and the maxconn value will automatically adapt - to the lower of "fd-hard-limit" and the system-imposed limit: + value will be used if system-imposed limits are higher. By default + fd-hard-limit is set to 1048576. This default can be changed via the + DEFAULT_MAXFD compile-time variable, which can serve as the maximum (kernel) + system limit, if the RLIMIT_NOFILE hard limit is extremely large. fd-hard-limit + set in the global section allows one to temporarily override the value provided + via DEFAULT_MAXFD at build time. In the example below, no other setting is + specified and the maxconn value will automatically adapt to the lower of + "fd-hard-limit" and the RLIMIT_NOFILE limit: global # use as many FDs as possible but no more than 50000 diff --git a/include/haproxy/defaults.h b/include/haproxy/defaults.h index 73e8e0c92..35317b7af 100644 --- a/include/haproxy/defaults.h +++ b/include/haproxy/defaults.h @@ -295,6 +295,24 @@ #define DEFAULT_MAXCONN 100 #endif +/* Default file descriptor limit. + * + * DEFAULT_MAXFD explicitly reduces the hard RLIMIT_NOFILE, which is used by the + * process as the base value to calculate the default global.maxsock, if + * global.maxconn, global.rlimit_memmax are not defined. This is useful in the + * case when the hard nofile limit has been bumped to fs.nr_open (kernel max), + * which is extremely large on many modern distros. So, we would also end up with + * an extremely large default global.maxsock. 
The only way to override + * DEFAULT_MAXFD, if defined, is to set fd_hard_limit in the config global + * section. If DEFAULT_MAXFD is not set, a reasonable maximum of 1048576 will be + * used as the default value, which almost guarantees that a process will + * correctly start in any situation and will not then be killed by the watchdog + * when it loops over the allocated fdtab. +*/ +#ifndef DEFAULT_MAXFD +#define DEFAULT_MAXFD 1048576 +#endif + /* Define a maxconn which will be used in the master process once it re-exec to * the MODE_MWORKER_WAIT and won't change when SYSTEM_MAXCONN is set. * diff --git a/src/haproxy.c b/src/haproxy.c index faff4b0f6..d7d743eef 100644 --- a/src/haproxy.c +++ b/src/haproxy.c @@ -1512,7 +1512,19 @@ static int compute_ideal_maxconn() * - two FDs per connection */ - if (global.fd_hard_limit && remain > global.fd_hard_limit) + /* on some modern distros for archs like amd64, fs.nr_open (kernel max) could + * be on the order of 1 billion, and systemd since version 256~rc3-3 has bumped + * the hard RLIMIT_NOFILE to fs.nr_open (rlim_fd_max_at_boot). If we are + * started without global.maxconn or global.rlimit_memmax_all, we risk ending + * up with a computed global.maxconn = ~500000000 and a computed + * global.maxsock = ~1000000000. So, the fdtab would be unnecessarily and + * extremely huge and the watchdog would kill the process when it tries to loop + * over the fdtab (see fd_reregister_all). +*/ + if (!global.fd_hard_limit) + global.fd_hard_limit = DEFAULT_MAXFD; + + if (remain > global.fd_hard_limit) remain = global.fd_hard_limit; /* subtract listeners and checks */