mirror of
https://git.haproxy.org/git/haproxy.git/
synced 2025-11-28 06:11:32 +01:00
MINOR: cfgparse: limit file size loaded via /dev/stdin
load_cfg_in_mem() can continuously reallocate memory in order to load an extremely large input from /dev/stdin, until it fails with ENOMEM, which means that the process has consumed all available RAM. This is undesirable in containers and virtualized environments. So, in order to prevent this, let's introduce MAX_CFG_SIZE as 10MB, which limits the size of input supplied via /dev/stdin.
This commit is contained in:
parent
fd48b28315
commit
8b1dfa9def
@ -106,6 +106,11 @@
|
||||
#define LINESIZE 2048
|
||||
#endif
|
||||
|
||||
// maximum size of a configuration file that could be loaded in memory via
|
||||
// /dev/stdin. This is needed to prevent loading extremely large files
|
||||
// via standard input.
|
||||
#define MAX_CFG_SIZE 10485760
|
||||
|
||||
// max # args on a configuration line
|
||||
#define MAX_LINE_ARGS 64
|
||||
|
||||
|
||||
@ -1784,6 +1784,13 @@ ssize_t load_cfg_in_mem(char *filename, char **cfg_content)
|
||||
*cfg_content = NULL;
|
||||
|
||||
while (1) {
|
||||
if (!file_stat.st_size && ((read_bytes + bytes_to_read) > MAX_CFG_SIZE)) {
|
||||
ha_alert("Loading %s: input is too large %ldMB, limited to %dMB. Exiting.\n",
|
||||
filename, (long)(read_bytes + bytes_to_read)/(1024*1024),
|
||||
MAX_CFG_SIZE/(1024*1024));
|
||||
goto free_mem;
|
||||
}
|
||||
|
||||
if (read_bytes + bytes_to_read > chunk_size) {
|
||||
chunk_size = (read_bytes + bytes_to_read) * 2;
|
||||
new_area = realloc(*cfg_content, chunk_size);
|
||||
|
||||
Loading…
x
Reference in New Issue
Block a user