mirror of
https://git.haproxy.org/git/haproxy.git/
synced 2025-08-10 00:57:02 +02:00
BUG/MEDIUM: pipe/thread: fix atomicity of pipe counters
Previous patch 160287b676
("MEDIUM: pipe/thread: maintain a per-thread
local cache of recently used pipes") didn't replace all pipe counter
updates with atomic ops since some were already under a lock, which is
obviously not a valid reason since these ones can be updated in parallel
to other atomic ops. The result was that the pipes_used counter could occasionally be
seen as negative in the stats (harmless) but also this could result in
slightly more pipes being allocated than permitted, thus stealing a few
file descriptors that were not usable for connections anymore. Let's use
pure atomic ops everywhere these counters are updated.
No backport is needed.
This commit is contained in:
parent
160287b676
commit
876b411f2b
41
src/pipe.c
41
src/pipe.c
@@ -52,27 +52,27 @@ struct pipe *get_pipe()
|
|||||||
if (likely(pipes_live)) {
|
if (likely(pipes_live)) {
|
||||||
HA_SPIN_LOCK(PIPES_LOCK, &pipes_lock);
|
HA_SPIN_LOCK(PIPES_LOCK, &pipes_lock);
|
||||||
ret = pipes_live;
|
ret = pipes_live;
|
||||||
if (likely(ret)) {
|
if (likely(ret))
|
||||||
pipes_live = ret->next;
|
pipes_live = ret->next;
|
||||||
pipes_free--;
|
|
||||||
pipes_used++;
|
|
||||||
}
|
|
||||||
HA_SPIN_UNLOCK(PIPES_LOCK, &pipes_lock);
|
HA_SPIN_UNLOCK(PIPES_LOCK, &pipes_lock);
|
||||||
if (ret)
|
if (ret) {
|
||||||
|
HA_ATOMIC_SUB(&pipes_free, 1);
|
||||||
|
HA_ATOMIC_ADD(&pipes_used, 1);
|
||||||
goto out;
|
goto out;
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
if (pipes_used >= global.maxpipes)
|
HA_ATOMIC_ADD(&pipes_used, 1);
|
||||||
goto out;
|
if (pipes_used + pipes_free >= global.maxpipes)
|
||||||
|
goto fail;
|
||||||
|
|
||||||
ret = pool_alloc(pool_head_pipe);
|
ret = pool_alloc(pool_head_pipe);
|
||||||
if (!ret)
|
if (!ret)
|
||||||
goto out;
|
goto fail;
|
||||||
|
|
||||||
|
if (pipe(pipefd) < 0)
|
||||||
|
goto fail;
|
||||||
|
|
||||||
if (pipe(pipefd) < 0) {
|
|
||||||
pool_free(pool_head_pipe, ret);
|
|
||||||
goto out;
|
|
||||||
}
|
|
||||||
#ifdef F_SETPIPE_SZ
|
#ifdef F_SETPIPE_SZ
|
||||||
if (global.tune.pipesize)
|
if (global.tune.pipesize)
|
||||||
fcntl(pipefd[0], F_SETPIPE_SZ, global.tune.pipesize);
|
fcntl(pipefd[0], F_SETPIPE_SZ, global.tune.pipesize);
|
||||||
@@ -81,9 +81,13 @@ struct pipe *get_pipe()
|
|||||||
ret->prod = pipefd[1];
|
ret->prod = pipefd[1];
|
||||||
ret->cons = pipefd[0];
|
ret->cons = pipefd[0];
|
||||||
ret->next = NULL;
|
ret->next = NULL;
|
||||||
HA_ATOMIC_ADD(&pipes_used, 1);
|
|
||||||
out:
|
out:
|
||||||
return ret;
|
return ret;
|
||||||
|
fail:
|
||||||
|
pool_free(pool_head_pipe, ret);
|
||||||
|
HA_ATOMIC_SUB(&pipes_used, 1);
|
||||||
|
return NULL;
|
||||||
|
|
||||||
}
|
}
|
||||||
|
|
||||||
/* destroy a pipe, possibly because an error was encountered on it. Its FDs
|
/* destroy a pipe, possibly because an error was encountered on it. Its FDs
|
||||||
@@ -108,21 +112,20 @@ void put_pipe(struct pipe *p)
|
|||||||
return;
|
return;
|
||||||
}
|
}
|
||||||
|
|
||||||
if (likely(local_pipes_free * global.nbthread < global.maxpipes)) {
|
if (likely(local_pipes_free * global.nbthread < global.maxpipes - pipes_used)) {
|
||||||
p->next = local_pipes;
|
p->next = local_pipes;
|
||||||
local_pipes = p;
|
local_pipes = p;
|
||||||
local_pipes_free++;
|
local_pipes_free++;
|
||||||
HA_ATOMIC_ADD(&pipes_free, 1);
|
goto out;
|
||||||
HA_ATOMIC_SUB(&pipes_used, 1);
|
|
||||||
return;
|
|
||||||
}
|
}
|
||||||
|
|
||||||
HA_SPIN_LOCK(PIPES_LOCK, &pipes_lock);
|
HA_SPIN_LOCK(PIPES_LOCK, &pipes_lock);
|
||||||
p->next = pipes_live;
|
p->next = pipes_live;
|
||||||
pipes_live = p;
|
pipes_live = p;
|
||||||
pipes_free++;
|
|
||||||
pipes_used--;
|
|
||||||
HA_SPIN_UNLOCK(PIPES_LOCK, &pipes_lock);
|
HA_SPIN_UNLOCK(PIPES_LOCK, &pipes_lock);
|
||||||
|
out:
|
||||||
|
HA_ATOMIC_ADD(&pipes_free, 1);
|
||||||
|
HA_ATOMIC_SUB(&pipes_used, 1);
|
||||||
}
|
}
|
||||||
|
|
||||||
/*
|
/*
|
||||||
|
Loading…
Reference in New Issue
Block a user