BUG/MEDIUM: pipe/thread: fix atomicity of pipe counters

Previous patch 160287b676 ("MEDIUM: pipe/thread: maintain a per-thread
local cache of recently used pipes") didn't replace all pipe counter
updates with atomic ops, on the grounds that some of them were already
performed under a lock. That is not a valid reason, because these locked
updates still run in parallel with the atomic updates performed
elsewhere. As a result, pipes_used could occasionally be seen as
negative in the stats (harmless), but slightly more pipes could also be
allocated than permitted, stealing a few file descriptors that were then
no longer usable for connections. Let's use pure atomic ops everywhere
these counters are updated.

No backport is needed.
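
For readers unfamiliar with the failure mode, below is a minimal,
self-contained sketch of the race (hypothetical names; plain pthreads
and GCC __atomic builtins stand in for HAProxy's HA_ATOMIC_* macros,
which are not available outside the tree). One thread updates a shared
counter with a plain increment while holding a mutex, another updates
it atomically without ever taking that mutex; the plain
read-modify-write can interleave with the atomic one, so updates get
lost and the counter drifts, exactly like pipes_used above.

/* race_sketch.c - build with: cc -O2 -pthread race_sketch.c
 * Hypothetical example, not HAProxy code. Thread A increments "used"
 * under a mutex with a plain ++, thread B decrements it with an atomic
 * builtin and never takes the mutex. The lock only serializes A against
 * other lock holders, so A's non-atomic read-modify-write can overlap
 * B's atomic update and one of the two changes is lost.
 */
#include <pthread.h>
#include <stdio.h>

#define LOOPS 1000000

static int used;
static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;

static void *locked_inc(void *arg)
{
	for (int i = 0; i < LOOPS; i++) {
		pthread_mutex_lock(&lock);
		used++;                 /* plain, non-atomic read-modify-write */
		pthread_mutex_unlock(&lock);
	}
	return NULL;
}

static void *atomic_dec(void *arg)
{
	for (int i = 0; i < LOOPS; i++)
		__atomic_sub_fetch(&used, 1, __ATOMIC_RELAXED); /* no lock taken */
	return NULL;
}

int main(void)
{
	pthread_t a, b;

	pthread_create(&a, NULL, locked_inc, NULL);
	pthread_create(&b, NULL, atomic_dec, NULL);
	pthread_join(a, NULL);
	pthread_join(b, NULL);

	/* Should be 0; lost updates typically leave a non-zero (possibly
	 * negative) residue. Making the increment atomic as well fixes it.
	 */
	printf("used = %d\n", used);
	return 0;
}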
Willy Tarreau 2020-01-30 09:15:37 +01:00
parent 160287b676
commit 876b411f2b

src/pipe.c

@@ -52,27 +52,27 @@ struct pipe *get_pipe()
 	if (likely(pipes_live)) {
 		HA_SPIN_LOCK(PIPES_LOCK, &pipes_lock);
 		ret = pipes_live;
-		if (likely(ret)) {
+		if (likely(ret))
 			pipes_live = ret->next;
-			pipes_free--;
-			pipes_used++;
-		}
 		HA_SPIN_UNLOCK(PIPES_LOCK, &pipes_lock);
-		if (ret)
+		if (ret) {
+			HA_ATOMIC_SUB(&pipes_free, 1);
+			HA_ATOMIC_ADD(&pipes_used, 1);
 			goto out;
+		}
 	}
 
-	if (pipes_used >= global.maxpipes)
-		goto out;
+	HA_ATOMIC_ADD(&pipes_used, 1);
+	if (pipes_used + pipes_free >= global.maxpipes)
+		goto fail;
 
 	ret = pool_alloc(pool_head_pipe);
 	if (!ret)
-		goto out;
-	if (pipe(pipefd) < 0) {
-		pool_free(pool_head_pipe, ret);
-		goto out;
-	}
+		goto fail;
+
+	if (pipe(pipefd) < 0)
+		goto fail;
 
 #ifdef F_SETPIPE_SZ
 	if (global.tune.pipesize)
 		fcntl(pipefd[0], F_SETPIPE_SZ, global.tune.pipesize);
@@ -81,9 +81,13 @@ struct pipe *get_pipe()
 	ret->prod = pipefd[1];
 	ret->cons = pipefd[0];
 	ret->next = NULL;
-	HA_ATOMIC_ADD(&pipes_used, 1);
  out:
 	return ret;
+ fail:
+	pool_free(pool_head_pipe, ret);
+	HA_ATOMIC_SUB(&pipes_used, 1);
+	return NULL;
+
 }
 
 /* destroy a pipe, possibly because an error was encountered on it. Its FDs
@@ -108,21 +112,20 @@ void put_pipe(struct pipe *p)
 		return;
 	}
 
-	if (likely(local_pipes_free * global.nbthread < global.maxpipes)) {
+	if (likely(local_pipes_free * global.nbthread < global.maxpipes - pipes_used)) {
 		p->next = local_pipes;
 		local_pipes = p;
 		local_pipes_free++;
-		HA_ATOMIC_ADD(&pipes_free, 1);
-		HA_ATOMIC_SUB(&pipes_used, 1);
-		return;
+		goto out;
 	}
 
 	HA_SPIN_LOCK(PIPES_LOCK, &pipes_lock);
 	p->next = pipes_live;
 	pipes_live = p;
-	pipes_free++;
-	pipes_used--;
 	HA_SPIN_UNLOCK(PIPES_LOCK, &pipes_lock);
+ out:
+	HA_ATOMIC_ADD(&pipes_free, 1);
+	HA_ATOMIC_SUB(&pipes_used, 1);
 }
 
 /*
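
As a reading aid for the get_pipe() change above: the reworked
allocation path reserves its slot first (the atomic increment of
pipes_used), then checks the limit and attempts the allocations, and
funnels every failure through the new "fail" label which releases the
reservation. A condensed, hypothetical sketch of that pattern (names
simplified, not the actual src/pipe.c):

/* Hypothetical sketch of the "reserve first, check, roll back" scheme.
 * "used"/"free_cnt" stand in for pipes_used/pipes_free and MAXSLOTS for
 * global.maxpipes. The counter is bumped atomically before the limit
 * check, and every failure path undoes the reservation, so concurrent
 * callers can neither exceed the limit nor leave the counter drifting.
 */
#include <stdlib.h>

#define MAXSLOTS 1024

struct slot { int fd[2]; struct slot *next; };

static int used, free_cnt;

struct slot *get_slot(void)
{
	struct slot *ret = NULL;

	__atomic_add_fetch(&used, 1, __ATOMIC_RELAXED);  /* reserve a slot first */
	if (used + free_cnt >= MAXSLOTS)
		goto fail;                               /* over the limit */

	ret = malloc(sizeof(*ret));                      /* allocation may fail */
	if (!ret)
		goto fail;

	return ret;
 fail:
	free(ret);                                       /* free(NULL) is a no-op */
	__atomic_sub_fetch(&used, 1, __ATOMIC_RELAXED);  /* undo the reservation */
	return NULL;
}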