MEDIUM: dynbuf: remove last usages of b_alloc_margin()

The function's purpose used to be to make a buffer allocation fail if the
allocation would not leave some minimum number of buffers available. Thus,
some allocations could succeed while others failed, for the sole purpose of
trying to keep two buffers available at once for process_stream(). But things
have changed a lot since: 1.7 broke the promise that process_stream() would
always succeed with only two buffers, and the thread-local pool caches
introduced later keep certain buffers available that are not accounted for
in the global pool, so local allocators cannot infer anything from the
number of currently available buffers.

Let's just replace all remaining uses of b_alloc_margin() with b_alloc()
once and for all.
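
To illustrate the difference, here is a minimal standalone sketch, not
HAProxy code: the toy pool and both allocator functions below are invented
for illustration only. The margin-style allocator refuses a request unless
granting it would still leave <margin> buffers free, while the plain
allocator, like b_alloc(), only fails when the pool is truly empty:

	#include <stdio.h>
	#include <stdlib.h>

	struct toy_pool {
		int avail;                /* buffers still available in this pool */
	};

	/* old-style semantics: fail unless <margin> buffers would remain free */
	static void *alloc_with_margin(struct toy_pool *p, int margin)
	{
		if (p->avail <= margin)
			return NULL;      /* would eat into the reserved margin */
		p->avail--;
		return malloc(16384);
	}

	/* new-style semantics (what b_alloc() does): no margin check at all */
	static void *alloc_plain(struct toy_pool *p)
	{
		if (!p->avail)
			return NULL;
		p->avail--;
		return malloc(16384);
	}

	int main(void)
	{
		struct toy_pool pool = { .avail = 2 };

		/* refused: granting it would leave fewer than 2 buffers free */
		void *b1 = alloc_with_margin(&pool, 2);
		/* succeeds: the pool still has buffers left */
		void *b2 = alloc_plain(&pool);

		printf("margin alloc: %s\n", b1 ? "ok" : "refused");
		printf("plain alloc:  %s\n", b2 ? "ok" : "refused");
		free(b1);
		free(b2);
		return 0;
	}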
Author: Willy Tarreau
Date:   2021-03-22 14:44:31 +01:00
commit d68d4f1002
parent 0f495b3d87

7 changed files with 20 additions and 20 deletions

src/applet.c

@@ -48,7 +48,7 @@ int appctx_buf_available(void *arg)
 		return 0;
 
 	/* allocation possible now ? */
-	if (!b_alloc_margin(&si_ic(si)->buf, global.tune.reserved_bufs)) {
+	if (!b_alloc(&si_ic(si)->buf)) {
 		si_rx_buff_blk(si);
 		return 0;
 	}

src/check.c

@@ -994,12 +994,12 @@ int check_buf_available(void *target)
 {
 	struct check *check = target;
 
-	if ((check->state & CHK_ST_IN_ALLOC) && b_alloc_margin(&check->bi, 0)) {
+	if ((check->state & CHK_ST_IN_ALLOC) && b_alloc(&check->bi)) {
 		check->state &= ~CHK_ST_IN_ALLOC;
 		tasklet_wakeup(check->wait_list.tasklet);
 		return 1;
 	}
-	if ((check->state & CHK_ST_OUT_ALLOC) && b_alloc_margin(&check->bo, 0)) {
+	if ((check->state & CHK_ST_OUT_ALLOC) && b_alloc(&check->bo)) {
 		check->state &= ~CHK_ST_OUT_ALLOC;
 		tasklet_wakeup(check->wait_list.tasklet);
 		return 1;
@@ -1016,7 +1016,7 @@ struct buffer *check_get_buf(struct check *check, struct buffer *bptr)
 	struct buffer *buf = NULL;
 
 	if (likely(!LIST_ADDED(&check->buf_wait.list)) &&
-	    unlikely((buf = b_alloc_margin(bptr, 0)) == NULL)) {
+	    unlikely((buf = b_alloc(bptr)) == NULL)) {
 		check->buf_wait.target = check;
 		check->buf_wait.wakeup_cb = check_buf_available;
 		LIST_ADDQ(&ti->buffer_wq, &check->buf_wait.list);

src/flt_spoe.c

@@ -2837,7 +2837,7 @@ spoe_acquire_buffer(struct buffer *buf, struct buffer_wait *buffer_wait)
 	if (LIST_ADDED(&buffer_wait->list))
 		LIST_DEL_INIT(&buffer_wait->list);
 
-	if (b_alloc_margin(buf, global.tune.reserved_bufs))
+	if (b_alloc(buf))
 		return 1;
 
 	LIST_ADDQ(&ti->buffer_wq, &buffer_wait->list);

src/mux_fcgi.c

@@ -570,14 +570,14 @@ static int fcgi_buf_available(void *target)
 	struct fcgi_conn *fconn = target;
 	struct fcgi_strm *fstrm;
 
-	if ((fconn->flags & FCGI_CF_DEM_DALLOC) && b_alloc_margin(&fconn->dbuf, 0)) {
+	if ((fconn->flags & FCGI_CF_DEM_DALLOC) && b_alloc(&fconn->dbuf)) {
 		TRACE_STATE("unblocking fconn, dbuf allocated", FCGI_EV_FCONN_RECV|FCGI_EV_FCONN_BLK|FCGI_EV_FCONN_WAKE, fconn->conn);
 		fconn->flags &= ~FCGI_CF_DEM_DALLOC;
 		fcgi_conn_restart_reading(fconn, 1);
 		return 1;
 	}
 
-	if ((fconn->flags & FCGI_CF_MUX_MALLOC) && b_alloc_margin(br_tail(fconn->mbuf), 0)) {
+	if ((fconn->flags & FCGI_CF_MUX_MALLOC) && b_alloc(br_tail(fconn->mbuf))) {
 		TRACE_STATE("unblocking fconn, mbuf allocated", FCGI_EV_FCONN_SEND|FCGI_EV_FCONN_BLK|FCGI_EV_FCONN_WAKE, fconn->conn);
 		fconn->flags &= ~FCGI_CF_MUX_MALLOC;
 		if (fconn->flags & FCGI_CF_DEM_MROOM) {
@@ -589,7 +589,7 @@ static int fcgi_buf_available(void *target)
 
 	if ((fconn->flags & FCGI_CF_DEM_SALLOC) &&
 	    (fstrm = fcgi_conn_st_by_id(fconn, fconn->dsi)) && fstrm->cs &&
-	    b_alloc_margin(&fstrm->rxbuf, 0)) {
+	    b_alloc(&fstrm->rxbuf)) {
 		TRACE_STATE("unblocking fstrm, rxbuf allocated", FCGI_EV_STRM_RECV|FCGI_EV_FSTRM_BLK|FCGI_EV_STRM_WAKE, fconn->conn, fstrm);
 		fconn->flags &= ~FCGI_CF_DEM_SALLOC;
 		fcgi_conn_restart_reading(fconn, 1);
@@ -605,7 +605,7 @@ static inline struct buffer *fcgi_get_buf(struct fcgi_conn *fconn, struct buffer *bptr)
 	struct buffer *buf = NULL;
 
 	if (likely(!LIST_ADDED(&fconn->buf_wait.list)) &&
-	    unlikely((buf = b_alloc_margin(bptr, 0)) == NULL)) {
+	    unlikely((buf = b_alloc(bptr)) == NULL)) {
 		fconn->buf_wait.target = fconn;
 		fconn->buf_wait.wakeup_cb = fcgi_buf_available;
 		LIST_ADDQ(&ti->buffer_wq, &fconn->buf_wait.list);

src/mux_h1.c

@@ -415,7 +415,7 @@ static int h1_buf_available(void *target)
 {
 	struct h1c *h1c = target;
 
-	if ((h1c->flags & H1C_F_IN_ALLOC) && b_alloc_margin(&h1c->ibuf, 0)) {
+	if ((h1c->flags & H1C_F_IN_ALLOC) && b_alloc(&h1c->ibuf)) {
 		TRACE_STATE("unblocking h1c, ibuf allocated", H1_EV_H1C_RECV|H1_EV_H1C_BLK|H1_EV_H1C_WAKE, h1c->conn);
 		h1c->flags &= ~H1C_F_IN_ALLOC;
 		if (h1_recv_allowed(h1c))
@@ -423,7 +423,7 @@ static int h1_buf_available(void *target)
 		return 1;
 	}
 
-	if ((h1c->flags & H1C_F_OUT_ALLOC) && b_alloc_margin(&h1c->obuf, 0)) {
+	if ((h1c->flags & H1C_F_OUT_ALLOC) && b_alloc(&h1c->obuf)) {
 		TRACE_STATE("unblocking h1s, obuf allocated", H1_EV_TX_DATA|H1_EV_H1S_BLK|H1_EV_STRM_WAKE, h1c->conn, h1c->h1s);
 		h1c->flags &= ~H1C_F_OUT_ALLOC;
 		if (h1c->h1s)
@@ -431,7 +431,7 @@ static int h1_buf_available(void *target)
 		return 1;
 	}
 
-	if ((h1c->flags & H1C_F_IN_SALLOC) && h1c->h1s && b_alloc_margin(&h1c->h1s->rxbuf, 0)) {
+	if ((h1c->flags & H1C_F_IN_SALLOC) && h1c->h1s && b_alloc(&h1c->h1s->rxbuf)) {
 		TRACE_STATE("unblocking h1c, stream rxbuf allocated", H1_EV_H1C_RECV|H1_EV_H1C_BLK|H1_EV_H1C_WAKE, h1c->conn);
 		h1c->flags &= ~H1C_F_IN_SALLOC;
 		tasklet_wakeup(h1c->wait_event.tasklet);
@@ -449,7 +449,7 @@ static inline struct buffer *h1_get_buf(struct h1c *h1c, struct buffer *bptr)
 	struct buffer *buf = NULL;
 
 	if (likely(!LIST_ADDED(&h1c->buf_wait.list)) &&
-	    unlikely((buf = b_alloc_margin(bptr, 0)) == NULL)) {
+	    unlikely((buf = b_alloc(bptr)) == NULL)) {
 		h1c->buf_wait.target = h1c;
 		h1c->buf_wait.wakeup_cb = h1_buf_available;
 		LIST_ADDQ(&ti->buffer_wq, &h1c->buf_wait.list);

src/mux_h2.c

@@ -775,13 +775,13 @@ static int h2_buf_available(void *target)
 	struct h2c *h2c = target;
 	struct h2s *h2s;
 
-	if ((h2c->flags & H2_CF_DEM_DALLOC) && b_alloc_margin(&h2c->dbuf, 0)) {
+	if ((h2c->flags & H2_CF_DEM_DALLOC) && b_alloc(&h2c->dbuf)) {
 		h2c->flags &= ~H2_CF_DEM_DALLOC;
 		h2c_restart_reading(h2c, 1);
 		return 1;
 	}
 
-	if ((h2c->flags & H2_CF_MUX_MALLOC) && b_alloc_margin(br_tail(h2c->mbuf), 0)) {
+	if ((h2c->flags & H2_CF_MUX_MALLOC) && b_alloc(br_tail(h2c->mbuf))) {
 		h2c->flags &= ~H2_CF_MUX_MALLOC;
 		if (h2c->flags & H2_CF_DEM_MROOM) {
@@ -793,7 +793,7 @@ static int h2_buf_available(void *target)
 
 	if ((h2c->flags & H2_CF_DEM_SALLOC) &&
 	    (h2s = h2c_st_by_id(h2c, h2c->dsi)) && h2s->cs &&
-	    b_alloc_margin(&h2s->rxbuf, 0)) {
+	    b_alloc(&h2s->rxbuf)) {
 		h2c->flags &= ~H2_CF_DEM_SALLOC;
 		h2c_restart_reading(h2c, 1);
 		return 1;
@@ -807,7 +807,7 @@ static inline struct buffer *h2_get_buf(struct h2c *h2c, struct buffer *bptr)
 	struct buffer *buf = NULL;
 
 	if (likely(!LIST_ADDED(&h2c->buf_wait.list)) &&
-	    unlikely((buf = b_alloc_margin(bptr, 0)) == NULL)) {
+	    unlikely((buf = b_alloc(bptr)) == NULL)) {
 		h2c->buf_wait.target = h2c;
 		h2c->buf_wait.wakeup_cb = h2_buf_available;
 		LIST_ADDQ(&ti->buffer_wq, &h2c->buf_wait.list);

src/stream.c

@@ -329,10 +329,10 @@ int stream_buf_available(void *arg)
 	struct stream *s = arg;
 
 	if (!s->req.buf.size && !s->req.pipe && (s->si[0].flags & SI_FL_RXBLK_BUFF) &&
-	    b_alloc_margin(&s->req.buf, global.tune.reserved_bufs))
+	    b_alloc(&s->req.buf))
 		si_rx_buff_rdy(&s->si[0]);
 	else if (!s->res.buf.size && !s->res.pipe && (s->si[1].flags & SI_FL_RXBLK_BUFF) &&
-		 b_alloc_margin(&s->res.buf, 0))
+		 b_alloc(&s->res.buf))
 		si_rx_buff_rdy(&s->si[1]);
 	else
 		return 0;
@@ -772,7 +772,7 @@ static int stream_alloc_work_buffer(struct stream *s)
 	if (LIST_ADDED(&s->buffer_wait.list))
 		LIST_DEL_INIT(&s->buffer_wait.list);
 
-	if (b_alloc_margin(&s->res.buf, 0))
+	if (b_alloc(&s->res.buf))
 		return 1;
 
 	LIST_ADDQ(&ti->buffer_wq, &s->buffer_wait.list);
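
All seven files keep following the same alloc-or-wait pattern that this
commit leaves intact: attempt b_alloc(), and on failure register a waiter
in the per-thread buffer queue with a callback that retries once a buffer
is released. Below is a minimal self-contained sketch of that pattern,
using simplified stand-ins: toy_b_alloc(), toy_b_free(), get_buf_or_wait()
and the singly-linked waiter list are invented here, not HAProxy's real
types or LIST_* primitives.

	#include <stdio.h>
	#include <stdlib.h>

	struct waiter {
		void *target;                   /* context to resume later */
		int (*wakeup_cb)(void *target); /* retries the blocked allocation */
		struct waiter *next;
	};

	static struct waiter *buffer_wq;    /* stand-in for ti->buffer_wq */
	static int pool_avail;              /* stand-in for the shared buffer pool */

	static void *toy_b_alloc(void)
	{
		if (!pool_avail)
			return NULL;
		pool_avail--;
		return malloc(16384);
	}

	/* the shape of check_get_buf()/h1_get_buf()/h2_get_buf() above:
	 * allocate, or queue a waiter with a wakeup callback */
	static void *get_buf_or_wait(void *ctx, int (*cb)(void *), struct waiter *w)
	{
		void *buf = toy_b_alloc();

		if (!buf) {
			w->target = ctx;
			w->wakeup_cb = cb;
			w->next = buffer_wq;    /* HAProxy uses LIST_ADDQ here */
			buffer_wq = w;
		}
		return buf;
	}

	/* buffer-release path: return the buffer and wake one queued waiter */
	static void toy_b_free(void *buf)
	{
		free(buf);
		pool_avail++;
		if (buffer_wq) {
			struct waiter *w = buffer_wq;
			buffer_wq = w->next;
			w->wakeup_cb(w->target);
		}
	}

	static int demo_wakeup(void *target)
	{
		printf("%s: buffer available again, retrying\n", (char *)target);
		return 1;
	}

	int main(void)
	{
		struct waiter w;
		void *first;

		pool_avail = 1;
		first = toy_b_alloc();          /* grabs the only buffer */
		if (!get_buf_or_wait("check", demo_wakeup, &w))
			printf("allocation failed, waiter queued\n");
		toy_b_free(first);              /* release wakes the waiter */
		return 0;
	}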