mirror of
https://git.haproxy.org/git/haproxy.git/
synced 2025-08-18 13:06:58 +02:00
It looked strange to see pool_evict_from_cache() always very present
on "perf top", but there was actually a reason to this: while b_free()
uses pool_free() which properly disposes the buffer into the local cache
and b_alloc_fast() allocates using pool_get_first() which considers the
local cache, b_alloc_margin() does not consider the local cache as it
only uses __pool_get_first() which only allocates from the shared pools.
The impact is that basically everywhere a buffer is allocated (muxes,
streams, applets), it's always picked from the shared pool (hence
involves locking) and is released to the local one and makes it grow
until it's required to trigger a flush using pool_evict_from_cache().
Buffer usage is thus not thread-local at all, and causes eviction of
a lot of possibly useful objects from the local caches.
Just fixing this results in a 10% request rate increase in an HTTP/1 test
on a 16-thread machine.
This bug was caused by recent commit ed891fd
("MEDIUM: memory: make local pools independent on lockless pools")
merged into 2.2-dev9, so no backport is needed.
214 lines
6.0 KiB
C
214 lines
6.0 KiB
C
/*
|
|
* include/haproxy/dynbuf.h
|
|
* Buffer management functions.
|
|
*
|
|
* Copyright (C) 2000-2020 Willy Tarreau - w@1wt.eu
|
|
*
|
|
* This library is free software; you can redistribute it and/or
|
|
* modify it under the terms of the GNU Lesser General Public
|
|
* License as published by the Free Software Foundation, version 2.1
|
|
* exclusively.
|
|
*
|
|
* This library is distributed in the hope that it will be useful,
|
|
* but WITHOUT ANY WARRANTY; without even the implied warranty of
|
|
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
|
|
* Lesser General Public License for more details.
|
|
*
|
|
* You should have received a copy of the GNU Lesser General Public
|
|
* License along with this library; if not, write to the Free Software
|
|
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
|
|
*/
|
|
|
|
#ifndef _HAPROXY_DYNBUF_H
|
|
#define _HAPROXY_DYNBUF_H
|
|
|
|
#include <stdio.h>
|
|
#include <stdlib.h>
|
|
#include <string.h>
|
|
|
|
#include <import/ist.h>
|
|
#include <haproxy/activity.h>
|
|
#include <haproxy/api.h>
|
|
#include <haproxy/buf.h>
|
|
#include <haproxy/chunk.h>
|
|
#include <haproxy/dynbuf-t.h>
|
|
#include <haproxy/pool.h>
|
|
|
|
extern struct pool_head *pool_head_buffer;
|
|
extern struct mt_list buffer_wq;
|
|
|
|
int init_buffer();
|
|
void buffer_dump(FILE *o, struct buffer *b, int from, int to);
|
|
|
|
/*****************************************************************/
|
|
/* These functions are used to compute various buffer area sizes */
|
|
/*****************************************************************/
|
|
|
|
/* Return 1 if the buffer has less than 1/4 of its capacity free, otherwise 0 */
|
|
/* Reports whether <buf> has less than a quarter of its capacity still free.
 * A NULL or not-yet-allocated buffer (b_is_null()) is never considered full.
 * Returns 1 if almost full, otherwise 0.
 */
static inline int buffer_almost_full(const struct buffer *buf)
{
	return b_is_null(buf) ? 0 : b_almost_full(buf);
}
|
|
|
|
/**************************************************/
|
|
/* Functions below are used for buffer allocation */
|
|
/**************************************************/
|
|
|
|
/* Allocates a buffer and assigns it to *buf. If no memory is available,
|
|
* ((char *)1) is assigned instead with a zero size. No control is made to
|
|
* check if *buf already pointed to another buffer. The allocated buffer is
|
|
* returned, or NULL in case no memory is available.
|
|
*/
|
|
static inline struct buffer *b_alloc(struct buffer *buf)
|
|
{
|
|
char *area;
|
|
|
|
*buf = BUF_WANTED;
|
|
area = pool_alloc_dirty(pool_head_buffer);
|
|
if (unlikely(!area)) {
|
|
activity[tid].buf_wait++;
|
|
return NULL;
|
|
}
|
|
|
|
buf->area = area;
|
|
buf->size = pool_head_buffer->size;
|
|
return buf;
|
|
}
|
|
|
|
/* Allocates a buffer and assigns it to *buf. If no memory is available,
|
|
* ((char *)1) is assigned instead with a zero size. No control is made to
|
|
* check if *buf already pointed to another buffer. The allocated buffer is
|
|
* returned, or NULL in case no memory is available. The difference with
|
|
* b_alloc() is that this function only picks from the pool and never calls
|
|
* malloc(), so it can fail even if some memory is available.
|
|
*/
|
|
static inline struct buffer *b_alloc_fast(struct buffer *buf)
|
|
{
|
|
char *area;
|
|
|
|
*buf = BUF_WANTED;
|
|
area = pool_get_first(pool_head_buffer);
|
|
if (unlikely(!area))
|
|
return NULL;
|
|
|
|
buf->area = area;
|
|
buf->size = pool_head_buffer->size;
|
|
return buf;
|
|
}
|
|
|
|
/* Releases buffer <buf> (no check of emptiness). The buffer's head is marked
|
|
* empty.
|
|
*/
|
|
static inline void __b_free(struct buffer *buf)
|
|
{
|
|
char *area = buf->area;
|
|
|
|
/* let's first clear the area to save an occasional "show sess all"
|
|
* glancing over our shoulder from getting a dangling pointer.
|
|
*/
|
|
*buf = BUF_NULL;
|
|
__ha_barrier_store();
|
|
pool_free(pool_head_buffer, area);
|
|
}
|
|
|
|
/* Releases buffer <buf> if allocated, and marks it empty. */
|
|
static inline void b_free(struct buffer *buf)
|
|
{
|
|
if (buf->size)
|
|
__b_free(buf);
|
|
}
|
|
|
|
/* Ensures that <buf> is allocated. If an allocation is needed, it ensures that
|
|
* there are still at least <margin> buffers available in the pool after this
|
|
* allocation so that we don't leave the pool in a condition where a session or
|
|
* a response buffer could not be allocated anymore, resulting in a deadlock.
|
|
* This means that we sometimes need to try to allocate extra entries even if
|
|
* only one buffer is needed.
|
|
*
|
|
* We need to lock the pool here to be sure to have <margin> buffers available
|
|
* after the allocation, regardless how many threads that doing it in the same
|
|
* time. So, we use internal and lockless memory functions (prefixed with '__').
|
|
*/
|
|
static inline struct buffer *b_alloc_margin(struct buffer *buf, int margin)
{
	char *area;
	ssize_t idx __maybe_unused;
	unsigned int cached;

	/* already allocated: nothing to do */
	if (buf->size)
		return buf;

	cached = 0;
#ifdef CONFIG_HAP_LOCAL_POOLS
	/* First try the thread-local cache; this keeps buffer usage
	 * thread-local and avoids touching the shared pools (and their
	 * lock) on the common path.
	 */
	if (likely(area = __pool_get_from_cache(pool_head_buffer)))
		goto done;

	/* Nothing cached; count the local cache's entries so they are
	 * credited against <margin> in the fast-path test below.
	 */
	idx = pool_get_index(pool_head_buffer);
	if (idx >= 0)
		cached = pool_cache[tid][idx].count;
#endif

	/* mark the buffer as "allocation requested" before trying */
	*buf = BUF_WANTED;

#ifndef CONFIG_HAP_LOCKLESS_POOLS
	/* lock the shared pool so the margin check and the allocation
	 * below are atomic with respect to other threads
	 */
	HA_SPIN_LOCK(POOL_LOCK, &pool_head_buffer->lock);
#endif

	/* fast path */
	if ((pool_head_buffer->allocated - pool_head_buffer->used + cached) > margin) {
		/* enough spare entries remain after taking one: pick from
		 * the shared pool without calling malloc()
		 */
		area = __pool_get_first(pool_head_buffer);
		if (likely(area)) {
#ifndef CONFIG_HAP_LOCKLESS_POOLS
			HA_SPIN_UNLOCK(POOL_LOCK, &pool_head_buffer->lock);
#endif
			goto done;
		}
	}

	/* slow path, uses malloc(); refills the pool while preserving the
	 * requested margin of spare entries
	 */
	area = __pool_refill_alloc(pool_head_buffer, margin);

#ifndef CONFIG_HAP_LOCKLESS_POOLS
	HA_SPIN_UNLOCK(POOL_LOCK, &pool_head_buffer->lock);
#endif

	if (unlikely(!area)) {
		/* out of memory: account for it; *buf keeps BUF_WANTED */
		activity[tid].buf_wait++;
		return NULL;
	}

 done:
	buf->area = area;
	buf->size = pool_head_buffer->size;
	return buf;
}
|
|
|
|
|
|
/* Offer a buffer currently belonging to target <from> to whoever needs one.
|
|
* Any pointer is valid for <from>, including NULL. Its purpose is to avoid
|
|
* passing a buffer to oneself in case of failed allocations (e.g. need two
|
|
* buffers, get one, fail, release it and wake up self again). In case of
|
|
* normal buffer release where it is expected that the caller is not waiting
|
|
* for a buffer, NULL is fine.
|
|
*/
|
|
void __offer_buffer(void *from, unsigned int threshold);
|
|
|
|
static inline void offer_buffers(void *from, unsigned int threshold)
|
|
{
|
|
if (!MT_LIST_ISEMPTY(&buffer_wq))
|
|
__offer_buffer(from, threshold);
|
|
}
|
|
|
|
|
|
#endif /* _HAPROXY_DYNBUF_H */
|
|
|
|
/*
|
|
* Local variables:
|
|
* c-indent-level: 8
|
|
* c-basic-offset: 8
|
|
* End:
|
|
*/
|