This is the second attempt at importing the updated mt_list code (commit
59459ea3). The previous one was attempted with commit c618ed5ff4 ("MAJOR:
import: update mt_list to support exponential back-off") but revealed
problems with QUIC connections and was reverted.
The problem faced was that elements deleted inside an iterator were no
longer reset, so if they were recycled in this form, they could appear as
busy to the next user. This was trivially reproduced with this:
  $ cat quic-repro.cfg
  global
        stats socket /tmp/sock1 level admin
        stats timeout 1h
        limited-quic

  frontend stats
        mode http
        bind quic4@:8443 ssl crt rsa+dh2048.pem alpn h3
        timeout client 5s
        stats uri /

  $ ./haproxy -db -f quic-repro.cfg &

  $ h2load -c 10 -n 100000 --npn h3 https://127.0.0.1:8443/
  => hang
This was purely an API issue caused by the simplified usage of the iterator
macros. The original version had two backups (one full element and one
pointer) that the user had to take care of, while the new one uses only a
single one that is transparent for the user. But during removal, the element
still has to be unlocked if it is going to be reused.
All of this sparked discussions with Fred and Aurélien regarding the still
unclear state of locking. It was found that the lock API does too much at
once and lacks granularity. The new version offers much finer-grained
control, allowing one to selectively lock/unlock an element, a link, the
rest of the list, etc.
It was also found that plenty of places just want to free the current
element, or delete it without doing anything further with it, hence don't
need to reset its pointers (e.g. event_hdl). Finally it appeared obvious
that the root cause of the problem was the unclear usage of the list
iterators themselves, because one does not necessarily expect the element
to be presented locked when that is not needed, which makes the unlock easy
to overlook during reviews.
The updated version of the list presents explicit lock status in the
macro name (_LOCKED or _UNLOCKED suffixes). When using the _LOCKED
suffix, the caller is expected to unlock the element if it intends to
reuse it. At least the status is advertised. The _UNLOCKED variant,
instead, always unlocks it before starting the loop block. This means
it's not necessary to think about unlocking it, though it's obviously
not usable with everything. A few _UNLOCKED variants were used in obvious
places (i.e. where the element is deleted and freed without any prior check).
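
As an illustration, here is a minimal sketch of the two iterators. The
element type, the list heads and process() are hypothetical, <back> is the
single backup element mentioned below, and the argument order (item, head,
member, back) is assumed here:

  /* assumes <import/mt_list.h> and <stdlib.h> are included */
  struct item {
          struct mt_list el;               /* linkage in the shared list */
  };

  extern void process(struct item *it);    /* hypothetical consumer */

  void purge_and_requeue(struct mt_list *kill_list, struct mt_list *run_queue)
  {
          struct mt_list back;
          struct item *it;

          /* _UNLOCKED: the element is presented unlocked; setting <it> to
           * NULL keeps it out of the list, so it can simply be freed.
           */
          MT_LIST_FOR_EACH_ENTRY_UNLOCKED(it, kill_list, el, back) {
                  free(it);
                  it = NULL;
          }

          /* _LOCKED: the element is presented locked; it must be unlocked
           * by hand before being reused outside of the list.
           */
          MT_LIST_FOR_EACH_ENTRY_LOCKED(it, run_queue, el, back) {
                  struct item *kept = it;

                  mt_list_unlock_self(&kept->el);  /* make it usable again */
                  it = NULL;                       /* keep it detached */
                  process(kept);
          }
  }
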
Interestingly, the tests performed last year on QUIC forwarding, which
resulted in limited traffic for the original version and a higher bit rate
for the new one, couldn't be reproduced because the QUIC stack has since
gained in efficiency, and the 100 Gbps barrier is now reached with or
without the mt_list update. However the unit tests definitely show a huge
difference, particularly on EPYC platforms where the EBO provides tremendous
CPU savings.
Overall, the following changes are visible from the application code:

  - mt_list_for_each_entry_safe() + 1 back elem + 1 back ptr
    => MT_LIST_FOR_EACH_ENTRY_LOCKED() or MT_LIST_FOR_EACH_ENTRY_UNLOCKED()
       + 1 back elem

  - MT_LIST_DELETE_SAFE() is no longer needed:
      in MT_LIST_FOR_EACH_ENTRY_UNLOCKED()
        => just manually set the iterator to NULL instead;
      in MT_LIST_FOR_EACH_ENTRY_LOCKED()
        => mt_list_unlock_self() (if the element is going to be reused) + NULL

  - MT_LIST_LOCK_ELT => mt_list_lock_full()

  - MT_LIST_UNLOCK_ELT => mt_list_unlock_full()
    (both are sketched just after this list)

  - l = MT_LIST_APPEND_LOCKED(h, e); MT_LIST_UNLOCK_ELT();
    => l = mt_list_lock_prev(h); mt_list_lock_elem(e); mt_list_unlock_full(e, l)
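
As a sketch, the MT_LIST_LOCK_ELT/MT_LIST_UNLOCK_ELT conversion above boils
down to the following (the element <e> and its mt_list member are
hypothetical, and the saved-links value returned by mt_list_lock_full() is
assumed here to be a struct mt_list):

  struct mt_list links;

  links = mt_list_lock_full(&e->el);   /* was: MT_LIST_LOCK_ELT()   */
  /* <e> and its neighbors are locked at this point */
  mt_list_unlock_full(&e->el, links);  /* was: MT_LIST_UNLOCK_ELT() */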

include/haproxy/list.h (264 lines, 10 KiB, C):

/*
 * include/haproxy/list.h
 * Circular list manipulation macros and functions.
 *
 * Copyright (C) 2002-2020 Willy Tarreau - w@1wt.eu
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation, version 2.1
 * exclusively.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#ifndef _HAPROXY_LIST_H
#define _HAPROXY_LIST_H

#include <haproxy/api.h>
#include <haproxy/thread.h>
#include <import/mt_list.h>

/* First undefine some macros which happen to also be defined on OpenBSD,
 * in sys/queue.h, used by sys/event.h
 */
#undef LIST_HEAD
#undef LIST_INIT
#undef LIST_NEXT

/* ILH = Initialized List Head : used to prevent gcc from moving an empty
 * list to BSS. Some older versions tend to trim all the array and cause
 * corruption.
 */
#define ILH { .n = (struct list *)1, .p = (struct list *)2 }

#define LIST_HEAD(a) ((void *)(&(a)))

#define LIST_INIT(l) ((l)->n = (l)->p = (l))

#define LIST_HEAD_INIT(l) { &l, &l }

/* adds an element at the beginning of a list ; returns the element */
#define LIST_INSERT(lh, el) ({ (el)->n = (lh)->n; (el)->n->p = (lh)->n = (el); (el)->p = (lh); (el); })

/* adds an element at the end of a list ; returns the element */
#define LIST_APPEND(lh, el) ({ (el)->p = (lh)->p; (el)->p->n = (lh)->p = (el); (el)->n = (lh); (el); })

/* adds the contents of a list <old> at the beginning of another list <new>. The old list head remains untouched. */
#define LIST_SPLICE(new, old) do {                                   \
		if (!LIST_ISEMPTY(old)) {                            \
			(old)->p->n = (new)->n; (old)->n->p = (new); \
			(new)->n->p = (old)->p; (new)->n = (old)->n; \
		}                                                    \
	} while (0)

/* adds the contents of a list whose first element is <old> and last one is
 * <old->prev> at the end of another list <new>. The old list DOES NOT have
 * any head here.
 */
#define LIST_SPLICE_END_DETACHED(new, old) do {                      \
		typeof(new) __t;                                     \
		(new)->p->n = (old);                                 \
		(old)->p->n = (new);                                 \
		__t = (old)->p;                                      \
		(old)->p = (new)->p;                                 \
		(new)->p = __t;                                      \
	} while (0)

/* removes an element from a list and returns it */
#if defined(DEBUG_LIST)
/* purposely corrupt the detached element to detect use-after-delete */
#define LIST_DELETE(el) ({ typeof(el) __ret = (el); (el)->n->p = (el)->p; (el)->p->n = (el)->n; *(__ret) = (struct list)ILH; (__ret);})
#else
#define LIST_DELETE(el) ({ typeof(el) __ret = (el); (el)->n->p = (el)->p; (el)->p->n = (el)->n; (__ret); })
#endif

/* removes an element from a list, initializes it and returns it.
 * This is faster than LIST_DELETE+LIST_INIT as we avoid reloading the pointers.
 */
#define LIST_DEL_INIT(el) ({                         \
	typeof(el) __ret = (el);                     \
	typeof(__ret->n) __n = __ret->n;             \
	typeof(__ret->p) __p = __ret->p;             \
	__n->p = __p; __p->n = __n;                  \
	__ret->n = __ret->p = __ret;                 \
	__ret;                                       \
})
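
/* Illustrative sketch (not part of the original header): <struct demo_item>
 * and the function below are hypothetical, and only show how an element
 * embedding a struct list is attached with LIST_APPEND() and detached again
 * with LIST_DEL_INIT() so that it can later be tested with LIST_INLIST().
 */
struct demo_item {
	struct list list;               /* linkage into a demo list */
	int value;
};

static __inline void demo_attach_detach(struct list *head, struct demo_item *it)
{
	LIST_APPEND(head, &it->list);   /* link <it> at the tail of <head> */
	/* ... the element is usable while attached ... */
	LIST_DEL_INIT(&it->list);       /* unlink it and reset its pointers */
}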

/* returns a pointer of type <pt> to a structure containing a list head called
 * <el> at address <lh>. Note that <lh> can be the result of a function or macro
 * since it's used only once.
 * Example: LIST_ELEM(cur_node->args.next, struct node *, args)
 */
#define LIST_ELEM(lh, pt, el) ((pt)(((const char *)(lh)) - ((size_t)&((pt)NULL)->el)))

/* checks if the list head <lh> is empty or not */
#define LIST_ISEMPTY(lh) ((lh)->n == (lh))
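
/* Illustrative sketch (hypothetical names): LIST_ELEM() turns a pointer to
 * the embedded "list" member back into a pointer to its container, here to
 * fetch the first demo_item linked on <head>, or NULL when the list is empty.
 */
static __inline struct demo_item *demo_first(struct list *head)
{
	if (LIST_ISEMPTY(head))
		return NULL;
	return LIST_ELEM(head->n, struct demo_item *, list);
}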

/* checks if the list element <el> was added to a list or not. This only
 * works when detached elements are reinitialized (using LIST_DEL_INIT)
 */
#define LIST_INLIST(el) ((el)->n != (el))

/* checks if the list element <el> has the same prev and next, i.e. it's either
 * detached or alone in a list (since it points to itself or to a single other
 * node). One can check that an element is strictly attached and alone by
 * combining this with LIST_INLIST().
 */
#define LIST_ATMOST1(el) ((el)->n == (el)->p)

/* atomically checks if the list element's next pointer points to anything
 * different from itself, implying the element should be part of a list. This
 * usually is similar to LIST_INLIST() except that while that one might be
 * instrumented using debugging code to perform further consistency checks,
 * the macro below guarantees to always perform a single atomic test and is
 * safe to use with barriers.
 */
#define LIST_INLIST_ATOMIC(el) ({                    \
	typeof(el) __ptr = (el);                     \
	HA_ATOMIC_LOAD(&(__ptr)->n) != __ptr;        \
})

/* returns a pointer of type <pt> to a structure following the element
 * which contains list head <lh>, which is known as element <el> in
 * struct pt.
 * Example: LIST_NEXT(args, struct node *, list)
 */
#define LIST_NEXT(lh, pt, el) (LIST_ELEM((lh)->n, pt, el))

/* returns a pointer of type <pt> to a structure preceding the element
 * which contains list head <lh>, which is known as element <el> in
 * struct pt.
 */
#undef LIST_PREV
#define LIST_PREV(lh, pt, el) (LIST_ELEM((lh)->p, pt, el))

/*
 * Simpler FOREACH_ITEM macro inspired from Linux sources.
 * Iterates <item> through a list of items of type "typeof(*item)" which are
 * linked via a "struct list" member named <member>. A pointer to the head of
 * the list is passed in <list_head>. No temporary variable is needed. Note
 * that <item> must not be modified during the loop.
 * Example: list_for_each_entry(cur_acl, known_acl, list) { ... };
 */
#define list_for_each_entry(item, list_head, member)                     \
	for (item = LIST_ELEM((list_head)->n, typeof(item), member);     \
	     &item->member != (list_head);                                \
	     item = LIST_ELEM(item->member.n, typeof(item), member))
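
/* Illustrative sketch (hypothetical names): count the demo_item elements
 * linked on <head> using the iterator above. The loop body must not remove
 * <it> from the list; see list_for_each_entry_safe() below for that.
 */
static __inline int demo_count(struct list *head)
{
	struct demo_item *it;
	int count = 0;

	list_for_each_entry(it, head, list)
		count++;
	return count;
}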

/*
 * Same as list_for_each_entry but starting from current point
 * Iterates <item> through the list starting from <item>
 * It's basically the same macro but without initializing item to the head of
 * the list.
 */
#define list_for_each_entry_from(item, list_head, member)                \
	for ( ; &item->member != (list_head);                             \
	     item = LIST_ELEM(item->member.n, typeof(item), member))

/*
 * Simpler FOREACH_ITEM_SAFE macro inspired from Linux sources.
 * Iterates <item> through a list of items of type "typeof(*item)" which are
 * linked via a "struct list" member named <member>. A pointer to the head of
 * the list is passed in <list_head>. A temporary variable <back> of same type
 * as <item> is needed so that <item> may safely be deleted if needed.
 * Example: list_for_each_entry_safe(cur_acl, tmp, known_acl, list) { ... };
 */
#define list_for_each_entry_safe(item, back, list_head, member)          \
	for (item = LIST_ELEM((list_head)->n, typeof(item), member),     \
	     back = LIST_ELEM(item->member.n, typeof(item), member);     \
	     &item->member != (list_head);                                \
	     item = back, back = LIST_ELEM(back->member.n, typeof(back), member))
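
/* Illustrative sketch (hypothetical names): the _safe variant keeps a backup
 * pointer so the current element may be detached, and handed to the caller's
 * <release> callback, without breaking the iteration.
 */
static __inline void demo_purge(struct list *head, void (*release)(struct demo_item *))
{
	struct demo_item *it, *back;

	list_for_each_entry_safe(it, back, head, list) {
		LIST_DELETE(&it->list);
		release(it);
	}
}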

/*
 * Same as list_for_each_entry_safe but starting from current point
 * Iterates <item> through the list starting from <item>
 * It's basically the same macro but without initializing item to the head of
 * the list.
 */
#define list_for_each_entry_safe_from(item, back, list_head, member)     \
	for (back = LIST_ELEM(item->member.n, typeof(item), member);     \
	     &item->member != (list_head);                                \
	     item = back, back = LIST_ELEM(back->member.n, typeof(back), member))

/*
 * Iterate backwards <item> through a list of items of type "typeof(*item)"
 * which are linked via a "struct list" member named <member>. A pointer to
 * the head of the list is passed in <list_head>. No temporary variable is
 * needed. Note that <item> must not be modified during the loop.
 * Example: list_for_each_entry_rev(cur_acl, known_acl, list) { ... };
 */
#define list_for_each_entry_rev(item, list_head, member)                 \
	for (item = LIST_ELEM((list_head)->p, typeof(item), member);     \
	     &item->member != (list_head);                                \
	     item = LIST_ELEM(item->member.p, typeof(item), member))

/*
 * Same as list_for_each_entry_rev but starting from current point
 * Iterate backwards <item> through the list starting from <item>
 * It's basically the same macro but without initializing item to the head of
 * the list.
 */
#define list_for_each_entry_from_rev(item, list_head, member)            \
	for ( ; &item->member != (list_head);                             \
	     item = LIST_ELEM(item->member.p, typeof(item), member))

/*
 * Iterate backwards <item> through a list of items of type "typeof(*item)"
 * which are linked via a "struct list" member named <member>. A pointer to
 * the head of the list is passed in <list_head>. A temporary variable <back>
 * of same type as <item> is needed so that <item> may safely be deleted
 * if needed.
 * Example: list_for_each_entry_safe_rev(cur_acl, tmp, known_acl, list) { ... };
 */
#define list_for_each_entry_safe_rev(item, back, list_head, member)      \
	for (item = LIST_ELEM((list_head)->p, typeof(item), member),     \
	     back = LIST_ELEM(item->member.p, typeof(item), member);     \
	     &item->member != (list_head);                                \
	     item = back, back = LIST_ELEM(back->member.p, typeof(back), member))

/*
 * Same as list_for_each_entry_safe_rev but starting from current point
 * Iterate backwards <item> through the list starting from <item>
 * It's basically the same macro but without initializing item to the head of
 * the list.
 */
#define list_for_each_entry_safe_from_rev(item, back, list_head, member) \
	for (back = LIST_ELEM(item->member.p, typeof(item), member);     \
	     &item->member != (list_head);                                \
	     item = back, back = LIST_ELEM(back->member.p, typeof(back), member))
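
/* The two helpers below convert between struct mt_list and struct list
 * pointers. Both structures carry the same two-pointer layout, so the
 * conversion only reinterprets the pointer, which is done through a union
 * rather than a plain cast.
 */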
static __inline struct list *mt_list_to_list(struct mt_list *list)
{
	union {
		struct mt_list *mt_list;
		struct list *list;
	} mylist;

	mylist.mt_list = list;
	return mylist.list;
}

static __inline struct mt_list *list_to_mt_list(struct list *list)
{
	union {
		struct mt_list *mt_list;
		struct list *list;
	} mylist;

	mylist.list = list;
	return mylist.mt_list;
}

#endif /* _HAPROXY_LIST_H */