Compare commits


No commits in common. "master" and "v3.3-dev4" have entirely different histories.

95 changed files with 393 additions and 1445 deletions

.github/matrix.py vendored

@@ -125,11 +125,9 @@ def main(ref_name):
     # Ubuntu
     if "haproxy-" in ref_name:
         os = "ubuntu-24.04"  # stable branch
-        os_arm = "ubuntu-24.04-arm"  # stable branch
     else:
         os = "ubuntu-24.04"  # development branch
-        os_arm = "ubuntu-24.04-arm"  # development branch

     TARGET = "linux-glibc"
     for CC in ["gcc", "clang"]:
@@ -174,37 +172,36 @@ def main(ref_name):
         # ASAN
-        for os_asan in [os, os_arm]:
-            matrix.append(
-                {
-                    "name": "{}, {}, ASAN, all features".format(os_asan, CC),
-                    "os": os_asan,
-                    "TARGET": TARGET,
-                    "CC": CC,
-                    "FLAGS": [
-                        "USE_OBSOLETE_LINKER=1",
-                        'ARCH_FLAGS="-g -fsanitize=address"',
-                        'OPT_CFLAGS="-O1"',
-                        "USE_ZLIB=1",
-                        "USE_OT=1",
-                        "OT_INC=${HOME}/opt-ot/include",
-                        "OT_LIB=${HOME}/opt-ot/lib",
-                        "OT_RUNPATH=1",
-                        "USE_PCRE2=1",
-                        "USE_PCRE2_JIT=1",
-                        "USE_LUA=1",
-                        "USE_OPENSSL=1",
-                        "USE_WURFL=1",
-                        "WURFL_INC=addons/wurfl/dummy",
-                        "WURFL_LIB=addons/wurfl/dummy",
-                        "USE_DEVICEATLAS=1",
-                        "DEVICEATLAS_SRC=addons/deviceatlas/dummy",
-                        "USE_PROMEX=1",
-                        "USE_51DEGREES=1",
-                        "51DEGREES_SRC=addons/51degrees/dummy/pattern",
-                    ],
-                }
-            )
+        matrix.append(
+            {
+                "name": "{}, {}, ASAN, all features".format(os, CC),
+                "os": os,
+                "TARGET": TARGET,
+                "CC": CC,
+                "FLAGS": [
+                    "USE_OBSOLETE_LINKER=1",
+                    'ARCH_FLAGS="-g -fsanitize=address"',
+                    'OPT_CFLAGS="-O1"',
+                    "USE_ZLIB=1",
+                    "USE_OT=1",
+                    "OT_INC=${HOME}/opt-ot/include",
+                    "OT_LIB=${HOME}/opt-ot/lib",
+                    "OT_RUNPATH=1",
+                    "USE_PCRE2=1",
+                    "USE_PCRE2_JIT=1",
+                    "USE_LUA=1",
+                    "USE_OPENSSL=1",
+                    "USE_WURFL=1",
+                    "WURFL_INC=addons/wurfl/dummy",
+                    "WURFL_LIB=addons/wurfl/dummy",
+                    "USE_DEVICEATLAS=1",
+                    "DEVICEATLAS_SRC=addons/deviceatlas/dummy",
+                    "USE_PROMEX=1",
+                    "USE_51DEGREES=1",
+                    "51DEGREES_SRC=addons/51degrees/dummy/pattern",
+                ],
+            }
+        )

         for compression in ["USE_ZLIB=1"]:
             matrix.append(


@@ -76,7 +76,7 @@ jobs:
       uses: actions/cache@v4
       with:
         path: '~/opt-ot/'
-        key: ${{ matrix.os }}-ot-${{ matrix.CC }}-${{ env.OT_CPP_VERSION }}-${{ contains(matrix.name, 'ASAN') }}
+        key: ot-${{ matrix.CC }}-${{ env.OT_CPP_VERSION }}-${{ contains(matrix.name, 'ASAN') }}
     - name: Install apt dependencies
       if: ${{ startsWith(matrix.os, 'ubuntu-') }}
       run: |


@@ -1,48 +1,6 @@
 ChangeLog :
 ===========
-2025/08/06 : 3.3-dev6
-    - MINOR: acme: implement traces
-    - BUG/MINOR: hlua: take default-path into account with lua-load-per-thread
-    - CLEANUP: counters: rename counters_be_shared_init to counters_be_shared_prepare
-    - MINOR: clock: make global_now_ms a pointer
-    - MINOR: clock: make global_now_ns a pointer as well
-    - MINOR: mux-quic: release conn after shutdown on BE reuse failure
-    - MINOR: session: strengthen connection attach to session
-    - MINOR: session: remove redundant target argument from session_add_conn()
-    - MINOR: session: strengthen idle conn limit check
-    - MINOR: session: do not release conn in session_check_idle_conn()
-    - MINOR: session: streamline session_check_idle_conn() usage
-    - MINOR: muxes: refactor private connection detach
-    - BUG/MEDIUM: mux-quic: ensure Early-data header is set
-    - BUILD: acme: avoid declaring TRACE_SOURCE in acme-t.h
-    - MINOR: acme: emit a log for DNS-01 challenge response
-    - MINOR: acme: emit the DNS-01 challenge details on the dpapi sink
-    - MEDIUM: acme: allow to wait and restart the task for DNS-01
-    - MINOR: acme: update the log for DNS-01
-    - BUG/MINOR: acme: possible integer underflow in acme_txt_record()
-    - BUG/MEDIUM: hlua_fcn: ensure systematic watcher cleanup for server list iterator
-    - MINOR: sample: Add le2dec (little endian to decimal) sample fetch
-    - BUILD: fcgi: fix the struct name of fcgi_flt_ctx
-    - BUILD: compat: provide relaxed versions of the MIN/MAX macros
-    - BUILD: quic: use _MAX() to avoid build issues in pools declarations
-    - BUILD: compat: always set _POSIX_VERSION to ease comparisons
-    - MINOR: implement ha_aligned_alloc() to return aligned memory areas
-    - MINOR: pools: support creating a pool from a pool registration
-    - MINOR: pools: add a new flag to declare static registrations
-    - MINOR: pools: force the name at creation time to be a const.
-    - MEDIUM: pools: change the static pool creation to pass a registration
-    - DEBUG: pools: store the pool registration file name and line number
-    - DEBUG: pools: also retrieve file and line for direct callers of create_pool()
-    - MEDIUM: pools: add an alignment property
-    - MINOR: pools: add macros to register aligned pools
-    - MINOR: pools: add macros to declare pools based on a struct type
-    - MEDIUM: pools: respect pool alignment in allocations
-2025/07/28 : 3.3-dev5
-    - BUG/MEDIUM: queue/stats: also use stream_set_srv_target() for pendconns
-    - DOC: list missing global QUIC settings
 2025/07/26 : 3.3-dev4
     - CLEANUP: server: do not check for duplicates anymore in findserver()
     - REORG: server: move findserver() from proxy.c to server.c


@@ -62,7 +62,6 @@
 #   USE_MEMORY_PROFILING : enable the memory profiler. Linux-glibc only.
 #   USE_LIBATOMIC        : force to link with/without libatomic. Automatic.
 #   USE_PTHREAD_EMULATION: replace pthread's rwlocks with ours
-#   USE_SHM_OPEN         : use shm_open() for features that can make use of shared memory
 #
 # Options can be forced by specifying "USE_xxx=1" or can be disabled by using
 # "USE_xxx=" (empty string). The list of enabled and disabled options for a
@@ -344,7 +343,7 @@ use_opts = USE_EPOLL USE_KQUEUE USE_NETFILTER USE_POLL \
            USE_MATH USE_DEVICEATLAS USE_51DEGREES \
            USE_WURFL USE_OBSOLETE_LINKER USE_PRCTL USE_PROCCTL \
            USE_THREAD_DUMP USE_EVPORTS USE_OT USE_QUIC USE_PROMEX \
-           USE_MEMORY_PROFILING USE_SHM_OPEN \
+           USE_MEMORY_PROFILING \
            USE_STATIC_PCRE USE_STATIC_PCRE2 \
            USE_PCRE USE_PCRE_JIT USE_PCRE2 USE_PCRE2_JIT USE_QUIC_OPENSSL_COMPAT
@@ -383,7 +382,7 @@ ifeq ($(TARGET),linux-glibc)
     USE_POLL USE_TPROXY USE_LIBCRYPT USE_DL USE_RT USE_CRYPT_H USE_NETFILTER \
     USE_CPU_AFFINITY USE_THREAD USE_EPOLL USE_LINUX_TPROXY USE_LINUX_CAP \
     USE_ACCEPT4 USE_LINUX_SPLICE USE_PRCTL USE_THREAD_DUMP USE_NS USE_TFO \
-    USE_GETADDRINFO USE_BACKTRACE USE_SHM_OPEN)
+    USE_GETADDRINFO USE_BACKTRACE)
   INSTALL = install -v
 endif
@@ -402,7 +401,7 @@ ifeq ($(TARGET),linux-musl)
     USE_POLL USE_TPROXY USE_LIBCRYPT USE_DL USE_RT USE_CRYPT_H USE_NETFILTER \
     USE_CPU_AFFINITY USE_THREAD USE_EPOLL USE_LINUX_TPROXY USE_LINUX_CAP \
     USE_ACCEPT4 USE_LINUX_SPLICE USE_PRCTL USE_THREAD_DUMP USE_NS USE_TFO \
-    USE_GETADDRINFO USE_BACKTRACE USE_SHM_OPEN)
+    USE_GETADDRINFO USE_BACKTRACE)
   INSTALL = install -v
 endif


@@ -1,2 +1,2 @@
 $Format:%ci$
-2025/08/06
+2025/07/26


@@ -1 +1 @@
-3.3-dev6
+3.3-dev4


@@ -242,8 +242,8 @@ void promex_register_module(struct promex_module *m)
 }

 /* Pools used to allocate ref on Promex modules and filters */
-DECLARE_STATIC_TYPED_POOL(pool_head_promex_mod_ref, "promex_module_ref", struct promex_module_ref);
-DECLARE_STATIC_TYPED_POOL(pool_head_promex_metric_flt, "promex_metric_filter", struct promex_metric_filter);
+DECLARE_STATIC_POOL(pool_head_promex_mod_ref, "promex_module_ref", sizeof(struct promex_module_ref));
+DECLARE_STATIC_POOL(pool_head_promex_metric_flt, "promex_metric_filter", sizeof(struct promex_metric_filter));

 /* Return the server status. */
 enum promex_srv_state promex_srv_status(struct server *sv)


@@ -3,7 +3,7 @@
                          Configuration Manual
                         ----------------------
                              version 3.3
-                              2025/08/06
+                              2025/07/26

 This document covers the configuration language as implemented in the version
@@ -1744,7 +1744,6 @@ The following keywords are supported in the "global" section :
   - insecure-setuid-wanted
   - issuers-chain-path
   - key-base
-  - limited-quic
   - localpeer
   - log
   - log-send-hostname
@@ -1754,7 +1753,6 @@ The following keywords are supported in the "global" section :
   - lua-prepend-path
   - mworker-max-reloads
   - nbthread
-  - no-quic
   - node
   - numa-cpu-mapping
   - ocsp-update.disable
@@ -1884,7 +1882,6 @@ The following keywords are supported in the "global" section :
   - tune.pool-low-fd-ratio
   - tune.pt.zero-copy-forwarding
   - tune.quic.cc-hystart
-  - tune.quic.cc.cubic.min-losses
   - tune.quic.disable-tx-pacing
   - tune.quic.disable-udp-gso
   - tune.quic.frontend.glitches-threshold
@@ -19901,7 +19898,6 @@ and(value)                                  integer     integer
 b64dec                                      string      binary
 base64                                      binary      string
 be2dec(separator,chunk_size[,truncate])     binary      string
-le2dec(separator,chunk_size[,truncate])     binary      string
 be2hex([separator[,chunk_size[,truncate]]]) binary      string
 bool                                        integer     boolean
 bytes(offset[,length])                      binary      binary
@@ -20142,19 +20138,6 @@ be2dec(<separator>,<chunk_size>[,<truncate>])
       bin(01020304050607),be2dec(,2,1)   # 2587721286
       bin(7f000001),be2dec(.,1)          # 127.0.0.1

-le2dec(<separator>,<chunk_size>[,<truncate>])
-  Converts little-endian binary input sample to a string containing an unsigned
-  integer number per <chunk_size> input bytes. <separator> is inserted every
-  <chunk_size> binary input bytes if specified. The <truncate> flag indicates
-  whether the binary input is truncated at <chunk_size> boundaries. The maximum
-  value for <chunk_size> is limited by the size of long long int (8 bytes).
-
-  Example:
-      bin(01020304050607),le2dec(:,2)    # 513:1284:2055:7
-      bin(01020304050607),le2dec(-,2,1)  # 513-1284-2055
-      bin(01020304050607),le2dec(,2,1)   # 51312842055
-      bin(7f000001),le2dec(.,1)          # 127.0.0.1
-
 be2hex([<separator>[,<chunk_size>[,<truncate>]]])
   Converts big-endian binary input sample to a hex string containing two hex
   digits per input byte. It is used to log or transfer hex dumps of some
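
Note on the removed converter's arithmetic: le2dec reads each chunk
least-significant-byte first, so the leading pair of input bytes 01 02 decodes
to 0x0201 = 513, whereas be2dec's big-endian reading of the same pair gives
0x0102 = 258.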
@@ -30432,8 +30415,8 @@ bits <number>
   but blocking the traffic too long could trigger the watchdog.)

 challenge <string>
-  Takes a challenge type as parameter, this must be http-01 or dns-01. When not
-  used the default is http-01.
+  Takes a challenge type as parameter, this must be HTTP-01 or DNS-01. When not
+  used the default is HTTP-01.

 contact <string>
   The contact email that will be associated to the account key in the CA.
@@ -30476,7 +30459,7 @@ Example:
     directory https://acme-staging-v02.api.letsencrypt.org/directory
     account-key /etc/haproxy/letsencrypt.account.key
     contact john.doe@example.com
-    challenge http-01
+    challenge HTTP-01
     keytype RSA
     bits 2048
     map virt@acme
@@ -30485,7 +30468,7 @@ Example:
     directory https://acme-staging-v02.api.letsencrypt.org/directory
     account-key /etc/haproxy/letsencrypt.account.key
     contact john.doe@example.com
-    challenge http-01
+    challenge HTTP-01
     keytype ECDSA
     curves P-384
     map virt@acme


@@ -1,4 +1,4 @@
-2025-08-11 - Pools structure and API
+2022-02-24 - Pools structure and API

 1. Background
 -------------
@@ -239,6 +239,10 @@ currently in use:
   +------------+ +------------+  / is set at build time
                                    or -dMtag at boot time

+Right now no provisions are made to return objects aligned on larger boundaries
+than those currently covered by malloc() (i.e. two pointers). This need appears
+from time to time and the layout above might evolve a little bit if needed.
+
 4. Storage in the process-wide shared pool
 ------------------------------------------
@@ -353,22 +357,6 @@ struct pool_head *create_pool(char *name, uint size, uint flags)
   returned pointer is the new (or reused) pool head, or NULL upon error.
   Pools created this way must be destroyed using pool_destroy().

-struct pool_head *create_aligned_pool(char *name, uint size, uint align, uint flags)
-  Create a new pool named <name> for objects of size <size> bytes and
-  aligned to <align> bytes (0 meaning use the platform's default). Pool
-  names are truncated to their first 11 characters. Pools of very similar
-  size will usually be merged if both have set the flag MEM_F_SHARED in
-  <flags>. When DEBUG_DONT_SHARE_POOLS was set at build time, or
-  "-dMno-merge" is passed on the executable's command line, the pools
-  also need to have the exact same name to be merged. In addition, unless
-  MEM_F_EXACT is set in <flags>, the object size will usually be rounded
-  up to the size of pointers (16 or 32 bytes). MEM_F_UAF may be set on a
-  per-pool basis to enable the UAF detection only for this specific pool,
-  saving the massive overhead of global usage. The name that will appear
-  in the pool upon merging is the name of the first created pool. The
-  returned pointer is the new (or reused) pool head, or NULL upon error.
-  Pools created this way must be destroyed using pool_destroy().
-
 void *pool_destroy(struct pool_head *pool)
   Destroy pool <pool>, that is, all of its unused objects are freed and
   the structure is freed as well if the pool didn't have any used objects
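
To make the removed API concrete, here is a minimal C usage sketch of
create_aligned_pool() following the semantics described above; the item type,
the 64-byte (cache line) alignment and the error handling are illustrative,
not taken from the tree:

    struct pool_head *pool_head_item;

    /* one pool of cache-line aligned objects, mergeable with similar pools */
    pool_head_item = create_aligned_pool("item", sizeof(struct item), 64, MEM_F_SHARED);
    if (!pool_head_item)
            return 0; /* pool head allocation failed */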
@@ -482,20 +470,6 @@ complicate maintenance.

 A few macros exist to ease the declaration of pools:

-DECLARE_ALIGNED_POOL(ptr, name, size, align)
-  Placed at the top level of a file, this declares a global memory pool
-  as variable <ptr>, name <name> and size <size> bytes per element, all
-  of which will be aligned to <align> bytes. The alignment will be
-  rounded up to the next power of two and will be at least as large as a
-  word on the platform. This is made via a call to REGISTER_ALIGNED_POOL()
-  and by assigning the resulting pointer to variable <ptr>. <ptr> will be
-  created of type "struct pool_head *". If the pool needs to be visible
-  outside of the function (which is likely), it will also need to be
-  declared somewhere as "extern struct pool_head *<ptr>;". It is
-  recommended to place such declarations very early in the source file so
-  that the variable is already known to all subsequent functions which
-  may use it.
-
 DECLARE_POOL(ptr, name, size)
   Placed at the top level of a file, this declares a global memory pool
   as variable <ptr>, name <name> and size <size> bytes per element. This
@@ -507,17 +481,6 @@ DECLARE_POOL(ptr, name, size)
   declarations very early in the source file so that the variable is
   already known to all subsequent functions which may use it.

-DECLARE_STATIC_ALIGNED_POOL(ptr, name, size, align)
-  Placed at the top level of a file, this declares a static memory pool
-  as variable <ptr>, name <name> and size <size> bytes per element, all
-  of which will be aligned to <align> bytes. The alignment will be
-  rounded up to the next power of two and will be at least as large as a
-  word on the platform. This is made via a call to REGISTER_ALIGNED_POOL()
-  and by assigning the resulting pointer to local variable <ptr>. <ptr>
-  will be created of type "static struct pool_head *". It is recommended
-  to place such declarations very early in the source file so that the
-  variable is already known to all subsequent functions which may use it.
-
 DECLARE_STATIC_POOL(ptr, name, size)
   Placed at the top level of a file, this declares a static memory pool
   as variable <ptr>, name <name> and size <size> bytes per element. This
@@ -527,42 +490,6 @@ DECLARE_STATIC_POOL(ptr, name, size)
   early in the source file so that the variable is already known to all
   subsequent functions which may use it.

-DECLARE_STATIC_TYPED_POOL(ptr, name, type[, extra[, align]])
-  Placed at the top level of a file, this declares a static memory pool
-  as variable <ptr>, name <name>, and configured to allocate objects of
-  type <type>. It is optionally possible to grow these objects by <extra>
-  bytes (e.g. if they contain some variable length data at the end), and
-  to force them to be aligned to <align> bytes. If only alignment is
-  desired without extra data, pass 0 as <extra>. Alignment must be at
-  least as large as the type's, and a control is enforced at declaration
-  time so that objects cannot be less aligned than what is promised to
-  the compiler. The default alignment of zero indicates that the default
-  one (from the type) should be used. This is made via a call to
-  REGISTER_ALIGNED_POOL() and by assigning the resulting pointer to local
-  variable <ptr>. <ptr> will be created of type "static struct pool_head
-  *". It is recommended to place such declarations very early in the
-  source file so that the variable is already known to all subsequent
-  functions which may use it.
-
-DECLARE_TYPED_POOL(ptr, name, type[, extra[, align]])
-  Placed at the top level of a file, this declares a global memory pool
-  as variable <ptr>, name <name>, and configured to allocate objects of
-  type <type>. It is optionally possible to grow these objects by <extra>
-  bytes (e.g. if they contain some variable length data at the end), and
-  to force them to be aligned to <align> bytes. If only alignment is
-  desired without extra data, pass 0 as <extra>. Alignment must be at
-  least as large as the type's, and a control is enforced at declaration
-  time so that objects cannot be less aligned than what is promised to
-  the compiler. The default alignment of zero indicates that the default
-  one (from the type) should be used. This is made via a call to
-  REGISTER_ALIGNED_POOL() and by assigning the resulting pointer to
-  variable <ptr>. <ptr> will be created of type "struct pool_head *". If
-  the pool needs to be visible outside of the function (which is likely),
-  it will also need to be declared somewhere as "extern struct pool_head
-  *<ptr>;". It is recommended to place such declarations very early in
-  the source file so that the variable is already known to all subsequent
-  functions which may use it.
-
 6. Build options
 ----------------
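
The promex hunk earlier in this diff shows the removed typed macros in real
use; a condensed sketch of both forms follows (the struct conn_ctx type, the
extra size and the alignment values are illustrative):

    /* static pool sized and aligned for the type itself */
    DECLARE_STATIC_TYPED_POOL(pool_head_conn_ctx, "conn_ctx", struct conn_ctx);

    /* global pool with 64 extra bytes per object, forced to 64-byte alignment */
    DECLARE_TYPED_POOL(pool_head_conn_msg, "conn_msg", struct conn_ctx, 64, 64);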


@@ -200,12 +200,6 @@ list of options is :
   -c : only performs a check of the configuration files and exits before trying
      to bind. The exit status is zero if everything is OK, or non-zero if an
      error is encountered. Presence of warnings will be reported if any.
-     By default this option does not report a success message. Combined with
-     "-V" this will print the message "Configuration file is valid" upon
-     success.
-     Scripts must use the exit status to determine the success of the
-     command.

   -cc : evaluates a condition as used within a conditional block of the
      configuration. The exit status is zero if the condition is true, 1 if the


@@ -51,11 +51,9 @@ enum http_st {
 };

 struct acme_auth {
-	struct ist dns;    /* dns entry */
 	struct ist auth;   /* auth URI */
 	struct ist chall;  /* challenge URI */
 	struct ist token;  /* token */
-	int ready;         /* is the challenge ready ? */
 	void *next;
 };
@@ -81,20 +79,6 @@ struct acme_ctx {
 	X509_REQ *req;
 	struct ist finalize;
 	struct ist certificate;
-	struct task *task;
 	struct mt_list el;
 };

-#define ACME_EV_SCHED (1ULL << 0)  /* scheduling wakeup */
-#define ACME_EV_NEW   (1ULL << 1)  /* new task */
-#define ACME_EV_TASK  (1ULL << 2)  /* Task handler */
-#define ACME_EV_REQ   (1ULL << 3)  /* HTTP Request */
-#define ACME_EV_RES   (1ULL << 4)  /* HTTP Response */
-
-#define ACME_VERB_CLEAN    1
-#define ACME_VERB_MINIMAL  2
-#define ACME_VERB_SIMPLE   3
-#define ACME_VERB_ADVANCED 4
-#define ACME_VERB_COMPLETE 5
-
 #endif


@@ -620,130 +620,9 @@ struct mem_stats {
 	_HA_ATOMIC_ADD(&_.size, __y); \
 	strdup(__x); \
 })

-#undef ha_aligned_alloc
-#define ha_aligned_alloc(a,s) ({ \
-	size_t __a = (a); \
-	size_t __s = (s); \
-	static struct mem_stats _ __attribute__((used,__section__("mem_stats"),__aligned__(sizeof(void*)))) = { \
-		.caller = { \
-			.file = __FILE__, .line = __LINE__, \
-			.what = MEM_STATS_TYPE_MALLOC, \
-			.func = __func__, \
-		}, \
-	}; \
-	HA_WEAK(__start_mem_stats); \
-	HA_WEAK(__stop_mem_stats); \
-	_HA_ATOMIC_INC(&_.calls); \
-	_HA_ATOMIC_ADD(&_.size, __s); \
-	_ha_aligned_alloc(__a, __s); \
-})
-
-#undef ha_aligned_zalloc
-#define ha_aligned_zalloc(a,s) ({ \
-	size_t __a = (a); \
-	size_t __s = (s); \
-	static struct mem_stats _ __attribute__((used,__section__("mem_stats"),__aligned__(sizeof(void*)))) = { \
-		.caller = { \
-			.file = __FILE__, .line = __LINE__, \
-			.what = MEM_STATS_TYPE_MALLOC, \
-			.func = __func__, \
-		}, \
-	}; \
-	HA_WEAK(__start_mem_stats); \
-	HA_WEAK(__stop_mem_stats); \
-	_HA_ATOMIC_INC(&_.calls); \
-	_HA_ATOMIC_ADD(&_.size, __s); \
-	_ha_aligned_zalloc(__a, __s); \
-})
-
-#undef ha_aligned_alloc_safe
-#define ha_aligned_alloc_safe(a,s) ({ \
-	size_t __a = (a); \
-	size_t __s = (s); \
-	static struct mem_stats _ __attribute__((used,__section__("mem_stats"),__aligned__(sizeof(void*)))) = { \
-		.caller = { \
-			.file = __FILE__, .line = __LINE__, \
-			.what = MEM_STATS_TYPE_MALLOC, \
-			.func = __func__, \
-		}, \
-	}; \
-	HA_WEAK(__start_mem_stats); \
-	HA_WEAK(__stop_mem_stats); \
-	_HA_ATOMIC_INC(&_.calls); \
-	_HA_ATOMIC_ADD(&_.size, __s); \
-	_ha_aligned_alloc_safe(__a, __s); \
-})
-
-#undef ha_aligned_zalloc_safe
-#define ha_aligned_zalloc_safe(a,s) ({ \
-	size_t __a = (a); \
-	size_t __s = (s); \
-	static struct mem_stats _ __attribute__((used,__section__("mem_stats"),__aligned__(sizeof(void*)))) = { \
-		.caller = { \
-			.file = __FILE__, .line = __LINE__, \
-			.what = MEM_STATS_TYPE_MALLOC, \
-			.func = __func__, \
-		}, \
-	}; \
-	HA_WEAK(__start_mem_stats); \
-	HA_WEAK(__stop_mem_stats); \
-	_HA_ATOMIC_INC(&_.calls); \
-	_HA_ATOMIC_ADD(&_.size, __s); \
-	_ha_aligned_zalloc_safe(__a, __s); \
-})
-
-#undef ha_aligned_free
-#define ha_aligned_free(x) ({ \
-	typeof(x) __x = (x); \
-	static struct mem_stats _ __attribute__((used,__section__("mem_stats"),__aligned__(sizeof(void*)))) = { \
-		.caller = { \
-			.file = __FILE__, .line = __LINE__, \
-			.what = MEM_STATS_TYPE_FREE, \
-			.func = __func__, \
-		}, \
-	}; \
-	HA_WEAK(__start_mem_stats); \
-	HA_WEAK(__stop_mem_stats); \
-	if (__builtin_constant_p((x))) { \
-		HA_LINK_ERROR(call_to_ha_aligned_free_attempts_to_free_a_constant); \
-	} \
-	if (__x) \
-		_HA_ATOMIC_INC(&_.calls); \
-	_ha_aligned_free(__x); \
-})
-
-#undef ha_aligned_free_size
-#define ha_aligned_free_size(p,s) ({ \
-	void *__p = (p); size_t __s = (s); \
-	static struct mem_stats _ __attribute__((used,__section__("mem_stats"),__aligned__(sizeof(void*)))) = { \
-		.caller = { \
-			.file = __FILE__, .line = __LINE__, \
-			.what = MEM_STATS_TYPE_FREE, \
-			.func = __func__, \
-		}, \
-	}; \
-	HA_WEAK(__start_mem_stats); \
-	HA_WEAK(__stop_mem_stats); \
-	if (__builtin_constant_p((p))) { \
-		HA_LINK_ERROR(call_to_ha_aligned_free_attempts_to_free_a_constant); \
-	} \
-	if (__p) { \
-		_HA_ATOMIC_INC(&_.calls); \
-		_HA_ATOMIC_ADD(&_.size, __s); \
-	} \
-	_ha_aligned_free(__p); \
-})

 #else // DEBUG_MEM_STATS

 #define will_free(x, y) do { } while (0)
-#define ha_aligned_alloc(a,s)       _ha_aligned_alloc(a, s)
-#define ha_aligned_zalloc(a,s)      _ha_aligned_zalloc(a, s)
-#define ha_aligned_alloc_safe(a,s)  _ha_aligned_alloc_safe(a, s)
-#define ha_aligned_zalloc_safe(a,s) _ha_aligned_zalloc_safe(a, s)
-#define ha_aligned_free(p)          _ha_aligned_free(p)
-#define ha_aligned_free_size(p,s)   _ha_aligned_free(p)

 #endif /* DEBUG_MEM_STATS*/


@@ -28,7 +28,7 @@
 extern struct timeval start_date;        /* the process's start date in wall-clock time */
 extern struct timeval ready_date;        /* date when the process was considered ready */
 extern ullong start_time_ns;             /* the process's start date in internal monotonic time (ns) */
-extern volatile ullong *global_now_ns;   /* common monotonic date between all threads, in ns (wraps every 585 yr) */
+extern volatile ullong global_now_ns;    /* common monotonic date between all threads, in ns (wraps every 585 yr) */
 extern THREAD_LOCAL ullong now_ns;       /* internal monotonic date derived from real clock, in ns (wraps every 585 yr) */
 extern THREAD_LOCAL struct timeval date; /* the real current date (wall-clock time) */
@@ -49,8 +49,6 @@ uint clock_report_idle(void);
 void clock_leaving_poll(int timeout, int interrupted);
 void clock_entering_poll(void);
 void clock_adjust_now_offset(void);
-void clock_set_now_offset(llong ofs);
-llong clock_get_now_offset(void);

 static inline void clock_update_date(int max_wait, int interrupted)
 {


@@ -94,21 +94,11 @@ typedef struct { } empty_t;
 # endif
 #endif

-/* unsafe ones for use with constant macros needed in initializers */
-#ifndef _MIN
-#define _MIN(a, b) ((a < b) ? a : b)
-#endif
-
-#ifndef _MAX
-#define _MAX(a, b) ((a > b) ? a : b)
-#endif
-
-/* safe versions for use anywhere except in initializers */
 #ifndef MIN
 #define MIN(a, b) ({ \
 	typeof(a) _a = (a); \
 	typeof(a) _b = (b); \
-	_MIN(_a, _b); \
+	((_a < _b) ? _a : _b); \
 })
 #endif
@@ -116,15 +106,10 @@ typedef struct { } empty_t;
 #define MAX(a, b) ({ \
 	typeof(a) _a = (a); \
 	typeof(a) _b = (b); \
-	_MAX(_a, _b); \
+	((_a > _b) ? _a : _b); \
 })
 #endif

-/* always set a _POSIX_VERSION if there isn't any, in order to ease compares */
-#ifndef _POSIX_VERSION
-# define _POSIX_VERSION 0
-#endif
-
 /* this is for libc5 for example */
 #ifndef TCP_NODELAY
 #define TCP_NODELAY 1
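
The point of the removed relaxed forms is that the ({ ... })
statement-expression versions of MIN/MAX are not constant expressions, so they
cannot size arrays or appear in static initializers, while _MIN()/_MAX()
expand to plain conditionals and can; QUIC_MAX_CC_BUFSIZE further down in this
diff relies on exactly that. A minimal sketch with illustrative values:

    /* valid: _MAX() is a plain conditional, hence a constant expression */
    static char cc_buf[_MAX(1252, 1232)];

    /* would not compile: statement expressions are not constant */
    /* static char cc_buf2[MAX(1252, 1232)]; */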


@@ -28,7 +28,7 @@
 #include <haproxy/guid-t.h>

 int counters_fe_shared_prepare(struct fe_counters_shared *counters, const struct guid_node *guid);
-int counters_be_shared_prepare(struct be_counters_shared *counters, const struct guid_node *guid);
+int counters_be_shared_init(struct be_counters_shared *counters, const struct guid_node *guid);
 void counters_fe_shared_drop(struct fe_counters_shared *counters);
 void counters_be_shared_drop(struct be_counters_shared *counters);


@@ -12,16 +12,7 @@ int guid_insert(enum obj_type *obj_type, const char *uid, char **errmsg);
 void guid_remove(struct guid_node *guid);
 struct guid_node *guid_lookup(const char *uid);

-/* Returns the actual text key associated to <guid> node or NULL if not
- * set
- */
-static inline const char *guid_get(const struct guid_node *guid)
-{
-	return guid->node.key;
-}
-
 int guid_is_valid_fmt(const char *uid, char **errmsg);
 char *guid_name(const struct guid_node *guid);
-int guid_count(void);

 #endif /* _HAPROXY_GUID_H */


@@ -14,7 +14,6 @@ extern struct list post_server_check_list;
 extern struct list per_thread_alloc_list;
 extern struct list per_thread_init_list;
 extern struct list post_deinit_list;
-extern struct list post_deinit_master_list;
 extern struct list proxy_deinit_list;
 extern struct list server_deinit_list;
 extern struct list per_thread_free_list;
@@ -25,7 +24,6 @@ void hap_register_post_check(int (*fct)());
 void hap_register_post_proxy_check(int (*fct)(struct proxy *));
 void hap_register_post_server_check(int (*fct)(struct server *));
 void hap_register_post_deinit(void (*fct)());
-void hap_register_post_deinit_master(void (*fct)());
 void hap_register_proxy_deinit(void (*fct)(struct proxy *));
 void hap_register_server_deinit(void (*fct)(struct server *));
@@ -65,10 +63,6 @@ void hap_register_unittest(const char *name, int (*fct)(int, char **));
 #define REGISTER_POST_DEINIT(fct) \
 	INITCALL1(STG_REGISTER, hap_register_post_deinit, (fct))

-/* simplified way to declare a post-deinit (master process when launched in master/worker mode) callback in a file */
-#define REGISTER_POST_DEINIT_MASTER(fct) \
-	INITCALL1(STG_REGISTER, hap_register_post_deinit_master, (fct))
-
 /* simplified way to declare a proxy-deinit callback in a file */
 #define REGISTER_PROXY_DEINIT(fct) \
 	INITCALL1(STG_REGISTER, hap_register_proxy_deinit, (fct))


@@ -284,11 +284,10 @@ static __inline void watcher_attach(struct watcher *w, void *target)
 	MT_LIST_APPEND(list, &w->el);
 }

-/* Untracks target via <w> watcher. Does nothing if <w> is not attached */
+/* Untracks target via <w> watcher. Invalid if <w> is not attached first. */
 static __inline void watcher_detach(struct watcher *w)
 {
-	if (!MT_LIST_INLIST(&w->el))
-		return;
+	BUG_ON_HOT(!MT_LIST_INLIST(&w->el));

 	*w->pptr = NULL;
 	MT_LIST_DELETE(&w->el);
 }


@@ -25,7 +25,6 @@
 #include <sys/mman.h>
 #include <stdlib.h>
 #include <haproxy/api.h>
-#include <haproxy/tools.h>

 /************* normal allocator *************/
@@ -33,9 +32,9 @@
 /* allocates an area of size <size> and returns it. The semantics are similar
  * to those of malloc().
  */
-static forceinline void *pool_alloc_area(size_t size, size_t align)
+static forceinline void *pool_alloc_area(size_t size)
 {
-	return ha_aligned_alloc(align, size);
+	return malloc(size);
 }

 /* frees an area <area> of size <size> allocated by pool_alloc_area(). The
@@ -44,7 +43,8 @@ static forceinline void *pool_alloc_area(size_t size)
  */
 static forceinline void pool_free_area(void *area, size_t __maybe_unused size)
 {
-	ha_aligned_free_size(area, size);
+	will_free(area, size);
+	free(area);
 }

 /************* use-after-free allocator *************/
@@ -52,15 +52,14 @@ static forceinline void pool_free_area(void *area, size_t __maybe_unused size)
 /* allocates an area of size <size> and returns it. The semantics are similar
  * to those of malloc(). However the allocation is rounded up to 4kB so that a
  * full page is allocated. This ensures the object can be freed alone so that
- * future dereferences are easily detected. The returned object is always at
- * least 16-bytes aligned to avoid issues with unaligned structure objects, and
- * in any case, is always at least aligned as required by the pool, though no
- * more than 4096. In case some padding is added, the area's start address is
- * copied at the end of the padding to help detect underflows.
+ * future dereferences are easily detected. The returned object is always
+ * 16-bytes aligned to avoid issues with unaligned structure objects. In case
+ * some padding is added, the area's start address is copied at the end of the
+ * padding to help detect underflows.
  */
-static inline void *pool_alloc_area_uaf(size_t size, size_t align)
+static inline void *pool_alloc_area_uaf(size_t size)
 {
-	size_t pad = (4096 - size) & 0xFF0 & -align;
+	size_t pad = (4096 - size) & 0xFF0;
 	void *ret;

 	ret = mmap(NULL, (size + 4095) & -4096, PROT_READ|PROT_WRITE, MAP_PRIVATE|MAP_ANONYMOUS, -1, 0);
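
A worked example of the padding arithmetic above: with the dev4 formula and
size = 100, pad = (4096 - 100) & 0xFF0 = 0xF9C & 0xFF0 = 0xF90 (3984), a
multiple of 16, so the object spans bytes 3984..4083 of the mapped page and
ends close to the unmapped boundary. The extra "& -align" in the master
variant additionally clears low bits of the padding so that the start offset
stays a multiple of the requested alignment, e.g. 0xF90 & -64 = 0xF80 (3968).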


@@ -28,7 +28,6 @@
 #define MEM_F_SHARED   0x1
 #define MEM_F_EXACT    0x2
 #define MEM_F_UAF      0x4
-#define MEM_F_STATREG  0x8 /* static registration: do not free it! */

 /* A special pointer for the pool's free_list that indicates someone is
  * currently manipulating it. Serves as a short-lived lock.
@@ -70,12 +69,9 @@ struct pool_cache_head {
  */
 struct pool_registration {
 	struct list list;        /* link element */
-	const char *name;        /* name of the pool */
-	const char *file;        /* where the pool is declared */
-	unsigned int line;       /* line in the file where the pool is declared, 0 if none */
+	char name[12];           /* name of the pool */
 	unsigned int size;       /* expected object size */
 	unsigned int flags;      /* MEM_F_* */
-	unsigned int type_align; /* type-imposed alignment; 0=unspecified */
 	unsigned int align;      /* expected alignment; 0=unspecified */
 };
@@ -129,7 +125,6 @@ struct pool_head {
 	unsigned int minavail;   /* how many chunks are expected to be used */
 	unsigned int size;       /* chunk size */
 	unsigned int flags;      /* MEM_F_* */
-	unsigned int align;      /* alignment size */
 	unsigned int users;      /* number of pools sharing this zone */
 	unsigned int alloc_sz;   /* allocated size (includes hidden fields) */
 	unsigned int sum_size;   /* sum of all registered users' size */


@@ -30,80 +30,19 @@
 #include <haproxy/pool-t.h>
 #include <haproxy/thread.h>

-/* This creates a pool_reg and registers a call to create_pool_callback(ptr) with it.
- * Do not use this one, use REGISTER_POOL() instead.
- */
-#define __REGISTER_POOL(_line, _ptr, _name, _size, _type_align, _align) \
-	static struct pool_registration __pool_reg_##_line = { \
-		.name = _name, \
-		.file = __FILE__, \
-		.line = __LINE__, \
-		.size = _size, \
-		.flags = MEM_F_STATREG, \
-		.type_align = _type_align, \
-		.align = _align, \
-	}; \
-	INITCALL3(STG_POOL, create_pool_callback, (_ptr), (_name), &__pool_reg_##_line);
-
-/* intermediary level for line number resolution, do not use this one, use
- * REGISTER_POOL() instead.
- */
-#define _REGISTER_POOL(line, ptr, name, size, align, type_align) \
-	__REGISTER_POOL(line, ptr, name, size, align, type_align)
-
-/* This registers a call to create_pool_callback(ptr) with these args */
+/* This registers a call to create_pool_callback(ptr, name, size) */
 #define REGISTER_POOL(ptr, name, size) \
-	_REGISTER_POOL(__LINE__, ptr, name, size, 0, 0)
+	INITCALL3(STG_POOL, create_pool_callback, (ptr), (name), (size))

 /* This macro declares a pool head <ptr> and registers its creation */
 #define DECLARE_POOL(ptr, name, size) \
 	struct pool_head *(ptr) __read_mostly = NULL; \
-	_REGISTER_POOL(__LINE__, &ptr, name, size, 0, 0)
+	REGISTER_POOL(&ptr, name, size)

 /* This macro declares a static pool head <ptr> and registers its creation */
 #define DECLARE_STATIC_POOL(ptr, name, size) \
 	static struct pool_head *(ptr) __read_mostly; \
-	_REGISTER_POOL(__LINE__, &ptr, name, size, 0, 0)
-
-/*** below are the aligned pool macros, taking one extra arg for alignment ***/
-
-/* This registers a call to create_pool_callback(ptr) with these args */
-#define REGISTER_ALIGNED_POOL(ptr, name, size, align) \
-	_REGISTER_POOL(__LINE__, ptr, name, size, 0, align)
-
-/* This macro declares an aligned pool head <ptr> and registers its creation */
-#define DECLARE_ALIGNED_POOL(ptr, name, size, align) \
-	struct pool_head *(ptr) __read_mostly = NULL; \
-	_REGISTER_POOL(__LINE__, &ptr, name, size, 0, align)
-
-/* This macro declares a static aligned pool head <ptr> and registers its creation */
-#define DECLARE_STATIC_ALIGNED_POOL(ptr, name, size, align) \
-	static struct pool_head *(ptr) __read_mostly; \
-	_REGISTER_POOL(__LINE__, &ptr, name, size, 0, align)
-
-/*** below are the typed pool macros, taking a type and an extra size ***/
-
-/* This is only used by REGISTER_TYPED_POOL below */
-#define _REGISTER_TYPED_POOL(ptr, name, type, extra, align, ...) \
-	_REGISTER_POOL(__LINE__, ptr, name, sizeof(type) + extra, __alignof__(type), align)
-
-/* This registers a call to create_pool_callback(ptr) with these args.
- * It supports two optional args:
- *   - extra: the extra size to be allocated at the end of the type. Def: 0.
- *   - align: the desired alignment on the type. Def: 0 = same as type.
- */
-#define REGISTER_TYPED_POOL(ptr, name, type, args...) \
-	_REGISTER_TYPED_POOL(ptr, name, type, ##args, 0, 0)
-
-/* This macro declares a typed pool head <ptr> and registers its creation */
-#define DECLARE_TYPED_POOL(ptr, name, type, args...) \
-	struct pool_head *(ptr) __read_mostly = NULL; \
-	_REGISTER_TYPED_POOL(&ptr, name, type, ##args, 0, 0)
-
-/* This macro declares a static typed pool head <ptr> and registers its creation */
-#define DECLARE_STATIC_TYPED_POOL(ptr, name, type, args...) \
-	static struct pool_head *(ptr) __read_mostly; \
-	_REGISTER_TYPED_POOL(&ptr, name, type, ##args, 0, 0)
+	REGISTER_POOL(&ptr, name, size)

 /* By default, free objects are linked by a pointer stored at the beginning of
  * the memory area. When DEBUG_MEMORY_POOLS is set, the allocated area is
@@ -184,22 +123,14 @@ unsigned long long pool_total_allocated(void);
 unsigned long long pool_total_used(void);
 void pool_flush(struct pool_head *pool);
 void pool_gc(struct pool_head *pool_ctx);
-struct pool_head *create_pool_with_loc(const char *name, unsigned int size, unsigned int align,
-                                       unsigned int flags, const char *file, unsigned int line);
-struct pool_head *create_pool_from_reg(const char *name, struct pool_registration *reg);
-void create_pool_callback(struct pool_head **ptr, char *name, struct pool_registration *reg);
+struct pool_head *create_pool(char *name, unsigned int size, unsigned int flags);
+void create_pool_callback(struct pool_head **ptr, char *name, unsigned int size);
 void *pool_destroy(struct pool_head *pool);
 void pool_destroy_all(void);
 void *__pool_alloc(struct pool_head *pool, unsigned int flags);
 void __pool_free(struct pool_head *pool, void *ptr);
 void pool_inspect_item(const char *msg, struct pool_head *pool, const void *item, const void *caller, ssize_t ofs);

-#define create_pool(name, size, flags) \
-	create_pool_with_loc(name, size, 0, flags, __FILE__, __LINE__)
-
-#define create_aligned_pool(name, size, align, flags) \
-	create_pool_with_loc(name, size, align, flags, __FILE__, __LINE__)
-
 /****************** Thread-local cache management ******************/


@@ -448,9 +448,9 @@ struct quic_conn_closed {
 #define QUIC_FL_CONN_ANTI_AMPLIFICATION_REACHED (1U << 0)
 #define QUIC_FL_CONN_SPIN_BIT                   (1U << 1) /* Spin bit set by remote peer */
 #define QUIC_FL_CONN_NEED_POST_HANDSHAKE_FRMS   (1U << 2) /* HANDSHAKE_DONE must be sent */
-#define QUIC_FL_CONN_IS_BACK                    (1U << 3) /* conn used on backend side */
+/* gap here */
 #define QUIC_FL_CONN_ACCEPT_REGISTERED          (1U << 4)
-#define QUIC_FL_CONN_UDP_GSO_EIO                (1U << 5) /* GSO disabled due to an EIO on the same listener */
+/* gap here */
 #define QUIC_FL_CONN_IDLE_TIMER_RESTARTED_AFTER_READ (1U << 6)
 #define QUIC_FL_CONN_RETRANS_NEEDED             (1U << 7)
 #define QUIC_FL_CONN_RETRANS_OLD_DATA           (1U << 8) /* retransmission in progress for probing with already sent data */
@@ -488,9 +488,7 @@ static forceinline char *qc_show_flags(char *buf, size_t len, const char *delim,
 	_(QUIC_FL_CONN_ANTI_AMPLIFICATION_REACHED,
 	_(QUIC_FL_CONN_SPIN_BIT,
 	_(QUIC_FL_CONN_NEED_POST_HANDSHAKE_FRMS,
-	_(QUIC_FL_CONN_IS_BACK,
 	_(QUIC_FL_CONN_ACCEPT_REGISTERED,
-	_(QUIC_FL_CONN_UDP_GSO_EIO,
 	_(QUIC_FL_CONN_IDLE_TIMER_RESTARTED_AFTER_READ,
 	_(QUIC_FL_CONN_RETRANS_NEEDED,
 	_(QUIC_FL_CONN_RETRANS_OLD_DATA,
@@ -509,7 +507,7 @@ static forceinline char *qc_show_flags(char *buf, size_t len, const char *delim,
 	_(QUIC_FL_CONN_EXP_TIMER,
 	_(QUIC_FL_CONN_CLOSING,
 	_(QUIC_FL_CONN_DRAINING,
-	_(QUIC_FL_CONN_IMMEDIATE_CLOSE)))))))))))))))))))))))));
+	_(QUIC_FL_CONN_IMMEDIATE_CLOSE)))))))))))))))))))))));
 	/* epilogue */
 	_(~0U);
 	return buf;


@@ -82,12 +82,6 @@ void qc_check_close_on_released_mux(struct quic_conn *qc);
 int quic_stateless_reset_token_cpy(unsigned char *pos, size_t len,
                                    const unsigned char *salt, size_t saltlen);

-/* Returns true if <qc> is used on the backend side (as a client). */
-static inline int qc_is_back(const struct quic_conn *qc)
-{
-	return qc->flags & QUIC_FL_CONN_IS_BACK;
-}
-
 /* Free the CIDs attached to <conn> QUIC connection. */
 static inline void free_quic_conn_cids(struct quic_conn *conn)
 {


@@ -3,7 +3,7 @@
 #define QUIC_MIN_CC_PKTSIZE 128
 #define QUIC_DGRAM_HEADLEN (sizeof(uint16_t) + sizeof(void *))
-#define QUIC_MAX_CC_BUFSIZE _MAX(QUIC_INITIAL_IPV6_MTU, QUIC_INITIAL_IPV4_MTU)
+#define QUIC_MAX_CC_BUFSIZE MAX(QUIC_INITIAL_IPV6_MTU, QUIC_INITIAL_IPV4_MTU)

 /* Sendmsg input buffer cannot be bigger than 65535 bytes. This comes from UDP
  * header which uses a 2-bytes length field. QUIC datagrams are limited to 1252


@@ -171,31 +171,25 @@ static inline void session_unown_conn(struct session *sess, struct connection *c
 	}
 }

-/* Add the connection <conn> to the private conns list of session <sess>. Each
- * connection is indexed by its respective target in the session. Nothing is
- * performed if the connection is already in the session list.
- *
- * Returns true if conn is inserted or already present, else false if a failure
- * occurs during insertion.
- */
-static inline int session_add_conn(struct session *sess, struct connection *conn)
+/* Add the connection <conn> to the private conns list of session <sess>. This
+ * function is called only if the connection is private. Nothing is performed
+ * if the connection is already in the session list or if the session does not
+ * own the connection.
+ */
+static inline int session_add_conn(struct session *sess, struct connection *conn, void *target)
 {
 	struct sess_priv_conns *pconns = NULL;
 	struct server *srv = objt_server(conn->target);
 	int found = 0;

-	/* Connection target is used to index it in the session. Only BE conns are expected in session list. */
-	BUG_ON(!conn->target || objt_listener(conn->target));
+	BUG_ON(objt_listener(conn->target));

-	/* A connection cannot be attached already to another session. */
-	BUG_ON(conn->owner && conn->owner != sess);
-
-	/* Already attached to the session */
-	if (!LIST_ISEMPTY(&conn->sess_el))
+	/* Already attached to the session, or not the connection owner */
+	if (!LIST_ISEMPTY(&conn->sess_el) || (conn->owner && conn->owner != sess))
 		return 1;

 	list_for_each_entry(pconns, &sess->priv_conns, sess_el) {
-		if (pconns->target == conn->target) {
+		if (pconns->target == target) {
 			found = 1;
 			break;
 		}
@@ -205,7 +199,7 @@ static inline int session_add_conn(struct session *sess, struct connection *conn
 		pconns = pool_alloc(pool_head_sess_priv_conns);
 		if (!pconns)
 			return 0;
-		pconns->target = conn->target;
+		pconns->target = target;
 		LIST_INIT(&pconns->conn_list);
 		LIST_APPEND(&sess->priv_conns, &pconns->sess_el);
@@ -225,34 +219,25 @@ static inline int session_add_conn(struct session *sess, struct connection *conn
 	return 1;
 }

-/* Check that session <sess> is able to keep idle connection <conn>. This must
- * be called each time a connection stored in a session becomes idle.
- *
- * Returns 0 if the connection is kept, else non-zero if the connection was
- * explicitly removed from the session.
- */
+/* Returns 0 if the session can keep the idle conn, -1 if it was destroyed. The
+ * connection must be private.
+ */
 static inline int session_check_idle_conn(struct session *sess, struct connection *conn)
 {
-	/* Connection must be attached to the session prior to this function call. */
-	BUG_ON(!conn->owner || conn->owner != sess);
-
-	/* Connection is not attached to a session. */
-	if (!conn->owner)
+	/* Another session owns this connection */
+	if (conn->owner != sess)
 		return 0;

-	/* Ensure conn is not already accounted as idle to prevent sess idle count excess increment. */
-	BUG_ON(conn->flags & CO_FL_SESS_IDLE);
-
 	if (sess->idle_conns >= sess->fe->max_out_conns) {
 		session_unown_conn(sess, conn);
 		conn->owner = NULL;
-		conn->flags &= ~CO_FL_SESS_IDLE;
-		conn->mux->destroy(conn->ctx);
 		return -1;
-	}
-	else {
+	} else {
 		conn->flags |= CO_FL_SESS_IDLE;
 		sess->idle_conns++;
 	}

 	return 0;
 }
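
A hypothetical caller of the master-side API, following the contracts
documented in the deleted comments above (the function name and its error
policy are illustrative, not code from the tree):

    static void mux_detach_private(struct connection *conn, struct session *sess)
    {
            /* index the connection by its target within the session */
            if (!session_add_conn(sess, conn)) {
                    conn->mux->destroy(conn->ctx); /* insertion failed */
                    return;
            }

            /* account the conn as idle; on excess it is destroyed internally */
            if (session_check_idle_conn(sess, conn) != 0)
                    return; /* already released, do not touch conn */
    }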


@@ -258,7 +258,6 @@ struct ssl_sock_ctx {
 	unsigned long error_code; /* last error code of the error stack */
 	struct buffer early_buf;  /* buffer to store the early data received */
 	int sent_early_data;      /* Amount of early data we sent so far */
-	int can_send_early_data;  /* We did not start the handshake yet so we can send early data */

 #ifdef USE_QUIC
 	struct quic_conn *qc;


@@ -64,7 +64,7 @@
 /* currently updated and stored in time.c */
 extern THREAD_LOCAL unsigned int now_ms; /* internal date in milliseconds (may wrap) */
-extern volatile unsigned int *global_now_ms;
+extern volatile unsigned int global_now_ms;

 /* return 1 if tick is set, otherwise 0 */
 static inline int tick_isset(int expire)


@ -1178,100 +1178,6 @@ static inline void *my_realloc2(void *ptr, size_t size)
return ret; return ret;
} }
/* portable memalign(): tries to accommodate OS specificities, and may fall
* back to plain malloc() if not supported, meaning that alignment guarantees
* are only a performance bonus but not granted. The caller is responsible for
* guaranteeing that the requested alignment is at least sizeof(void*) and a
* power of two. If uncertain, use ha_aligned_alloc() instead. The pointer
* needs to be passed to ha_aligned_free() for freeing (due to cygwin). Please
* use ha_aligned_alloc() instead (which does perform accounting).
*/
static inline void *_ha_aligned_alloc(size_t alignment, size_t size)
{
/* let's consider that most OSes have posix_memalign() and make the
* exception for the other ones. This way if an OS fails to build,
* we'll know about it and handle it as a new exception instead of
* relying on old fallbacks that may break (e.g. most BSDs have
* dropped memalign()).
*/
#if defined(_WIN32)
/* MINGW (Cygwin) uses _aligned_malloc() */
return _aligned_malloc(size, alignment);
#elif _POSIX_VERSION < 200112L || defined(__sun)
/* Old OSes or Solaris */
return memalign(alignment, size);
#else
void *ret;
/* most BSD, Linux since glibc 2.2, Solaris 11 */
if (posix_memalign(&ret, alignment, size) == 0)
return ret;
else
return NULL;
#endif
}
/* Like above but zeroing the area */
static inline void *_ha_aligned_zalloc(size_t alignment, size_t size)
{
void *ret = _ha_aligned_alloc(alignment, size);
if (ret)
memset(ret, 0, size);
return ret;
}
/* portable memalign(): tries to accommodate OS specificities, and may fall
* back to plain malloc() if not supported, meaning that alignment guarantees
* are only a performance bonus but not granted. The size will automatically be
* rounded up to the next power of two and set to a minimum of sizeof(void*).
* The checks are cheap and generally optimized away by the compiler since most
* input arguments are build time constants. The pointer needs to be passed to
* ha_aligned_free() for freeing (due to cygwin). Please use
* ha_aligned_alloc_safe() instead (which does perform accounting).
*/
static inline void *_ha_aligned_alloc_safe(size_t alignment, size_t size)
{
if (unlikely(alignment < sizeof(void*)))
alignment = sizeof(void*);
else if (unlikely(alignment & (alignment - 1))) {
/* not power of two! round up to next power of two by filling
* all LSB in O(log(log(N))) then increment the result.
*/
int shift = 1;
do {
alignment |= alignment >> shift;
shift *= 2;
} while (unlikely(alignment & (alignment + 1)));
alignment++;
}
return _ha_aligned_alloc(alignment, size);
}
/* Like above but zeroing the area */
static inline void *_ha_aligned_zalloc_safe(size_t alignment, size_t size)
{
void *ret = _ha_aligned_alloc_safe(alignment, size);
if (ret)
memset(ret, 0, size);
return ret;
}
/* To be used to free a pointer returned by _ha_aligned_alloc() or
* _ha_aligned_alloc_safe(). Please use ha_aligned_free() instead
* (which does perform accounting).
*/
static inline void _ha_aligned_free(void *ptr)
{
#if defined(_WIN32)
return _aligned_free(ptr);
#else
free(ptr);
#endif
}
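For context, here is a minimal usage sketch of the helpers above (the struct and the alignment value are hypothetical, not taken from HAProxy): a non-power-of-two alignment is legal with the _safe variant since it gets rounded up, and the pointer must be released through _ha_aligned_free() because of the _aligned_malloc() path on Windows.

	struct scratch {
		char data[256];
	};

	static struct scratch *alloc_scratch(void)
	{
		/* 48 is not a power of two: _ha_aligned_zalloc_safe() rounds it up to 64 */
		return _ha_aligned_zalloc_safe(48, sizeof(struct scratch));
	}

	static void release_scratch(struct scratch *s)
	{
		_ha_aligned_free(s);
	}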
int parse_dotted_uints(const char *s, unsigned int **nums, size_t *sz);
/* PRNG */

View File

@@ -1,56 +0,0 @@
varnishtest "le2dec converter Test"
feature cmd "$HAPROXY_PROGRAM -cc 'version_atleast(3.0-dev0)'"
feature ignore_unknown_macro
server s1 {
rxreq
txresp -hdr "Connection: close"
} -repeat 3 -start
haproxy h1 -conf {
defaults
mode http
timeout connect "${HAPROXY_TEST_TIMEOUT-5s}"
timeout client "${HAPROXY_TEST_TIMEOUT-5s}"
timeout server "${HAPROXY_TEST_TIMEOUT-5s}"
frontend fe
bind "fd@${fe}"
#### requests
http-request set-var(txn.input) req.hdr(input)
http-response set-header le2dec-1 "%[var(txn.input),le2dec(:,1)]"
http-response set-header le2dec-2 "%[var(txn.input),le2dec(-,3)]"
http-response set-header le2dec-3 "%[var(txn.input),le2dec(::,3,1)]"
default_backend be
backend be
server s1 ${s1_addr}:${s1_port}
} -start
client c1 -connect ${h1_fe_sock} {
txreq -url "/" \
-hdr "input:"
rxresp
expect resp.status == 200
expect resp.http.le2dec-1 == ""
expect resp.http.le2dec-2 == ""
expect resp.http.le2dec-3 == ""
txreq -url "/" \
-hdr "input: 0123456789"
rxresp
expect resp.status == 200
expect resp.http.le2dec-1 == "48:49:50:51:52:53:54:55:56:57"
expect resp.http.le2dec-2 == "3289392-3486771-3684150-57"
expect resp.http.le2dec-3 == "3289392::3486771::3684150"
txreq -url "/" \
-hdr "input: abcdefghijklmnopqrstuvwxyz"
rxresp
expect resp.status == 200
expect resp.http.le2dec-1 == "97:98:99:100:101:102:103:104:105:106:107:108:109:110:111:112:113:114:115:116:117:118:119:120:121:122"
expect resp.http.le2dec-2 == "6513249-6710628-6908007-7105386-7302765-7500144-7697523-7894902-31353"
expect resp.http.le2dec-3 == "6513249::6710628::6908007::7105386::7302765::7500144::7697523::7894902"
} -run
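The expected values follow from the little-endian reading of each chunk: with input "0123456789" and 3-byte chunks, the first chunk "012" is the bytes 0x30, 0x31, 0x32, so its value is 48 + 49*256 + 50*65536 = 3289392. A standalone sketch of that arithmetic (illustration only, not HAProxy's converter code):

	#include <stdio.h>

	/* little-endian chunk to integer: the first byte is the least significant */
	static unsigned long long le_chunk(const unsigned char *p, size_t len)
	{
		unsigned long long v = 0;
		size_t i;

		for (i = 0; i < len; i++)
			v |= (unsigned long long)p[i] << (8 * i);
		return v;
	}

	int main(void)
	{
		printf("%llu\n", le_chunk((const unsigned char *)"012", 3)); /* prints 3289392 */
		return 0;
	}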

View File

@@ -34,111 +34,9 @@
#include <haproxy/ssl_sock.h>
#include <haproxy/ssl_utils.h>
#include <haproxy/tools.h>
#include <haproxy/trace.h>
#define TRACE_SOURCE &trace_acme
#if defined(HAVE_ACME)
static void acme_trace(enum trace_level level, uint64_t mask, const struct trace_source *src,
const struct ist where, const struct ist func,
const void *a1, const void *a2, const void *a3, const void *a4);
static const struct trace_event acme_trace_events[] = {
{ .mask = ACME_EV_SCHED, .name = "acme_sched", .desc = "Wakeup scheduled ACME task" },
{ .mask = ACME_EV_NEW, .name = "acme_new", .desc = "New ACME task" },
{ .mask = ACME_EV_TASK, .name = "acme_task", .desc = "ACME task" },
{ }
};
static const struct name_desc acme_trace_lockon_args[4] = {
/* arg1 */ { .name="acme_ctx", .desc="ACME context" },
/* arg2 */ { },
/* arg3 */ { },
/* arg4 */ { }
};
static const struct name_desc acme_trace_decoding[] = {
{ .name="clean", .desc="only user-friendly stuff, generally suitable for level \"user\"" },
{ .name="minimal", .desc="report only conn, no real decoding" },
{ .name="simple", .desc="add error messages" },
{ .name="advanced", .desc="add handshake-related details" },
{ .name="complete", .desc="add full data dump when available" },
{ /* end */ }
};
struct trace_source trace_acme = {
.name = IST("acme"),
.desc = "ACME",
.arg_def = TRC_ARG_PRIV,
.default_cb = acme_trace,
.known_events = acme_trace_events,
.lockon_args = acme_trace_lockon_args,
.decoding = acme_trace_decoding,
.report_events = ~0, /* report everything by default */
};
INITCALL1(STG_REGISTER, trace_register_source, &trace_acme);
static void acme_trace(enum trace_level level, uint64_t mask, const struct trace_source *src,
const struct ist where, const struct ist func,
const void *a1, const void *a2, const void *a3, const void *a4)
{
const struct acme_ctx *ctx = a1;
if (src->verbosity <= ACME_VERB_CLEAN)
return;
chunk_appendf(&trace_buf, " :");
if (mask >= ACME_EV_NEW)
chunk_appendf(&trace_buf, " acme_ctx=%p", ctx);
if (mask == ACME_EV_NEW)
chunk_appendf(&trace_buf, ", crt=%s", ctx->store->path);
if (mask >= ACME_EV_TASK) {
switch (ctx->http_state) {
case ACME_HTTP_REQ:
chunk_appendf(&trace_buf, ", http_st: ACME_HTTP_REQ");
break;
case ACME_HTTP_RES:
chunk_appendf(&trace_buf, ", http_st: ACME_HTTP_RES");
break;
}
chunk_appendf(&trace_buf, ", st: ");
switch (ctx->state) {
case ACME_RESOURCES: chunk_appendf(&trace_buf, "ACME_RESOURCES"); break;
case ACME_NEWNONCE: chunk_appendf(&trace_buf, "ACME_NEWNONCE"); break;
case ACME_CHKACCOUNT: chunk_appendf(&trace_buf, "ACME_CHKACCOUNT"); break;
case ACME_NEWACCOUNT: chunk_appendf(&trace_buf, "ACME_NEWACCOUNT"); break;
case ACME_NEWORDER: chunk_appendf(&trace_buf, "ACME_NEWORDER"); break;
case ACME_AUTH: chunk_appendf(&trace_buf, "ACME_AUTH"); break;
case ACME_CHALLENGE: chunk_appendf(&trace_buf, "ACME_CHALLENGE"); break;
case ACME_CHKCHALLENGE: chunk_appendf(&trace_buf, "ACME_CHKCHALLENGE"); break;
case ACME_FINALIZE: chunk_appendf(&trace_buf, "ACME_FINALIZE"); break;
case ACME_CHKORDER: chunk_appendf(&trace_buf, "ACME_CHKORDER"); break;
case ACME_CERTIFICATE: chunk_appendf(&trace_buf, "ACME_CERTIFICATE"); break;
case ACME_END: chunk_appendf(&trace_buf, "ACME_END"); break;
}
}
if (mask & (ACME_EV_REQ|ACME_EV_RES)) {
const struct ist *url = a2;
const struct buffer *buf = a3;
if (mask & ACME_EV_REQ)
chunk_appendf(&trace_buf, " url: %.*s", (int)url->len, url->ptr);
if (src->verbosity >= ACME_VERB_COMPLETE && level >= TRACE_LEVEL_DATA) {
chunk_appendf(&trace_buf, " Buffer Dump:\n");
chunk_appendf(&trace_buf, "%.*s", (int)buf->data, buf->area);
}
}
}
struct mt_list acme_tasks = MT_LIST_HEAD_INIT(acme_tasks);
@@ -190,7 +88,7 @@ struct acme_cfg *new_acme_cfg(const char *name)
/* 0 on the linenum just means it was not initialized yet */
ret->linenum = 0;
ret->challenge = strdup("http-01"); /* default value */ ret->challenge = strdup("HTTP-01"); /* default value */
/* The default generated keys are EC-384 */
ret->key.type = EVP_PKEY_EC;
@@ -408,8 +306,8 @@ static int cfg_parse_acme_kws(char **args, int section_type, struct proxy *curpx
goto out;
}
} else if (strcmp(args[0], "challenge") == 0) {
if ((!*args[1]) || (strcasecmp("http-01", args[1]) != 0 && (strcasecmp("dns-01", args[1]) != 0))) { if ((!*args[1]) || (strcmp("HTTP-01", args[1]) != 0 && (strcmp("DNS-01", args[1]) != 0))) {
ha_alert("parsing [%s:%d]: keyword '%s' in '%s' section requires a challenge type: http-01 or dns-01\n", file, linenum, args[0], cursection); ha_alert("parsing [%s:%d]: keyword '%s' in '%s' section requires a challenge type: HTTP-01 or DNS-01\n", file, linenum, args[0], cursection);
err_code |= ERR_ALERT | ERR_FATAL;
goto out;
}
@@ -755,7 +653,6 @@ static void acme_ctx_destroy(struct acme_ctx *ctx)
istfree(&auth->auth);
istfree(&auth->chall);
istfree(&auth->token);
istfree(&auth->token);
next = auth->next;
free(auth);
auth = next;
@@ -891,43 +788,6 @@ int acme_http_req(struct task *task, struct acme_ctx *ctx, struct ist url, enum
}
/*
* compute a TXT record for dns-01 challenge
* base64url(sha256(token || '.' || base64url(Thumbprint(accountKey))))
*
* https://datatracker.ietf.org/doc/html/rfc8555/#section-8.4
*
*/
unsigned int acme_txt_record(const struct ist thumbprint, const struct ist token, struct buffer *output)
{
unsigned char md[EVP_MAX_MD_SIZE];
struct buffer *tmp = NULL;
unsigned int size;
int ret = 0;
if ((tmp = alloc_trash_chunk()) == NULL)
goto out;
chunk_istcat(tmp, token);
chunk_appendf(tmp, ".");
chunk_istcat(tmp, thumbprint);
if (EVP_Digest(tmp->area, tmp->data, md, &size, EVP_sha256(), NULL) == 0)
goto out;
ret = a2base64url((const char *)md, size, output->area, output->size);
if (ret < 0)
ret = 0;
output->data = ret;
out:
free_trash_chunk(tmp);
return ret;
}
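A standalone sketch of the same computation using plain OpenSSL calls (names hypothetical, error handling trimmed): the key authorization string is token '.' thumbprint, and the TXT record value is the base64url encoding of its SHA-256 digest.

	#include <openssl/evp.h>
	#include <stdio.h>

	static int dns01_digest(const char *token, const char *thumbprint,
	                        unsigned char *md, unsigned int *mdlen)
	{
		char keyauth[512];
		int len;

		len = snprintf(keyauth, sizeof(keyauth), "%s.%s", token, thumbprint);
		if (len < 0 || (size_t)len >= sizeof(keyauth))
			return 0;
		/* base64url(md) is what goes into the _acme-challenge.<domain> TXT record */
		return EVP_Digest(keyauth, len, md, mdlen, EVP_sha256(), NULL);
	}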
int acme_jws_payload(struct buffer *req, struct ist nonce, struct ist url, EVP_PKEY *pkey, struct ist kid, struct buffer *output, char **errmsg)
{
struct buffer *b64payload = NULL;
@@ -1070,8 +930,6 @@ int acme_res_certificate(struct task *task, struct acme_ctx *ctx, char **errmsg)
}
}
TRACE_DATA(__FUNCTION__, ACME_EV_RES, ctx, NULL, &hc->res.buf);
if (hc->res.status < 200 || hc->res.status >= 300) {
if ((ret = mjson_get_string(hc->res.buf.area, hc->res.buf.data, "$.detail", t1->area, t1->size)) > -1)
t1->data = ret;
@@ -1143,8 +1001,6 @@ int acme_res_chkorder(struct task *task, struct acme_ctx *ctx, char **errmsg)
}
}
TRACE_DATA(__FUNCTION__, ACME_EV_RES, ctx, NULL, &hc->res.buf);
if (hc->res.status < 200 || hc->res.status >= 300) {
if ((ret = mjson_get_string(hc->res.buf.area, hc->res.buf.data, "$.detail", t1->area, t1->size)) > -1)
t1->data = ret;
@@ -1274,8 +1130,6 @@ int acme_res_finalize(struct task *task, struct acme_ctx *ctx, char **errmsg)
}
}
TRACE_DATA(__FUNCTION__, ACME_EV_RES, ctx, NULL, &hc->res.buf);
if (hc->res.status < 200 || hc->res.status >= 300) {
if ((ret = mjson_get_string(hc->res.buf.area, hc->res.buf.data, "$.detail", t1->area, t1->size)) > -1)
t1->data = ret;
@@ -1320,13 +1174,9 @@ int acme_req_challenge(struct task *task, struct acme_ctx *ctx, struct acme_auth
chunk_printf(req_in, "{}");
TRACE_DATA("REQ challenge dec", ACME_EV_REQ, ctx, &auth->chall, req_in);
if (acme_jws_payload(req_in, ctx->nonce, auth->chall, ctx->cfg->account.pkey, ctx->kid, req_out, errmsg) != 0)
goto error;
TRACE_DATA("REQ challenge enc", ACME_EV_REQ, ctx, &auth->chall, req_out);
if (acme_http_req(task, ctx, auth->chall, HTTP_METH_POST, hdrs, ist2(req_out->area, req_out->data)))
goto error;
@@ -1361,8 +1211,6 @@ enum acme_ret acme_res_challenge(struct task *task, struct acme_ctx *ctx, struct
hdrs = hc->res.hdrs;
TRACE_DATA(__FUNCTION__, ACME_EV_RES, ctx, NULL, &hc->res.buf);
for (hdr = hdrs; isttest(hdr->v); hdr++) {
if (isteqi(hdr->n, ist("Replay-Nonce"))) {
istfree(&ctx->nonce);
@@ -1436,14 +1284,10 @@ int acme_post_as_get(struct task *task, struct acme_ctx *ctx, struct ist url, ch
if ((req_out = alloc_trash_chunk()) == NULL)
goto error_alloc;
TRACE_USER("POST-as-GET ", ACME_EV_REQ, ctx, &url);
/* empty payload */
if (acme_jws_payload(req_in, ctx->nonce, url, ctx->cfg->account.pkey, ctx->kid, req_out, errmsg) != 0)
goto error_jws;
TRACE_DATA("POST-as-GET enc", ACME_EV_REQ, ctx, &url, req_out);
if (acme_http_req(task, ctx, url, HTTP_METH_POST, hdrs, ist2(req_out->area, req_out->data)))
goto error_http;
@@ -1498,7 +1342,6 @@ int acme_res_auth(struct task *task, struct acme_ctx *ctx, struct acme_auth *aut
}
}
TRACE_DATA(__FUNCTION__, ACME_EV_RES, ctx, NULL, &hc->res.buf);
if (hc->res.status < 200 || hc->res.status >= 300) {
/* XXX: need a generic URN error parser */
@@ -1513,23 +1356,6 @@ int acme_res_auth(struct task *task, struct acme_ctx *ctx, struct acme_auth *aut
goto error;
}
/* check and save the DNS entry */
ret = mjson_get_string(hc->res.buf.area, hc->res.buf.data, "$.identifier.type", t1->area, t1->size);
if (ret == -1) {
memprintf(errmsg, "couldn't get a type \"dns\" from Authorization URL \"%s\"", auth->auth.ptr);
goto error;
}
t1->data = ret;
ret = mjson_get_string(hc->res.buf.area, hc->res.buf.data, "$.identifier.value", t2->area, t2->size);
if (ret == -1) {
memprintf(errmsg, "couldn't get a type \"dns\" from Authorization URL \"%s\"", auth->auth.ptr);
goto error;
}
t2->data = ret;
auth->dns = istdup(ist2(t2->area, t2->data));
/* get the multiple challenges and select the one from the configuration */
for (i = 0; ; i++) {
int ret;
@@ -1579,35 +1405,6 @@ int acme_res_auth(struct task *task, struct acme_ctx *ctx, struct acme_auth *aut
goto error;
}
/* compute a response for the TXT entry */
if (strcasecmp(ctx->cfg->challenge, "dns-01") == 0) {
struct sink *dpapi;
struct ist line[7];
if (acme_txt_record(ist(ctx->cfg->account.thumbprint), auth->token, &trash) == 0) {
memprintf(errmsg, "couldn't compute the dns-01 challenge");
goto error;
}
send_log(NULL, LOG_NOTICE,"acme: %s: dns-01 requires to set the \"_acme-challenge.%.*s\" TXT record to \"%.*s\" and use the \"acme challenge_ready\" command over the CLI\n",
ctx->store->path, (int)auth->dns.len, auth->dns.ptr, (int)trash.data, trash.area);
/* dump to the "dpapi" sink */
line[0] = ist("acme deploy ");
line[1] = ist(ctx->store->path);
line[2] = ist(" thumbprint ");
line[3] = ist(ctx->cfg->account.thumbprint);
line[4] = ist("\n");
line[5] = ist2( hc->res.buf.area, hc->res.buf.data); /* dump the HTTP response */
line[6] = ist("\n\0");
dpapi = sink_find("dpapi");
if (dpapi)
sink_write(dpapi, LOG_HEADER_NONE, 0, line, 7);
}
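Assembled from the seven fragments above, the message written to the dpapi sink has the form below (certificate path, thumbprint and response body are hypothetical): the first line identifies the certificate and account key, and the raw authorization response follows so that an external consumer can extract the token and domain.

	acme deploy foo.example.com.pem thumbprint 2mPYt1NhM...
	{ "identifier": { "type": "dns", "value": "example.com" }, "status": "pending", ... }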
/* only useful for http-01 */
if (acme_add_challenge_map(ctx->cfg->map, auth->token.ptr, ctx->cfg->account.thumbprint, errmsg) != 0) {
memprintf(errmsg, "couldn't add the token to the '%s' map: %s", ctx->cfg->map, *errmsg);
goto error;
@@ -1658,13 +1455,10 @@ int acme_req_neworder(struct task *task, struct acme_ctx *ctx, char **errmsg)
chunk_appendf(req_in, " ] }");
TRACE_DATA("NewOrder Decode", ACME_EV_REQ, ctx, &ctx->resources.newOrder, req_in);
if (acme_jws_payload(req_in, ctx->nonce, ctx->resources.newOrder, ctx->cfg->account.pkey, ctx->kid, req_out, errmsg) != 0)
goto error;
TRACE_DATA("NewOrder JWS ", ACME_EV_REQ, ctx, &ctx->resources.newOrder, req_out);
if (acme_http_req(task, ctx, ctx->resources.newOrder, HTTP_METH_POST, hdrs, ist2(req_out->area, req_out->data)))
goto error;
@@ -1713,7 +1507,6 @@ int acme_res_neworder(struct task *task, struct acme_ctx *ctx, char **errmsg)
ctx->order = istdup(hdr->v);
}
}
TRACE_DATA(__FUNCTION__, ACME_EV_RES, ctx, NULL, &hc->res.buf);
if (hc->res.status < 200 || hc->res.status >= 300) {
if ((ret = mjson_get_string(hc->res.buf.area, hc->res.buf.data, "$.detail", t1->area, t1->size)) > -1)
@@ -1757,11 +1550,6 @@ int acme_res_neworder(struct task *task, struct acme_ctx *ctx, char **errmsg)
goto error;
}
/* if the challenge is not dns-01, consider that the challenge
* is ready because computed by HAProxy */
if (strcasecmp(ctx->cfg->challenge, "dns-01") != 0)
auth->ready = 1;
auth->next = ctx->auths;
ctx->auths = auth;
ctx->next_auth = auth;
@@ -1822,8 +1610,6 @@ int acme_req_account(struct task *task, struct acme_ctx *ctx, int newaccount, ch
else
chunk_printf(req_in, "%s", accountreq);
TRACE_DATA("newAccount Decoded", ACME_EV_REQ, ctx, &ctx->resources.newAccount, req_in);
if (acme_jws_payload(req_in, ctx->nonce, ctx->resources.newAccount, ctx->cfg->account.pkey, ctx->kid, req_out, errmsg) != 0)
goto error;
@@ -1873,8 +1659,6 @@ int acme_res_account(struct task *task, struct acme_ctx *ctx, int newaccount, ch
}
}
TRACE_DATA(__FUNCTION__, ACME_EV_RES, ctx, NULL, &hc->res.buf);
if (hc->res.status < 200 || hc->res.status >= 300) {
if ((ret = mjson_get_string(hc->res.buf.area, hc->res.buf.data, "$.detail", t1->area, t1->size)) > -1)
t1->data = ret;
@@ -1921,8 +1705,6 @@ int acme_nonce(struct task *task, struct acme_ctx *ctx, char **errmsg)
goto error;
}
TRACE_DATA(__FUNCTION__, ACME_EV_RES, ctx, NULL, &hc->res.buf);
hdrs = hc->res.hdrs;
for (hdr = hdrs; isttest(hdr->v); hdr++) {
@@ -1961,8 +1743,6 @@ int acme_directory(struct task *task, struct acme_ctx *ctx, char **errmsg)
goto error;
}
TRACE_DATA(__FUNCTION__, ACME_EV_RES, ctx, NULL, &hc->res.buf);
if ((ret = mjson_get_string(hc->res.buf.area, hc->res.buf.data, "$.newNonce", trash.area, trash.size)) <= 0) {
memprintf(errmsg, "couldn't get newNonce URL from the directory URL");
goto error;
@@ -2026,7 +1806,6 @@ struct task *acme_process(struct task *task, void *context, unsigned int state)
struct mt_list tmp = MT_LIST_LOCK_FULL(&ctx->el);
re:
TRACE_USER("ACME Task Handle", ACME_EV_TASK, ctx, &st);
switch (st) {
case ACME_RESOURCES:
@@ -2120,11 +1899,6 @@ struct task *acme_process(struct task *task, void *context, unsigned int state)
break;
case ACME_CHALLENGE:
if (http_st == ACME_HTTP_REQ) {
/* if the challenge is not ready, wait to be wakeup */
if (!ctx->next_auth->ready)
goto wait;
if (acme_req_challenge(task, ctx, ctx->next_auth, &errmsg) != 0)
goto retry;
}
@@ -2225,8 +1999,6 @@ struct task *acme_process(struct task *task, void *context, unsigned int state)
/* this is called when changing step in the state machine */
http_st = ACME_HTTP_REQ;
ctx->retries = ACME_RETRY; /* reinit the retries */
ctx->http_state = http_st;
ctx->state = st;
if (ctx->retryafter == 0)
goto re; /* optimize by not leaving the task for the next httpreq to init */
@@ -2234,6 +2006,8 @@ struct task *acme_process(struct task *task, void *context, unsigned int state)
/* if we have a retryafter, wait before next request (usually finalize) */
task->expire = tick_add(now_ms, ctx->retryafter * 1000);
ctx->retryafter = 0;
ctx->http_state = http_st;
ctx->state = st;
MT_LIST_UNLOCK_FULL(&ctx->el, tmp);
return task;
@@ -2281,16 +2055,8 @@ struct task *acme_process(struct task *task, void *context, unsigned int state)
task = NULL;
return task;
wait:
/* wait for a task_wakeup */
ctx->http_state = ACME_HTTP_REQ;
ctx->state = st;
task->expire = TICK_ETERNITY;
MT_LIST_UNLOCK_FULL(&ctx->el, tmp);
return task;
}
/*
* Return 1 if the certificate must be regenerated
* Check if the notAfter date will happen within (validity period / 12) or 7 days by default
@@ -2367,7 +2133,6 @@ struct task *acme_scheduler(struct task *task, void *context, unsigned int state
if (store->conf.acme.id) {
if (acme_will_expire(store)) {
TRACE_USER("ACME Scheduling start", ACME_EV_SCHED);
if (acme_start_task(store, &errmsg) != 0) {
send_log(NULL, LOG_NOTICE,"acme: %s: %s Aborting.\n", store->path, errmsg ? errmsg : "");
ha_free(&errmsg);
@@ -2556,14 +2321,12 @@ static int acme_start_task(struct ckch_store *store, char **errmsg)
ctx->store = newstore;
ctx->cfg = cfg;
task->context = ctx;
ctx->task = task;
MT_LIST_INIT(&ctx->el);
MT_LIST_APPEND(&acme_tasks, &ctx->el);
send_log(NULL, LOG_NOTICE, "acme: %s: Starting update of the certificate.\n", ctx->store->path);
TRACE_USER("ACME Task start", ACME_EV_NEW, ctx);
task_wakeup(task, TASK_WOKEN_INIT);
return 0;
@@ -2609,55 +2372,6 @@ static int cli_acme_renew_parse(char **args, char *payload, struct appctx *appct
return cli_dynerr(appctx, errmsg);
}
static int cli_acme_chall_ready_parse(char **args, char *payload, struct appctx *appctx, void *private)
{
char *errmsg = NULL;
const char *crt;
const char *dns;
struct mt_list back;
struct acme_ctx *ctx;
struct acme_auth *auth;
int found = 0;
if (!*args[2] && !*args[3] && !*args[4]) {
memprintf(&errmsg, ": not enough parameters\n");
goto err;
}
crt = args[2];
dns = args[4];
MT_LIST_FOR_EACH_ENTRY_LOCKED(ctx, &acme_tasks, el, back) {
if (strcmp(ctx->store->path, crt) != 0)
continue;
auth = ctx->auths;
while (auth) {
if (strncmp(dns, auth->dns.ptr, auth->dns.len) == 0) {
if (!auth->ready) {
auth->ready = 1;
task_wakeup(ctx->task, TASK_WOKEN_MSG);
found = 1;
} else {
memprintf(&errmsg, "ACME challenge for crt \"%s\" and dns \"%s\" was already READY !\n", crt, dns);
}
break;
}
auth = auth->next;
}
}
if (!found) {
memprintf(&errmsg, "Couldn't find the ACME task using crt \"%s\" and dns \"%s\" !\n", crt, dns);
goto err;
}
return cli_msg(appctx, LOG_INFO, "Challenge Ready!");
err:
return cli_dynerr(appctx, errmsg);
}
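For reference, this is the command an external DNS hook would issue over the CLI once the TXT record is published (socket path hypothetical); it flips auth->ready and wakes the task parked by the wait: label above:

	echo "acme challenge_ready foo.example.com.pem domain example.com" | socat stdio /var/run/haproxy.sock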
static int cli_acme_status_io_handler(struct appctx *appctx)
{
struct ebmb_node *node = NULL;
@@ -2740,7 +2454,6 @@ static int cli_acme_ps(char **args, char *payload, struct appctx *appctx, void *
static struct cli_kw_list cli_kws = {{ },{
{ { "acme", "renew", NULL }, "acme renew <certfile> : renew a certificate using the ACME protocol", cli_acme_renew_parse, NULL, NULL, NULL, 0 },
{ { "acme", "status", NULL }, "acme status : show status of certificates configured with ACME", cli_acme_ps, cli_acme_status_io_handler, NULL, NULL, 0 },
{ { "acme", "challenge_ready", NULL }, "acme challenge_ready <certfile> domain <domain> : notify that the dns-01 challenge is ready for this domain", cli_acme_chall_ready_parse, NULL, NULL, NULL, 0 },
{ { NULL }, NULL, NULL, NULL }
}};

View File

@@ -29,7 +29,7 @@
unsigned int nb_applets = 0;
DECLARE_TYPED_POOL(pool_head_appctx, "appctx", struct appctx); DECLARE_POOL(pool_head_appctx, "appctx", sizeof(struct appctx));
/* trace source and events */

View File

@@ -1425,7 +1425,7 @@ struct connection *conn_backend_get(int reuse_mode,
if (reuse_mode == PR_O_REUSE_SAFE && conn->mux->flags & MX_FL_HOL_RISK) {
/* attach the connection to the session private list */
conn->owner = sess;
session_add_conn(sess, conn); session_add_conn(sess, conn, conn->target);
}
else {
srv_add_to_avail_list(srv, conn);
@@ -2159,7 +2159,7 @@ int connect_server(struct stream *s)
(reuse_mode == PR_O_REUSE_SAFE &&
srv_conn->mux->flags & MX_FL_HOL_RISK)) {
/* If it fails now, the same will be done in mux->detach() callback */
session_add_conn(s->sess, srv_conn); session_add_conn(s->sess, srv_conn, srv_conn->target);
}
}
}

View File

@@ -229,7 +229,7 @@ static struct list caches = LIST_HEAD_INIT(caches);
static struct list caches_config = LIST_HEAD_INIT(caches_config); /* cache config to init */
static struct cache *tmp_cache_config = NULL;
DECLARE_STATIC_TYPED_POOL(pool_head_cache_st, "cache_st", struct cache_st); DECLARE_STATIC_POOL(pool_head_cache_st, "cache_st", sizeof(struct cache_st));
static struct eb32_node *insert_entry(struct cache *cache, struct cache_tree *tree, struct cache_entry *new_entry);
static void delete_entry(struct cache_entry *del_entry);

View File

@@ -367,10 +367,8 @@ int cfg_parse_listen(const char *file, int linenum, char **args, int kwm)
if ((*args[2] && (!*args[3] || strcmp(args[2], "from") != 0)) ||
alertif_too_many_args(3, file, linenum, args, &err_code)) {
if (rc & PR_CAP_FE) { if (rc & PR_CAP_FE)
err_code |= ERR_ALERT | ERR_FATAL;
ha_alert("parsing [%s:%d] : please use the 'bind' keyword for listening addresses.\n", file, linenum);
}
goto out;
}

View File

@@ -2824,9 +2824,10 @@ int check_config_validity()
* as some of the fields may be accessed soon
*/
MT_LIST_FOR_EACH_ENTRY_LOCKED(newsrv, &servers_list, global_list, back) {
err_code |= srv_init(newsrv); if (srv_init(newsrv) & ERR_CODE) {
if (err_code & ERR_CODE) cfgerr++;
goto out; continue;
}
}
/* starting to initialize the main proxies list */

View File

@@ -29,10 +29,8 @@
struct timeval start_date; /* the process's start date in wall-clock time */
struct timeval ready_date; /* date when the process was considered ready */
ullong start_time_ns; /* the process's start date in internal monotonic time (ns) */
volatile ullong _global_now_ns; /* locally stored common monotonic date between all threads, in ns (wraps every 585 yr) */ volatile ullong global_now_ns; /* common monotonic date between all threads, in ns (wraps every 585 yr) */
volatile ullong *global_now_ns; /* common monotonic date, may point to _global_now_ns or shared memory */ volatile uint global_now_ms; /* common monotonic date in milliseconds (may wrap) */
volatile uint _global_now_ms; /* locally stored common monotonic date in milliseconds (may wrap) */
volatile uint *global_now_ms; /* common monotonic date in milliseconds (may wrap), may point to _global_now_ms or shared memory */
/* when CLOCK_MONOTONIC is supported, the offset is applied from th_ctx->prev_mono_time instead */
THREAD_ALIGNED(64) static llong now_offset; /* global offset between system time and global time in ns */
@@ -240,7 +238,7 @@ void clock_update_local_date(int max_wait, int interrupted)
now_ns += ms_to_ns(max_wait);
/* consider the most recent known date */
now_ns = MAX(now_ns, HA_ATOMIC_LOAD(global_now_ns)); now_ns = MAX(now_ns, HA_ATOMIC_LOAD(&global_now_ns));
/* this event is rare, but it requires proper handling because if
* we just left now_ns where it was, the date will not be updated
@@ -271,8 +269,8 @@ void clock_update_global_date()
* realistic regarding the global date, which only moves forward,
* otherwise catch up.
*/
old_now_ns = _HA_ATOMIC_LOAD(global_now_ns); old_now_ns = _HA_ATOMIC_LOAD(&global_now_ns);
old_now_ms = _HA_ATOMIC_LOAD(global_now_ms); old_now_ms = _HA_ATOMIC_LOAD(&global_now_ms);
do {
if (now_ns < old_now_ns)
@@ -301,8 +299,8 @@ void clock_update_global_date()
/* let's try to update the global_now_ns (both in nanoseconds
* and ms forms) or loop again.
*/
} while ((!_HA_ATOMIC_CAS(global_now_ns, &old_now_ns, now_ns) || } while ((!_HA_ATOMIC_CAS(&global_now_ns, &old_now_ns, now_ns) ||
(now_ms != old_now_ms && !_HA_ATOMIC_CAS(global_now_ms, &old_now_ms, now_ms))) && (now_ms != old_now_ms && !_HA_ATOMIC_CAS(&global_now_ms, &old_now_ms, now_ms))) &&
__ha_cpu_relax());
if (!th_ctx->curr_mono_time) {
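The loop above is the usual lock-free publish pattern: compute a candidate date, then CAS until we either win or observe that another thread already advanced the clock. A minimal standalone sketch of the same idea with C11 atomics instead of the _HA_ATOMIC_* wrappers:

	#include <stdatomic.h>

	static _Atomic unsigned long long shared_now_ns;

	/* advance the shared clock unless someone already published a newer date */
	static void publish_now(unsigned long long local_now_ns)
	{
		unsigned long long old = atomic_load(&shared_now_ns);

		while (local_now_ns > old) {
			if (atomic_compare_exchange_weak(&shared_now_ns, &old, local_now_ns))
				break;
			/* a failed CAS reloads <old>, so monotonicity is re-checked */
		}
	}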
@@ -324,12 +322,11 @@ void clock_init_process_date(void)
th_ctx->prev_mono_time = th_ctx->curr_mono_time = before_poll_mono_ns;
gettimeofday(&date, NULL);
after_poll = before_poll = date;
_global_now_ns = th_ctx->curr_mono_time; global_now_ns = th_ctx->curr_mono_time;
if (!_global_now_ns) // CLOCK_MONOTONIC not supported if (!global_now_ns) // CLOCK_MONOTONIC not supported
_global_now_ns = tv_to_ns(&date); global_now_ns = tv_to_ns(&date);
now_ns = _global_now_ns; now_ns = global_now_ns;
global_now_ms = ns_to_ms(now_ns);
_global_now_ms = ns_to_ms(now_ns);
/* force time to wrap 20s after boot: we first compute the time offset
* that once applied to the wall-clock date will make the local time
@@ -337,19 +334,14 @@ void clock_init_process_date(void)
* and will be used to recompute the local time, both of which will
* match and continue from this shifted date.
*/
now_offset = sec_to_ns((uint)((uint)(-_global_now_ms) / 1000U - BOOT_TIME_WRAP_SEC)); now_offset = sec_to_ns((uint)((uint)(-global_now_ms) / 1000U - BOOT_TIME_WRAP_SEC));
_global_now_ns += now_offset; global_now_ns += now_offset;
now_ns = _global_now_ns; now_ns = global_now_ns;
now_ms = ns_to_ms(now_ns);
/* correct for TICK_ETERNITY (0) */
if (now_ms == TICK_ETERNITY)
now_ms++;
_global_now_ms = now_ms; global_now_ms = now_ms;
/* for now global_now_ms points to the process-local _global_now_ms */
global_now_ms = &_global_now_ms;
/* same goes for global_now_ns */
global_now_ns = &_global_now_ns;
th_ctx->idle_pct = 100;
clock_update_date(0, 1);
@@ -364,16 +356,6 @@ void clock_adjust_now_offset(void)
HA_ATOMIC_STORE(&now_offset, now_ns - tv_to_ns(&date));
}
void clock_set_now_offset(llong ofs)
{
HA_ATOMIC_STORE(&now_offset, ofs);
}
llong clock_get_now_offset(void)
{
return HA_ATOMIC_LOAD(&now_offset);
}
/* must be called once per thread to initialize their thread-local variables.
* Note that other threads might also be initializing and running in parallel.
*/
@@ -382,7 +364,7 @@ void clock_init_thread_date(void)
gettimeofday(&date, NULL);
after_poll = before_poll = date;
now_ns = _HA_ATOMIC_LOAD(global_now_ns); now_ns = _HA_ATOMIC_LOAD(&global_now_ns);
th_ctx->idle_pct = 100;
th_ctx->prev_cpu_time = now_cpu_time();
th_ctx->prev_mono_time = now_mono_time();

View File

@@ -156,7 +156,7 @@ int comp_append_algo(struct comp_algo **algos, const char *algo)
}
#if defined(USE_ZLIB) || defined(USE_SLZ)
DECLARE_STATIC_TYPED_POOL(pool_comp_ctx, "comp_ctx", struct comp_ctx); DECLARE_STATIC_POOL(pool_comp_ctx, "comp_ctx", sizeof(struct comp_ctx));
/*
* Alloc the comp_ctx

View File

@@ -38,11 +38,11 @@
#include <haproxy/xxhash.h>
DECLARE_TYPED_POOL(pool_head_connection, "connection", struct connection, 0, 64); DECLARE_POOL(pool_head_connection, "connection", sizeof(struct connection));
DECLARE_TYPED_POOL(pool_head_conn_hash_node, "conn_hash_node", struct conn_hash_node); DECLARE_POOL(pool_head_conn_hash_node, "conn_hash_node", sizeof(struct conn_hash_node));
DECLARE_TYPED_POOL(pool_head_sockaddr, "sockaddr", struct sockaddr_storage); DECLARE_POOL(pool_head_sockaddr, "sockaddr", sizeof(struct sockaddr_storage));
DECLARE_TYPED_POOL(pool_head_pp_tlv_128, "pp_tlv_128", struct conn_tlv_list, HA_PP2_TLV_VALUE_128); DECLARE_POOL(pool_head_pp_tlv_128, "pp_tlv_128", sizeof(struct conn_tlv_list) + HA_PP2_TLV_VALUE_128);
DECLARE_TYPED_POOL(pool_head_pp_tlv_256, "pp_tlv_256", struct conn_tlv_list, HA_PP2_TLV_VALUE_256); DECLARE_POOL(pool_head_pp_tlv_256, "pp_tlv_256", sizeof(struct conn_tlv_list) + HA_PP2_TLV_VALUE_256);
struct idle_conns idle_conns[MAX_THREADS] = { };
struct xprt_ops *registered_xprt[XPRT_ENTRIES] = { NULL, };
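The left-hand declarations show master's typed variant deriving the allocation size from the type itself, with optional trailing arguments (extra payload bytes for the TLV pools, and what appears to be an alignment hint for pool_head_connection). As a hedged sketch only, and not HAProxy's actual macro definition, such a wrapper could reduce to the sized form like this:

	/* hypothetical reduction: the type replaces a hand-written sizeof(), so the
	 * declaration cannot go stale when the struct changes, and <extra> covers
	 * trailing variable-length payloads such as the TLV values above
	 */
	#define DECLARE_TYPED_POOL_SKETCH(ptr, name, type, extra) \
		DECLARE_POOL(ptr, name, sizeof(type) + (extra))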
@@ -117,7 +117,7 @@ int conn_create_mux(struct connection *conn, int *closed_connection)
}
else if (conn->flags & CO_FL_PRIVATE) {
/* If it fails now, the same will be done in mux->detach() callback */
session_add_conn(sess, conn); session_add_conn(sess, conn, conn->target);
}
return 0;
fail:

View File

@@ -52,12 +52,12 @@ void counters_be_shared_drop(struct be_counters_shared *counters)
_counters_shared_drop(counters);
}
/* prepare shared counters pointer for a given <guid> object /* retrieved shared counters pointer for a given <guid> object
* <size> hint is expected to reflect the actual tg member size (fe/be)
* if <guid> is not set, then sharing is disabled
* Returns the pointer on success or NULL on failure
*/
static int _counters_shared_prepare(struct counters_shared *shared, const struct guid_node *guid, size_t size) static int _counters_shared_init(struct counters_shared *shared, const struct guid_node *guid, size_t size)
{
int it = 0;
@@ -85,11 +85,11 @@ static int _counters_shared_prepare(struct counters_shared *shared, const struct
/* prepare shared fe counters pointer for a given <guid> object */
int counters_fe_shared_prepare(struct fe_counters_shared *shared, const struct guid_node *guid)
{
return _counters_shared_prepare((struct counters_shared *)shared, guid, sizeof(struct fe_counters_shared_tg)); return _counters_shared_init((struct counters_shared *)shared, guid, sizeof(struct fe_counters_shared_tg));
}
/* prepare shared be counters pointer for a given <guid> object */
int counters_be_shared_prepare(struct be_counters_shared *shared, const struct guid_node *guid) int counters_be_shared_init(struct be_counters_shared *shared, const struct guid_node *guid)
{
return _counters_shared_prepare((struct counters_shared *)shared, guid, sizeof(struct be_counters_shared_tg)); return _counters_shared_init((struct counters_shared *)shared, guid, sizeof(struct be_counters_shared_tg));
}

View File

@@ -39,8 +39,8 @@
static THREAD_LOCAL char *dns_msg_trash;
DECLARE_STATIC_TYPED_POOL(dns_session_pool, "dns_session", struct dns_session); DECLARE_STATIC_POOL(dns_session_pool, "dns_session", sizeof(struct dns_session));
DECLARE_STATIC_TYPED_POOL(dns_query_pool, "dns_query", struct dns_query); DECLARE_STATIC_POOL(dns_query_pool, "dns_query", sizeof(struct dns_query));
DECLARE_STATIC_POOL(dns_msg_buf, "dns_msg_buf", DNS_TCP_MSG_RING_MAX_SIZE);
/* Opens a UDP socket on the nameserver's IP/Port, if required. Returns 0 on

View File

@@ -32,7 +32,7 @@ int init_buffer()
int done;
int i;
pool_head_buffer = create_aligned_pool("buffer", global.tune.bufsize, 64, MEM_F_SHARED|MEM_F_EXACT); pool_head_buffer = create_pool("buffer", global.tune.bufsize, MEM_F_SHARED|MEM_F_EXACT);
if (!pool_head_buffer)
return 0;

View File

@@ -58,10 +58,10 @@ struct event_hdl_async_task_default_ctx
};
/* memory pools declarations */
DECLARE_STATIC_TYPED_POOL(pool_head_sub, "ehdl_sub", struct event_hdl_sub); DECLARE_STATIC_POOL(pool_head_sub, "ehdl_sub", sizeof(struct event_hdl_sub));
DECLARE_STATIC_TYPED_POOL(pool_head_sub_event, "ehdl_sub_e", struct event_hdl_async_event); DECLARE_STATIC_POOL(pool_head_sub_event, "ehdl_sub_e", sizeof(struct event_hdl_async_event));
DECLARE_STATIC_TYPED_POOL(pool_head_sub_event_data, "ehdl_sub_ed", struct event_hdl_async_event_data); DECLARE_STATIC_POOL(pool_head_sub_event_data, "ehdl_sub_ed", sizeof(struct event_hdl_async_event_data));
DECLARE_STATIC_TYPED_POOL(pool_head_sub_taskctx, "ehdl_sub_tctx", struct event_hdl_async_task_default_ctx); DECLARE_STATIC_POOL(pool_head_sub_taskctx, "ehdl_sub_tctx", sizeof(struct event_hdl_async_task_default_ctx));
/* global event_hdl tunables (public variable) */
struct event_hdl_tune event_hdl_tune;

View File

@@ -35,9 +35,9 @@ static struct fcgi_app *fcgi_apps = NULL;
struct flt_ops fcgi_flt_ops;
const char *fcgi_flt_id = "FCGI filter";
DECLARE_STATIC_TYPED_POOL(pool_head_fcgi_flt_ctx, "fcgi_flt_ctx", struct fcgi_flt_ctx); DECLARE_STATIC_POOL(pool_head_fcgi_flt_ctx, "fcgi_flt_ctx", sizeof(struct fcgi_flt_ctx));
DECLARE_STATIC_TYPED_POOL(pool_head_fcgi_param_rule, "fcgi_param_rule", struct fcgi_param_rule); DECLARE_STATIC_POOL(pool_head_fcgi_param_rule, "fcgi_param_rule", sizeof(struct fcgi_param_rule));
DECLARE_STATIC_TYPED_POOL(pool_head_fcgi_hdr_rule, "fcgi_hdr_rule", struct fcgi_hdr_rule); DECLARE_STATIC_POOL(pool_head_fcgi_hdr_rule, "fcgi_hdr_rule", sizeof(struct fcgi_hdr_rule));
/**************************************************************************/
/***************************** Utils **************************************/
@@ -290,7 +290,7 @@ static int fcgi_flt_start(struct stream *s, struct filter *filter)
static void fcgi_flt_stop(struct stream *s, struct filter *filter)
{
struct fcgi_flt_ctx *fcgi_ctx = filter->ctx; struct flt_fcgi_ctx *fcgi_ctx = filter->ctx;
if (!fcgi_ctx)
return;

View File

@@ -117,6 +117,7 @@ THREAD_LOCAL int poller_rd_pipe = -1; // Pipe to wake the thread
int poller_wr_pipe[MAX_THREADS] __read_mostly; // Pipe to wake the threads
volatile int ha_used_fds = 0; // Number of FD we're currently using
static struct fdtab *fdtab_addr; /* address of the allocated area containing fdtab */
/* adds fd <fd> to fd list <list> if it was not yet in it */
void fd_add_to_fd_list(volatile struct fdlist *list, int fd)
@@ -1165,12 +1166,14 @@ int init_pollers()
int p;
struct poller *bp;
/* always provide an aligned fdtab */ if ((fdtab_addr = calloc(1, global.maxsock * sizeof(*fdtab) + 64)) == NULL) {
if ((fdtab = ha_aligned_zalloc(64, global.maxsock * sizeof(*fdtab))) == NULL) {
ha_alert("Not enough memory to allocate %d entries for fdtab!\n", global.maxsock);
goto fail_tab;
}
vma_set_name(fdtab, global.maxsock * sizeof(*fdtab), "fd", "fdtab"); vma_set_name(fdtab_addr, global.maxsock * sizeof(*fdtab) + 64, "fd", "fdtab_addr");
/* always provide an aligned fdtab */
fdtab = (struct fdtab*)((((size_t)fdtab_addr) + 63) & -(size_t)64);
if ((polled_mask = calloc(global.maxsock, sizeof(*polled_mask))) == NULL) {
ha_alert("Not enough memory to allocate %d entries for polled_mask!\n", global.maxsock);
@@ -1211,7 +1214,7 @@ int init_pollers()
fail_info:
free(polled_mask);
fail_polledmask:
ha_aligned_free(fdtab); free(fdtab_addr);
fail_tab:
return 0;
}
@@ -1232,7 +1235,7 @@ void deinit_pollers() {
}
ha_free(&fdinfo);
ha_aligned_free(fdtab); ha_free(&fdtab_addr);
ha_free(&polled_mask);
}

View File

@@ -30,7 +30,7 @@
#define TRACE_SOURCE &trace_strm
/* Pool used to allocate filters */
DECLARE_STATIC_TYPED_POOL(pool_head_filter, "filter", struct filter); DECLARE_STATIC_POOL(pool_head_filter, "filter", sizeof(struct filter));
static int handle_analyzer_result(struct stream *s, struct channel *chn, unsigned int an_bit, int ret);

View File

@@ -65,7 +65,7 @@ struct bwlim_state {
/* Pool used to allocate bwlim_state structs */
DECLARE_STATIC_TYPED_POOL(pool_head_bwlim_state, "bwlim_state", struct bwlim_state); DECLARE_STATIC_POOL(pool_head_bwlim_state, "bwlim_state", sizeof(struct bwlim_state));
/* Apply the bandwidth limitation of the filter <filter>. <len> is the maximum

View File

@@ -42,7 +42,7 @@ struct comp_state {
};
/* Pools used to allocate comp_state structs */
DECLARE_STATIC_TYPED_POOL(pool_head_comp_state, "comp_state", struct comp_state); DECLARE_STATIC_POOL(pool_head_comp_state, "comp_state", sizeof(struct comp_state));
static THREAD_LOCAL struct buffer tmpbuf;
static THREAD_LOCAL struct buffer zbuf;

View File

@@ -245,8 +245,8 @@ int curpxopts;
int curpxopts2;
/* Pools used to allocate SPOE structs */
DECLARE_STATIC_TYPED_POOL(pool_head_spoe_ctx, "spoe_ctx", struct spoe_context); DECLARE_STATIC_POOL(pool_head_spoe_ctx, "spoe_ctx", sizeof(struct spoe_context));
DECLARE_STATIC_TYPED_POOL(pool_head_spoe_appctx, "spoe_appctx", struct spoe_appctx); DECLARE_STATIC_POOL(pool_head_spoe_appctx, "spoe_appctx", sizeof(struct spoe_appctx));
struct flt_ops spoe_ops;

View File

@@ -33,7 +33,7 @@ uint update_freq_ctr_period_slow(struct freq_ctr *ctr, uint period, uint inc)
*/
for (;; __ha_cpu_relax()) {
curr_tick = HA_ATOMIC_LOAD(&ctr->curr_tick);
now_ms_tmp = HA_ATOMIC_LOAD(global_now_ms); now_ms_tmp = HA_ATOMIC_LOAD(&global_now_ms);
if (now_ms_tmp - curr_tick < period)
return HA_ATOMIC_ADD_FETCH(&ctr->curr_ctr, inc);
@@ -81,7 +81,7 @@ ullong _freq_ctr_total_from_values(uint period, int pend,
{
int remain;
remain = tick + period - HA_ATOMIC_LOAD(global_now_ms); remain = tick + period - HA_ATOMIC_LOAD(&global_now_ms);
if (unlikely(remain < 0)) {
/* We're past the first period, check if we can still report a
* part of last period or if we're too far away.
@@ -239,7 +239,7 @@ int freq_ctr_overshoot_period(const struct freq_ctr *ctr, uint period, uint freq
return 0;
}
elapsed = HA_ATOMIC_LOAD(global_now_ms) - tick; elapsed = HA_ATOMIC_LOAD(&global_now_ms) - tick;
if (unlikely(elapsed < 0 || elapsed > period)) {
/* The counter is in the future or the elapsed time is higher than the period, there is no overshoot */
return 0;
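The arithmetic behind these readings: the previous period contributes proportionally to how much of it still overlaps the trailing window. A simplified sketch of the estimate (HAProxy's real code also deals with wrapping ticks and pending updates):

	/* e.g. 250ms into a 1s period with curr=40 and prev=120:
	 * 40 + 120 * 750 / 1000 = 130 events/s
	 */
	static unsigned int freq_estimate(unsigned int curr, unsigned int prev,
	                                  unsigned int elapsed_ms, unsigned int period_ms)
	{
		return curr + (unsigned long long)prev * (period_ms - elapsed_ms) / period_ms;
	}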

View File

@@ -11,7 +11,6 @@
/* GUID global tree */
struct eb_root guid_tree = EB_ROOT_UNIQUE;
__decl_thread(HA_RWLOCK_T guid_lock);
static int _guid_count = 0;
/* Initialize <guid> members. */
void guid_init(struct guid_node *guid)
@@ -70,19 +69,15 @@ int guid_insert(enum obj_type *objt, const char *uid, char **errmsg)
memprintf(errmsg, "duplicate entry with %s", dup_name);
goto err;
}
_guid_count += 1;
HA_RWLOCK_WRUNLOCK(GUID_LOCK, &guid_lock);
guid->obj_type = objt;
return 0;
err:
if (guid)
ha_free(&guid->node.key);
ha_free(&dup_name);
if (guid)
guid->node.key = NULL; /* so that we can check that guid is not in a tree */
return 1;
}
@@ -93,8 +88,6 @@ void guid_remove(struct guid_node *guid)
{
HA_RWLOCK_WRLOCK(GUID_LOCK, &guid_lock);
ebpt_delete(&guid->node);
if (guid->node.key)
_guid_count--;
ha_free(&guid->node.key);
HA_RWLOCK_WRUNLOCK(GUID_LOCK, &guid_lock);
}
@@ -178,14 +171,3 @@ char *guid_name(const struct guid_node *guid)
return NULL;
}
/* returns the number of guid inserted in guid_tree */
int guid_count(void)
{
int count;
HA_RWLOCK_WRLOCK(GUID_LOCK, &guid_lock);
count = _guid_count;
HA_RWLOCK_WRUNLOCK(GUID_LOCK, &guid_lock);
return count;
}

View File

@@ -148,7 +148,7 @@ struct h3c {
struct h3_counters *prx_counters;
};
DECLARE_STATIC_TYPED_POOL(pool_head_h3c, "h3c", struct h3c); DECLARE_STATIC_POOL(pool_head_h3c, "h3c", sizeof(struct h3c));
#define H3_SF_UNI_INIT 0x00000001 /* stream type not parsed for unidirectional stream */
#define H3_SF_UNI_NO_H3 0x00000002 /* unidirectional stream does not carry H3 frames */
@@ -171,7 +171,7 @@ struct h3s {
int err; /* used for stream reset */
};
DECLARE_STATIC_TYPED_POOL(pool_head_h3s, "h3s", struct h3s); DECLARE_STATIC_POOL(pool_head_h3s, "h3s", sizeof(struct h3s));
/* Initialize a uni-stream <qcs> by reading its type from <b>.
*

View File

@@ -2215,6 +2215,19 @@ static void step_init_2(int argc, char** argv)
if (global.mode & MODE_DUMP_CFG)
deinit_and_exit(0);
#ifdef USE_OPENSSL
/* Initialize SSL random generator. Must be called before chroot for
* access to /dev/urandom, and before ha_random_boot() which may use
* RAND_bytes().
*/
if (!ssl_initialize_random()) {
ha_alert("OpenSSL random data generator initialization failed.\n");
exit(EXIT_FAILURE);
}
#endif
ha_random_boot(argv); // the argv pointer brings some kernel-fed entropy
/* now we know the buffer size, we can initialize the channels and buffers */
init_buffer();
@@ -3141,19 +3154,6 @@ int main(int argc, char **argv)
rlim_fd_cur_at_boot = limit.rlim_cur;
rlim_fd_max_at_boot = limit.rlim_max;
#ifdef USE_OPENSSL
/* Initialize SSL random generator. Must be called before chroot for
* access to /dev/urandom, and before ha_random_boot() which may use
* RAND_bytes().
*/
if (!ssl_initialize_random()) {
ha_alert("OpenSSL random data generator initialization failed.\n");
exit(EXIT_FAILURE);
}
#endif
ha_random_boot(argv); // the argv pointer brings some kernel-fed entropy
/* process all initcalls in order of potential dependency */
RUN_INITCALLS(STG_PREPARE);
RUN_INITCALLS(STG_LOCK);

View File

@@ -456,7 +456,7 @@ struct hlua_cli_ctx {
struct hlua_function *fcn;
};
DECLARE_STATIC_TYPED_POOL(pool_head_hlua_flt_ctx, "hlua_flt_ctx", struct hlua_flt_ctx); DECLARE_STATIC_POOL(pool_head_hlua_flt_ctx, "hlua_flt_ctx", sizeof(struct hlua_flt_ctx));
static int hlua_filter_from_payload(struct filter *filter);
@@ -469,7 +469,7 @@ static struct list referenced_filters = LIST_HEAD_INIT(referenced_filters);
/* This is the memory pool containing struct lua for applets
* (including cli).
*/
DECLARE_STATIC_TYPED_POOL(pool_head_hlua, "hlua", struct hlua); DECLARE_STATIC_POOL(pool_head_hlua, "hlua", sizeof(struct hlua));
/* Used for Socket connection. */
static struct proxy *socket_proxy;
@@ -692,7 +692,7 @@ struct hlua_event_sub {
/* This is the memory pool containing struct hlua_event_sub
* for event subscriptions from lua
*/
DECLARE_STATIC_TYPED_POOL(pool_head_hlua_event_sub, "hlua_esub", struct hlua_event_sub); DECLARE_STATIC_POOL(pool_head_hlua_event_sub, "hlua_esub", sizeof(struct hlua_event_sub));
/* These functions convert types between HAProxy internal args or
* sample and LUA types. Another function permits checking if the
@@ -13363,24 +13363,7 @@ static int hlua_load_per_thread(char **args, int section_type, struct proxy *cur
return -1;
}
for (i = 1; *(args[i]) != 0; i++) {
/* first arg is filename */ per_thread_load[len][i - 1] = strdup(args[i]);
if (i == 1 && args[1][0] != '/') {
char *curpath;
char *fullpath = NULL;
/* filename is provided using relative path, store the absolute path
* to take current chdir into account for other threads file load
* which occur later
*/
curpath = getcwd(trash.area, trash.size);
if (!curpath) {
memprintf(err, "failed to retrieve cur path");
return -1;
}
per_thread_load[len][i - 1] = memprintf(&fullpath, "%s/%s", curpath, args[1]);
}
else
per_thread_load[len][i - 1] = strdup(args[i]);
if (per_thread_load[len][i - 1] == NULL) {
memprintf(err, "out of memory error");
return -1;
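The left-hand block is what resolves a relative script path once at parse time; the directive it affects looks like this (path hypothetical):

	global
		lua-load-per-thread ./scripts/init.lua

Storing the absolute path means the later per-thread loads no longer depend on whatever the current directory happens to be by then (e.g. after a chdir done for daemonization or chroot).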

View File

@@ -517,7 +517,7 @@ struct hlua_queue_item {
/* This is the memory pool containing struct hlua_queue_item (queue items)
*/
DECLARE_STATIC_TYPED_POOL(pool_head_hlua_queue, "hlua_queue", struct hlua_queue_item); DECLARE_STATIC_POOL(pool_head_hlua_queue, "hlua_queue", sizeof(struct hlua_queue_item));
static struct hlua_queue *hlua_check_queue(lua_State *L, int ud)
{
@@ -1913,21 +1913,6 @@ int hlua_listable_servers_pairs_iterator(lua_State *L)
return 2;
}
/* ensure proper cleanup for listable_servers_pairs */
int hlua_listable_servers_pairs_gc(lua_State *L)
{
struct hlua_server_list_iterator_context *ctx;
ctx = lua_touserdata(L, 1);
/* we need to make sure that the watcher is left in a detached state even
* if the iterator was interrupted (ie: "break" from the loop), else
* the server watcher list will become corrupted
*/
watcher_detach(&ctx->srv_watch);
return 0;
}
/* init the iterator context, return iterator function
* with context as closure. The only argument is a
* server list object.
@@ -1940,12 +1925,6 @@ int hlua_listable_servers_pairs(lua_State *L)
hlua_srv_list = hlua_check_server_list(L, 1);
ctx = lua_newuserdata(L, sizeof(*ctx));
/* add gc metamethod to the newly created userdata */
lua_newtable(L);
hlua_class_function(L, "__gc", hlua_listable_servers_pairs_gc);
lua_setmetatable(L, -2);
ctx->px = hlua_srv_list->px;
ctx->next = NULL;
watcher_init(&ctx->srv_watch, &ctx->next, offsetof(struct server, watcher_list));

View File

@ -1641,7 +1641,7 @@ int http_wait_for_response(struct stream *s, struct channel *rep, int an_bit)
conn_set_owner(srv_conn, sess, NULL); conn_set_owner(srv_conn, sess, NULL);
conn_set_private(srv_conn); conn_set_private(srv_conn);
/* If it fail now, the same will be done in mux->detach() callback */ /* If it fail now, the same will be done in mux->detach() callback */
session_add_conn(srv_conn->owner, srv_conn); session_add_conn(srv_conn->owner, srv_conn, srv_conn->target);
break; break;
} }
} }
@ -5294,7 +5294,7 @@ void http_set_term_flags(struct stream *s)
} }
DECLARE_TYPED_POOL(pool_head_http_txn, "http_txn", struct http_txn); DECLARE_POOL(pool_head_http_txn, "http_txn", sizeof(struct http_txn));
/* /*
* Local variables: * Local variables:

View File

@ -57,11 +57,6 @@ struct list per_thread_init_list = LIST_HEAD_INIT(per_thread_init_list);
*/ */
struct list post_deinit_list = LIST_HEAD_INIT(post_deinit_list); struct list post_deinit_list = LIST_HEAD_INIT(post_deinit_list);
/* These functions are called after everything is stopped, right before exit(), for the master
* process when haproxy was started in master-worker mode. They don't return anything.
*/
struct list post_deinit_master_list = LIST_HEAD_INIT(post_deinit_master_list);
/* These functions are called when freeing a proxy during the deinit, after /* These functions are called when freeing a proxy during the deinit, after
* everything is stopped. They don't return anything. They should not release * everything is stopped. They don't return anything. They should not release
* the proxy itself or any shared resources that are possibly used by other * the proxy itself or any shared resources that are possibly used by other
@ -165,22 +160,6 @@ void hap_register_post_deinit(void (*fct)())
LIST_APPEND(&post_deinit_list, &b->list); LIST_APPEND(&post_deinit_list, &b->list);
} }
/* used to register some de-initialization functions to call after everything
* has stopped, but only for the master process (when started in master-worker mode).
*/
void hap_register_post_deinit_master(void (*fct)())
{
struct post_deinit_fct *b;
b = calloc(1, sizeof(*b));
if (!b) {
fprintf(stderr, "out of memory\n");
exit(1);
}
b->fct = fct;
LIST_APPEND(&post_deinit_master_list, &b->list);
}
/* used to register some per proxy de-initialization functions to call after /* used to register some per proxy de-initialization functions to call after
* everything has stopped. * everything has stopped.
*/ */
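The removed hook mirrors hap_register_post_deinit() just above it, except that its list is only walked by the master process once all workers are gone (see the mworker.c hunk below). A hypothetical caller, for illustration only (master_cleanup is made up; the registration function is the one removed here):

    static void master_cleanup(void)
    {
        /* release resources that only the master process owns */
    }

    hap_register_post_deinit_master(master_cleanup);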

View File

@ -25,7 +25,7 @@ struct fwlc_tree_elt {
unsigned int elements; unsigned int elements;
}; };
DECLARE_STATIC_TYPED_POOL(pool_head_fwlc_elt, "fwlc_tree_elt", struct fwlc_tree_elt); DECLARE_STATIC_POOL(pool_head_fwlc_elt, "fwlc_tree_elt", sizeof(struct fwlc_tree_elt));
#define FWLC_LBPRM_SEQ(lbprm) ((lbprm) & 0xffffffff) #define FWLC_LBPRM_SEQ(lbprm) ((lbprm) & 0xffffffff)
#define FWLC_LBPRM_SMALLEST(lbprm) ((lbprm) >> 32) #define FWLC_LBPRM_SMALLEST(lbprm) ((lbprm) >> 32)
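The two macros pack a pair of 32-bit values into the single 64-bit lbprm word: the sequence number in the low half and the "smallest" value in the high half. The inverse operation, for clarity:

    uint64_t lbprm = ((uint64_t)smallest << 32) | (uint64_t)seq;

    /* FWLC_LBPRM_SEQ(lbprm)      == seq      */
    /* FWLC_LBPRM_SMALLEST(lbprm) == smallest */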

View File

@ -285,8 +285,8 @@ static struct trace_source trace_fcgi __read_mostly = {
INITCALL1(STG_REGISTER, trace_register_source, TRACE_SOURCE); INITCALL1(STG_REGISTER, trace_register_source, TRACE_SOURCE);
/* FCGI connection and stream pools */ /* FCGI connection and stream pools */
DECLARE_STATIC_TYPED_POOL(pool_head_fcgi_conn, "fcgi_conn", struct fcgi_conn); DECLARE_STATIC_POOL(pool_head_fcgi_conn, "fcgi_conn", sizeof(struct fcgi_conn));
DECLARE_STATIC_TYPED_POOL(pool_head_fcgi_strm, "fcgi_strm", struct fcgi_strm); DECLARE_STATIC_POOL(pool_head_fcgi_strm, "fcgi_strm", sizeof(struct fcgi_strm));
struct task *fcgi_timeout_task(struct task *t, void *context, unsigned int state); struct task *fcgi_timeout_task(struct task *t, void *context, unsigned int state);
static int fcgi_process(struct fcgi_conn *fconn); static int fcgi_process(struct fcgi_conn *fconn);
@ -3723,25 +3723,22 @@ static void fcgi_detach(struct sedesc *sd)
(fconn->flags & FCGI_CF_KEEP_CONN)) { (fconn->flags & FCGI_CF_KEEP_CONN)) {
if (fconn->conn->flags & CO_FL_PRIVATE) { if (fconn->conn->flags & CO_FL_PRIVATE) {
/* Add the connection in the session server list, if not already done */ /* Add the connection in the session server list, if not already done */
if (!session_add_conn(sess, fconn->conn)) if (!session_add_conn(sess, fconn->conn, fconn->conn->target)) {
fconn->conn->owner = NULL; fconn->conn->owner = NULL;
if (eb_is_empty(&fconn->streams_by_id)) {
if (eb_is_empty(&fconn->streams_by_id)) { /* let's kill the connection right away */
if (!fconn->conn->owner) {
/* Session insertion above has failed and connection is idle, remove it. */
fconn->conn->mux->destroy(fconn); fconn->conn->mux->destroy(fconn);
TRACE_DEVEL("outgoing connection killed", FCGI_EV_STRM_END|FCGI_EV_FCONN_ERR); TRACE_DEVEL("outgoing connection killed", FCGI_EV_STRM_END|FCGI_EV_FCONN_ERR);
return; return;
} }
}
if (eb_is_empty(&fconn->streams_by_id)) {
/* mark that the tasklet may lose its context to another thread and /* mark that the tasklet may lose its context to another thread and
* that the handler needs to check it under the idle conns lock. * that the handler needs to check it under the idle conns lock.
*/ */
HA_ATOMIC_OR(&fconn->wait_event.tasklet->state, TASK_F_USR1); HA_ATOMIC_OR(&fconn->wait_event.tasklet->state, TASK_F_USR1);
if (session_check_idle_conn(fconn->conn->owner, fconn->conn) != 0) {
/* Ensure session can keep a new idle connection. */ /* The connection is destroyed, let's leave */
if (session_check_idle_conn(sess, fconn->conn) != 0) {
fconn->conn->mux->destroy(fconn);
TRACE_DEVEL("outgoing connection killed", FCGI_EV_STRM_END|FCGI_EV_FCONN_ERR); TRACE_DEVEL("outgoing connection killed", FCGI_EV_STRM_END|FCGI_EV_FCONN_ERR);
return; return;
} }
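This is the first of several hunks applying the same refactor; mux_h1, mux_h2, the QUIC mux and SPOP below follow the identical shape. Distilled, the master-side detach path for a private connection looks like this (sketch; conn_is_idle() stands in for each mux's own emptiness test, here eb_is_empty(&fconn->streams_by_id)):

    if (conn->flags & CO_FL_PRIVATE) {
        if (!session_add_conn(sess, conn))
            conn->owner = NULL;               /* session insertion failed */

        if (conn_is_idle(conn)) {
            if (!conn->owner) {               /* failed above while idle */
                conn->mux->destroy(ctx);
                return;
            }
            /* tasklet may migrate; handler must recheck under the idle conns lock */
            HA_ATOMIC_OR(&wait_event.tasklet->state, TASK_F_USR1);
            if (session_check_idle_conn(sess, conn) != 0) {
                conn->mux->destroy(ctx);      /* session refused the idle conn */
                return;
            }
        }
    }

Two things change on the master side: session_add_conn() loses its redundant target argument, and session_check_idle_conn() no longer destroys the connection itself, the caller does.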

View File

@ -333,8 +333,8 @@ INITCALL1(STG_REGISTER, stats_register_module, &h1_stats_module);
/* the h1c and h1s pools */ /* the h1c and h1s pools */
DECLARE_STATIC_TYPED_POOL(pool_head_h1c, "h1c", struct h1c); DECLARE_STATIC_POOL(pool_head_h1c, "h1c", sizeof(struct h1c));
DECLARE_STATIC_TYPED_POOL(pool_head_h1s, "h1s", struct h1s); DECLARE_STATIC_POOL(pool_head_h1s, "h1s", sizeof(struct h1s));
static int h1_recv(struct h1c *h1c); static int h1_recv(struct h1c *h1c);
static int h1_send(struct h1c *h1c); static int h1_send(struct h1c *h1c);
@ -1138,24 +1138,20 @@ static int h1s_finish_detach(struct h1s *h1s)
if (h1c->conn->flags & CO_FL_PRIVATE) { if (h1c->conn->flags & CO_FL_PRIVATE) {
/* Add the connection in the session server list, if not already done */ /* Add the connection in the session server list, if not already done */
if (!session_add_conn(sess, h1c->conn)) { if (!session_add_conn(sess, h1c->conn, h1c->conn->target)) {
/* HTTP/1.1 conn is always idle after detach, can be removed if session insert failed. */
h1c->conn->owner = NULL; h1c->conn->owner = NULL;
h1c->conn->mux->destroy(h1c); h1c->conn->mux->destroy(h1c);
goto released; goto released;
} }
/* Always idle at this step */
/* HTTP/1.1 conn is always idle after detach. */
/* mark that the tasklet may lose its context to another thread and /* mark that the tasklet may lose its context to another thread and
* that the handler needs to check it under the idle conns lock. * that the handler needs to check it under the idle conns lock.
*/ */
HA_ATOMIC_OR(&h1c->wait_event.tasklet->state, TASK_F_USR1); HA_ATOMIC_OR(&h1c->wait_event.tasklet->state, TASK_F_USR1);
/* Ensure session can keep a new idle connection. */
if (session_check_idle_conn(sess, h1c->conn)) { if (session_check_idle_conn(sess, h1c->conn)) {
TRACE_DEVEL("outgoing connection rejected", H1_EV_STRM_END|H1_EV_H1C_END, h1c->conn); /* The connection got destroyed, let's leave */
h1c->conn->mux->destroy(h1c); TRACE_DEVEL("outgoing connection killed", H1_EV_STRM_END|H1_EV_H1C_END);
goto released; goto released;
} }
} }

View File

@ -448,10 +448,10 @@ static struct stats_module h2_stats_module = {
INITCALL1(STG_REGISTER, stats_register_module, &h2_stats_module); INITCALL1(STG_REGISTER, stats_register_module, &h2_stats_module);
/* the h2c connection pool */ /* the h2c connection pool */
DECLARE_STATIC_TYPED_POOL(pool_head_h2c, "h2c", struct h2c); DECLARE_STATIC_POOL(pool_head_h2c, "h2c", sizeof(struct h2c));
/* the h2s stream pool */ /* the h2s stream pool */
DECLARE_STATIC_TYPED_POOL(pool_head_h2s, "h2s", struct h2s); DECLARE_STATIC_POOL(pool_head_h2s, "h2s", sizeof(struct h2s));
/* the shared rx_bufs pool */ /* the shared rx_bufs pool */
struct pool_head *pool_head_h2_rx_bufs __read_mostly = NULL; struct pool_head *pool_head_h2_rx_bufs __read_mostly = NULL;
@ -5533,25 +5533,21 @@ static void h2_detach(struct sedesc *sd)
if (h2c->conn->flags & CO_FL_PRIVATE) { if (h2c->conn->flags & CO_FL_PRIVATE) {
/* Add the connection in the session server list, if not already done */ /* Add the connection in the session server list, if not already done */
if (!session_add_conn(sess, h2c->conn)) if (!session_add_conn(sess, h2c->conn, h2c->conn->target)) {
h2c->conn->owner = NULL; h2c->conn->owner = NULL;
if (eb_is_empty(&h2c->streams_by_id)) {
if (eb_is_empty(&h2c->streams_by_id)) {
if (!h2c->conn->owner) {
/* Session insertion above has failed and connection is idle, remove it. */
h2c->conn->mux->destroy(h2c); h2c->conn->mux->destroy(h2c);
TRACE_DEVEL("leaving on error after killing outgoing connection", H2_EV_STRM_END|H2_EV_H2C_ERR); TRACE_DEVEL("leaving on error after killing outgoing connection", H2_EV_STRM_END|H2_EV_H2C_ERR);
return; return;
} }
}
if (eb_is_empty(&h2c->streams_by_id)) {
/* mark that the tasklet may lose its context to another thread and /* mark that the tasklet may lose its context to another thread and
* that the handler needs to check it under the idle conns lock. * that the handler needs to check it under the idle conns lock.
*/ */
HA_ATOMIC_OR(&h2c->wait_event.tasklet->state, TASK_F_USR1); HA_ATOMIC_OR(&h2c->wait_event.tasklet->state, TASK_F_USR1);
if (session_check_idle_conn(h2c->conn->owner, h2c->conn) != 0) {
/* Ensure session can keep a new idle connection. */ /* At this point either the connection is destroyed, or it's been added to the server idle list, just stop */
if (session_check_idle_conn(sess, h2c->conn) != 0) {
h2c->conn->mux->destroy(h2c);
TRACE_DEVEL("leaving without reusable idle connection", H2_EV_STRM_END); TRACE_DEVEL("leaving without reusable idle connection", H2_EV_STRM_END);
return; return;
} }

View File

@ -27,7 +27,7 @@ struct mux_pt_ctx {
struct wait_event wait_event; struct wait_event wait_event;
}; };
DECLARE_STATIC_TYPED_POOL(pool_head_pt_ctx, "mux_pt", struct mux_pt_ctx); DECLARE_STATIC_POOL(pool_head_pt_ctx, "mux_pt", sizeof(struct mux_pt_ctx));
/* trace source and events */ /* trace source and events */
static void pt_trace(enum trace_level level, uint64_t mask, static void pt_trace(enum trace_level level, uint64_t mask,

View File

@ -33,9 +33,9 @@
#include <haproxy/trace.h> #include <haproxy/trace.h>
#include <haproxy/xref.h> #include <haproxy/xref.h>
DECLARE_TYPED_POOL(pool_head_qcc, "qcc", struct qcc); DECLARE_POOL(pool_head_qcc, "qcc", sizeof(struct qcc));
DECLARE_TYPED_POOL(pool_head_qcs, "qcs", struct qcs); DECLARE_POOL(pool_head_qcs, "qcs", sizeof(struct qcs));
DECLARE_STATIC_TYPED_POOL(pool_head_qc_stream_rxbuf, "qc_stream_rxbuf", struct qc_stream_rxbuf); DECLARE_STATIC_POOL(pool_head_qc_stream_rxbuf, "qc_stream_rxbuf", sizeof(struct qc_stream_rxbuf));
static void qmux_ctrl_send(struct qc_stream_desc *, uint64_t data, uint64_t offset); static void qmux_ctrl_send(struct qc_stream_desc *, uint64_t data, uint64_t offset);
static void qmux_ctrl_room(struct qc_stream_desc *, uint64_t room); static void qmux_ctrl_room(struct qc_stream_desc *, uint64_t room);
@ -1857,14 +1857,6 @@ int qcc_recv(struct qcc *qcc, uint64_t id, uint64_t len, uint64_t offset,
offset = qcs->rx.offset; offset = qcs->rx.offset;
} }
if (len && (qcc->flags & QC_CF_WAIT_HS)) {
if (!(qcc->conn->flags & CO_FL_EARLY_DATA)) {
/* Ensure 'Early-data: 1' will be set on the request. */
TRACE_PROTO("received early data", QMUX_EV_QCC_RECV|QMUX_EV_QCS_RECV, qcc->conn, qcs);
qcc->conn->flags |= CO_FL_EARLY_DATA;
}
}
left = len; left = len;
while (left) { while (left) {
struct qc_stream_rxbuf *buf; struct qc_stream_rxbuf *buf;
@ -3792,25 +3784,26 @@ static void qmux_strm_detach(struct sedesc *sd)
if (conn->flags & CO_FL_PRIVATE) { if (conn->flags & CO_FL_PRIVATE) {
TRACE_DEVEL("handle private connection reuse", QMUX_EV_STRM_END, conn); TRACE_DEVEL("handle private connection reuse", QMUX_EV_STRM_END, conn);
/* Ensure conn is attached to the session. Most of the time /* Add connection into session. If an error occurred,
* this is already done during connect so this is a no-op. * conn will be closed if idle, or insert will be
* retried on next detach.
*/ */
if (!session_add_conn(sess, conn)) { if (!session_add_conn(sess, conn, conn->target)) {
TRACE_ERROR("error during connection insert into session list", QMUX_EV_STRM_END, conn); TRACE_ERROR("error during connection insert into session list", QMUX_EV_STRM_END, conn);
conn->owner = NULL; conn->owner = NULL;
if (!qcc->nb_sc) {
qcc_shutdown(qcc);
goto end;
}
} }
if (!qcc->nb_sc) { /* If conn is idle, check if session can keep it. Conn is freed if this is not the case.
if (!conn->owner) { * TODO graceful shutdown should be preferable instead of plain mux->destroy().
/* Session insertion above has failed and connection is idle, remove it. */ */
goto release; if (!qcc->nb_sc && session_check_idle_conn(sess, conn)) {
} TRACE_DEVEL("idle conn rejected by session", QMUX_EV_STRM_END);
conn = NULL;
/* Ensure session can keep a new idle connection. */ goto end;
if (session_check_idle_conn(sess, conn)) {
TRACE_DEVEL("idle conn rejected by session", QMUX_EV_STRM_END, conn);
goto release;
}
} }
} }
else { else {
@ -3819,9 +3812,8 @@ static void qmux_strm_detach(struct sedesc *sd)
if (!srv_add_to_idle_list(objt_server(conn->target), conn, 1)) { if (!srv_add_to_idle_list(objt_server(conn->target), conn, 1)) {
/* Idle conn insert failure, gracefully close the connection. */ /* Idle conn insert failure, gracefully close the connection. */
TRACE_DEVEL("idle connection cannot be kept on the server", QMUX_EV_STRM_END, conn); TRACE_DEVEL("idle connection cannot be kept on the server", QMUX_EV_STRM_END, conn);
goto release; qcc_shutdown(qcc);
} }
goto end; goto end;
} }
else if (!conn->hash_node->node.node.leaf_p && else if (!conn->hash_node->node.node.leaf_p &&

View File

@ -210,8 +210,8 @@ static struct trace_source trace_spop __read_mostly = {
INITCALL1(STG_REGISTER, trace_register_source, TRACE_SOURCE); INITCALL1(STG_REGISTER, trace_register_source, TRACE_SOURCE);
/* SPOP connection and stream pools */ /* SPOP connection and stream pools */
DECLARE_STATIC_TYPED_POOL(pool_head_spop_conn, "spop_conn", struct spop_conn); DECLARE_STATIC_POOL(pool_head_spop_conn, "spop_conn", sizeof(struct spop_conn));
DECLARE_STATIC_TYPED_POOL(pool_head_spop_strm, "spop_strm", struct spop_strm); DECLARE_STATIC_POOL(pool_head_spop_strm, "spop_strm", sizeof(struct spop_strm));
const struct ist spop_err_reasons[SPOP_ERR_ENTRIES] = { const struct ist spop_err_reasons[SPOP_ERR_ENTRIES] = {
@ -2977,25 +2977,21 @@ static void spop_detach(struct sedesc *sd)
if (!(spop_conn->flags & (SPOP_CF_RCVD_SHUT|SPOP_CF_ERR_PENDING|SPOP_CF_ERROR))) { if (!(spop_conn->flags & (SPOP_CF_RCVD_SHUT|SPOP_CF_ERR_PENDING|SPOP_CF_ERROR))) {
if (spop_conn->conn->flags & CO_FL_PRIVATE) { if (spop_conn->conn->flags & CO_FL_PRIVATE) {
/* Add the connection in the session server list, if not already done */ /* Add the connection in the session server list, if not already done */
if (!session_add_conn(sess, spop_conn->conn)) if (!session_add_conn(sess, spop_conn->conn, spop_conn->conn->target)) {
spop_conn->conn->owner = NULL; spop_conn->conn->owner = NULL;
if (eb_is_empty(&spop_conn->streams_by_id)) {
if (eb_is_empty(&spop_conn->streams_by_id)) {
if (!spop_conn->conn->owner) {
/* Session insertion above has failed and connection is idle, remove it. */
spop_conn->conn->mux->destroy(spop_conn); spop_conn->conn->mux->destroy(spop_conn);
TRACE_DEVEL("leaving on error after killing outgoing connection", SPOP_EV_STRM_END|SPOP_EV_SPOP_CONN_ERR); TRACE_DEVEL("leaving on error after killing outgoing connection", SPOP_EV_STRM_END|SPOP_EV_SPOP_CONN_ERR);
return; return;
} }
}
if (eb_is_empty(&spop_conn->streams_by_id)) {
/* mark that the tasklet may lose its context to another thread and /* mark that the tasklet may lose its context to another thread and
* that the handler needs to check it under the idle conns lock. * that the handler needs to check it under the idle conns lock.
*/ */
HA_ATOMIC_OR(&spop_conn->wait_event.tasklet->state, TASK_F_USR1); HA_ATOMIC_OR(&spop_conn->wait_event.tasklet->state, TASK_F_USR1);
if (session_check_idle_conn(spop_conn->conn->owner, spop_conn->conn) != 0) {
/* Ensure session can keep a new idle connection. */ /* At this point either the connection is destroyed, or it's been added to the server idle list, just stop */
if (session_check_idle_conn(sess, spop_conn->conn) != 0) {
spop_conn->conn->mux->destroy(spop_conn);
TRACE_DEVEL("leaving without reusable idle connection", SPOP_EV_STRM_END); TRACE_DEVEL("leaving without reusable idle connection", SPOP_EV_STRM_END);
return; return;
} }

View File

@ -29,7 +29,6 @@
#include <haproxy/list.h> #include <haproxy/list.h>
#include <haproxy/log.h> #include <haproxy/log.h>
#include <haproxy/listener.h> #include <haproxy/listener.h>
#include <haproxy/list.h>
#include <haproxy/mworker.h> #include <haproxy/mworker.h>
#include <haproxy/peers.h> #include <haproxy/peers.h>
#include <haproxy/proto_sockpair.h> #include <haproxy/proto_sockpair.h>
@ -626,13 +625,7 @@ void mworker_catch_sigchld(struct sig_handler *sh)
} }
/* Better rely on the system than on a list of processes to check if it was the last one */ /* Better rely on the system than on a list of processes to check if it was the last one */
else if (exitpid == -1 && errno == ECHILD) { else if (exitpid == -1 && errno == ECHILD) {
struct post_deinit_fct *pdff;
ha_warning("All workers exited. Exiting... (%d)\n", (exitcode > 0) ? exitcode : EXIT_SUCCESS); ha_warning("All workers exited. Exiting... (%d)\n", (exitcode > 0) ? exitcode : EXIT_SUCCESS);
list_for_each_entry(pdff, &post_deinit_master_list, list)
pdff->fct();
atexit_flag = 0; atexit_flag = 0;
if (exitcode > 0) if (exitcode > 0)
exit(exitcode); /* parent must leave using the status code that provoked the exit */ exit(exitcode); /* parent must leave using the status code that provoked the exit */

View File

@ -20,7 +20,7 @@
#include <haproxy/thread.h> #include <haproxy/thread.h>
DECLARE_STATIC_TYPED_POOL(pool_head_pipe, "pipe", struct pipe); DECLARE_STATIC_POOL(pool_head_pipe, "pipe", sizeof(struct pipe));
struct pipe *pipes_live = NULL; /* pipes which are still ready to use */ struct pipe *pipes_live = NULL; /* pipes which are still ready to use */

View File

@ -290,74 +290,27 @@ static int mem_should_fail(const struct pool_head *pool)
* is available for a new creation. Two flags are supported : * is available for a new creation. Two flags are supported :
* - MEM_F_SHARED to indicate that the pool may be shared with other users * - MEM_F_SHARED to indicate that the pool may be shared with other users
* - MEM_F_EXACT to indicate that the size must not be rounded up * - MEM_F_EXACT to indicate that the size must not be rounded up
* The name must be a stable pointer during all the program's life time.
* The file and line are passed to store the registration location in the
* registration struct. Use create_pool() instead which does it for free.
* The alignment will be stored as-is in the registration.
*/ */
struct pool_head *create_pool_with_loc(const char *name, unsigned int size, struct pool_head *create_pool(char *name, unsigned int size, unsigned int flags)
unsigned int align, unsigned int flags,
const char *file, unsigned int line)
{
struct pool_registration *reg;
struct pool_head *pool;
reg = calloc(1, sizeof(*reg));
if (!reg)
return NULL;
reg->name = name;
reg->file = file;
reg->line = line;
reg->size = size;
reg->flags = flags;
reg->align = align;
pool = create_pool_from_reg(name, reg);
if (!pool)
free(reg);
return pool;
}
/* create a pool from a pool registration. All configuration is taken from
* there. The alignment will automatically be raised to sizeof(void*) or the
* next power of two so that it's always possible to lazily pass alignof() or
* sizeof(). Alignments are always respected when merging pools.
*/
struct pool_head *create_pool_from_reg(const char *name, struct pool_registration *reg)
{ {
unsigned int extra_mark, extra_caller, extra; unsigned int extra_mark, extra_caller, extra;
unsigned int flags = reg->flags; struct pool_registration *reg;
unsigned int size = reg->size; struct pool_head *pool;
unsigned int alignment = reg->align;
struct pool_head *pool = NULL;
struct pool_head *entry; struct pool_head *entry;
struct list *start; struct list *start;
unsigned int align; unsigned int align;
unsigned int best_diff; unsigned int best_diff;
int thr __maybe_unused; int thr __maybe_unused;
/* extend alignment if needed */ pool = NULL;
if (alignment < sizeof(void*)) reg = calloc(1, sizeof(*reg));
alignment = sizeof(void*); if (!reg)
else if (alignment & (alignment - 1)) { goto fail;
/* not power of two! round up to next power of two by filling
* all LSB in O(log(log(N))) then increment the result.
*/
int shift = 1;
do {
alignment |= alignment >> shift;
shift *= 2;
} while (alignment & (alignment + 1));
alignment++;
}
if (reg->type_align && alignment < reg->type_align) { strlcpy2(reg->name, name, sizeof(reg->name));
ha_alert("BUG in the code: at %s:%u, requested creation of pool '%s' aligned to %u " reg->size = size;
"while type requires alignment of %u! Please report to developers. Aborting.\n", reg->flags = flags;
reg->file, reg->line, name, alignment, reg->type_align); reg->align = 0;
return NULL;
}
extra_mark = (pool_debugging & POOL_DBG_TAG) ? POOL_EXTRA_MARK : 0; extra_mark = (pool_debugging & POOL_DBG_TAG) ? POOL_EXTRA_MARK : 0;
extra_caller = (pool_debugging & POOL_DBG_CALLER) ? POOL_EXTRA_CALLER : 0; extra_caller = (pool_debugging & POOL_DBG_CALLER) ? POOL_EXTRA_CALLER : 0;
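The alignment round-up kept on the master side is a classic bit trick: when the value is not a power of two, smear the top bit into every lower position, then add one. Isolated for clarity (sketch):

    static unsigned int round_up_pow2(unsigned int v)
    {
        unsigned int shift = 1;

        if (v & (v - 1)) {             /* not already a power of two */
            do {
                v |= v >> shift;       /* fill all bits below the top one */
                shift *= 2;
            } while (v & (v + 1));     /* stop once the low bits are all 1s */
            v++;
        }
        return v;                      /* e.g. 24 -> 31 -> 32 */
    }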
@ -454,7 +407,6 @@ struct pool_head *create_pool_from_reg(const char *name, struct pool_registratio
strlcpy2(pool->name, name, sizeof(pool->name)); strlcpy2(pool->name, name, sizeof(pool->name));
pool->alloc_sz = size + extra; pool->alloc_sz = size + extra;
pool->size = size; pool->size = size;
pool->align = alignment;
pool->flags = flags; pool->flags = flags;
LIST_APPEND(start, &pool->list); LIST_APPEND(start, &pool->list);
LIST_INIT(&pool->regs); LIST_INIT(&pool->regs);
@ -474,8 +426,6 @@ struct pool_head *create_pool_from_reg(const char *name, struct pool_registratio
pool->size = size; pool->size = size;
pool->alloc_sz = size + extra; pool->alloc_sz = size + extra;
} }
if (alignment > pool->align)
pool->align = alignment;
DPRINTF(stderr, "Sharing %s with %s\n", name, pool->name); DPRINTF(stderr, "Sharing %s with %s\n", name, pool->name);
} }
@ -483,8 +433,10 @@ struct pool_head *create_pool_from_reg(const char *name, struct pool_registratio
pool->users++; pool->users++;
pool->sum_size += size; pool->sum_size += size;
fail:
return pool; return pool;
fail:
free(reg);
return NULL;
} }
/* Tries to allocate an object for the pool <pool> using the system's allocator /* Tries to allocate an object for the pool <pool> using the system's allocator
@ -497,9 +449,9 @@ void *pool_get_from_os_noinc(struct pool_head *pool)
void *ptr; void *ptr;
if ((pool_debugging & POOL_DBG_UAF) || (pool->flags & MEM_F_UAF)) if ((pool_debugging & POOL_DBG_UAF) || (pool->flags & MEM_F_UAF))
ptr = pool_alloc_area_uaf(pool->alloc_sz, pool->align); ptr = pool_alloc_area_uaf(pool->alloc_sz);
else else
ptr = pool_alloc_area(pool->alloc_sz, pool->align); ptr = pool_alloc_area(pool->alloc_sz);
if (ptr) if (ptr)
return ptr; return ptr;
_HA_ATOMIC_INC(&pool->buckets[pool_tbucket()].failed); _HA_ATOMIC_INC(&pool->buckets[pool_tbucket()].failed);
@ -1085,8 +1037,7 @@ void *pool_destroy(struct pool_head *pool)
list_for_each_entry_safe(reg, back, &pool->regs, list) { list_for_each_entry_safe(reg, back, &pool->regs, list) {
LIST_DELETE(&reg->list); LIST_DELETE(&reg->list);
if (!(reg->flags & MEM_F_STATREG)) free(reg);
free(reg);
} }
LIST_DELETE(&pool->list); LIST_DELETE(&pool->list);
@ -1340,10 +1291,10 @@ void dump_pools_to_trash(int how, int max, const char *pfx)
chunk_appendf(&trash, ". Use SIGQUIT to flush them.\n"); chunk_appendf(&trash, ". Use SIGQUIT to flush them.\n");
for (i = 0; i < nbpools && i < max; i++) { for (i = 0; i < nbpools && i < max; i++) {
chunk_appendf(&trash, " - Pool %s (%u bytes/%u) : %lu allocated (%lu bytes), %lu used" chunk_appendf(&trash, " - Pool %s (%lu bytes) : %lu allocated (%lu bytes), %lu used"
" (~%lu by thread caches)" " (~%lu by thread caches)"
", needed_avg %lu, %lu failures, %u users, @%p%s\n", ", needed_avg %lu, %lu failures, %u users, @%p%s\n",
pool_info[i].entry->name, pool_info[i].entry->size, pool_info[i].entry->align, pool_info[i].entry->name, (ulong)pool_info[i].entry->size,
pool_info[i].alloc_items, pool_info[i].alloc_bytes, pool_info[i].alloc_items, pool_info[i].alloc_bytes,
pool_info[i].used_items, pool_info[i].cached_items, pool_info[i].used_items, pool_info[i].cached_items,
pool_info[i].need_avg, pool_info[i].failed_items, pool_info[i].need_avg, pool_info[i].failed_items,
@ -1356,12 +1307,8 @@ void dump_pools_to_trash(int how, int max, const char *pfx)
if (detailed) { if (detailed) {
struct pool_registration *reg; struct pool_registration *reg;
list_for_each_entry(reg, &pool_info[i].entry->regs, list) { list_for_each_entry(reg, &pool_info[i].entry->regs, list)
chunk_appendf(&trash, " > %-12s: size=%u flags=%#x align=%u", reg->name, reg->size, reg->flags, reg->align); chunk_appendf(&trash, " > %-12s: size=%u flags=%#x align=%u\n", reg->name, reg->size, reg->flags, reg->align);
if (reg->file && reg->line)
chunk_appendf(&trash, " [%s:%u]", reg->file, reg->line);
chunk_appendf(&trash, "\n");
}
} }
} }
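For reference, the master-side format strings would render a detailed entry roughly as follows (all values illustrative, including the file and line):

     - Pool h2s (1480 bytes/64) : 32 allocated (47360 bytes), 10 used (~4 by thread caches), needed_avg 8, 0 failures, 2 users, @0x55d0c132a040
       > h2s         : size=1480 flags=0x1 align=64 [src/mux_h2.c:452]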
@ -1575,12 +1522,12 @@ static int cli_io_handler_dump_pools(struct appctx *appctx)
* resulting pointer into <ptr>. If the allocation fails, it quits after * resulting pointer into <ptr>. If the allocation fails, it quits after
* emitting an error message. * emitting an error message.
*/ */
void create_pool_callback(struct pool_head **ptr, char *name, struct pool_registration *reg) void create_pool_callback(struct pool_head **ptr, char *name, unsigned int size)
{ {
*ptr = create_pool_from_reg(name, reg); *ptr = create_pool(name, size, MEM_F_SHARED);
if (!*ptr) { if (!*ptr) {
ha_alert("Failed to allocate pool '%s' of size %u : %s. Aborting.\n", ha_alert("Failed to allocate pool '%s' of size %u : %s. Aborting.\n",
name, reg->size, strerror(errno)); name, size, strerror(errno));
exit(1); exit(1);
} }
} }

View File

@ -1768,7 +1768,7 @@ static int proxy_postcheck(struct proxy *px)
* be_counters may be used even if the proxy lacks the backend * be_counters may be used even if the proxy lacks the backend
* capability * capability
*/ */
if (!counters_be_shared_prepare(&px->be_counters.shared, &px->guid)) { if (!counters_be_shared_init(&px->be_counters.shared, &px->guid)) {
ha_alert("out of memory while setting up shared counters for %s %s\n", ha_alert("out of memory while setting up shared counters for %s %s\n",
proxy_type_str(px), px->id); proxy_type_str(px), px->id);
err_code |= ERR_ALERT | ERR_FATAL; err_code |= ERR_ALERT | ERR_FATAL;
@ -2823,8 +2823,6 @@ void proxy_adjust_all_maxconn()
*/ */
static int post_section_px_cleanup() static int post_section_px_cleanup()
{ {
if (!curproxy)
return 0; // nothing to do
if ((curproxy->cap & PR_CAP_LISTEN) && !(curproxy->cap & PR_CAP_DEF)) { if ((curproxy->cap & PR_CAP_LISTEN) && !(curproxy->cap & PR_CAP_DEF)) {
/* This is a regular proxy (not defaults). It doesn't need /* This is a regular proxy (not defaults). It doesn't need
* to keep a default-server section if it still had one. We * to keep a default-server section if it still had one. We

View File

@ -92,7 +92,7 @@
#define KEY_CLASS_OFFSET_BOUNDARY(key) (KEY_CLASS(key) | NOW_OFFSET_BOUNDARY()) #define KEY_CLASS_OFFSET_BOUNDARY(key) (KEY_CLASS(key) | NOW_OFFSET_BOUNDARY())
#define MAKE_KEY(class, offset) (((u32)(class + 0x7ff) << 20) | ((u32)(now_ms + offset) & 0xfffff)) #define MAKE_KEY(class, offset) (((u32)(class + 0x7ff) << 20) | ((u32)(now_ms + offset) & 0xfffff))
DECLARE_TYPED_POOL(pool_head_pendconn, "pendconn", struct pendconn, 0, 64); DECLARE_POOL(pool_head_pendconn, "pendconn", sizeof(struct pendconn));
/* returns the effective dynamic maxconn for a server, considering the minconn /* returns the effective dynamic maxconn for a server, considering the minconn
* and the proxy's usage relative to its dynamic connections limit. It is * and the proxy's usage relative to its dynamic connections limit. It is
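MAKE_KEY() is worth unpacking: the key is a single u32 holding a 12-bit priority class in its top bits (biased by 0x7ff, presumably so that negative classes still sort in ascending order) and a 20-bit wrapping timestamp in its low bits. A worked example with an illustrative now_ms:

    /* class = 0, offset = 0, now_ms = 0x12345678 */
    key = ((u32)(0 + 0x7ff) << 20) | ((u32)0x12345678 & 0xfffff);
    /*  = 0x7ff00000          | 0x45678 = 0x7ff45678 */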
@ -747,7 +747,7 @@ int pendconn_dequeue(struct stream *strm)
if (p->target) { if (p->target) {
/* a server picked this pendconn, it must skip LB */ /* a server picked this pendconn, it must skip LB */
stream_set_srv_target(strm, p->target); strm->target = &p->target->obj_type;
strm->flags |= SF_ASSIGNED; strm->flags |= SF_ASSIGNED;
} }

View File

@ -7,7 +7,7 @@
#include <haproxy/quic_trace.h> #include <haproxy/quic_trace.h>
#include <haproxy/trace.h> #include <haproxy/trace.h>
DECLARE_STATIC_TYPED_POOL(pool_head_quic_arng, "quic_arng", struct quic_arng_node); DECLARE_STATIC_POOL(pool_head_quic_arng, "quic_arng", sizeof(struct quic_arng_node));
/* Deallocate <l> list of ACK ranges. */ /* Deallocate <l> list of ACK ranges. */
void quic_free_arngs(struct quic_conn *qc, struct quic_arngs *arngs) void quic_free_arngs(struct quic_conn *qc, struct quic_arngs *arngs)

View File

@ -138,10 +138,11 @@ const struct quic_version *preferred_version;
*/ */
const struct quic_version quic_version_VN_reserved = { .num = 0, }; const struct quic_version quic_version_VN_reserved = { .num = 0, };
DECLARE_STATIC_TYPED_POOL(pool_head_quic_conn, "quic_conn", struct quic_conn); DECLARE_STATIC_POOL(pool_head_quic_conn, "quic_conn", sizeof(struct quic_conn));
DECLARE_STATIC_TYPED_POOL(pool_head_quic_conn_closed, "quic_conn_closed", struct quic_conn_closed); DECLARE_STATIC_POOL(pool_head_quic_conn_closed, "quic_conn_closed", sizeof(struct quic_conn_closed));
DECLARE_STATIC_TYPED_POOL(pool_head_quic_cids, "quic_cids", struct eb_root); DECLARE_STATIC_POOL(pool_head_quic_cids, "quic_cids", sizeof(struct eb_root));
DECLARE_TYPED_POOL(pool_head_quic_connection_id, "quic_connection_id", struct quic_connection_id); DECLARE_POOL(pool_head_quic_connection_id,
"quic_connection_id", sizeof(struct quic_connection_id));
struct task *quic_conn_app_io_cb(struct task *t, void *context, unsigned int state); struct task *quic_conn_app_io_cb(struct task *t, void *context, unsigned int state);
static int quic_conn_init_timer(struct quic_conn *qc); static int quic_conn_init_timer(struct quic_conn *qc);
@ -150,7 +151,7 @@ static int quic_conn_init_idle_timer_task(struct quic_conn *qc, struct proxy *px
/* Returns 1 if the peer has validated <qc> QUIC connection address, 0 if not. */ /* Returns 1 if the peer has validated <qc> QUIC connection address, 0 if not. */
int quic_peer_validated_addr(struct quic_conn *qc) int quic_peer_validated_addr(struct quic_conn *qc)
{ {
if (qc_is_back(qc)) if (objt_server(qc->target))
return 1; return 1;
if (qc->flags & QUIC_FL_CONN_PEER_VALIDATED_ADDR) if (qc->flags & QUIC_FL_CONN_PEER_VALIDATED_ADDR)
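qc_is_back() only exists on the master side; going by the QUIC_FL_CONN_IS_BACK assignment and the (B)/(F) trace marker in later hunks, it is presumably nothing more than a flag test that spares inspecting qc->target:

    /* sketch, assuming the helper merely reads the backend flag */
    static inline int qc_is_back(const struct quic_conn *qc)
    {
        return !!(qc->flags & QUIC_FL_CONN_IS_BACK);
    }

Every objt_listener(qc->target) and objt_server(qc->target) test in the QUIC hunks that follow is replaced by it.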
@ -477,7 +478,7 @@ int quic_build_post_handshake_frames(struct quic_conn *qc)
qel = qc->ael; qel = qc->ael;
/* Only servers must send a HANDSHAKE_DONE frame. */ /* Only servers must send a HANDSHAKE_DONE frame. */
if (!qc_is_back(qc)) { if (objt_listener(qc->target)) {
size_t new_token_frm_len; size_t new_token_frm_len;
frm = qc_frm_alloc(QUIC_FT_HANDSHAKE_DONE); frm = qc_frm_alloc(QUIC_FT_HANDSHAKE_DONE);
@ -824,7 +825,7 @@ struct task *quic_conn_io_cb(struct task *t, void *context, unsigned int state)
st = qc->state; st = qc->state;
if (!qc_is_back(qc)) { if (objt_listener(qc->target)) {
if (st >= QUIC_HS_ST_COMPLETE && !quic_tls_pktns_is_dcd(qc, qc->hpktns)) if (st >= QUIC_HS_ST_COMPLETE && !quic_tls_pktns_is_dcd(qc, qc->hpktns))
discard_hpktns = 1; discard_hpktns = 1;
} }
@ -840,13 +841,13 @@ struct task *quic_conn_io_cb(struct task *t, void *context, unsigned int state)
qc_set_timer(qc); qc_set_timer(qc);
qc_el_rx_pkts_del(qc->hel); qc_el_rx_pkts_del(qc->hel);
qc_release_pktns_frms(qc, qc->hel->pktns); qc_release_pktns_frms(qc, qc->hel->pktns);
if (qc_is_back(qc)) { if (objt_server(qc->target)) {
/* I/O callback switch */ /* I/O callback switch */
qc->wait_event.tasklet->process = quic_conn_app_io_cb; qc->wait_event.tasklet->process = quic_conn_app_io_cb;
} }
} }
if (!qc_is_back(qc) && st >= QUIC_HS_ST_COMPLETE) { if (objt_listener(qc->target) && st >= QUIC_HS_ST_COMPLETE) {
/* Note: if no token for address validation was received /* Note: if no token for address validation was received
* for a 0RTT connection, some 0RTT packet could still be * for a 0RTT connection, some 0RTT packet could still be
* waiting for HP removal AFTER the successful handshake completion. * waiting for HP removal AFTER the successful handshake completion.
@ -912,7 +913,7 @@ struct task *quic_conn_io_cb(struct task *t, void *context, unsigned int state)
* discard Initial keys when it first sends a Handshake packet... * discard Initial keys when it first sends a Handshake packet...
*/ */
if (qc_is_back(qc) && !quic_tls_pktns_is_dcd(qc, qc->ipktns) && if (objt_server(qc->target) && !quic_tls_pktns_is_dcd(qc, qc->ipktns) &&
qc->hpktns && qc->hpktns->tx.in_flight > 0) { qc->hpktns && qc->hpktns->tx.in_flight > 0) {
/* Discard the Initial packet number space. */ /* Discard the Initial packet number space. */
TRACE_PROTO("discarding Initial pktns", QUIC_EV_CONN_PRSHPKT, qc); TRACE_PROTO("discarding Initial pktns", QUIC_EV_CONN_PRSHPKT, qc);
@ -1028,7 +1029,7 @@ struct task *qc_process_timer(struct task *task, void *ctx, unsigned int state)
} }
} }
} }
else if (qc_is_back(qc) && qc->state <= QUIC_HS_ST_COMPLETE) { else if (objt_server(qc->target) && qc->state <= QUIC_HS_ST_COMPLETE) {
if (quic_tls_has_tx_sec(qc->hel)) if (quic_tls_has_tx_sec(qc->hel))
qc->hel->pktns->tx.pto_probe = 1; qc->hel->pktns->tx.pto_probe = 1;
if (quic_tls_has_tx_sec(qc->iel)) if (quic_tls_has_tx_sec(qc->iel))
@ -1177,11 +1178,6 @@ struct quic_conn *qc_new_conn(const struct quic_version *qv, int ipv4,
cc_algo = l->bind_conf->quic_cc_algo; cc_algo = l->bind_conf->quic_cc_algo;
qc->flags = 0; qc->flags = 0;
/* Duplicate GSO status on listener to connection */
if (HA_ATOMIC_LOAD(&l->flags) & LI_F_UDP_GSO_NOTSUPP)
qc->flags |= QUIC_FL_CONN_UDP_GSO_EIO;
/* Mark this connection as having not received any token when 0-RTT is enabled. */ /* Mark this connection as having not received any token when 0-RTT is enabled. */
if (l->bind_conf->ssl_conf.early_data && !token) if (l->bind_conf->ssl_conf.early_data && !token)
qc->flags |= QUIC_FL_CONN_NO_TOKEN_RCVD; qc->flags |= QUIC_FL_CONN_NO_TOKEN_RCVD;
@ -1197,7 +1193,7 @@ struct quic_conn *qc_new_conn(const struct quic_version *qv, int ipv4,
else { else {
struct quic_connection_id *conn_cid = NULL; struct quic_connection_id *conn_cid = NULL;
qc->flags = QUIC_FL_CONN_IS_BACK|QUIC_FL_CONN_PEER_VALIDATED_ADDR; qc->flags = QUIC_FL_CONN_PEER_VALIDATED_ADDR;
qc->state = QUIC_HS_ST_CLIENT_INITIAL; qc->state = QUIC_HS_ST_CLIENT_INITIAL;
/* This is the original connection ID from the peer server /* This is the original connection ID from the peer server
@ -1607,7 +1603,7 @@ int quic_conn_release(struct quic_conn *qc)
/* Connection released before handshake completion. */ /* Connection released before handshake completion. */
if (unlikely(qc->state < QUIC_HS_ST_COMPLETE)) { if (unlikely(qc->state < QUIC_HS_ST_COMPLETE)) {
if (!qc_is_back(qc)) { if (objt_listener(qc->target)) {
BUG_ON(__objt_listener(qc->target)->rx.quic_curr_handshake == 0); BUG_ON(__objt_listener(qc->target)->rx.quic_curr_handshake == 0);
HA_ATOMIC_DEC(&__objt_listener(qc->target)->rx.quic_curr_handshake); HA_ATOMIC_DEC(&__objt_listener(qc->target)->rx.quic_curr_handshake);
} }
@ -2017,16 +2013,9 @@ void qc_bind_tid_commit(struct quic_conn *qc, struct listener *new_li)
/* At this point no connection was accounted for yet on this /* At this point no connection was accounted for yet on this
* listener so it's OK to just swap the pointer. * listener so it's OK to just swap the pointer.
*/ */
if (new_li && new_li != __objt_listener(qc->target)) { if (new_li && new_li != __objt_listener(qc->target))
qc->target = &new_li->obj_type; qc->target = &new_li->obj_type;
/* Update GSO conn support based on new listener status. */
if (HA_ATOMIC_LOAD(&new_li->flags) & LI_F_UDP_GSO_NOTSUPP)
qc->flags |= QUIC_FL_CONN_UDP_GSO_EIO;
else
qc->flags &= ~QUIC_FL_CONN_UDP_GSO_EIO;
}
/* Rebind the connection FD. */ /* Rebind the connection FD. */
if (qc_test_fd(qc)) { if (qc_test_fd(qc)) {
/* Reading is reactivated by the new thread. */ /* Reading is reactivated by the new thread. */

View File

@ -22,8 +22,8 @@
#include <haproxy/quic_tx.h> #include <haproxy/quic_tx.h>
#include <haproxy/trace.h> #include <haproxy/trace.h>
DECLARE_TYPED_POOL(pool_head_quic_frame, "quic_frame", struct quic_frame); DECLARE_POOL(pool_head_quic_frame, "quic_frame", sizeof(struct quic_frame));
DECLARE_TYPED_POOL(pool_head_qf_crypto, "qf_crypto", struct qf_crypto); DECLARE_POOL(pool_head_qf_crypto, "qf_crypto", sizeof(struct qf_crypto));
const char *quic_frame_type_string(enum quic_frame_type ft) const char *quic_frame_type_string(enum quic_frame_type ft)
{ {

View File

@ -150,22 +150,22 @@ void quic_tls_compat_keylog_callback(const SSL *ssl, const char *line)
if (sizeof(QUIC_OPENSSL_COMPAT_CLIENT_HANDSHAKE) - 1 == n && if (sizeof(QUIC_OPENSSL_COMPAT_CLIENT_HANDSHAKE) - 1 == n &&
!strncmp(start, QUIC_OPENSSL_COMPAT_CLIENT_HANDSHAKE, n)) { !strncmp(start, QUIC_OPENSSL_COMPAT_CLIENT_HANDSHAKE, n)) {
level = ssl_encryption_handshake; level = ssl_encryption_handshake;
write = !qc_is_back(qc) ? 0 : 1; write = objt_listener(qc->target) ? 0 : 1;
} }
else if (sizeof(QUIC_OPENSSL_COMPAT_SERVER_HANDSHAKE) - 1 == n && else if (sizeof(QUIC_OPENSSL_COMPAT_SERVER_HANDSHAKE) - 1 == n &&
!strncmp(start, QUIC_OPENSSL_COMPAT_SERVER_HANDSHAKE, n)) { !strncmp(start, QUIC_OPENSSL_COMPAT_SERVER_HANDSHAKE, n)) {
level = ssl_encryption_handshake; level = ssl_encryption_handshake;
write = !qc_is_back(qc) ? 1 : 0; write = objt_listener(qc->target) ? 1 : 0;
} }
else if (sizeof(QUIC_OPENSSL_COMPAT_CLIENT_APPLICATION) - 1 == n && else if (sizeof(QUIC_OPENSSL_COMPAT_CLIENT_APPLICATION) - 1 == n &&
!strncmp(start, QUIC_OPENSSL_COMPAT_CLIENT_APPLICATION, n)) { !strncmp(start, QUIC_OPENSSL_COMPAT_CLIENT_APPLICATION, n)) {
level = ssl_encryption_application; level = ssl_encryption_application;
write = !qc_is_back(qc) ? 0 : 1; write = objt_listener(qc->target) ? 0 : 1;
} }
else if (sizeof(QUIC_OPENSSL_COMPAT_SERVER_APPLICATION) - 1 == n && else if (sizeof(QUIC_OPENSSL_COMPAT_SERVER_APPLICATION) - 1 == n &&
!strncmp(start, QUIC_OPENSSL_COMPAT_SERVER_APPLICATION, n)) { !strncmp(start, QUIC_OPENSSL_COMPAT_SERVER_APPLICATION, n)) {
level = ssl_encryption_application; level = ssl_encryption_application;
write = !qc_is_back(qc) ? 1 : 0; write = objt_listener(qc->target) ? 1 : 0;
} }
else else
goto leave; goto leave;
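The four branches reduce to a small truth table: CLIENT_* labels are read secrets when we are the server and write secrets when we are the client, and symmetrically for SERVER_*. Summarized (front = listener side, back = server side; on master, qc_is_back() replaces the objt_listener() tests):

    /*                     front (listener)   back (server)
     * CLIENT_* secrets    write = 0 (read)   write = 1
     * SERVER_* secrets    write = 1          write = 0 (read)
     */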

View File

@ -166,7 +166,7 @@ void qc_prep_fast_retrans(struct quic_conn *qc,
/* When building a packet from another one, the field which may increase the /* When building a packet from another one, the field which may increase the
* packet size is the packet number. And the maximum increase is 4 bytes. * packet size is the packet number. And the maximum increase is 4 bytes.
*/ */
if (!quic_peer_validated_addr(qc) && !qc_is_back(qc) && if (!quic_peer_validated_addr(qc) && objt_listener(qc->target) &&
pkt->len + 4 > quic_may_send_bytes(qc)) { pkt->len + 4 > quic_may_send_bytes(qc)) {
qc->flags |= QUIC_FL_CONN_ANTI_AMPLIFICATION_REACHED; qc->flags |= QUIC_FL_CONN_ANTI_AMPLIFICATION_REACHED;
TRACE_PROTO("anti-amplification limit would be reached", QUIC_EV_CONN_SPPKTS, qc, pkt); TRACE_PROTO("anti-amplification limit would be reached", QUIC_EV_CONN_SPPKTS, qc, pkt);
@ -230,7 +230,7 @@ void qc_prep_hdshk_fast_retrans(struct quic_conn *qc,
/* When building a packet from another one, the field which may increase the /* When building a packet from another one, the field which may increase the
* packet size is the packet number. And the maximum increase is 4 bytes. * packet size is the packet number. And the maximum increase is 4 bytes.
*/ */
if (!quic_peer_validated_addr(qc) && !qc_is_back(qc)) { if (!quic_peer_validated_addr(qc) && objt_listener(qc->target)) {
size_t dglen = pkt->len + 4; size_t dglen = pkt->len + 4;
size_t may_send; size_t may_send;

View File

@ -36,8 +36,8 @@
#include <haproxy/trace.h> #include <haproxy/trace.h>
DECLARE_POOL(pool_head_quic_conn_rxbuf, "quic_conn_rxbuf", QUIC_CONN_RX_BUFSZ); DECLARE_POOL(pool_head_quic_conn_rxbuf, "quic_conn_rxbuf", QUIC_CONN_RX_BUFSZ);
DECLARE_TYPED_POOL(pool_head_quic_dgram, "quic_dgram", struct quic_dgram); DECLARE_POOL(pool_head_quic_dgram, "quic_dgram", sizeof(struct quic_dgram));
DECLARE_TYPED_POOL(pool_head_quic_rx_packet, "quic_rx_packet", struct quic_rx_packet); DECLARE_POOL(pool_head_quic_rx_packet, "quic_rx_packet", sizeof(struct quic_rx_packet));
/* Decode an expected packet number from <truncated_on> its truncated value, /* Decode an expected packet number from <truncated_on> its truncated value,
* depending on <largest_pn> the largest received packet number, and <pn_nbits> * depending on <largest_pn> the largest received packet number, and <pn_nbits>
@ -920,7 +920,7 @@ static int qc_parse_pkt_frms(struct quic_conn *qc, struct quic_rx_packet *pkt,
break; break;
case QUIC_RX_RET_FRM_DUP: case QUIC_RX_RET_FRM_DUP:
if (!qc_is_back(qc) && qel == qc->iel && if (objt_listener(qc->target) && qel == qc->iel &&
!(qc->flags & QUIC_FL_CONN_HANDSHAKE_SPEED_UP)) { !(qc->flags & QUIC_FL_CONN_HANDSHAKE_SPEED_UP)) {
fast_retrans = 1; fast_retrans = 1;
} }
@ -936,7 +936,7 @@ static int qc_parse_pkt_frms(struct quic_conn *qc, struct quic_rx_packet *pkt,
break; break;
case QUIC_FT_NEW_TOKEN: case QUIC_FT_NEW_TOKEN:
if (!qc_is_back(qc)) { if (objt_listener(qc->target)) {
TRACE_ERROR("reject NEW_TOKEN frame emitted by client", TRACE_ERROR("reject NEW_TOKEN frame emitted by client",
QUIC_EV_CONN_PRSHPKT, qc); QUIC_EV_CONN_PRSHPKT, qc);
@ -1096,7 +1096,7 @@ static int qc_parse_pkt_frms(struct quic_conn *qc, struct quic_rx_packet *pkt,
} }
break; break;
case QUIC_FT_HANDSHAKE_DONE: case QUIC_FT_HANDSHAKE_DONE:
if (!qc_is_back(qc)) { if (objt_listener(qc->target)) {
TRACE_ERROR("non accepted QUIC_FT_HANDSHAKE_DONE frame", TRACE_ERROR("non accepted QUIC_FT_HANDSHAKE_DONE frame",
QUIC_EV_CONN_PRSHPKT, qc); QUIC_EV_CONN_PRSHPKT, qc);
@ -1186,7 +1186,7 @@ static int qc_parse_pkt_frms(struct quic_conn *qc, struct quic_rx_packet *pkt,
* has successfully parsed a Handshake packet. The Initial encryption must also * has successfully parsed a Handshake packet. The Initial encryption must also
* be discarded. * be discarded.
*/ */
if (pkt->type == QUIC_PACKET_TYPE_HANDSHAKE && !qc_is_back(qc)) { if (pkt->type == QUIC_PACKET_TYPE_HANDSHAKE && objt_listener(qc->target)) {
if (qc->state >= QUIC_HS_ST_SERVER_INITIAL) { if (qc->state >= QUIC_HS_ST_SERVER_INITIAL) {
if (qc->ipktns && !quic_tls_pktns_is_dcd(qc, qc->ipktns)) { if (qc->ipktns && !quic_tls_pktns_is_dcd(qc, qc->ipktns)) {
/* Discard the handshake packet number space. */ /* Discard the handshake packet number space. */
@ -1225,7 +1225,7 @@ static inline void qc_handle_spin_bit(struct quic_conn *qc, struct quic_rx_packe
pkt->pn <= largest_pn) pkt->pn <= largest_pn)
return; return;
if (!qc_is_back(qc)) { if (objt_listener(qc->target)) {
if (pkt->flags & QUIC_FL_RX_PACKET_SPIN_BIT) if (pkt->flags & QUIC_FL_RX_PACKET_SPIN_BIT)
qc->flags |= QUIC_FL_CONN_SPIN_BIT; qc->flags |= QUIC_FL_CONN_SPIN_BIT;
else else
@ -1248,7 +1248,7 @@ static void qc_rm_hp_pkts(struct quic_conn *qc, struct quic_enc_level *el)
TRACE_ENTER(QUIC_EV_CONN_ELRMHP, qc); TRACE_ENTER(QUIC_EV_CONN_ELRMHP, qc);
/* A server must not process incoming 1-RTT packets before the handshake is complete. */ /* A server must not process incoming 1-RTT packets before the handshake is complete. */
if (el == qc->ael && !qc_is_back(qc) && qc->state < QUIC_HS_ST_COMPLETE) { if (el == qc->ael && objt_listener(qc->target) && qc->state < QUIC_HS_ST_COMPLETE) {
TRACE_PROTO("RX hp not removed (handshake not completed)", TRACE_PROTO("RX hp not removed (handshake not completed)",
QUIC_EV_CONN_ELRMHP, qc); QUIC_EV_CONN_ELRMHP, qc);
goto out; goto out;

View File

@ -10,7 +10,7 @@
#include <haproxy/ssl_sock.h> #include <haproxy/ssl_sock.h>
#include <haproxy/trace.h> #include <haproxy/trace.h>
DECLARE_TYPED_POOL(pool_head_quic_ssl_sock_ctx, "quic_ssl_sock_ctx", struct ssl_sock_ctx); DECLARE_POOL(pool_head_quic_ssl_sock_ctx, "quic_ssl_sock_ctx", sizeof(struct ssl_sock_ctx));
const char *quic_ciphers = "TLS_AES_128_GCM_SHA256:TLS_AES_256_GCM_SHA384" const char *quic_ciphers = "TLS_AES_128_GCM_SHA256:TLS_AES_256_GCM_SHA384"
":TLS_CHACHA20_POLY1305_SHA256:TLS_AES_128_CCM_SHA256"; ":TLS_CHACHA20_POLY1305_SHA256:TLS_AES_128_CCM_SHA256";
#ifdef HAVE_OPENSSL_QUIC #ifdef HAVE_OPENSSL_QUIC
@ -232,7 +232,7 @@ static int ha_quic_set_encryption_secrets(SSL *ssl, enum ssl_encryption_level_t
* listener and if a token was received. Note that a listener derives only RX * listener and if a token was received. Note that a listener derives only RX
* secrets for this level. * secrets for this level.
*/ */
if (!qc_is_back(qc) && level == ssl_encryption_early_data) { if (objt_listener(qc->target) && level == ssl_encryption_early_data) {
if (qc->flags & QUIC_FL_CONN_NO_TOKEN_RCVD) { if (qc->flags & QUIC_FL_CONN_NO_TOKEN_RCVD) {
/* Leave a chance to the address validation to be completed by the /* Leave a chance to the address validation to be completed by the
* handshake without starting the mux: one does not want to process * handshake without starting the mux: one does not want to process
@ -281,7 +281,7 @@ static int ha_quic_set_encryption_secrets(SSL *ssl, enum ssl_encryption_level_t
} }
/* Set the transport parameters in the TLS stack. */ /* Set the transport parameters in the TLS stack. */
if (level == ssl_encryption_handshake && !qc_is_back(qc) && if (level == ssl_encryption_handshake && objt_listener(qc->target) &&
!qc_ssl_set_quic_transport_params(qc->xprt_ctx->ssl, qc, ver, 1)) !qc_ssl_set_quic_transport_params(qc->xprt_ctx->ssl, qc, ver, 1))
goto leave; goto leave;
@ -292,7 +292,7 @@ static int ha_quic_set_encryption_secrets(SSL *ssl, enum ssl_encryption_level_t
struct quic_tls_kp *nxt_tx = &qc->ku.nxt_tx; struct quic_tls_kp *nxt_tx = &qc->ku.nxt_tx;
#if !defined(USE_QUIC_OPENSSL_COMPAT) && !defined(HAVE_OPENSSL_QUIC) #if !defined(USE_QUIC_OPENSSL_COMPAT) && !defined(HAVE_OPENSSL_QUIC)
if (qc_is_back(qc)) { if (objt_server(qc->target)) {
const unsigned char *tp; const unsigned char *tp;
size_t tplen; size_t tplen;
@ -580,6 +580,7 @@ static int ha_quic_ossl_got_transport_params(SSL *ssl, const unsigned char *para
{ {
int ret = 0; int ret = 0;
struct quic_conn *qc = SSL_get_ex_data(ssl, ssl_qc_app_data_index); struct quic_conn *qc = SSL_get_ex_data(ssl, ssl_qc_app_data_index);
struct listener *l = objt_listener(qc->target);
TRACE_ENTER(QUIC_EV_TRANSP_PARAMS, qc); TRACE_ENTER(QUIC_EV_TRANSP_PARAMS, qc);
@ -588,7 +589,7 @@ static int ha_quic_ossl_got_transport_params(SSL *ssl, const unsigned char *para
QUIC_EV_TRANSP_PARAMS, qc); QUIC_EV_TRANSP_PARAMS, qc);
ret = 1; ret = 1;
} }
else if (!quic_transport_params_store(qc, qc_is_back(qc), params, params + params_len)) { else if (!quic_transport_params_store(qc, !l, params, params + params_len)) {
goto err; goto err;
} }
@ -955,7 +956,7 @@ static int qc_ssl_provide_quic_data(struct ncbuf *ncbuf,
* provided by the stack. This happens after having received the peer * provided by the stack. This happens after having received the peer
* handshake level CRYPTO data which are validated by the TLS stack. * handshake level CRYPTO data which are validated by the TLS stack.
*/ */
if (!qc_is_back(qc)) { if (objt_listener(qc->target)) {
if (__objt_listener(qc->target)->bind_conf->ssl_conf.early_data && if (__objt_listener(qc->target)->bind_conf->ssl_conf.early_data &&
(!qc->ael || !qc->ael->tls_ctx.rx.secret)) { (!qc->ael || !qc->ael->tls_ctx.rx.secret)) {
TRACE_PROTO("SSL handshake in progress", TRACE_PROTO("SSL handshake in progress",
@ -969,7 +970,7 @@ static int qc_ssl_provide_quic_data(struct ncbuf *ncbuf,
#endif #endif
/* Check the alpn could be negotiated */ /* Check the alpn could be negotiated */
if (!qc_is_back(qc)) { if (objt_listener(qc->target)) {
if (!qc->app_ops) { if (!qc->app_ops) {
TRACE_ERROR("No negotiated ALPN", QUIC_EV_CONN_IO_CB, qc, &state); TRACE_ERROR("No negotiated ALPN", QUIC_EV_CONN_IO_CB, qc, &state);
quic_set_tls_alert(qc, SSL_AD_NO_APPLICATION_PROTOCOL); quic_set_tls_alert(qc, SSL_AD_NO_APPLICATION_PROTOCOL);
@ -999,7 +1000,7 @@ static int qc_ssl_provide_quic_data(struct ncbuf *ncbuf,
} }
qc->flags |= QUIC_FL_CONN_NEED_POST_HANDSHAKE_FRMS; qc->flags |= QUIC_FL_CONN_NEED_POST_HANDSHAKE_FRMS;
if (!qc_is_back(qc)) { if (objt_listener(ctx->qc->target)) {
struct listener *l = __objt_listener(qc->target); struct listener *l = __objt_listener(qc->target);
/* I/O callback switch */ /* I/O callback switch */
qc->wait_event.tasklet->process = quic_conn_app_io_cb; qc->wait_event.tasklet->process = quic_conn_app_io_cb;
@ -1244,7 +1245,7 @@ int qc_alloc_ssl_sock_ctx(struct quic_conn *qc, struct connection *conn)
ctx->sent_early_data = 0; ctx->sent_early_data = 0;
ctx->qc = qc; ctx->qc = qc;
if (!qc_is_back(qc)) { if (objt_listener(qc->target)) {
struct bind_conf *bc = __objt_listener(qc->target)->bind_conf; struct bind_conf *bc = __objt_listener(qc->target)->bind_conf;
if (qc_ssl_sess_init(qc, bc->initial_ctx, &ctx->ssl, NULL, 1) == -1) if (qc_ssl_sess_init(qc, bc->initial_ctx, &ctx->ssl, NULL, 1) == -1)

View File

@ -12,9 +12,12 @@
#include <haproxy/quic_utils.h> #include <haproxy/quic_utils.h>
#include <haproxy/task.h> #include <haproxy/task.h>
DECLARE_STATIC_TYPED_POOL(pool_head_quic_stream_desc, "qc_stream_desc", struct qc_stream_desc); DECLARE_STATIC_POOL(pool_head_quic_stream_desc, "qc_stream_desc",
DECLARE_STATIC_TYPED_POOL(pool_head_quic_stream_buf, "qc_stream_buf", struct qc_stream_buf); sizeof(struct qc_stream_desc));
DECLARE_STATIC_TYPED_POOL(pool_head_quic_stream_ack, "qc_stream_ack", struct qc_stream_ack); DECLARE_STATIC_POOL(pool_head_quic_stream_buf, "qc_stream_buf",
sizeof(struct qc_stream_buf));
DECLARE_STATIC_POOL(pool_head_quic_stream_ack, "qc_stream_ack",
sizeof(struct qc_stream_ack));
static struct pool_head *pool_head_sbuf; static struct pool_head *pool_head_sbuf;

View File

@ -15,15 +15,15 @@
#include <haproxy/quic_stream.h> #include <haproxy/quic_stream.h>
DECLARE_TYPED_POOL(pool_head_quic_enc_level, "quic_enc_level", struct quic_enc_level); DECLARE_POOL(pool_head_quic_enc_level, "quic_enc_level", sizeof(struct quic_enc_level));
DECLARE_TYPED_POOL(pool_head_quic_pktns, "quic_pktns", struct quic_pktns); DECLARE_POOL(pool_head_quic_pktns, "quic_pktns", sizeof(struct quic_pktns));
DECLARE_TYPED_POOL(pool_head_quic_tls_ctx, "quic_tls_ctx", struct quic_tls_ctx); DECLARE_POOL(pool_head_quic_tls_ctx, "quic_tls_ctx", sizeof(struct quic_tls_ctx));
DECLARE_POOL(pool_head_quic_tls_secret, "quic_tls_secret", QUIC_TLS_SECRET_LEN); DECLARE_POOL(pool_head_quic_tls_secret, "quic_tls_secret", QUIC_TLS_SECRET_LEN);
DECLARE_POOL(pool_head_quic_tls_iv, "quic_tls_iv", QUIC_TLS_IV_LEN); DECLARE_POOL(pool_head_quic_tls_iv, "quic_tls_iv", QUIC_TLS_IV_LEN);
DECLARE_POOL(pool_head_quic_tls_key, "quic_tls_key", QUIC_TLS_KEY_LEN); DECLARE_POOL(pool_head_quic_tls_key, "quic_tls_key", QUIC_TLS_KEY_LEN);
DECLARE_TYPED_POOL(pool_head_quic_crypto_buf, "quic_crypto_buf", struct quic_crypto_buf); DECLARE_POOL(pool_head_quic_crypto_buf, "quic_crypto_buf", sizeof(struct quic_crypto_buf));
DECLARE_STATIC_TYPED_POOL(pool_head_quic_cstream, "quic_cstream", struct quic_cstream); DECLARE_STATIC_POOL(pool_head_quic_cstream, "quic_cstream", sizeof(struct quic_cstream));
/* Initial salt depending on QUIC version to derive client/server initial secrets. /* Initial salt depending on QUIC version to derive client/server initial secrets.
* This one is for draft-29 QUIC version. * This one is for draft-29 QUIC version.

View File

@ -115,9 +115,8 @@ static void quic_trace(enum trace_level level, uint64_t mask, const struct trace
if (qc) { if (qc) {
const struct quic_tls_ctx *tls_ctx; const struct quic_tls_ctx *tls_ctx;
chunk_appendf(&trace_buf, " : qc@%p(%c) idle_timer_task@%p flags=0x%x", chunk_appendf(&trace_buf, " : qc@%p idle_timer_task@%p flags=0x%x",
qc, (qc->flags & QUIC_FL_CONN_IS_BACK) ? 'B' : 'F', qc, qc->idle_timer_task, qc->flags);
qc->idle_timer_task, qc->flags);
if (mask & QUIC_EV_CONN_NEW) { if (mask & QUIC_EV_CONN_NEW) {
const int *ssl_err = a2; const int *ssl_err = a2;

View File

@ -31,7 +31,7 @@
#include <haproxy/quic_tune.h> #include <haproxy/quic_tune.h>
#include <haproxy/ssl_sock-t.h> #include <haproxy/ssl_sock-t.h>
DECLARE_TYPED_POOL(pool_head_quic_tx_packet, "quic_tx_packet", struct quic_tx_packet); DECLARE_POOL(pool_head_quic_tx_packet, "quic_tx_packet", sizeof(struct quic_tx_packet));
DECLARE_POOL(pool_head_quic_cc_buf, "quic_cc_buf", QUIC_MAX_CC_BUFSIZE); DECLARE_POOL(pool_head_quic_cc_buf, "quic_cc_buf", QUIC_MAX_CC_BUFSIZE);
static struct quic_tx_packet *qc_build_pkt(unsigned char **pos, const unsigned char *buf_end, static struct quic_tx_packet *qc_build_pkt(unsigned char **pos, const unsigned char *buf_end,
@ -307,7 +307,11 @@ static int qc_send_ppkts(struct buffer *buf, struct ssl_sock_ctx *ctx)
/* If datagram bigger than MTU, several ones were encoded for GSO usage. */ /* If datagram bigger than MTU, several ones were encoded for GSO usage. */
if (dglen > qc->path->mtu) { if (dglen > qc->path->mtu) {
if (likely(!(qc->flags & QUIC_FL_CONN_UDP_GSO_EIO))) { /* TODO: note that at this time for connections to backends this
* part is not run because no more than an MTU has been prepared for
* such connections (dglen <= qc->path->mtu). So, here l is not NULL.
*/
if (likely(!(HA_ATOMIC_LOAD(&l->flags) & LI_F_UDP_GSO_NOTSUPP))) {
TRACE_PROTO("send multiple datagrams with GSO", QUIC_EV_CONN_SPPKTS, qc); TRACE_PROTO("send multiple datagrams with GSO", QUIC_EV_CONN_SPPKTS, qc);
gso = qc->path->mtu; gso = qc->path->mtu;
} }
@ -329,9 +333,6 @@ static int qc_send_ppkts(struct buffer *buf, struct ssl_sock_ctx *ctx)
int ret = qc_snd_buf(qc, &tmpbuf, tmpbuf.data, 0, gso); int ret = qc_snd_buf(qc, &tmpbuf, tmpbuf.data, 0, gso);
if (ret < 0) { if (ret < 0) {
if (gso && ret == -EIO) { if (gso && ret == -EIO) {
/* GSO must not be used if already disabled. */
BUG_ON(qc->flags & QUIC_FL_CONN_UDP_GSO_EIO);
/* TODO: note that at this time for connections to backends this /* TODO: note that at this time for connections to backends this
* part is not run because no more than an MTU has been * part is not run because no more than an MTU has been
* prepared for such connections (l is not NULL). * prepared for such connections (l is not NULL).
@ -341,7 +342,6 @@ static int qc_send_ppkts(struct buffer *buf, struct ssl_sock_ctx *ctx)
*/ */
TRACE_ERROR("mark listener UDP GSO as unsupported", QUIC_EV_CONN_SPPKTS, qc, first_pkt); TRACE_ERROR("mark listener UDP GSO as unsupported", QUIC_EV_CONN_SPPKTS, qc, first_pkt);
HA_ATOMIC_OR(&l->flags, LI_F_UDP_GSO_NOTSUPP); HA_ATOMIC_OR(&l->flags, LI_F_UDP_GSO_NOTSUPP);
qc->flags |= QUIC_FL_CONN_UDP_GSO_EIO;
continue; continue;
} }
@ -586,6 +586,7 @@ static int qc_prep_pkts(struct quic_conn *qc, struct buffer *buf,
int dgram_cnt = 0; int dgram_cnt = 0;
/* Restrict GSO emission to comply with sendmsg limitation. See QUIC_MAX_GSO_DGRAMS for more details. */ /* Restrict GSO emission to comply with sendmsg limitation. See QUIC_MAX_GSO_DGRAMS for more details. */
uchar gso_dgram_cnt = 0; uchar gso_dgram_cnt = 0;
struct listener *l = objt_listener(qc->target);
TRACE_ENTER(QUIC_EV_CONN_IO_CB, qc); TRACE_ENTER(QUIC_EV_CONN_IO_CB, qc);
/* Currently qc_prep_pkts() does not handle buffer wrapping so the /* Currently qc_prep_pkts() does not handle buffer wrapping so the
@ -649,7 +650,7 @@ static int qc_prep_pkts(struct quic_conn *qc, struct buffer *buf,
* to stay under MTU limit. * to stay under MTU limit.
*/ */
if (!dglen) { if (!dglen) {
if (!quic_peer_validated_addr(qc) && !qc_is_back(qc)) if (!quic_peer_validated_addr(qc) && objt_listener(qc->target))
end = pos + QUIC_MIN(qc->path->mtu, quic_may_send_bytes(qc)); end = pos + QUIC_MIN(qc->path->mtu, quic_may_send_bytes(qc));
else else
end = pos + qc->path->mtu; end = pos + qc->path->mtu;
@ -671,7 +672,7 @@ static int qc_prep_pkts(struct quic_conn *qc, struct buffer *buf,
* datagrams carrying ack-eliciting Initial packets to at least the * datagrams carrying ack-eliciting Initial packets to at least the
* smallest allowed maximum datagram size of 1200 bytes. * smallest allowed maximum datagram size of 1200 bytes.
*/ */
if (qel == qc->iel && (qc_is_back(qc) || !LIST_ISEMPTY(frms) || probe)) { if (qel == qc->iel && (!l || !LIST_ISEMPTY(frms) || probe)) {
/* Ensure that no Initial packets are sent into too small datagrams */ /* Ensure that no Initial packets are sent into too small datagrams */
if (end - pos < QUIC_INITIAL_PACKET_MINLEN) { if (end - pos < QUIC_INITIAL_PACKET_MINLEN) {
TRACE_PROTO("No more enough room to build an Initial packet", TRACE_PROTO("No more enough room to build an Initial packet",
@ -703,8 +704,8 @@ static int qc_prep_pkts(struct quic_conn *qc, struct buffer *buf,
cur_pkt = qc_build_pkt(&pos, end, qel, tls_ctx, frms, cur_pkt = qc_build_pkt(&pos, end, qel, tls_ctx, frms,
qc, ver, dglen, pkt_type, must_ack, qc, ver, dglen, pkt_type, must_ack,
padding && padding &&
((qc_is_back(qc) && (!next_qel || LIST_ISEMPTY(next_qel->send_frms))) || ((!l && (!next_qel || LIST_ISEMPTY(next_qel->send_frms))) ||
(!qc_is_back(qc) && !next_qel && (!probe || !LIST_ISEMPTY(frms)))), (l && !next_qel && (!probe || !LIST_ISEMPTY(frms)))),
probe, cc, &err); probe, cc, &err);
if (!cur_pkt) { if (!cur_pkt) {
switch (err) { switch (err) {
@ -787,10 +788,10 @@ static int qc_prep_pkts(struct quic_conn *qc, struct buffer *buf,
prv_pkt = cur_pkt; prv_pkt = cur_pkt;
} }
else if (!(quic_tune.options & QUIC_TUNE_NO_UDP_GSO) && else if (!(quic_tune.options & QUIC_TUNE_NO_UDP_GSO) &&
!(qc->flags & QUIC_FL_CONN_UDP_GSO_EIO) &&
dglen == qc->path->mtu && dglen == qc->path->mtu &&
(char *)end < b_wrap(buf) && (char *)end < b_wrap(buf) &&
++gso_dgram_cnt < QUIC_MAX_GSO_DGRAMS) { ++gso_dgram_cnt < QUIC_MAX_GSO_DGRAMS &&
l && !(HA_ATOMIC_LOAD(&l->flags) & LI_F_UDP_GSO_NOTSUPP)) {
/* TODO: note that for backends GSO is not used. No more than /* TODO: note that for backends GSO is not used. No more than
* an MTU is prepared. * an MTU is prepared.
*/ */
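Note: the GSO branch above leans on Linux UDP segmentation offload, where one oversized buffer plus a UDP_SEGMENT ancillary message lets a single sendmsg() emit several MTU-sized datagrams, and an EIO return is the cue to latch GSO off. A minimal standalone sketch of that mechanism (illustrative only, not HAProxy code; the fd is assumed to be a connected UDP socket set up elsewhere):

#include <errno.h>
#include <netinet/in.h>
#include <stdint.h>
#include <string.h>
#include <sys/socket.h>
#include <sys/types.h>
#include <sys/uio.h>

#ifndef SOL_UDP
#define SOL_UDP IPPROTO_UDP
#endif
#ifndef UDP_SEGMENT
#define UDP_SEGMENT 103 /* Linux UAPI value */
#endif

/* Send <len> bytes from <buf> as datagrams of at most <gso_size> bytes. */
ssize_t send_gso(int fd, const void *buf, size_t len, uint16_t gso_size)
{
	char ctl[CMSG_SPACE(sizeof(gso_size))] = { 0 };
	struct iovec iov = { .iov_base = (void *)buf, .iov_len = len };
	struct msghdr msg = {
		.msg_iov = &iov, .msg_iovlen = 1,
		.msg_control = ctl, .msg_controllen = sizeof(ctl),
	};
	struct cmsghdr *cm = CMSG_FIRSTHDR(&msg);

	cm->cmsg_level = SOL_UDP;
	cm->cmsg_type = UDP_SEGMENT;
	cm->cmsg_len = CMSG_LEN(sizeof(gso_size));
	memcpy(CMSG_DATA(cm), &gso_size, sizeof(gso_size));

	/* -1 with errno == EIO is the "segmentation unsupported" case the
	 * hunk above handles by latching GSO off and retrying one datagram
	 * at a time.
	 */
	return sendmsg(fd, &msg, 0);
}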

View File

@@ -64,9 +64,9 @@ static THREAD_LOCAL unsigned int recurse = 0; /* counter to track calls to publi
 static THREAD_LOCAL uint64_t resolv_query_id_seed = 0; /* random seed */
 struct resolvers *curr_resolvers = NULL;

-DECLARE_STATIC_TYPED_POOL(resolv_answer_item_pool, "resolv_answer_item", struct resolv_answer_item);
-DECLARE_STATIC_TYPED_POOL(resolv_resolution_pool, "resolv_resolution", struct resolv_resolution);
-DECLARE_TYPED_POOL(resolv_requester_pool, "resolv_requester", struct resolv_requester);
+DECLARE_STATIC_POOL(resolv_answer_item_pool, "resolv_answer_item", sizeof(struct resolv_answer_item));
+DECLARE_STATIC_POOL(resolv_resolution_pool, "resolv_resolution", sizeof(struct resolv_resolution));
+DECLARE_POOL(resolv_requester_pool, "resolv_requester", sizeof(struct resolv_requester));

 static unsigned int resolution_uuid = 1;
 unsigned int resolv_failed_resolutions = 0;
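Note: most files in this compare carry the same mechanical change: master's DECLARE_*_TYPED_POOL macros take the struct type itself (plus optional extra parameters in some files), while v3.3-dev4's DECLARE_*_POOL variants take a hand-written sizeof(). A simplified sketch of the distinction, using hypothetical stand-in macros rather than HAProxy's real ones:

#include <stddef.h>

/* Hypothetical, minimal stand-ins for the two declaration styles. */
struct pool_head_sketch { const char *name; size_t size; };

/* sized form: the caller must keep the size in sync by hand */
#define DECLARE_POOL_SKETCH(ptr, name, size) \
	struct pool_head_sketch ptr = { name, size }

/* typed form: the size is derived from the type, so it cannot drift */
#define DECLARE_TYPED_POOL_SKETCH(ptr, name, type) \
	struct pool_head_sketch ptr = { name, sizeof(type) }

struct resolv_requester_demo { int dummy; };

DECLARE_TYPED_POOL_SKETCH(resolv_requester_pool_demo, "resolv_requester",
                          struct resolv_requester_demo);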

View File

@@ -1983,7 +1983,7 @@ int sample_conv_var2smp_str(const struct arg *arg, struct sample *smp)
 	}
 }

-static int sample_conv_2dec_check(struct arg *args, struct sample_conv *conv,
+static int sample_conv_be2dec_check(struct arg *args, struct sample_conv *conv,
                                   const char *file, int line, char **err)
 {
 	if (args[1].data.sint <= 0 || args[1].data.sint > sizeof(unsigned long long)) {
@@ -1999,13 +1999,13 @@ static int sample_conv_2dec_check(struct arg *args, struct sample_conv *conv,
 	return 1;
 }

-/* Converts big-endian/little-endian binary input sample to a string containing an unsigned
+/* Converts big-endian binary input sample to a string containing an unsigned
  * integer number per <chunk_size> input bytes separated with <separator>.
  * Optional <truncate> flag indicates if input is truncated at <chunk_size>
  * boundaries.
- * Arguments: separator (string), chunk_size (integer), truncate (0,1), big endian (0,1)
+ * Arguments: separator (string), chunk_size (integer), truncate (0,1)
  */
-static int sample_conv_2dec(const struct arg *args, struct sample *smp, void *private, int be)
+static int sample_conv_be2dec(const struct arg *args, struct sample *smp, void *private)
 {
 	struct buffer *trash = get_trash_chunk();
 	const int last = args[2].data.sint ? smp->data.u.str.data - args[1].data.sint + 1 : smp->data.u.str.data;
@@ -2029,12 +2029,8 @@ static int sample_conv_2dec(const struct arg *args, struct sample *smp, void *pr
 		max_size -= args[0].data.str.data;

 		/* Add integer */
-		for (number = 0, i = 0; i < args[1].data.sint && ptr < smp->data.u.str.data; i++) {
-			if (be)
-				number = (number << 8) + (unsigned char)smp->data.u.str.area[ptr++];
-			else
-				number |= (unsigned char)smp->data.u.str.area[ptr++] << (i*8);
-		}
+		for (number = 0, i = 0; i < args[1].data.sint && ptr < smp->data.u.str.data; i++)
+			number = (number << 8) + (unsigned char)smp->data.u.str.area[ptr++];

 		pos = ulltoa(number, trash->area + trash->data, trash->size - trash->data);
 		if (pos)
@@ -2051,28 +2047,6 @@ static int sample_conv_2dec(const struct arg *args, struct sample *smp, void *pr
 	return 1;
 }

-/* Converts big-endian binary input sample to a string containing an unsigned
- * integer number per <chunk_size> input bytes separated with <separator>.
- * Optional <truncate> flag indicates if input is truncated at <chunk_size>
- * boundaries.
- * Arguments: separator (string), chunk_size (integer), truncate (0,1)
- */
-static int sample_conv_be2dec(const struct arg *args, struct sample *smp, void *private)
-{
-	return sample_conv_2dec(args, smp, private, 1);
-}
-
-/* Converts little-endian binary input sample to a string containing an unsigned
- * integer number per <chunk_size> input bytes separated with <separator>.
- * Optional <truncate> flag indicates if input is truncated at <chunk_size>
- * boundaries.
- * Arguments: separator (string), chunk_size (integer), truncate (0,1)
- */
-static int sample_conv_le2dec(const struct arg *args, struct sample *smp, void *private)
-{
-	return sample_conv_2dec(args, smp, private, 0);
-}
-
 static int sample_conv_be2hex_check(struct arg *args, struct sample_conv *conv,
                                     const char *file, int line, char **err)
 {
@@ -5441,8 +5415,7 @@ static struct sample_conv_kw_list sample_conv_kws = {ILH, {
 	{ "upper",  sample_conv_str2upper, 0,                     NULL,                     SMP_T_STR,  SMP_T_STR  },
 	{ "lower",  sample_conv_str2lower, 0,                     NULL,                     SMP_T_STR,  SMP_T_STR  },
 	{ "length", sample_conv_length,    0,                     NULL,                     SMP_T_STR,  SMP_T_SINT },
-	{ "be2dec", sample_conv_be2dec,    ARG3(1,STR,SINT,SINT), sample_conv_2dec_check,   SMP_T_BIN,  SMP_T_STR  },
-	{ "le2dec", sample_conv_le2dec,    ARG3(1,STR,SINT,SINT), sample_conv_2dec_check,   SMP_T_BIN,  SMP_T_STR  },
+	{ "be2dec", sample_conv_be2dec,    ARG3(1,STR,SINT,SINT), sample_conv_be2dec_check, SMP_T_BIN,  SMP_T_STR  },
 	{ "be2hex", sample_conv_be2hex,    ARG3(1,STR,SINT,SINT), sample_conv_be2hex_check, SMP_T_BIN,  SMP_T_STR  },
 	{ "hex",    sample_conv_bin2hex,   0,                     NULL,                     SMP_T_BIN,  SMP_T_STR  },
 	{ "hex2i",  sample_conv_hex2int,   0,                     NULL,                     SMP_T_STR,  SMP_T_SINT },

View File

@@ -3122,9 +3122,9 @@ void srv_free_params(struct server *srv)
 	free(srv->hostname);
 	free(srv->hostname_dn);
 	free((char*)srv->conf.file);
-	ha_aligned_free(srv->per_thr);
-	ha_aligned_free(srv->per_tgrp);
-	ha_aligned_free(srv->curr_idle_thr);
+	free(srv->per_thr);
+	free(srv->per_tgrp);
+	free(srv->curr_idle_thr);
 	free(srv->pool_conn_name);
 	release_sample_expr(srv->pool_conn_name_expr);
 	free(srv->resolvers_id);
@@ -3450,7 +3450,7 @@ int srv_init(struct server *srv)
 	if (err_code & ERR_CODE)
 		goto out;

-	if (!counters_be_shared_prepare(&srv->counters.shared, &srv->guid)) {
+	if (!counters_be_shared_init(&srv->counters.shared, &srv->guid)) {
 		ha_alert("memory error while setting up shared counters for %s/%s server\n", srv->proxy->id, srv->id);
 		err_code |= ERR_ALERT | ERR_FATAL;
 		goto out;
@@ -3482,7 +3482,7 @@ int srv_init(struct server *srv)
 	/* initialize idle conns lists */
 	if (srv->max_idle_conns != 0) {
-		srv->curr_idle_thr = ha_aligned_zalloc(64, global.nbthread * sizeof(*srv->curr_idle_thr));
+		srv->curr_idle_thr = calloc(global.nbthread, sizeof(*srv->curr_idle_thr));
 		if (!srv->curr_idle_thr) {
 			ha_alert("memory error during idle conn list init for %s/%s server\n",
 			         srv->proxy->id, srv->id);
@@ -5918,8 +5918,8 @@ static int srv_init_per_thr(struct server *srv)
 {
 	int i;

-	srv->per_thr = ha_aligned_zalloc(64, global.nbthread * sizeof(*srv->per_thr));
-	srv->per_tgrp = ha_aligned_zalloc(64, global.nbtgroups * sizeof(*srv->per_tgrp));
+	srv->per_thr = calloc(global.nbthread, sizeof(*srv->per_thr));
+	srv->per_tgrp = calloc(global.nbtgroups, sizeof(*srv->per_tgrp));
 	if (!srv->per_thr || !srv->per_tgrp)
 		return -1;
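Note: on the master side these per-thread arrays come from ha_aligned_zalloc(64, ...) so that each thread's slot starts on its own cache line, and they are released with ha_aligned_free(). A plausible sketch of such helpers under POSIX (an assumption; HAProxy's actual implementation may differ):

#include <stdlib.h>
#include <string.h>

/* Allocate <size> zeroed bytes aligned on <align>, which must be a
 * power of two and a multiple of sizeof(void *), e.g. 64 for a cache
 * line. Sketch only.
 */
static void *aligned_zalloc(size_t align, size_t size)
{
	void *ptr = NULL;

	if (posix_memalign(&ptr, align, size) != 0)
		return NULL;
	memset(ptr, 0, size);
	return ptr;
}

/* With posix_memalign() a plain free() suffices, but some platforms'
 * aligned allocators (e.g. _aligned_malloc() on Windows) require a
 * matching release call, which is presumably why the free() calls in
 * srv_free_params() were switched together with the allocator.
 */
static void aligned_free(void *ptr)
{
	free(ptr);
}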

View File

@@ -28,8 +28,9 @@
 #include <haproxy/vars.h>

-DECLARE_TYPED_POOL(pool_head_session, "session", struct session);
-DECLARE_TYPED_POOL(pool_head_sess_priv_conns, "session priv conns list", struct sess_priv_conns);
+DECLARE_POOL(pool_head_session, "session", sizeof(struct session));
+DECLARE_POOL(pool_head_sess_priv_conns, "session priv conns list",
+             sizeof(struct sess_priv_conns));

 int conn_complete_session(struct connection *conn);

View File

@@ -30,7 +30,7 @@ struct signal_descriptor signal_state[MAX_SIGNAL];
 sigset_t blocked_sig;
 int signal_pending = 0; /* non-zero if at least one signal remains unprocessed */

-DECLARE_STATIC_TYPED_POOL(pool_head_sig_handlers, "sig_handlers", struct sig_handler);
+DECLARE_STATIC_POOL(pool_head_sig_handlers, "sig_handlers", sizeof(struct sig_handler));

 /* Common signal handler, used by all signals. Received signals are queued.
  * Signal number zero has a specific status, as it cannot be delivered by the

View File

@@ -88,13 +88,9 @@ struct connection *sock_accept_conn(struct listener *l, int *status)
 	 * the legacy accept() + fcntl().
 	 */
 	if (unlikely(accept4_broken) ||
-	    /* Albeit it appears it does not make sense to carry on with accept
-	     * if we encounter EPERM, some old embedded ARM Linux 2.6.x sets as
-	     * such instead of ENOSYS.
-	     */
 	    (((cfd = accept4(l->rx.fd, (struct sockaddr*)addr, &laddr,
 	                     SOCK_NONBLOCK | (master ? SOCK_CLOEXEC : 0))) == -1) &&
-	     (errno == ENOSYS || errno == EINVAL || errno == EBADF || errno == EPERM) &&
+	     (errno == ENOSYS || errno == EINVAL || errno == EBADF) &&
	     ((accept4_broken = 1))))
 #endif
 	{
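Note: the construct above implements a one-way runtime fallback: the first time accept4() fails with an errno suggesting the syscall itself is unsupported, accept4_broken latches to 1 and all later calls take the legacy accept() + fcntl() path. A condensed sketch of the pattern, stripped of HAProxy's listener plumbing:

#define _GNU_SOURCE
#include <errno.h>
#include <fcntl.h>
#include <sys/socket.h>

static int accept4_broken; /* latched once accept4() proves unusable */

static int accept_nonblock(int lfd, struct sockaddr *addr, socklen_t *len)
{
	int cfd;

	if (!accept4_broken) {
		cfd = accept4(lfd, addr, len, SOCK_NONBLOCK);
		if (cfd >= 0)
			return cfd;
		/* only latch on errors meaning "accept4 unsupported here" */
		if (errno != ENOSYS && errno != EINVAL && errno != EBADF)
			return -1; /* genuine accept failure */
		accept4_broken = 1;
	}
	/* legacy path: accept(), then set non-blocking mode by hand */
	cfd = accept(lfd, addr, len);
	if (cfd >= 0)
		fcntl(cfd, F_SETFL, fcntl(cfd, F_GETFL) | O_NONBLOCK);
	return cfd;
}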

View File

@@ -491,11 +491,11 @@ int is_inet6_reachable(void)
 	int fd;

 	if (tick_isset(last_check) &&
-	    !tick_is_expired(tick_add(last_check, INET6_CONNECTIVITY_CACHE_TIME), HA_ATOMIC_LOAD(global_now_ms)))
+	    !tick_is_expired(tick_add(last_check, INET6_CONNECTIVITY_CACHE_TIME), HA_ATOMIC_LOAD(&global_now_ms)))
 		return HA_ATOMIC_LOAD(&sock_inet6_seems_reachable);

 	/* update the test date to ensure nobody else does it in parallel */
-	HA_ATOMIC_STORE(&last_inet6_check, HA_ATOMIC_LOAD(global_now_ms));
+	HA_ATOMIC_STORE(&last_inet6_check, HA_ATOMIC_LOAD(&global_now_ms));

 	fd = socket(AF_INET6, SOCK_DGRAM, 0);
 	if (fd >= 0) {

View File

@@ -1390,6 +1390,24 @@ static int cli_parse_add_crtlist(char **args, char *payload, struct appctx *appc
 		goto error;
 	}

+	if (eb_gettag(crtlist->entries.b[EB_RGHT])) {
+		char *slash;
+
+		slash = strrchr(cert_path, '/');
+		if (!slash) {
+			memprintf(&err, "'%s' is a directory, certificate path '%s' must contain the directory path", (char *)crtlist->node.key, cert_path);
+			goto error;
+		}
+		/* temporarily replace / by 0 to do a strcmp */
+		*slash = '\0';
+		if (strcmp(cert_path, (char*)crtlist->node.key) != 0) {
+			*slash = '/';
+			memprintf(&err, "'%s' is a directory, certificate path '%s' must contain the directory path", (char *)crtlist->node.key, cert_path);
+			goto error;
+		}
+		*slash = '/';
+	}
+
 	if (*cert_path != '@' && *cert_path != '/' && global_ssl.crt_base) {
 		if ((strlen(global_ssl.crt_base) + 1 + strlen(cert_path)) > sizeof(path) ||
 		    snprintf(path, sizeof(path), "%s/%s", global_ssl.crt_base, cert_path) > sizeof(path)) {
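Note: the added block compares only the directory component of cert_path by temporarily writing a NUL over the last '/'. The same test can be expressed without mutating the string, at the cost of one extra length comparison; a small equivalent sketch (hypothetical helper name):

#include <string.h>

/* Return non-zero if <path> lives directly under directory <dir>, i.e.
 * <path> is "<dir>/<something>". Mirrors the strrchr()/strcmp() logic
 * above without writing into <path>.
 */
static int path_in_dir(const char *path, const char *dir)
{
	size_t dlen = strlen(dir);
	const char *slash = strrchr(path, '/');

	if (!slash)
		return 0;
	return (size_t)(slash - path) == dlen && strncmp(path, dir, dlen) == 0;
}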

View File

@@ -155,7 +155,7 @@ struct global_ssl global_ssl = {
 static BIO_METHOD *ha_meth;

-DECLARE_STATIC_TYPED_POOL(ssl_sock_ctx_pool, "ssl_sock_ctx", struct ssl_sock_ctx);
+DECLARE_STATIC_POOL(ssl_sock_ctx_pool, "ssl_sock_ctx", sizeof(struct ssl_sock_ctx));

 DECLARE_POOL(ssl_sock_client_sni_pool, "ssl_sock_client_sni", TLSEXT_MAXLEN_host_name + 1);
@@ -5104,7 +5104,6 @@ static int ssl_sock_init(struct connection *conn, void **xprt_ctx)
 	ctx->xprt_st = 0;
 	ctx->xprt_ctx = NULL;
 	ctx->error_code = 0;
-	ctx->can_send_early_data = 1;

 	next_sslconn = increment_sslconn();
 	if (!next_sslconn) {
@@ -5459,7 +5458,6 @@ static int ssl_sock_handshake(struct connection *conn, unsigned int flag)
 			/* read some data: consider handshake completed */
 			goto reneg_ok;
 		}
-		ctx->can_send_early_data = 0;
 		ret = SSL_do_handshake(ctx->ssl);
 check_error:
 		if (ret != 1) {
@@ -5930,12 +5928,7 @@ static size_t ssl_sock_to_buf(struct connection *conn, void *xprt_ctx, struct bu
 	}
 #endif

-	/*
-	 * We have to check can_send_early_data here, as the handshake flags
-	 * may have been removed in case we want to try to send early data.
-	 */
-	if (ctx->can_send_early_data ||
-	    (conn->flags & (CO_FL_WAIT_XPRT | CO_FL_SSL_WAIT_HS))) {
+	if (conn->flags & (CO_FL_WAIT_XPRT | CO_FL_SSL_WAIT_HS)) {
 		/* a handshake was requested */
 		TRACE_LEAVE(SSL_EV_CONN_RECV, conn);
 		return 0;
@@ -6108,7 +6101,7 @@ static size_t ssl_sock_from_buf(struct connection *conn, void *xprt_ctx, const s
 	ctx->xprt_st &= ~SSL_SOCK_SEND_MORE;

 #ifdef SSL_READ_EARLY_DATA_SUCCESS
-	if (ctx->can_send_early_data && conn_is_back(conn)) {
+	if (!SSL_is_init_finished(ctx->ssl) && conn_is_back(conn)) {
 		unsigned int max_early;

 		if (objt_listener(conn->target))
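Note: v3.3-dev4 gates backend early data on the TLS state itself (SSL_is_init_finished()), where master tracks a dedicated can_send_early_data flag. For reference, a rough sketch of the OpenSSL 1.1.1+ client-side early-data pattern this guards (connection setup and WANT_READ/WANT_WRITE retries omitted; not HAProxy's code):

#include <openssl/ssl.h>

/* Push bytes as TLS 1.3 early data while the handshake is still in
 * flight and the resumed session advertises a non-zero early-data
 * budget; otherwise fall back to a normal SSL_write(). Sketch only.
 */
static int write_maybe_early(SSL *ssl, const void *buf, size_t len)
{
	SSL_SESSION *sess = SSL_get_session(ssl);
	size_t written = 0;

	if (!SSL_is_init_finished(ssl) && sess &&
	    SSL_SESSION_get_max_early_data(sess) > 0) {
		if (SSL_write_early_data(ssl, buf, len, &written) == 1)
			return (int)written;
		return -1; /* real code would inspect SSL_get_error() */
	}
	return SSL_write(ssl, buf, (int)len);
}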

View File

@@ -24,8 +24,8 @@
 #include <haproxy/stconn.h>
 #include <haproxy/xref.h>

-DECLARE_TYPED_POOL(pool_head_connstream, "stconn", struct stconn);
-DECLARE_TYPED_POOL(pool_head_sedesc, "sedesc", struct sedesc);
+DECLARE_POOL(pool_head_connstream, "stconn", sizeof(struct stconn));
+DECLARE_POOL(pool_head_sedesc, "sedesc", sizeof(struct sedesc));

 /* functions used by default on a detached stream connector */
 static void sc_app_abort(struct stconn *sc);

View File

@@ -3157,9 +3157,9 @@ static enum act_parse_ret parse_add_gpc(const char **args, int *arg, struct prox
 			return ACT_RET_PRS_ERR;
 		}

-		if (rule->arg.gpc.sc >= global.tune.nb_stk_ctr) {
-			memprintf(err, "invalid stick table track ID '%s'. The max allowed ID is %d (tune.stick-counters)",
-			          args[*arg-1], global.tune.nb_stk_ctr - 1);
+		if (rule->arg.gpc.sc >= MAX_SESS_STKCTR) {
+			memprintf(err, "invalid stick table track ID '%s' for '%s'. The max allowed ID is %d",
+			          cmd_name, args[*arg-1], MAX_SESS_STKCTR-1);
 			return ACT_RET_PRS_ERR;
 		}
 	}

View File

@@ -62,7 +62,7 @@
 #include <haproxy/vars.h>

-DECLARE_TYPED_POOL(pool_head_stream, "stream", struct stream);
+DECLARE_POOL(pool_head_stream, "stream", sizeof(struct stream));
 DECLARE_POOL(pool_head_uniqueid, "uniqueid", UNIQUEID_LEN);

 /* incremented by each "show sess" to fix a delimiter between streams */

View File

@@ -27,13 +27,13 @@
 extern struct task *process_stream(struct task *t, void *context, unsigned int state);
 extern void stream_update_timings(struct task *t, uint64_t lat, uint64_t cpu);

-DECLARE_TYPED_POOL(pool_head_task, "task", struct task, 0, 64);
-DECLARE_TYPED_POOL(pool_head_tasklet, "tasklet", struct tasklet, 0, 64);
+DECLARE_POOL(pool_head_task, "task", sizeof(struct task));
+DECLARE_POOL(pool_head_tasklet, "tasklet", sizeof(struct tasklet));

 /* This is the memory pool containing all the signal structs. These
  * struct are used to store each required signal between two tasks.
  */
-DECLARE_TYPED_POOL(pool_head_notification, "notification", struct notification);
+DECLARE_POOL(pool_head_notification, "notification", sizeof(struct notification));

 /* The lock protecting all wait queues at once. For now we have no better
  * alternative since a task may have to be removed from a queue and placed

View File

@@ -72,7 +72,7 @@
 struct eb_root shared_tcpchecks = EB_ROOT;

-DECLARE_TYPED_POOL(pool_head_tcpcheck_rule, "tcpcheck_rule", struct tcpcheck_rule);
+DECLARE_POOL(pool_head_tcpcheck_rule, "tcpcheck_rule", sizeof(struct tcpcheck_rule));

 /**************************************************************************/
 /*************** Init/deinit tcp-check rules and ruleset ******************/

View File

@@ -24,7 +24,7 @@
 /* This contains a pool of struct vars */
-DECLARE_STATIC_TYPED_POOL(var_pool, "vars", struct var);
+DECLARE_STATIC_POOL(var_pool, "vars", sizeof(struct var));

 /* list of variables for the process scope. */
 struct vars proc_vars THREAD_ALIGNED(64);

View File

@@ -20,7 +20,7 @@ struct xprt_handshake_ctx {
 	void *xprt_ctx;
 };

-DECLARE_STATIC_TYPED_POOL(xprt_handshake_ctx_pool, "xprt_handshake_ctx", struct xprt_handshake_ctx);
+DECLARE_STATIC_POOL(xprt_handshake_ctx_pool, "xprt_handshake_ctx", sizeof(struct xprt_handshake_ctx));

 /* This XPRT doesn't take care of sending or receiving data, once its handshake
  * is done, it just removes itself