Mirror of https://git.haproxy.org/git/haproxy.git/ (synced 2025-08-07 07:37:02 +02:00)

Compare commits (40 commits)
Commits:
d76ee72d03, ef915e672a, f0d0922aa1, 6ea0e3e2f8, eb075d15f6, ac23b873f5, efa856a8b0, ff62aacb20,
f51d58bd2e, ee5bc28865, 18505f9718, 325d1bdcca, e921fe894f, 2ce0c63206, cf8871ae40, b1f854bb2e,
ffbb3cc306, aeff2a3b2a, 66f28dbd3f, 8afd3e588d, 9ee14ed2d9, 3dde7626ba, 365a69648c, 09275fd549,
a6e67e7b41, 697f7d1142, 2ecc5290f2, dd9645d6b9, 57e9425dbc, ec1ab8d171, 668c2cfb09, cfe9bec1ea,
14966c856b, 4a20b3835a, 713ebd2750, 2ffe515d97, 83a335f925, cedb4f0461, 7fa812a1ac, 021a0681be
CHANGELOG (42 changed lines)

@@ -1,6 +1,48 @@
 ChangeLog :
 ===========
 
+2025/08/06 : 3.3-dev6
+    - MINOR: acme: implement traces
+    - BUG/MINOR: hlua: take default-path into account with lua-load-per-thread
+    - CLEANUP: counters: rename counters_be_shared_init to counters_be_shared_prepare
+    - MINOR: clock: make global_now_ms a pointer
+    - MINOR: clock: make global_now_ns a pointer as well
+    - MINOR: mux-quic: release conn after shutdown on BE reuse failure
+    - MINOR: session: strengthen connection attach to session
+    - MINOR: session: remove redundant target argument from session_add_conn()
+    - MINOR: session: strengthen idle conn limit check
+    - MINOR: session: do not release conn in session_check_idle_conn()
+    - MINOR: session: streamline session_check_idle_conn() usage
+    - MINOR: muxes: refactor private connection detach
+    - BUG/MEDIUM: mux-quic: ensure Early-data header is set
+    - BUILD: acme: avoid declaring TRACE_SOURCE in acme-t.h
+    - MINOR: acme: emit a log for DNS-01 challenge response
+    - MINOR: acme: emit the DNS-01 challenge details on the dpapi sink
+    - MEDIUM: acme: allow to wait and restart the task for DNS-01
+    - MINOR: acme: update the log for DNS-01
+    - BUG/MINOR: acme: possible integer underflow in acme_txt_record()
+    - BUG/MEDIUM: hlua_fcn: ensure systematic watcher cleanup for server list iterator
+    - MINOR: sample: Add le2dec (little endian to decimal) sample fetch
+    - BUILD: fcgi: fix the struct name of fcgi_flt_ctx
+    - BUILD: compat: provide relaxed versions of the MIN/MAX macros
+    - BUILD: quic: use _MAX() to avoid build issues in pools declarations
+    - BUILD: compat: always set _POSIX_VERSION to ease comparisons
+    - MINOR: implement ha_aligned_alloc() to return aligned memory areas
+    - MINOR: pools: support creating a pool from a pool registration
+    - MINOR: pools: add a new flag to declare static registrations
+    - MINOR: pools: force the name at creation time to be a const.
+    - MEDIUM: pools: change the static pool creation to pass a registration
+    - DEBUG: pools: store the pool registration file name and line number
+    - DEBUG: pools: also retrieve file and line for direct callers of create_pool()
+    - MEDIUM: pools: add an alignment property
+    - MINOR: pools: add macros to register aligned pools
+    - MINOR: pools: add macros to declare pools based on a struct type
+    - MEDIUM: pools: respect pool alignment in allocations
+
 2025/07/28 : 3.3-dev5
     - BUG/MEDIUM: queue/stats: also use stream_set_srv_target() for pendconns
     - DOC: list missing global QUIC settings
 
 2025/07/26 : 3.3-dev4
     - CLEANUP: server: do not check for duplicates anymore in findserver()
     - REORG: server: move findserver() from proxy.c to server.c
doc/configuration.txt

@@ -3,7 +3,7 @@
 
                          Configuration Manual
                         ----------------------
                              version 3.3
-                             2025/07/26
+                             2025/08/06
 
 
 This document covers the configuration language as implemented in the version
@@ -1744,6 +1744,7 @@ The following keywords are supported in the "global" section :
 - insecure-setuid-wanted
 - issuers-chain-path
 - key-base
+- limited-quic
 - localpeer
 - log
 - log-send-hostname
@@ -1753,6 +1754,7 @@ The following keywords are supported in the "global" section :
 - lua-prepend-path
 - mworker-max-reloads
 - nbthread
+- no-quic
 - node
 - numa-cpu-mapping
 - ocsp-update.disable
@@ -1882,6 +1884,7 @@ The following keywords are supported in the "global" section :
 - tune.pool-low-fd-ratio
 - tune.pt.zero-copy-forwarding
 - tune.quic.cc-hystart
 - tune.quic.cc.cubic.min-losses
+- tune.quic.disable-tx-pacing
 - tune.quic.disable-udp-gso
 - tune.quic.frontend.glitches-threshold
@@ -19898,6 +19901,7 @@ and(value)                                  integer  integer
 b64dec                                      string   binary
 base64                                      binary   string
 be2dec(separator,chunk_size[,truncate])     binary   string
+le2dec(separator,chunk_size[,truncate])     binary   string
 be2hex([separator[,chunk_size[,truncate]]]) binary   string
 bool                                        integer  boolean
 bytes(offset[,length])                      binary   binary
@@ -20138,6 +20142,19 @@ be2dec(<separator>,<chunk_size>[,<truncate>])
       bin(01020304050607),be2dec(,2,1)  # 2587721286
       bin(7f000001),be2dec(.,1)         # 127.0.0.1
 
+le2dec(<separator>,<chunk_size>[,<truncate>])
+  Converts little-endian binary input sample to a string containing an unsigned
+  integer number per <chunk_size> input bytes. <separator> is inserted every
+  <chunk_size> binary input bytes if specified. The <truncate> flag indicates
+  whether the binary input is truncated at <chunk_size> boundaries. The maximum
+  value for <chunk_size> is limited by the size of long long int (8 bytes).
+
+  Example:
+      bin(01020304050607),le2dec(:,2)   # 513:1284:2055:7
+      bin(01020304050607),le2dec(-,2,1) # 513-1284-2055
+      bin(01020304050607),le2dec(,2,1)  # 51312842055
+      bin(7f000001),le2dec(.,1)         # 127.0.0.1
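
  (As a quick cross-check of the first example: le2dec(:,2) reads 2-byte
  little-endian chunks, so bytes 01 02 give 0x0201 = 513, 03 04 give 0x0403 =
  1284, 05 06 give 0x0605 = 2055, and the trailing byte 07 gives 7, hence
  "513:1284:2055:7". When <truncate> is set, that trailing partial chunk is
  simply dropped, as the second example shows.)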
 
 be2hex([<separator>[,<chunk_size>[,<truncate>]]])
   Converts big-endian binary input sample to a hex string containing two hex
   digits per input byte. It is used to log or transfer hex dumps of some
include/haproxy/acme-t.h

@@ -51,9 +51,11 @@ enum http_st {
 };
 
 struct acme_auth {
+	struct ist dns;      /* dns entry */
 	struct ist auth;     /* auth URI */
 	struct ist chall;    /* challenge URI */
 	struct ist token;    /* token */
+	int ready;           /* is the challenge ready ? */
 	void *next;
 };
 
@@ -79,6 +81,20 @@ struct acme_ctx {
 	X509_REQ *req;
 	struct ist finalize;
 	struct ist certificate;
+	struct task *task;
+	struct mt_list el;
 };
 
+#define ACME_EV_SCHED  (1ULL << 0)  /* scheduling wakeup */
+#define ACME_EV_NEW    (1ULL << 1)  /* new task */
+#define ACME_EV_TASK   (1ULL << 2)  /* Task handler */
+#define ACME_EV_REQ    (1ULL << 3)  /* HTTP Request */
+#define ACME_EV_RES    (1ULL << 4)  /* HTTP Response */
+
+#define ACME_VERB_CLEAN    1
+#define ACME_VERB_MINIMAL  2
+#define ACME_VERB_SIMPLE   3
+#define ACME_VERB_ADVANCED 4
+#define ACME_VERB_COMPLETE 5
+
 #endif
include/haproxy/bug.h

@@ -620,9 +620,92 @@ struct mem_stats {
 	_HA_ATOMIC_ADD(&_.size, __y);   \
 	strdup(__x);                    \
 })
 
+#undef ha_aligned_alloc
+#define ha_aligned_alloc(a,s)  ({                               \
+	size_t __a = (a);                                       \
+	size_t __s = (s);                                       \
+	static struct mem_stats _ __attribute__((used,__section__("mem_stats"),__aligned__(sizeof(void*)))) = { \
+		.caller = {                                     \
+			.file = __FILE__, .line = __LINE__,     \
+			.what = MEM_STATS_TYPE_MALLOC,          \
+			.func = __func__,                       \
+		},                                              \
+	};                                                      \
+	HA_WEAK(__start_mem_stats);                             \
+	HA_WEAK(__stop_mem_stats);                              \
+	_HA_ATOMIC_INC(&_.calls);                               \
+	_HA_ATOMIC_ADD(&_.size, __s);                           \
+	_ha_aligned_alloc(__a, __s);                            \
+})
+
+#undef ha_aligned_alloc_safe
+#define ha_aligned_alloc_safe(a,s)  ({                          \
+	size_t __a = (a);                                       \
+	size_t __s = (s);                                       \
+	static struct mem_stats _ __attribute__((used,__section__("mem_stats"),__aligned__(sizeof(void*)))) = { \
+		.caller = {                                     \
+			.file = __FILE__, .line = __LINE__,     \
+			.what = MEM_STATS_TYPE_MALLOC,          \
+			.func = __func__,                       \
+		},                                              \
+	};                                                      \
+	HA_WEAK(__start_mem_stats);                             \
+	HA_WEAK(__stop_mem_stats);                              \
+	_HA_ATOMIC_INC(&_.calls);                               \
+	_HA_ATOMIC_ADD(&_.size, __s);                           \
+	_ha_aligned_alloc_safe(__a, __s);                       \
+})
+
+#undef ha_aligned_free
+#define ha_aligned_free(x)  ({                                  \
+	typeof(x) __x = (x);                                    \
+	static struct mem_stats _ __attribute__((used,__section__("mem_stats"),__aligned__(sizeof(void*)))) = { \
+		.caller = {                                     \
+			.file = __FILE__, .line = __LINE__,     \
+			.what = MEM_STATS_TYPE_FREE,            \
+			.func = __func__,                       \
+		},                                              \
+	};                                                      \
+	HA_WEAK(__start_mem_stats);                             \
+	HA_WEAK(__stop_mem_stats);                              \
+	if (__builtin_constant_p((x))) {                        \
+		HA_LINK_ERROR(call_to_ha_aligned_free_attempts_to_free_a_constant); \
+	}                                                       \
+	if (__x)                                                \
+		_HA_ATOMIC_INC(&_.calls);                       \
+	_ha_aligned_free(__x);                                  \
+})
+
+#undef ha_aligned_free_size
+#define ha_aligned_free_size(p,s)  ({                           \
+	void *__p = (p); size_t __s = (s);                      \
+	static struct mem_stats _ __attribute__((used,__section__("mem_stats"),__aligned__(sizeof(void*)))) = { \
+		.caller = {                                     \
+			.file = __FILE__, .line = __LINE__,     \
+			.what = MEM_STATS_TYPE_FREE,            \
+			.func = __func__,                       \
+		},                                              \
+	};                                                      \
+	HA_WEAK(__start_mem_stats);                             \
+	HA_WEAK(__stop_mem_stats);                              \
+	if (__builtin_constant_p((p))) {                        \
+		HA_LINK_ERROR(call_to_ha_aligned_free_attempts_to_free_a_constant); \
+	}                                                       \
+	if (__p) {                                              \
+		_HA_ATOMIC_INC(&_.calls);                       \
+		_HA_ATOMIC_ADD(&_.size, __s);                   \
+	}                                                       \
+	_ha_aligned_free(__p);                                  \
+})
+
 #else // DEBUG_MEM_STATS
 
 #define will_free(x, y)  do { } while (0)
+#define ha_aligned_alloc(a,s)       _ha_aligned_alloc(a, s)
+#define ha_aligned_alloc_safe(a,s)  _ha_aligned_alloc_safe(a, s)
+#define ha_aligned_free(p)          _ha_aligned_free(p)
+#define ha_aligned_free_size(p,s)   _ha_aligned_free(p)
 
 #endif /* DEBUG_MEM_STATS*/
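
Each expansion above plants one static struct mem_stats entry in the dedicated
"mem_stats" linker section delimited by the weak __start_mem_stats and
__stop_mem_stats symbols, which is what lets debugging code enumerate every
allocation site at run time. A minimal sketch of such a walk (dump_mem_stats()
is a hypothetical reporting function, HAProxy's own dump code differs, and the
casts paper over the exact field types):

	#include <stdio.h>

	extern struct mem_stats __start_mem_stats[];
	extern struct mem_stats __stop_mem_stats[];

	static void dump_mem_stats(void)
	{
		struct mem_stats *p;

		/* one entry per ha_aligned_alloc()/ha_aligned_free() call site */
		for (p = __start_mem_stats; p < __stop_mem_stats; p++)
			printf("%s:%u %s(): %llu calls, %llu bytes\n",
			       p->caller.file, (unsigned int)p->caller.line,
			       p->caller.func,
			       (unsigned long long)p->calls,
			       (unsigned long long)p->size);
	}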

include/haproxy/clock.h

@@ -28,7 +28,7 @@
 extern struct timeval        start_date;    /* the process's start date in wall-clock time */
 extern struct timeval        ready_date;    /* date when the process was considered ready */
 extern ullong                start_time_ns; /* the process's start date in internal monotonic time (ns) */
-extern volatile ullong       global_now_ns; /* common monotonic date between all threads, in ns (wraps every 585 yr) */
+extern volatile ullong       *global_now_ns;/* common monotonic date between all threads, in ns (wraps every 585 yr) */
 
 extern THREAD_LOCAL ullong   now_ns;        /* internal monotonic date derived from real clock, in ns (wraps every 585 yr) */
 extern THREAD_LOCAL struct timeval date;    /* the real current date (wall-clock time) */
include/haproxy/compat.h

@@ -94,11 +94,21 @@ typedef struct { } empty_t;
 # endif
 #endif
 
+/* unsafe ones for use with constant macros needed in initializers */
+#ifndef _MIN
+#define _MIN(a, b) ((a < b) ? a : b)
+#endif
+
+#ifndef _MAX
+#define _MAX(a, b) ((a > b) ? a : b)
+#endif
+
+/* safe versions for use anywhere except in initializers */
 #ifndef MIN
 #define MIN(a, b) ({            \
 	typeof(a) _a = (a);     \
 	typeof(a) _b = (b);     \
-	((_a < _b) ? _a : _b);  \
+	_MIN(_a, _b);           \
 })
 #endif
 
@@ -106,10 +116,15 @@ typedef struct { } empty_t;
 #define MAX(a, b) ({            \
 	typeof(a) _a = (a);     \
 	typeof(a) _b = (b);     \
-	((_a > _b) ? _a : _b);  \
+	_MAX(_a, _b);           \
 })
 #endif
 
+/* always set a _POSIX_VERSION if there isn't any, in order to ease compares */
+#ifndef _POSIX_VERSION
+# define _POSIX_VERSION 0
+#endif
+
 /* this is for libc5 for example */
 #ifndef TCP_NODELAY
 #define TCP_NODELAY 1
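
The reason for the two families deserves a note: MIN()/MAX() are GNU statement
expressions, which evaluate each argument exactly once but are not constant
expressions, so they cannot appear in static initializers or file-scope array
sizes; _MIN()/_MAX() expand to a plain conditional that can, at the price of
evaluating each argument twice. A short sketch (the array mirrors the
QUIC_MAX_CC_BUFSIZE change further down; the i++ line is a hypothetical misuse):

	/* OK at file scope: plain conditional, folded at compile time */
	static char cc_buf[_MAX(QUIC_INITIAL_IPV6_MTU, QUIC_INITIAL_IPV4_MTU)];

	/* rejected at file scope: ({ ... }) is not a constant expression */
	/* static int x = MAX(a, b); */

	/* double-evaluation hazard of the unsafe form: i++ would run twice */
	/* _MIN(i++, limit); */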

include/haproxy/counters.h

@@ -28,7 +28,7 @@
 #include <haproxy/guid-t.h>
 
 int counters_fe_shared_prepare(struct fe_counters_shared *counters, const struct guid_node *guid);
-int counters_be_shared_init(struct be_counters_shared *counters, const struct guid_node *guid);
+int counters_be_shared_prepare(struct be_counters_shared *counters, const struct guid_node *guid);
 
 void counters_fe_shared_drop(struct fe_counters_shared *counters);
 void counters_be_shared_drop(struct be_counters_shared *counters);
|
@ -284,10 +284,11 @@ static __inline void watcher_attach(struct watcher *w, void *target)
|
||||
MT_LIST_APPEND(list, &w->el);
|
||||
}
|
||||
|
||||
/* Untracks target via <w> watcher. Invalid if <w> is not attached first. */
|
||||
/* Untracks target via <w> watcher. Does nothing if <w> is not attached */
|
||||
static __inline void watcher_detach(struct watcher *w)
|
||||
{
|
||||
BUG_ON_HOT(!MT_LIST_INLIST(&w->el));
|
||||
if (!MT_LIST_INLIST(&w->el))
|
||||
return;
|
||||
*w->pptr = NULL;
|
||||
MT_LIST_DELETE(&w->el);
|
||||
}
|
||||
|

include/haproxy/pool-os.h

@@ -25,6 +25,7 @@
 #include <sys/mman.h>
 #include <stdlib.h>
 #include <haproxy/api.h>
+#include <haproxy/tools.h>
 
 
 /************* normal allocator *************/
@@ -32,9 +33,9 @@
 /* allocates an area of size <size> and returns it. The semantics are similar
  * to those of malloc().
  */
-static forceinline void *pool_alloc_area(size_t size)
+static forceinline void *pool_alloc_area(size_t size, size_t align)
 {
-	return malloc(size);
+	return ha_aligned_alloc(align, size);
 }
 
 /* frees an area <area> of size <size> allocated by pool_alloc_area(). The
@@ -43,8 +44,7 @@ static forceinline void *pool_alloc_area(size_t size)
  */
 static forceinline void pool_free_area(void *area, size_t __maybe_unused size)
 {
-	will_free(area, size);
-	free(area);
+	ha_aligned_free_size(area, size);
 }
 
 /************* use-after-free allocator *************/
@@ -52,14 +52,15 @@ static forceinline void pool_free_area(void *area, size_t __maybe_unused size)
 /* allocates an area of size <size> and returns it. The semantics are similar
  * to those of malloc(). However the allocation is rounded up to 4kB so that a
  * full page is allocated. This ensures the object can be freed alone so that
- * future dereferences are easily detected. The returned object is always
- * 16-bytes aligned to avoid issues with unaligned structure objects. In case
- * some padding is added, the area's start address is copied at the end of the
- * padding to help detect underflows.
+ * future dereferences are easily detected. The returned object is always at
+ * least 16-bytes aligned to avoid issues with unaligned structure objects, and
+ * in any case, is always at least aligned as required by the pool, though no
+ * more than 4096. In case some padding is added, the area's start address is
+ * copied at the end of the padding to help detect underflows.
  */
-static inline void *pool_alloc_area_uaf(size_t size)
+static inline void *pool_alloc_area_uaf(size_t size, size_t align)
 {
-	size_t pad = (4096 - size) & 0xFF0;
+	size_t pad = (4096 - size) & 0xFF0 & -align;
 	void *ret;
 
 	ret = mmap(NULL, (size + 4095) & -4096, PROT_READ|PROT_WRITE, MAP_PRIVATE|MAP_ANONYMOUS, -1, 0);

include/haproxy/pool-t.h

@@ -28,6 +28,7 @@
 #define MEM_F_SHARED   0x1
 #define MEM_F_EXACT    0x2
 #define MEM_F_UAF      0x4
+#define MEM_F_STATREG  0x8 /* static registration: do not free it! */
 
 /* A special pointer for the pool's free_list that indicates someone is
  * currently manipulating it. Serves as a short-lived lock.
@@ -69,7 +70,9 @@ struct pool_cache_head {
  */
 struct pool_registration {
 	struct list list;   /* link element */
-	char name[12];      /* name of the pool */
+	const char *name;   /* name of the pool */
+	const char *file;   /* where the pool is declared */
+	unsigned int line;  /* line in the file where the pool is declared, 0 if none */
 	unsigned int size;  /* expected object size */
 	unsigned int flags; /* MEM_F_* */
 	unsigned int align; /* expected alignment; 0=unspecified */
@@ -125,6 +128,7 @@ struct pool_head {
 	unsigned int minavail;  /* how many chunks are expected to be used */
 	unsigned int size;      /* chunk size */
 	unsigned int flags;     /* MEM_F_* */
+	unsigned int align;     /* alignment size */
 	unsigned int users;     /* number of pools sharing this zone */
 	unsigned int alloc_sz;  /* allocated size (includes hidden fields) */
 	unsigned int sum_size;  /* sum of all registered users' size */

include/haproxy/pool.h

@@ -30,19 +30,71 @@
 #include <haproxy/pool-t.h>
 #include <haproxy/thread.h>
 
-/* This registers a call to create_pool_callback(ptr, name, size) */
+/* This creates a pool_reg registers a call to create_pool_callback(ptr) with it.
+ * Do not use this one, use REGISTER_POOL() instead.
+ */
+#define __REGISTER_POOL(_line, _ptr, _name, _size, _align)     \
+	static struct pool_registration __pool_reg_##_line = { \
+		.name = _name,                                 \
+		.file = __FILE__,                              \
+		.line = __LINE__,                              \
+		.size = _size,                                 \
+		.flags = MEM_F_STATREG,                        \
+		.align = _align,                               \
+	};                                                     \
+	INITCALL3(STG_POOL, create_pool_callback, (_ptr), (_name), &__pool_reg_##_line);
+
+/* intermediary level for line number resolution, do not use this one, use
+ * REGISTER_POOL() instead.
+ */
+#define _REGISTER_POOL(line, ptr, name, size, align) \
+	__REGISTER_POOL(line, ptr, name, size, align)
+
+/* This registers a call to create_pool_callback(ptr) with these args */
 #define REGISTER_POOL(ptr, name, size) \
-	INITCALL3(STG_POOL, create_pool_callback, (ptr), (name), (size))
+	_REGISTER_POOL(__LINE__, ptr, name, size, 0)
 
 /* This macro declares a pool head <ptr> and registers its creation */
 #define DECLARE_POOL(ptr, name, size) \
 	struct pool_head *(ptr) __read_mostly = NULL; \
-	REGISTER_POOL(&ptr, name, size)
+	_REGISTER_POOL(__LINE__, &ptr, name, size, 0)
 
 /* This macro declares a static pool head <ptr> and registers its creation */
 #define DECLARE_STATIC_POOL(ptr, name, size) \
 	static struct pool_head *(ptr) __read_mostly; \
-	REGISTER_POOL(&ptr, name, size)
+	_REGISTER_POOL(__LINE__, &ptr, name, size, 0)
+
+/*** below are the aligned pool macros, taking one extra arg for alignment ***/
+
+/* This registers a call to create_pool_callback(ptr) with these args */
+#define REGISTER_ALIGNED_POOL(ptr, name, size, align) \
+	_REGISTER_POOL(__LINE__, ptr, name, size, align)
+
+/* This macro declares an aligned pool head <ptr> and registers its creation */
+#define DECLARE_ALIGNED_POOL(ptr, name, size, align) \
+	struct pool_head *(ptr) __read_mostly = NULL; \
+	_REGISTER_POOL(__LINE__, &ptr, name, size, align)
+
+/* This macro declares a static aligned pool head <ptr> and registers its creation */
+#define DECLARE_STATIC_ALIGNED_POOL(ptr, name, size, align) \
+	static struct pool_head *(ptr) __read_mostly; \
+	_REGISTER_POOL(__LINE__, &ptr, name, size, align)
+
+/*** below are the typed pool macros, taking a type and an extra size ***/
+
+/* This registers a call to create_pool_callback(ptr) with these args */
+#define REGISTER_TYPED_POOL(ptr, name, type, extra) \
+	_REGISTER_POOL(__LINE__, ptr, name, sizeof(type) + extra, __alignof__(type))
+
+/* This macro declares an aligned pool head <ptr> and registers its creation */
+#define DECLARE_TYPED_POOL(ptr, name, type, extra) \
+	struct pool_head *(ptr) __read_mostly = NULL; \
+	_REGISTER_POOL(__LINE__, &ptr, name, sizeof(type) + extra, __alignof__(type))
+
+/* This macro declares a static aligned pool head <ptr> and registers its creation */
+#define DECLARE_STATIC_TYPED_POOL(ptr, name, type, extra) \
+	static struct pool_head *(ptr) __read_mostly; \
+	_REGISTER_POOL(__LINE__, &ptr, name, sizeof(type) + extra, __alignof__(type))
 
 /* By default, free objects are linked by a pointer stored at the beginning of
  * the memory area. When DEBUG_MEMORY_POOLS is set, the allocated area is

@@ -123,14 +175,22 @@ unsigned long long pool_total_allocated(void);
 unsigned long long pool_total_used(void);
 void pool_flush(struct pool_head *pool);
 void pool_gc(struct pool_head *pool_ctx);
-struct pool_head *create_pool(char *name, unsigned int size, unsigned int flags);
-void create_pool_callback(struct pool_head **ptr, char *name, unsigned int size);
+struct pool_head *create_pool_with_loc(const char *name, unsigned int size, unsigned int align,
+                                       unsigned int flags, const char *file, unsigned int line);
+struct pool_head *create_pool_from_reg(const char *name, struct pool_registration *reg);
+void create_pool_callback(struct pool_head **ptr, char *name, struct pool_registration *reg);
 void *pool_destroy(struct pool_head *pool);
 void pool_destroy_all(void);
 void *__pool_alloc(struct pool_head *pool, unsigned int flags);
 void __pool_free(struct pool_head *pool, void *ptr);
 void pool_inspect_item(const char *msg, struct pool_head *pool, const void *item, const void *caller, ssize_t ofs);
 
+#define create_pool(name, size, flags) \
+	create_pool_with_loc(name, size, 0, flags, __FILE__, __LINE__)
+
+#define create_aligned_pool(name, size, align, flags) \
+	create_pool_with_loc(name, size, align, flags, __FILE__, __LINE__)
+
 
 /****************** Thread-local cache management ******************/
@@ -3,7 +3,7 @@
 
 #define QUIC_MIN_CC_PKTSIZE  128
 #define QUIC_DGRAM_HEADLEN   (sizeof(uint16_t) + sizeof(void *))
-#define QUIC_MAX_CC_BUFSIZE  MAX(QUIC_INITIAL_IPV6_MTU, QUIC_INITIAL_IPV4_MTU)
+#define QUIC_MAX_CC_BUFSIZE  _MAX(QUIC_INITIAL_IPV6_MTU, QUIC_INITIAL_IPV4_MTU)
 
 /* Sendmsg input buffer cannot be bigger than 65535 bytes. This comes from UDP
  * header which uses a 2-bytes length field. QUIC datagrams are limited to 1252
include/haproxy/session.h

@@ -171,25 +171,31 @@ static inline void session_unown_conn(struct session *sess, struct connection *c
 	}
 }
 
-/* Add the connection <conn> to the private conns list of session <sess>. This
- * function is called only if the connection is private. Nothing is performed
- * if the connection is already in the session list or if the session does not
- * owned the connection.
+/* Add the connection <conn> to the private conns list of session <sess>. Each
+ * connection is indexed by their respective target in the session. Nothing is
+ * performed if the connection is already in the session list.
+ *
+ * Returns true if conn is inserted or already present else false if a failure
+ * occurs during insertion.
  */
-static inline int session_add_conn(struct session *sess, struct connection *conn, void *target)
+static inline int session_add_conn(struct session *sess, struct connection *conn)
 {
 	struct sess_priv_conns *pconns = NULL;
 	struct server *srv = objt_server(conn->target);
 	int found = 0;
 
-	BUG_ON(objt_listener(conn->target));
+	/* Connection target is used to index it in the session. Only BE conns are expected in session list. */
+	BUG_ON(!conn->target || objt_listener(conn->target));
 
-	/* Already attach to the session or not the connection owner */
-	if (!LIST_ISEMPTY(&conn->sess_el) || (conn->owner && conn->owner != sess))
+	/* A connection cannot be attached already to another session. */
+	BUG_ON(conn->owner && conn->owner != sess);
+
+	/* Already attach to the session */
+	if (!LIST_ISEMPTY(&conn->sess_el))
 		return 1;
 
 	list_for_each_entry(pconns, &sess->priv_conns, sess_el) {
-		if (pconns->target == target) {
+		if (pconns->target == conn->target) {
 			found = 1;
 			break;
 		}
@@ -199,7 +205,7 @@ static inline int session_add_conn(struct session *sess, struct connection *conn
 		pconns = pool_alloc(pool_head_sess_priv_conns);
 		if (!pconns)
 			return 0;
-		pconns->target = target;
+		pconns->target = conn->target;
 		LIST_INIT(&pconns->conn_list);
 		LIST_APPEND(&sess->priv_conns, &pconns->sess_el);
 
@@ -219,25 +225,34 @@ static inline int session_add_conn(struct session *sess, struct connection *conn
 	return 1;
 }
 
-/* Returns 0 if the session can keep the idle conn, -1 if it was destroyed. The
- * connection must be private.
+/* Check that session <sess> is able to keep idle connection <conn>. This must
+ * be called each time a connection stored in a session becomes idle.
+ *
+ * Returns 0 if the connection is kept, else non-zero if the connection was
+ * explicitely removed from session.
  */
 static inline int session_check_idle_conn(struct session *sess, struct connection *conn)
 {
-	/* Another session owns this connection */
-	if (conn->owner != sess)
+	/* Connection must be attached to session prior to this function call. */
+	BUG_ON(!conn->owner || conn->owner != sess);
+
+	/* Connection is not attached to a session. */
+	if (!conn->owner)
 		return 0;
 
+	/* Ensure conn is not already accounted as idle to prevent sess idle count excess increment. */
+	BUG_ON(conn->flags & CO_FL_SESS_IDLE);
+
 	if (sess->idle_conns >= sess->fe->max_out_conns) {
 		session_unown_conn(sess, conn);
 		conn->owner = NULL;
 		conn->flags &= ~CO_FL_SESS_IDLE;
 		conn->mux->destroy(conn->ctx);
 		return -1;
-	} else {
+	}
+	else {
 		conn->flags |= CO_FL_SESS_IDLE;
 		sess->idle_conns++;
 	}
 
 	return 0;
 }
include/haproxy/tick.h

@@ -64,7 +64,7 @@
 
 /* currently updated and stored in time.c */
 extern THREAD_LOCAL unsigned int now_ms;  /* internal date in milliseconds (may wrap) */
-extern volatile unsigned int global_now_ms;
+extern volatile unsigned int *global_now_ms;
 
 /* return 1 if tick is set, otherwise 0 */
 static inline int tick_isset(int expire)
include/haproxy/tools.h

@@ -1178,6 +1178,80 @@ static inline void *my_realloc2(void *ptr, size_t size)
 	return ret;
 }
 
+/* portable memalign(): tries to accommodate OS specificities, and may fall
+ * back to plain malloc() if not supported, meaning that alignment guarantees
+ * are only a performance bonus but not granted. The caller is responsible for
+ * guaranteeing that the requested alignment is at least sizeof(void*) and a
+ * power of two. If uncertain, use ha_aligned_alloc() instead. The pointer
+ * needs to be passed to ha_aligned_free() for freeing (due to cygwin). Please
+ * use ha_aligned_alloc() instead (which does perform accounting).
+ */
+static inline void *_ha_aligned_alloc(size_t alignment, size_t size)
+{
+	/* let's consider that most OSes have posix_memalign() and make the
+	 * exception for the other ones. This way if an OS fails to build,
+	 * we'll know about it and handle it as a new exception instead of
+	 * relying on old fallbacks that may break (e.g. most BSDs have
+	 * dropped memalign()).
+	 */
+#if defined(_WIN32)
+	/* MINGW (Cygwin) uses _aligned_malloc() */
+	return _aligned_malloc(size, alignment);
+#elif _POSIX_VERSION < 200112L || defined(__sun)
+	/* Old OSes or Solaris */
+	return memalign(alignment, size);
+#else
+	void *ret;
+
+	/* most BSD, Linux since glibc 2.2, Solaris 11 */
+	if (posix_memalign(&ret, alignment, size) == 0)
+		return ret;
+	else
+		return NULL;
+#endif
+}
+
+/* portable memalign(): tries to accommodate OS specificities, and may fall
+ * back to plain malloc() if not supported, meaning that alignment guarantees
+ * are only a performance bonus but not granted. The size will automatically be
+ * rounded up to the next power of two and set to a minimum of sizeof(void*).
+ * The checks are cheap and generally optimized away by the compiler since most
+ * input arguments are build time constants. The pointer needs to be passed to
+ * ha_aligned_free() for freeing (due to cygwin). Please use
+ * ha_aligned_alloc_safe() instead (which does perform accounting).
+ */
+static inline void *_ha_aligned_alloc_safe(size_t alignment, size_t size)
+{
+	if (unlikely(alignment < sizeof(void*)))
+		alignment = sizeof(void*);
+	else if (unlikely(alignment & (alignment - 1))) {
+		/* not power of two! round up to next power of two by filling
+		 * all LSB in O(log(log(N))) then increment the result.
+		 */
+		int shift = 1;
+		do {
+			alignment |= alignment >> shift;
+			shift *= 2;
+		} while (unlikely(alignment & (alignment + 1)));
+		alignment++;
+	}
+	return _ha_aligned_alloc(alignment, size);
+}
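
A quick worked example of the rounding loop above: a request for alignment 24
(0b11000) fails the power-of-two test (24 & 23 != 0); the OR-shift passes then
fill the low bits (24|12 = 28, 28|7 = 31, and 31 & 32 == 0 terminates the
loop), and the final increment yields 32, the next power of two.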

+/* To be used to free a pointer returned by _ha_aligned_alloc() or
+ * _ha_aligned_alloc_safe(). Please use ha_aligned_free() instead
+ * (which does perform accounting).
+ */
+static inline void _ha_aligned_free(void *ptr)
+{
+#if defined(_WIN32)
+	return _aligned_free(ptr);
+#else
+	free(ptr);
+#endif
+}
+
 int parse_dotted_uints(const char *s, unsigned int **nums, size_t *sz);
 
 /* PRNG */
reg-tests/converter/le2dec.vtc (new file, 56 lines)

@@ -0,0 +1,56 @@
+varnishtest "le2dec converter Test"
+
+feature cmd "$HAPROXY_PROGRAM -cc 'version_atleast(3.0-dev0)'"
+feature ignore_unknown_macro
+
+server s1 {
+	rxreq
+	txresp -hdr "Connection: close"
+} -repeat 3 -start
+
+haproxy h1 -conf {
+    defaults
+	mode http
+	timeout connect "${HAPROXY_TEST_TIMEOUT-5s}"
+	timeout client  "${HAPROXY_TEST_TIMEOUT-5s}"
+	timeout server  "${HAPROXY_TEST_TIMEOUT-5s}"
+
+    frontend fe
+	bind "fd@${fe}"
+
+	#### requests
+	http-request set-var(txn.input) req.hdr(input)
+
+	http-response set-header le2dec-1 "%[var(txn.input),le2dec(:,1)]"
+	http-response set-header le2dec-2 "%[var(txn.input),le2dec(-,3)]"
+	http-response set-header le2dec-3 "%[var(txn.input),le2dec(::,3,1)]"
+
+	default_backend be
+
+    backend be
+	server s1 ${s1_addr}:${s1_port}
+} -start
+
+client c1 -connect ${h1_fe_sock} {
+	txreq -url "/" \
+	  -hdr "input:"
+	rxresp
+	expect resp.status == 200
+	expect resp.http.le2dec-1 == ""
+	expect resp.http.le2dec-2 == ""
+	expect resp.http.le2dec-3 == ""
+	txreq -url "/" \
+	  -hdr "input: 0123456789"
+	rxresp
+	expect resp.status == 200
+	expect resp.http.le2dec-1 == "48:49:50:51:52:53:54:55:56:57"
+	expect resp.http.le2dec-2 == "3289392-3486771-3684150-57"
+	expect resp.http.le2dec-3 == "3289392::3486771::3684150"
+	txreq -url "/" \
+	  -hdr "input: abcdefghijklmnopqrstuvwxyz"
+	rxresp
+	expect resp.status == 200
+	expect resp.http.le2dec-1 == "97:98:99:100:101:102:103:104:105:106:107:108:109:110:111:112:113:114:115:116:117:118:119:120:121:122"
+	expect resp.http.le2dec-2 == "6513249-6710628-6908007-7105386-7302765-7500144-7697523-7894902-31353"
+	expect resp.http.le2dec-3 == "6513249::6710628::6908007::7105386::7302765::7500144::7697523::7894902"
+} -run
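
For reference, the expected values above follow directly from the converter's
definition: le2dec(:,1) prints each byte's decimal value ("0".."9" are ASCII
48..57), while le2dec(-,3) reads 3-byte little-endian chunks, e.g. "012" =
30 31 32 in hex = 0x323130 = 3289392; the truncating le2dec(::,3,1) simply
drops the trailing partial chunk.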

src/acme.c (293 changed lines)
@@ -34,9 +34,111 @@
 #include <haproxy/ssl_sock.h>
 #include <haproxy/ssl_utils.h>
 #include <haproxy/tools.h>
+#include <haproxy/trace.h>
 
+#define TRACE_SOURCE &trace_acme
 
 #if defined(HAVE_ACME)
 
+static void acme_trace(enum trace_level level, uint64_t mask, const struct trace_source *src,
+                       const struct ist where, const struct ist func,
+                       const void *a1, const void *a2, const void *a3, const void *a4);
+
+static const struct trace_event acme_trace_events[] = {
+	{ .mask = ACME_EV_SCHED, .name = "acme_sched", .desc = "Wakeup scheduled ACME task" },
+	{ .mask = ACME_EV_NEW,   .name = "acme_new",   .desc = "New ACME task" },
+	{ .mask = ACME_EV_TASK,  .name = "acme_task",  .desc = "ACME task" },
+	{ }
+};
+
+static const struct name_desc acme_trace_lockon_args[4] = {
+	/* arg1 */ { .name = "acme_ctx", .desc = "ACME context" },
+	/* arg2 */ { },
+	/* arg3 */ { },
+	/* arg4 */ { }
+};
+
+static const struct name_desc acme_trace_decoding[] = {
+	{ .name = "clean",    .desc = "only user-friendly stuff, generally suitable for level \"user\"" },
+	{ .name = "minimal",  .desc = "report only conn, no real decoding" },
+	{ .name = "simple",   .desc = "add error messages" },
+	{ .name = "advanced", .desc = "add handshake-related details" },
+	{ .name = "complete", .desc = "add full data dump when available" },
+	{ /* end */ }
+};
+
+struct trace_source trace_acme = {
+	.name = IST("acme"),
+	.desc = "ACME",
+	.arg_def = TRC_ARG_PRIV,
+	.default_cb = acme_trace,
+	.known_events = acme_trace_events,
+	.lockon_args = acme_trace_lockon_args,
+	.decoding = acme_trace_decoding,
+	.report_events = ~0,  /* report everything by default */
+};
+
+INITCALL1(STG_REGISTER, trace_register_source, &trace_acme);
+
+static void acme_trace(enum trace_level level, uint64_t mask, const struct trace_source *src,
+                       const struct ist where, const struct ist func,
+                       const void *a1, const void *a2, const void *a3, const void *a4)
+{
+	const struct acme_ctx *ctx = a1;
+
+	if (src->verbosity <= ACME_VERB_CLEAN)
+		return;
+
+	chunk_appendf(&trace_buf, " :");
+
+	if (mask >= ACME_EV_NEW)
+		chunk_appendf(&trace_buf, " acme_ctx=%p", ctx);
+
+	if (mask == ACME_EV_NEW)
+		chunk_appendf(&trace_buf, ", crt=%s", ctx->store->path);
+
+	if (mask >= ACME_EV_TASK) {
+
+		switch (ctx->http_state) {
+		case ACME_HTTP_REQ:
+			chunk_appendf(&trace_buf, ", http_st: ACME_HTTP_REQ");
+			break;
+		case ACME_HTTP_RES:
+			chunk_appendf(&trace_buf, ", http_st: ACME_HTTP_RES");
+			break;
+		}
+		chunk_appendf(&trace_buf, ", st: ");
+		switch (ctx->state) {
+		case ACME_RESOURCES:    chunk_appendf(&trace_buf, "ACME_RESOURCES");    break;
+		case ACME_NEWNONCE:     chunk_appendf(&trace_buf, "ACME_NEWNONCE");     break;
+		case ACME_CHKACCOUNT:   chunk_appendf(&trace_buf, "ACME_CHKACCOUNT");   break;
+		case ACME_NEWACCOUNT:   chunk_appendf(&trace_buf, "ACME_NEWACCOUNT");   break;
+		case ACME_NEWORDER:     chunk_appendf(&trace_buf, "ACME_NEWORDER");     break;
+		case ACME_AUTH:         chunk_appendf(&trace_buf, "ACME_AUTH");         break;
+		case ACME_CHALLENGE:    chunk_appendf(&trace_buf, "ACME_CHALLENGE");    break;
+		case ACME_CHKCHALLENGE: chunk_appendf(&trace_buf, "ACME_CHKCHALLENGE"); break;
+		case ACME_FINALIZE:     chunk_appendf(&trace_buf, "ACME_FINALIZE");     break;
+		case ACME_CHKORDER:     chunk_appendf(&trace_buf, "ACME_CHKORDER");     break;
+		case ACME_CERTIFICATE:  chunk_appendf(&trace_buf, "ACME_CERTIFICATE");  break;
+		case ACME_END:          chunk_appendf(&trace_buf, "ACME_END");          break;
+		}
+	}
+	if (mask & (ACME_EV_REQ|ACME_EV_RES)) {
+		const struct ist *url = a2;
+		const struct buffer *buf = a3;
+
+		if (mask & ACME_EV_REQ)
+			chunk_appendf(&trace_buf, " url: %.*s", (int)url->len, url->ptr);
+
+		if (src->verbosity >= ACME_VERB_COMPLETE && level >= TRACE_LEVEL_DATA) {
+			chunk_appendf(&trace_buf, " Buffer Dump:\n");
+			chunk_appendf(&trace_buf, "%.*s", (int)buf->data, buf->area);
+		}
+	}
+}
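
Once registered this way, the new source is driven like any other HAProxy
trace source from the CLI; a plausible session (the sink/level keywords are
the generic trace ones, while the verbosity names come from
acme_trace_decoding above):

	trace acme sink stderr
	trace acme level developer
	trace acme verbosity complete
	trace acme start now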

+struct mt_list acme_tasks = MT_LIST_HEAD_INIT(acme_tasks);
 
@@ -653,6 +755,7 @@ static void acme_ctx_destroy(struct acme_ctx *ctx)
 		istfree(&auth->auth);
 		istfree(&auth->chall);
 		istfree(&auth->token);
+		istfree(&auth->token);
 		next = auth->next;
 		free(auth);
 		auth = next;
@@ -788,6 +891,43 @@ int acme_http_req(struct task *task, struct acme_ctx *ctx, struct ist url, enum
 
 }
 
+/*
+ * compute a TXT record for DNS-01 challenge
+ * base64url(sha256(token || '.' || base64url(Thumbprint(accountKey))))
+ *
+ * https://datatracker.ietf.org/doc/html/rfc8555/#section-8.4
+ *
+ */
+unsigned int acme_txt_record(const struct ist thumbprint, const struct ist token, struct buffer *output)
+{
+	unsigned char md[EVP_MAX_MD_SIZE];
+	struct buffer *tmp = NULL;
+	unsigned int size;
+	int ret = 0;
+
+	if ((tmp = alloc_trash_chunk()) == NULL)
+		goto out;
+
+	chunk_istcat(tmp, token);
+	chunk_appendf(tmp, ".");
+	chunk_istcat(tmp, thumbprint);
+
+	if (EVP_Digest(tmp->area, tmp->data, md, &size, EVP_sha256(), NULL) == 0)
+		goto out;
+
+	ret = a2base64url((const char *)md, size, output->area, output->size);
+	if (ret < 0)
+		ret = 0;
+	output->data = ret;
+
+out:
+	free_trash_chunk(tmp);
+
+	return ret;
+}
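
In RFC 8555 terms the string being hashed here is the "key authorization":
for a challenge token <tok> and an account-key thumbprint <thumb> (itself
already base64url-encoded, per RFC 7638), the TXT record to publish under
_acme-challenge.<domain> is base64url(SHA-256("<tok>.<thumb>")).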
 
 int acme_jws_payload(struct buffer *req, struct ist nonce, struct ist url, EVP_PKEY *pkey, struct ist kid, struct buffer *output, char **errmsg)
 {
 	struct buffer *b64payload = NULL;
@@ -930,6 +1070,8 @@ int acme_res_certificate(struct task *task, struct acme_ctx *ctx, char **errmsg)
 		}
 	}
 
+	TRACE_DATA(__FUNCTION__, ACME_EV_RES, ctx, NULL, &hc->res.buf);
+
 	if (hc->res.status < 200 || hc->res.status >= 300) {
 		if ((ret = mjson_get_string(hc->res.buf.area, hc->res.buf.data, "$.detail", t1->area, t1->size)) > -1)
 			t1->data = ret;
@@ -1001,6 +1143,8 @@ int acme_res_chkorder(struct task *task, struct acme_ctx *ctx, char **errmsg)
 		}
 	}
 
+	TRACE_DATA(__FUNCTION__, ACME_EV_RES, ctx, NULL, &hc->res.buf);
+
 	if (hc->res.status < 200 || hc->res.status >= 300) {
 		if ((ret = mjson_get_string(hc->res.buf.area, hc->res.buf.data, "$.detail", t1->area, t1->size)) > -1)
 			t1->data = ret;
@@ -1130,6 +1274,8 @@ int acme_res_finalize(struct task *task, struct acme_ctx *ctx, char **errmsg)
 		}
 	}
 
+	TRACE_DATA(__FUNCTION__, ACME_EV_RES, ctx, NULL, &hc->res.buf);
+
 	if (hc->res.status < 200 || hc->res.status >= 300) {
 		if ((ret = mjson_get_string(hc->res.buf.area, hc->res.buf.data, "$.detail", t1->area, t1->size)) > -1)
 			t1->data = ret;
@@ -1174,9 +1320,13 @@ int acme_req_challenge(struct task *task, struct acme_ctx *ctx, struct acme_auth
 
 	chunk_printf(req_in, "{}");
 
+	TRACE_DATA("REQ challenge dec", ACME_EV_REQ, ctx, &auth->chall, req_in);
+
 	if (acme_jws_payload(req_in, ctx->nonce, auth->chall, ctx->cfg->account.pkey, ctx->kid, req_out, errmsg) != 0)
 		goto error;
 
+	TRACE_DATA("REQ challenge enc", ACME_EV_REQ, ctx, &auth->chall, req_out);
+
 	if (acme_http_req(task, ctx, auth->chall, HTTP_METH_POST, hdrs, ist2(req_out->area, req_out->data)))
 		goto error;
 
@@ -1211,6 +1361,8 @@ enum acme_ret acme_res_challenge(struct task *task, struct acme_ctx *ctx, struct
 
 	hdrs = hc->res.hdrs;
 
+	TRACE_DATA(__FUNCTION__, ACME_EV_RES, ctx, NULL, &hc->res.buf);
+
 	for (hdr = hdrs; isttest(hdr->v); hdr++) {
 		if (isteqi(hdr->n, ist("Replay-Nonce"))) {
 			istfree(&ctx->nonce);
@@ -1284,10 +1436,14 @@ int acme_post_as_get(struct task *task, struct acme_ctx *ctx, struct ist url, ch
 	if ((req_out = alloc_trash_chunk()) == NULL)
 		goto error_alloc;
 
+	TRACE_USER("POST-as-GET ", ACME_EV_REQ, ctx, &url);
+
 	/* empty payload */
 	if (acme_jws_payload(req_in, ctx->nonce, url, ctx->cfg->account.pkey, ctx->kid, req_out, errmsg) != 0)
 		goto error_jws;
 
+	TRACE_DATA("POST-as-GET enc", ACME_EV_REQ, ctx, &url, req_out);
+
 	if (acme_http_req(task, ctx, url, HTTP_METH_POST, hdrs, ist2(req_out->area, req_out->data)))
 		goto error_http;
 
@@ -1342,6 +1498,7 @@ int acme_res_auth(struct task *task, struct acme_ctx *ctx, struct acme_auth *aut
 		}
 
 	}
+	TRACE_DATA(__FUNCTION__, ACME_EV_RES, ctx, NULL, &hc->res.buf);
 
 	if (hc->res.status < 200 || hc->res.status >= 300) {
 		/* XXX: need a generic URN error parser */
@@ -1356,6 +1513,23 @@ int acme_res_auth(struct task *task, struct acme_ctx *ctx, struct acme_auth *aut
 		goto error;
 	}
 
+	/* check and save the DNS entry */
+	ret = mjson_get_string(hc->res.buf.area, hc->res.buf.data, "$.identifier.type", t1->area, t1->size);
+	if (ret == -1) {
+		memprintf(errmsg, "couldn't get a type \"dns\" from Authorization URL \"%s\"", auth->auth.ptr);
+		goto error;
+	}
+	t1->data = ret;
+
+	ret = mjson_get_string(hc->res.buf.area, hc->res.buf.data, "$.identifier.value", t2->area, t2->size);
+	if (ret == -1) {
+		memprintf(errmsg, "couldn't get a type \"dns\" from Authorization URL \"%s\"", auth->auth.ptr);
+		goto error;
+	}
+	t2->data = ret;
+
+	auth->dns = istdup(ist2(t2->area, t2->data));
+
 	/* get the multiple challenges and select the one from the configuration */
 	for (i = 0; ; i++) {
 		int ret;
@@ -1405,6 +1579,35 @@ int acme_res_auth(struct task *task, struct acme_ctx *ctx, struct acme_auth *aut
 		goto error;
 	}
 
+	/* compute a response for the TXT entry */
+	if (strcasecmp(ctx->cfg->challenge, "DNS-01") == 0) {
+		struct sink *dpapi;
+		struct ist line[7];
+
+		if (acme_txt_record(ist(ctx->cfg->account.thumbprint), auth->token, &trash) == 0) {
+			memprintf(errmsg, "couldn't compute the DNS-01 challenge");
+			goto error;
+		}
+
+		send_log(NULL, LOG_NOTICE, "acme: %s: DNS-01 requires to set the \"_acme-challenge.%.*s\" TXT record to \"%.*s\" and use the \"acme challenge_ready\" command over the CLI\n",
+		         ctx->store->path, (int)auth->dns.len, auth->dns.ptr, (int)trash.data, trash.area);
+
+		/* dump to the "dpapi" sink */
+
+		line[0] = ist("acme deploy ");
+		line[1] = ist(ctx->store->path);
+		line[2] = ist(" thumbprint ");
+		line[3] = ist(ctx->cfg->account.thumbprint);
+		line[4] = ist("\n");
+		line[5] = ist2(hc->res.buf.area, hc->res.buf.data); /* dump the HTTP response */
+		line[6] = ist("\n\0");
+
+		dpapi = sink_find("dpapi");
+		if (dpapi)
+			sink_write(dpapi, LOG_HEADER_NONE, 0, line, 7);
+	}
+
 	/* only useful for HTTP-01 */
 	if (acme_add_challenge_map(ctx->cfg->map, auth->token.ptr, ctx->cfg->account.thumbprint, errmsg) != 0) {
 		memprintf(errmsg, "couldn't add the token to the '%s' map: %s", ctx->cfg->map, *errmsg);
 		goto error;
@@ -1455,10 +1658,13 @@ int acme_req_neworder(struct task *task, struct acme_ctx *ctx, char **errmsg)
 
 	chunk_appendf(req_in, " ] }");
 
+	TRACE_DATA("NewOrder Decode", ACME_EV_REQ, ctx, &ctx->resources.newOrder, req_in);
+
 	if (acme_jws_payload(req_in, ctx->nonce, ctx->resources.newOrder, ctx->cfg->account.pkey, ctx->kid, req_out, errmsg) != 0)
 		goto error;
 
+	TRACE_DATA("NewOrder JWS ", ACME_EV_REQ, ctx, &ctx->resources.newOrder, req_out);
 	if (acme_http_req(task, ctx, ctx->resources.newOrder, HTTP_METH_POST, hdrs, ist2(req_out->area, req_out->data)))
 		goto error;
 
@@ -1507,6 +1713,7 @@ int acme_res_neworder(struct task *task, struct acme_ctx *ctx, char **errmsg)
 			ctx->order = istdup(hdr->v);
 		}
 	}
+	TRACE_DATA(__FUNCTION__, ACME_EV_RES, ctx, NULL, &hc->res.buf);
 
 	if (hc->res.status < 200 || hc->res.status >= 300) {
 		if ((ret = mjson_get_string(hc->res.buf.area, hc->res.buf.data, "$.detail", t1->area, t1->size)) > -1)
@@ -1550,6 +1757,11 @@ int acme_res_neworder(struct task *task, struct acme_ctx *ctx, char **errmsg)
 			goto error;
 		}
 
+		/* if the challenge is not DNS-01, consider that the challenge
+		 * is ready because computed by HAProxy */
+		if (strcasecmp(ctx->cfg->challenge, "DNS-01") != 0)
+			auth->ready = 1;
+
 		auth->next = ctx->auths;
 		ctx->auths = auth;
 		ctx->next_auth = auth;
@@ -1610,6 +1822,8 @@ int acme_req_account(struct task *task, struct acme_ctx *ctx, int newaccount, ch
 	else
 		chunk_printf(req_in, "%s", accountreq);
 
+	TRACE_DATA("newAccount Decoded", ACME_EV_REQ, ctx, &ctx->resources.newAccount, req_in);
+
 	if (acme_jws_payload(req_in, ctx->nonce, ctx->resources.newAccount, ctx->cfg->account.pkey, ctx->kid, req_out, errmsg) != 0)
 		goto error;
 
@@ -1659,6 +1873,8 @@ int acme_res_account(struct task *task, struct acme_ctx *ctx, int newaccount, ch
 		}
 	}
 
+	TRACE_DATA(__FUNCTION__, ACME_EV_RES, ctx, NULL, &hc->res.buf);
+
 	if (hc->res.status < 200 || hc->res.status >= 300) {
 		if ((ret = mjson_get_string(hc->res.buf.area, hc->res.buf.data, "$.detail", t1->area, t1->size)) > -1)
 			t1->data = ret;
@@ -1705,6 +1921,8 @@ int acme_nonce(struct task *task, struct acme_ctx *ctx, char **errmsg)
 		goto error;
 	}
 
+	TRACE_DATA(__FUNCTION__, ACME_EV_RES, ctx, NULL, &hc->res.buf);
+
 	hdrs = hc->res.hdrs;
 
 	for (hdr = hdrs; isttest(hdr->v); hdr++) {
@@ -1743,6 +1961,8 @@ int acme_directory(struct task *task, struct acme_ctx *ctx, char **errmsg)
 		goto error;
 	}
 
+	TRACE_DATA(__FUNCTION__, ACME_EV_RES, ctx, NULL, &hc->res.buf);
+
 	if ((ret = mjson_get_string(hc->res.buf.area, hc->res.buf.data, "$.newNonce", trash.area, trash.size)) <= 0) {
 		memprintf(errmsg, "couldn't get newNonce URL from the directory URL");
 		goto error;
@@ -1806,6 +2026,7 @@ struct task *acme_process(struct task *task, void *context, unsigned int state)
 	struct mt_list tmp = MT_LIST_LOCK_FULL(&ctx->el);
 
 re:
+	TRACE_USER("ACME Task Handle", ACME_EV_TASK, ctx, &st);
 
 	switch (st) {
 		case ACME_RESOURCES:
@@ -1899,6 +2120,11 @@ struct task *acme_process(struct task *task, void *context, unsigned int state)
 			break;
 		case ACME_CHALLENGE:
 			if (http_st == ACME_HTTP_REQ) {
+
+				/* if the challenge is not ready, wait to be wakeup */
+				if (!ctx->next_auth->ready)
+					goto wait;
+
 				if (acme_req_challenge(task, ctx, ctx->next_auth, &errmsg) != 0)
 					goto retry;
 			}
@@ -1999,6 +2225,8 @@ struct task *acme_process(struct task *task, void *context, unsigned int state)
 	/* this is called when changing step in the state machine */
 	http_st = ACME_HTTP_REQ;
 	ctx->retries = ACME_RETRY; /* reinit the retries */
+	ctx->http_state = http_st;
+	ctx->state = st;
 
 	if (ctx->retryafter == 0)
 		goto re; /* optimize by not leaving the task for the next httpreq to init */
@@ -2006,8 +2234,6 @@ struct task *acme_process(struct task *task, void *context, unsigned int state)
 	/* if we have a retryafter, wait before next request (usually finalize) */
 	task->expire = tick_add(now_ms, ctx->retryafter * 1000);
 	ctx->retryafter = 0;
-	ctx->http_state = http_st;
-	ctx->state = st;
 
 	MT_LIST_UNLOCK_FULL(&ctx->el, tmp);
 	return task;
@@ -2055,8 +2281,16 @@ struct task *acme_process(struct task *task, void *context, unsigned int state)
 	task = NULL;
 
 	return task;
-}
+
+wait:
+	/* wait for a task_wakeup */
+	ctx->http_state = ACME_HTTP_REQ;
+	ctx->state = st;
+	task->expire = TICK_ETERNITY;
+
+	MT_LIST_UNLOCK_FULL(&ctx->el, tmp);
+	return task;
+}
 /*
  * Return 1 if the certificate must be regenerated
  * Check if the notAfter date will append in (validity period / 12) or 7 days per default
@@ -2133,6 +2367,7 @@ struct task *acme_scheduler(struct task *task, void *context, unsigned int state
 		if (store->conf.acme.id) {
 
 			if (acme_will_expire(store)) {
+				TRACE_USER("ACME Scheduling start", ACME_EV_SCHED);
 				if (acme_start_task(store, &errmsg) != 0) {
 					send_log(NULL, LOG_NOTICE, "acme: %s: %s Aborting.\n", store->path, errmsg ? errmsg : "");
 					ha_free(&errmsg);
@@ -2321,12 +2556,14 @@ static int acme_start_task(struct ckch_store *store, char **errmsg)
 	ctx->store = newstore;
 	ctx->cfg = cfg;
 	task->context = ctx;
+	ctx->task = task;
+
+	MT_LIST_INIT(&ctx->el);
+	MT_LIST_APPEND(&acme_tasks, &ctx->el);
 
 	send_log(NULL, LOG_NOTICE, "acme: %s: Starting update of the certificate.\n", ctx->store->path);
 
+	TRACE_USER("ACME Task start", ACME_EV_NEW, ctx);
 	task_wakeup(task, TASK_WOKEN_INIT);
 
 	return 0;
@@ -2372,6 +2609,55 @@ static int cli_acme_renew_parse(char **args, char *payload, struct appctx *appct
 	return cli_dynerr(appctx, errmsg);
 }
 
+static int cli_acme_chall_ready_parse(char **args, char *payload, struct appctx *appctx, void *private)
+{
+	char *errmsg = NULL;
+	const char *crt;
+	const char *dns;
+	struct mt_list back;
+	struct acme_ctx *ctx;
+	struct acme_auth *auth;
+	int found = 0;
+
+	if (!*args[2] && !*args[3] && !*args[4]) {
+		memprintf(&errmsg, ": not enough parameters\n");
+		goto err;
+	}
+
+	crt = args[2];
+	dns = args[4];
+
+	MT_LIST_FOR_EACH_ENTRY_LOCKED(ctx, &acme_tasks, el, back) {
+
+		if (strcmp(ctx->store->path, crt) != 0)
+			continue;
+
+		auth = ctx->auths;
+		while (auth) {
+			if (strncmp(dns, auth->dns.ptr, auth->dns.len) == 0) {
+				if (!auth->ready) {
+					auth->ready = 1;
+					task_wakeup(ctx->task, TASK_WOKEN_MSG);
+					found = 1;
+				} else {
+					memprintf(&errmsg, "ACME challenge for crt \"%s\" and dns \"%s\" was already READY !\n", crt, dns);
+				}
+				break;
+			}
+			auth = auth->next;
+		}
+	}
+	if (!found) {
+		memprintf(&errmsg, "Couldn't find the ACME task using crt \"%s\" and dns \"%s\" !\n", crt, dns);
+		goto err;
+	}
+
+	return cli_msg(appctx, LOG_INFO, "Challenge Ready!");
+err:
+	return cli_dynerr(appctx, errmsg);
+}
+
 static int cli_acme_status_io_handler(struct appctx *appctx)
 {
 	struct ebmb_node *node = NULL;
@@ -2454,6 +2740,7 @@ static int cli_acme_ps(char **args, char *payload, struct appctx *appctx, void *
 static struct cli_kw_list cli_kws = {{ },{
 	{ { "acme", "renew", NULL }, "acme renew <certfile>                          : renew a certificate using the ACME protocol", cli_acme_renew_parse, NULL, NULL, NULL, 0 },
 	{ { "acme", "status", NULL }, "acme status                                    : show status of certificates configured with ACME", cli_acme_ps, cli_acme_status_io_handler, NULL, NULL, 0 },
+	{ { "acme", "challenge_ready", NULL }, "acme challenge_ready <certfile> domain <domain> : show status of certificates configured with ACME", cli_acme_chall_ready_parse, NULL, NULL, NULL, 0 },
 	{ { NULL }, NULL, NULL, NULL }
 }};

src/backend.c

@@ -1425,7 +1425,7 @@ struct connection *conn_backend_get(int reuse_mode,
 		if (reuse_mode == PR_O_REUSE_SAFE && conn->mux->flags & MX_FL_HOL_RISK) {
 			/* attach the connection to the session private list */
 			conn->owner = sess;
-			session_add_conn(sess, conn, conn->target);
+			session_add_conn(sess, conn);
 		}
 		else {
 			srv_add_to_avail_list(srv, conn);
@@ -2159,7 +2159,7 @@ int connect_server(struct stream *s)
 		    (reuse_mode == PR_O_REUSE_SAFE &&
 		     srv_conn->mux->flags & MX_FL_HOL_RISK)) {
 			/* If it fail now, the same will be done in mux->detach() callback */
-			session_add_conn(s->sess, srv_conn, srv_conn->target);
+			session_add_conn(s->sess, srv_conn);
 		}
 	}
42
src/clock.c
42
src/clock.c
@ -29,8 +29,10 @@
|
||||
struct timeval start_date; /* the process's start date in wall-clock time */
|
||||
struct timeval ready_date; /* date when the process was considered ready */
|
||||
ullong start_time_ns; /* the process's start date in internal monotonic time (ns) */
|
||||
volatile ullong global_now_ns; /* common monotonic date between all threads, in ns (wraps every 585 yr) */
|
||||
volatile uint global_now_ms; /* common monotonic date in milliseconds (may wrap) */
|
||||
volatile ullong _global_now_ns; /* locally stored common monotonic date between all threads, in ns (wraps every 585 yr) */
|
||||
volatile ullong *global_now_ns; /* common monotonic date, may point to _global_now_ns or shared memory */
|
||||
volatile uint _global_now_ms; /* locally stored common monotonic date in milliseconds (may wrap) */
|
||||
volatile uint *global_now_ms; /* common monotonic date in milliseconds (may wrap), may point to _global_now_ms or shared memory */
|
||||
|
||||
/* when CLOCK_MONOTONIC is supported, the offset is applied from th_ctx->prev_mono_time instead */
|
||||
THREAD_ALIGNED(64) static llong now_offset; /* global offset between system time and global time in ns */
|
||||
@ -238,7 +240,7 @@ void clock_update_local_date(int max_wait, int interrupted)
|
||||
now_ns += ms_to_ns(max_wait);
|
||||
|
||||
/* consider the most recent known date */
|
||||
now_ns = MAX(now_ns, HA_ATOMIC_LOAD(&global_now_ns));
|
||||
now_ns = MAX(now_ns, HA_ATOMIC_LOAD(global_now_ns));
|
||||
|
||||
/* this event is rare, but it requires proper handling because if
|
||||
* we just left now_ns where it was, the date will not be updated
|
||||
@ -269,8 +271,8 @@ void clock_update_global_date()
|
||||
* realistic regarding the global date, which only moves forward,
|
||||
* otherwise catch up.
|
||||
*/
|
||||
old_now_ns = _HA_ATOMIC_LOAD(&global_now_ns);
|
||||
old_now_ms = _HA_ATOMIC_LOAD(&global_now_ms);
|
||||
old_now_ns = _HA_ATOMIC_LOAD(global_now_ns);
|
||||
old_now_ms = _HA_ATOMIC_LOAD(global_now_ms);
|
||||
|
||||
do {
|
||||
if (now_ns < old_now_ns)
|
||||
@ -299,8 +301,8 @@ void clock_update_global_date()
|
||||
/* let's try to update the global_now_ns (both in nanoseconds
|
||||
* and ms forms) or loop again.
|
||||
*/
|
||||
} while ((!_HA_ATOMIC_CAS(&global_now_ns, &old_now_ns, now_ns) ||
|
||||
(now_ms != old_now_ms && !_HA_ATOMIC_CAS(&global_now_ms, &old_now_ms, now_ms))) &&
|
||||
} while ((!_HA_ATOMIC_CAS(global_now_ns, &old_now_ns, now_ns) ||
|
||||
(now_ms != old_now_ms && !_HA_ATOMIC_CAS(global_now_ms, &old_now_ms, now_ms))) &&
|
||||
__ha_cpu_relax());
|
||||
|
||||
if (!th_ctx->curr_mono_time) {
|
||||
@ -322,11 +324,12 @@ void clock_init_process_date(void)
|
||||
th_ctx->prev_mono_time = th_ctx->curr_mono_time = before_poll_mono_ns;
|
||||
gettimeofday(&date, NULL);
|
||||
after_poll = before_poll = date;
|
||||
global_now_ns = th_ctx->curr_mono_time;
|
||||
if (!global_now_ns) // CLOCK_MONOTONIC not supported
|
||||
global_now_ns = tv_to_ns(&date);
|
||||
now_ns = global_now_ns;
|
||||
global_now_ms = ns_to_ms(now_ns);
|
||||
_global_now_ns = th_ctx->curr_mono_time;
|
||||
if (!_global_now_ns) // CLOCK_MONOTONIC not supported
|
||||
_global_now_ns = tv_to_ns(&date);
|
||||
now_ns = _global_now_ns;
|
||||
|
||||
_global_now_ms = ns_to_ms(now_ns);
|
||||
|
||||
/* force time to wrap 20s after boot: we first compute the time offset
|
||||
* that once applied to the wall-clock date will make the local time
|
||||
@ -334,14 +337,19 @@ void clock_init_process_date(void)
|
||||
* and will be used to recompute the local time, both of which will
|
||||
* match and continue from this shifted date.
|
||||
*/
|
||||
now_offset = sec_to_ns((uint)((uint)(-global_now_ms) / 1000U - BOOT_TIME_WRAP_SEC));
|
||||
global_now_ns += now_offset;
|
||||
now_ns = global_now_ns;
|
||||
now_offset = sec_to_ns((uint)((uint)(-_global_now_ms) / 1000U - BOOT_TIME_WRAP_SEC));
|
||||
_global_now_ns += now_offset;
|
||||
now_ns = _global_now_ns;
|
||||
now_ms = ns_to_ms(now_ns);
|
||||
/* correct for TICK_ETNERITY (0) */
|
||||
if (now_ms == TICK_ETERNITY)
|
||||
now_ms++;
|
||||
global_now_ms = now_ms;
|
||||
_global_now_ms = now_ms;
|
||||
|
||||
/* for now global_now_ms points to the process-local _global_now_ms */
|
||||
global_now_ms = &_global_now_ms;
|
||||
/* same goes for global_ns_ns */
|
||||
global_now_ns = &_global_now_ns;
|
||||
|
||||
th_ctx->idle_pct = 100;
|
||||
clock_update_date(0, 1);
|
||||
@ -364,7 +372,7 @@ void clock_init_thread_date(void)
|
||||
gettimeofday(&date, NULL);
|
||||
after_poll = before_poll = date;
|
||||
|
||||
now_ns = _HA_ATOMIC_LOAD(&global_now_ns);
|
||||
now_ns = _HA_ATOMIC_LOAD(global_now_ns);
|
||||
th_ctx->idle_pct = 100;
|
||||
th_ctx->prev_cpu_time = now_cpu_time();
|
||||
th_ctx->prev_mono_time = now_mono_time();
|
||||
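
Note: the clock.c changes above all follow one pattern: global_now_ns and global_now_ms become pointers that start out aimed at process-local storage (_global_now_ns/_global_now_ms) so that they can later be re-pointed at a shared-memory area without touching any reader; accordingly every HA_ATOMIC_LOAD()/CAS now takes the pointer itself instead of &variable. A small self-contained illustration of the indirection (the shared-memory part is only hinted at in a comment, it is not part of these hunks):

	#include <stdint.h>
	#include <stdio.h>

	static volatile uint32_t _global_now_ms;                    /* local storage */
	static volatile uint32_t *global_now_ms = &_global_now_ms;  /* what readers use */

	static uint32_t now_ms_read(void)
	{
		return *global_now_ms;   /* HAProxy wraps this in HA_ATOMIC_LOAD() */
	}

	int main(void)
	{
		_global_now_ms = 12345;
		printf("%u\n", (unsigned)now_ms_read());   /* prints 12345 */
		/* later, e.g. once a shared mapping exists:
		 *     global_now_ms = &shared_area->now_ms;
		 * readers are unchanged */
		return 0;
	}
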
@@ -117,7 +117,7 @@ int conn_create_mux(struct connection *conn, int *closed_connection)
 	}
 	else if (conn->flags & CO_FL_PRIVATE) {
 		/* If it fail now, the same will be done in mux->detach() callback */
-		session_add_conn(sess, conn, conn->target);
+		session_add_conn(sess, conn);
 	}
 	return 0;
  fail:
@@ -52,12 +52,12 @@ void counters_be_shared_drop(struct be_counters_shared *counters)
 	_counters_shared_drop(counters);
 }
 
-/* retrieved shared counters pointer for a given <guid> object
+/* prepare shared counters pointer for a given <guid> object
  * <size> hint is expected to reflect the actual tg member size (fe/be)
  * if <guid> is not set, then sharing is disabled
  * Returns the pointer on success or NULL on failure
 */
-static int _counters_shared_init(struct counters_shared *shared, const struct guid_node *guid, size_t size)
+static int _counters_shared_prepare(struct counters_shared *shared, const struct guid_node *guid, size_t size)
 {
 	int it = 0;
 
@@ -85,11 +85,11 @@ static int _counters_shared_init(struct counters_shared *shared, const struct guid_node *guid, size_t size)
 /* prepare shared fe counters pointer for a given <guid> object */
 int counters_fe_shared_prepare(struct fe_counters_shared *shared, const struct guid_node *guid)
 {
-	return _counters_shared_init((struct counters_shared *)shared, guid, sizeof(struct fe_counters_shared_tg));
+	return _counters_shared_prepare((struct counters_shared *)shared, guid, sizeof(struct fe_counters_shared_tg));
 }
 
 /* prepare shared be counters pointer for a given <guid> object */
-int counters_be_shared_init(struct be_counters_shared *shared, const struct guid_node *guid)
+int counters_be_shared_prepare(struct be_counters_shared *shared, const struct guid_node *guid)
 {
-	return _counters_shared_init((struct counters_shared *)shared, guid, sizeof(struct be_counters_shared_tg));
+	return _counters_shared_prepare((struct counters_shared *)shared, guid, sizeof(struct be_counters_shared_tg));
 }
@@ -290,7 +290,7 @@ static int fcgi_flt_start(struct stream *s, struct filter *filter)
 
 static void fcgi_flt_stop(struct stream *s, struct filter *filter)
 {
-	struct flt_fcgi_ctx *fcgi_ctx = filter->ctx;
+	struct fcgi_flt_ctx *fcgi_ctx = filter->ctx;
 
 	if (!fcgi_ctx)
 		return;
@@ -33,7 +33,7 @@ uint update_freq_ctr_period_slow(struct freq_ctr *ctr, uint period, uint inc)
 	 */
 	for (;; __ha_cpu_relax()) {
 		curr_tick = HA_ATOMIC_LOAD(&ctr->curr_tick);
-		now_ms_tmp = HA_ATOMIC_LOAD(&global_now_ms);
+		now_ms_tmp = HA_ATOMIC_LOAD(global_now_ms);
 
 		if (now_ms_tmp - curr_tick < period)
 			return HA_ATOMIC_ADD_FETCH(&ctr->curr_ctr, inc);
@@ -81,7 +81,7 @@ ullong _freq_ctr_total_from_values(uint period, int pend,
 {
 	int remain;
 
-	remain = tick + period - HA_ATOMIC_LOAD(&global_now_ms);
+	remain = tick + period - HA_ATOMIC_LOAD(global_now_ms);
 	if (unlikely(remain < 0)) {
 		/* We're past the first period, check if we can still report a
 		 * part of last period or if we're too far away.
@@ -239,7 +239,7 @@ int freq_ctr_overshoot_period(const struct freq_ctr *ctr, uint period, uint freq)
 		return 0;
 	}
 
-	elapsed = HA_ATOMIC_LOAD(&global_now_ms) - tick;
+	elapsed = HA_ATOMIC_LOAD(global_now_ms) - tick;
 	if (unlikely(elapsed < 0 || elapsed > period)) {
 		/* The counter is in the future or the elapsed time is higher than the period, there is no overshoot */
 		return 0;
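
Note: dropping the & in the loads above is purely the pointer change from clock.c; the surrounding arithmetic is unchanged and relies on unsigned 32-bit wrap-around, which the freq counters depend on. A worked example of the `tick + period - now` idiom staying valid across the millisecond clock wrap (~49.7 days):

	#include <stdint.h>
	#include <stdio.h>

	int main(void)
	{
		uint32_t tick   = 0xFFFFFF00u;  /* period started just before the wrap */
		uint32_t period = 1000;         /* 1s */
		uint32_t now    = 0x00000100u;  /* clock wrapped since: 512ms elapsed */

		int32_t remain = (int32_t)(tick + period - now);
		printf("remain=%d\n", (int)remain);  /* prints remain=488 */
		return 0;
	}
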
19 src/hlua.c

@@ -13363,7 +13363,24 @@ static int hlua_load_per_thread(char **args, int section_type, struct proxy *curpx,
 		return -1;
 	}
 	for (i = 1; *(args[i]) != 0; i++) {
-		per_thread_load[len][i - 1] = strdup(args[i]);
+		/* first arg is filename */
+		if (i == 1 && args[1][0] != '/') {
+			char *curpath;
+			char *fullpath = NULL;
+
+			/* filename is provided using relative path, store the absolute path
+			 * to take current chdir into account for other threads file load
+			 * which occur later
+			 */
+			curpath = getcwd(trash.area, trash.size);
+			if (!curpath) {
+				memprintf(err, "failed to retrieve cur path");
+				return -1;
+			}
+			per_thread_load[len][i - 1] = memprintf(&fullpath, "%s/%s", curpath, args[1]);
+		}
+		else
+			per_thread_load[len][i - 1] = strdup(args[i]);
 		if (per_thread_load[len][i - 1] == NULL) {
 			memprintf(err, "out of memory error");
 			return -1;
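
Note: the fix above matters because lua-load-per-thread arguments are replayed later by the other threads, possibly after the process has changed directory (default-path); a relative filename must therefore be frozen into an absolute one at parse time. A portable sketch of that resolution step using plain POSIX calls instead of HAProxy's trash chunk and memprintf() (the helper name is hypothetical):

	#include <limits.h>
	#include <stdio.h>
	#include <stdlib.h>
	#include <string.h>
	#include <unistd.h>

	static char *resolve_lua_path(const char *arg)
	{
		char cwd[PATH_MAX];
		char *full;
		size_t n;

		if (arg[0] == '/')
			return strdup(arg);    /* already absolute: keep as-is */
		if (!getcwd(cwd, sizeof(cwd)))
			return NULL;           /* mirrors the "failed to retrieve cur path" error */
		n = strlen(cwd) + 1 + strlen(arg) + 1;
		full = malloc(n);
		if (full)
			snprintf(full, n, "%s/%s", cwd, arg);
		return full;
	}

	int main(void)
	{
		char *p = resolve_lua_path("scripts/init.lua");
		printf("%s\n", p ? p : "(error)");
		free(p);
		return 0;
	}
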
@@ -1913,6 +1913,21 @@ int hlua_listable_servers_pairs_iterator(lua_State *L)
 	return 2;
 }
 
+/* ensure proper cleanup for listable_servers_pairs */
+int hlua_listable_servers_pairs_gc(lua_State *L)
+{
+	struct hlua_server_list_iterator_context *ctx;
+
+	ctx = lua_touserdata(L, 1);
+
+	/* we need to make sure that the watcher leaves in detached state even
+	 * if the iterator was interrupted (ie: "break" from the loop), else
+	 * the server watcher list will become corrupted
+	 */
+	watcher_detach(&ctx->srv_watch);
+	return 0;
+}
+
 /* init the iterator context, return iterator function
  * with context as closure. The only argument is a
  * server list object.
@@ -1925,6 +1940,12 @@ int hlua_listable_servers_pairs(lua_State *L)
 	hlua_srv_list = hlua_check_server_list(L, 1);
 
 	ctx = lua_newuserdata(L, sizeof(*ctx));
+
+	/* add gc metamethod to the newly created userdata */
+	lua_newtable(L);
+	hlua_class_function(L, "__gc", hlua_listable_servers_pairs_gc);
+	lua_setmetatable(L, -2);
+
 	ctx->px = hlua_srv_list->px;
 	ctx->next = NULL;
 	watcher_init(&ctx->srv_watch, &ctx->next, offsetof(struct server, watcher_list));
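
Note: the fix above uses Lua's __gc metamethod as a safety net: the iterator detaches the watcher when it walks off the end of the list, but a "break" inside the loop abandons the userdata, and only the garbage collector will see it again. The generic shape of the pattern with the stock Lua C API (HAProxy goes through its hlua_class_function() wrapper instead; the context struct below is an illustrative stand-in):

	#include <lua.h>
	#include <lauxlib.h>

	struct iter_ctx { int attached; /* stand-in for the server watcher */ };

	static int iter_gc(lua_State *L)
	{
		struct iter_ctx *ctx = lua_touserdata(L, 1);
		ctx->attached = 0;   /* real code: watcher_detach(&ctx->srv_watch) */
		return 0;
	}

	/* create the iterator userdata and make iter_gc its finalizer */
	static void push_iter_ctx(lua_State *L)
	{
		struct iter_ctx *ctx = lua_newuserdata(L, sizeof(*ctx));

		ctx->attached = 1;
		lua_newtable(L);                  /* fresh metatable */
		lua_pushcfunction(L, iter_gc);
		lua_setfield(L, -2, "__gc");      /* mt.__gc = iter_gc */
		lua_setmetatable(L, -2);          /* now collected via iter_gc */
	}
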
@@ -1641,7 +1641,7 @@ int http_wait_for_response(struct stream *s, struct channel *rep, int an_bit)
 			conn_set_owner(srv_conn, sess, NULL);
 			conn_set_private(srv_conn);
 			/* If it fail now, the same will be done in mux->detach() callback */
-			session_add_conn(srv_conn->owner, srv_conn, srv_conn->target);
+			session_add_conn(srv_conn->owner, srv_conn);
 			break;
 		}
 	}
@@ -3723,22 +3723,25 @@ static void fcgi_detach(struct sedesc *sd)
 	    (fconn->flags & FCGI_CF_KEEP_CONN)) {
 		if (fconn->conn->flags & CO_FL_PRIVATE) {
 			/* Add the connection in the session serverlist, if not already done */
-			if (!session_add_conn(sess, fconn->conn, fconn->conn->target)) {
+			if (!session_add_conn(sess, fconn->conn))
 				fconn->conn->owner = NULL;
-				if (eb_is_empty(&fconn->streams_by_id)) {
-					/* let's kill the connection right away */
+
+			if (eb_is_empty(&fconn->streams_by_id)) {
+				if (!fconn->conn->owner) {
+					/* Session insertion above has failed and connection is idle, remove it. */
 					fconn->conn->mux->destroy(fconn);
 					TRACE_DEVEL("outgoing connection killed", FCGI_EV_STRM_END|FCGI_EV_FCONN_ERR);
 					return;
 				}
-			}
-			if (eb_is_empty(&fconn->streams_by_id)) {
+
 				/* mark that the tasklet may lose its context to another thread and
 				 * that the handler needs to check it under the idle conns lock.
 				 */
 				HA_ATOMIC_OR(&fconn->wait_event.tasklet->state, TASK_F_USR1);
-				if (session_check_idle_conn(fconn->conn->owner, fconn->conn) != 0) {
-					/* The connection is destroyed, let's leave */
+
+				/* Ensure session can keep a new idle connection. */
+				if (session_check_idle_conn(sess, fconn->conn) != 0) {
 					fconn->conn->mux->destroy(fconn);
 					TRACE_DEVEL("outgoing connection killed", FCGI_EV_STRM_END|FCGI_EV_FCONN_ERR);
 					return;
 				}
12 src/mux_h1.c

@@ -1138,20 +1138,24 @@ static int h1s_finish_detach(struct h1s *h1s)
 
 		if (h1c->conn->flags & CO_FL_PRIVATE) {
 			/* Add the connection in the session server list, if not already done */
-			if (!session_add_conn(sess, h1c->conn, h1c->conn->target)) {
+			if (!session_add_conn(sess, h1c->conn)) {
+				/* HTTP/1.1 conn is always idle after detach, can be removed if session insert failed. */
 				h1c->conn->owner = NULL;
 				h1c->conn->mux->destroy(h1c);
 				goto released;
 			}
-			/* Always idle at this step */
+
+			/* HTTP/1.1 conn is always idle after detach. */
+
 			/* mark that the tasklet may lose its context to another thread and
 			 * that the handler needs to check it under the idle conns lock.
 			 */
 			HA_ATOMIC_OR(&h1c->wait_event.tasklet->state, TASK_F_USR1);
+
+			/* Ensure session can keep a new idle connection. */
 			if (session_check_idle_conn(sess, h1c->conn)) {
 				/* The connection got destroyed, let's leave */
-				TRACE_DEVEL("outgoing connection killed", H1_EV_STRM_END|H1_EV_H1C_END);
+				TRACE_DEVEL("outgoing connection rejected", H1_EV_STRM_END|H1_EV_H1C_END, h1c->conn);
 				h1c->conn->mux->destroy(h1c);
 				goto released;
 			}
16 src/mux_h2.c

@@ -5533,21 +5533,25 @@ static void h2_detach(struct sedesc *sd)
 
 	if (h2c->conn->flags & CO_FL_PRIVATE) {
 		/* Add the connection in the session server list, if not already done */
-		if (!session_add_conn(sess, h2c->conn, h2c->conn->target)) {
+		if (!session_add_conn(sess, h2c->conn))
 			h2c->conn->owner = NULL;
-			if (eb_is_empty(&h2c->streams_by_id)) {
+
+		if (eb_is_empty(&h2c->streams_by_id)) {
+			if (!h2c->conn->owner) {
+				/* Session insertion above has failed and connection is idle, remove it. */
 				h2c->conn->mux->destroy(h2c);
 				TRACE_DEVEL("leaving on error after killing outgoing connection", H2_EV_STRM_END|H2_EV_H2C_ERR);
 				return;
 			}
-		}
-		if (eb_is_empty(&h2c->streams_by_id)) {
+
 			/* mark that the tasklet may lose its context to another thread and
 			 * that the handler needs to check it under the idle conns lock.
 			 */
 			HA_ATOMIC_OR(&h2c->wait_event.tasklet->state, TASK_F_USR1);
-			if (session_check_idle_conn(h2c->conn->owner, h2c->conn) != 0) {
-				/* At this point either the connection is destroyed, or it's been added to the server idle list, just stop */
+
+			/* Ensure session can keep a new idle connection. */
+			if (session_check_idle_conn(sess, h2c->conn) != 0) {
 				h2c->conn->mux->destroy(h2c);
 				TRACE_DEVEL("leaving without reusable idle connection", H2_EV_STRM_END);
 				return;
 			}
@@ -1857,6 +1857,14 @@ int qcc_recv(struct qcc *qcc, uint64_t id, uint64_t len, uint64_t offset,
 		offset = qcs->rx.offset;
 	}
 
+	if (len && (qcc->flags & QC_CF_WAIT_HS)) {
+		if (!(qcc->conn->flags & CO_FL_EARLY_DATA)) {
+			/* Ensure 'Early-data: 1' will be set on the request. */
+			TRACE_PROTO("received early data", QMUX_EV_QCC_RECV|QMUX_EV_QCS_RECV, qcc->conn, qcs);
+			qcc->conn->flags |= CO_FL_EARLY_DATA;
+		}
+	}
+
 	left = len;
 	while (left) {
 		struct qc_stream_rxbuf *buf;
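
Note: the hunk above implements the trigger side of RFC 8470 handling: when STREAM data arrives while the QUIC handshake is still pending (QC_CF_WAIT_HS), the connection is flagged so that the HTTP layer will forward an "Early-Data: 1" header, letting the origin answer 425 (Too Early) if replays matter to it. Illustration only, not HAProxy's actual header-emission code (the flag value is invented for the demo):

	#include <stdio.h>

	#define CO_FL_EARLY_DATA 0x1   /* illustrative value */

	static void forward_request(unsigned conn_flags)
	{
		if (conn_flags & CO_FL_EARLY_DATA)
			puts("Early-Data: 1");   /* origin may answer 425 Too Early */
	}

	int main(void)
	{
		forward_request(CO_FL_EARLY_DATA);
		return 0;
	}
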
@@ -3784,26 +3792,25 @@ static void qmux_strm_detach(struct sedesc *sd)
 		if (conn->flags & CO_FL_PRIVATE) {
 			TRACE_DEVEL("handle private connection reuse", QMUX_EV_STRM_END, conn);
 
-			/* Add connection into session. If an error occured,
-			 * conn will be closed if idle, or insert will be
-			 * retried on next detach.
+			/* Ensure conn is attached into session. Most of the times
+			 * this is already done during connect so this is a no-op.
 			 */
-			if (!session_add_conn(sess, conn, conn->target)) {
+			if (!session_add_conn(sess, conn)) {
 				TRACE_ERROR("error during connection insert into session list", QMUX_EV_STRM_END, conn);
 				conn->owner = NULL;
-				if (!qcc->nb_sc) {
-					qcc_shutdown(qcc);
-					goto end;
-				}
 			}
 
-			/* If conn is idle, check if session can keep it. Conn is freed if this is not the case.
-			 * TODO graceful shutdown should be preferable instead of plain mux->destroy().
-			 */
-			if (!qcc->nb_sc && session_check_idle_conn(sess, conn)) {
-				TRACE_DEVEL("idle conn rejected by session", QMUX_EV_STRM_END);
-				conn = NULL;
-				goto end;
+			if (!qcc->nb_sc) {
+				if (!conn->owner) {
+					/* Session insertion above has failed and connection is idle, remove it. */
+					goto release;
+				}
+
+				/* Ensure session can keep a new idle connection. */
+				if (session_check_idle_conn(sess, conn)) {
+					TRACE_DEVEL("idle conn rejected by session", QMUX_EV_STRM_END, conn);
+					goto release;
+				}
 			}
 		}
 		else {
@@ -3812,8 +3819,9 @@ static void qmux_strm_detach(struct sedesc *sd)
 			if (!srv_add_to_idle_list(objt_server(conn->target), conn, 1)) {
 				/* Idle conn insert failure, gracefully close the connection. */
 				TRACE_DEVEL("idle connection cannot be kept on the server", QMUX_EV_STRM_END, conn);
-				qcc_shutdown(qcc);
+				goto release;
 			}
+
 			goto end;
 		}
 		else if (!conn->hash_node->node.node.leaf_p &&
@@ -2977,21 +2977,25 @@ static void spop_detach(struct sedesc *sd)
 	if (!(spop_conn->flags & (SPOP_CF_RCVD_SHUT|SPOP_CF_ERR_PENDING|SPOP_CF_ERROR))) {
 		if (spop_conn->conn->flags & CO_FL_PRIVATE) {
 			/* Add the connection in the session server list, if not already done */
-			if (!session_add_conn(sess, spop_conn->conn, spop_conn->conn->target)) {
+			if (!session_add_conn(sess, spop_conn->conn))
 				spop_conn->conn->owner = NULL;
-				if (eb_is_empty(&spop_conn->streams_by_id)) {
+
+			if (eb_is_empty(&spop_conn->streams_by_id)) {
+				if (!spop_conn->conn->owner) {
+					/* Session insertion above has failed and connection is idle, remove it. */
 					spop_conn->conn->mux->destroy(spop_conn);
 					TRACE_DEVEL("leaving on error after killing outgoing connection", SPOP_EV_STRM_END|SPOP_EV_SPOP_CONN_ERR);
 					return;
 				}
-			}
-			if (eb_is_empty(&spop_conn->streams_by_id)) {
+
				/* mark that the tasklet may lose its context to another thread and
				 * that the handler needs to check it under the idle conns lock.
				 */
				HA_ATOMIC_OR(&spop_conn->wait_event.tasklet->state, TASK_F_USR1);
-				if (session_check_idle_conn(spop_conn->conn->owner, spop_conn->conn) != 0) {
-					/* At this point either the connection is destroyed, or it's been added to the server idle list, just stop */
+
+				/* Ensure session can keep a new idle connection. */
+				if (session_check_idle_conn(sess, spop_conn->conn) != 0) {
 					spop_conn->conn->mux->destroy(spop_conn);
 					TRACE_DEVEL("leaving without reusable idle connection", SPOP_EV_STRM_END);
 					return;
 				}
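
Note: the fcgi, h1, h2 and spop hunks above all converge on the same control flow for detaching the last stream from a private backend connection. A compilable summary with stub types and helpers (every name below is illustrative; only the ordering of the steps reflects the patches):

	struct sk_conn { void *owner; int idle; };

	static int  sk_session_add_conn(void *sess, struct sk_conn *conn) { conn->owner = sess; return 1; }
	static int  sk_session_check_idle_conn(void *sess, struct sk_conn *conn) { (void)sess; (void)conn; return 0; }
	static void sk_destroy(struct sk_conn *conn) { (void)conn; }

	static void private_detach_sketch(void *sess, struct sk_conn *conn)
	{
		/* 1. ensure the conn is attached to the session; on failure only
		 *    clear the owner, the consequence is handled below if idle */
		if (!sk_session_add_conn(sess, conn))
			conn->owner = NULL;

		if (conn->idle) {
			/* 2. insertion failed and no stream left: kill it now */
			if (!conn->owner) {
				sk_destroy(conn);
				return;
			}

			/* 3. idle and attached: the session decides whether it can
			 *    keep one more idle conn (per-session limit) */
			if (sk_session_check_idle_conn(sess, conn) != 0)
				sk_destroy(conn);
		}
	}
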
94 src/pool.c

@@ -290,27 +290,67 @@ static int mem_should_fail(const struct pool_head *pool)
  * is available for a new creation. Two flags are supported :
  * - MEM_F_SHARED to indicate that the pool may be shared with other users
  * - MEM_F_EXACT to indicate that the size must not be rounded up
+ * The name must be a stable pointer during all the program's life time.
+ * The file and line are passed to store the registration location in the
+ * registration struct. Use create_pool() instead which does it for free.
+ * The alignment will be stored as-is in the registration.
  */
-struct pool_head *create_pool(char *name, unsigned int size, unsigned int flags)
+struct pool_head *create_pool_with_loc(const char *name, unsigned int size,
+                                       unsigned int align, unsigned int flags,
+                                       const char *file, unsigned int line)
+{
+	struct pool_registration *reg;
+	struct pool_head *pool;
+
+	reg = calloc(1, sizeof(*reg));
+	if (!reg)
+		return NULL;
+
+	reg->name = name;
+	reg->file = file;
+	reg->line = line;
+	reg->size = size;
+	reg->flags = flags;
+	reg->align = align;
+
+	pool = create_pool_from_reg(name, reg);
+	if (!pool)
+		free(reg);
+	return pool;
+}
+
+/* create a pool from a pool registration. All configuration is taken from
+ * there. The alignment will automatically be raised to sizeof(void*) or the
+ * next power of two so that it's always possible to lazily pass alignof() or
+ * sizeof(). Alignments are always respected when merging pools.
+ */
+struct pool_head *create_pool_from_reg(const char *name, struct pool_registration *reg)
 {
 	unsigned int extra_mark, extra_caller, extra;
-	struct pool_registration *reg;
-	struct pool_head *pool;
+	unsigned int flags = reg->flags;
+	unsigned int size = reg->size;
+	unsigned int alignment = reg->align;
+	struct pool_head *pool = NULL;
 	struct pool_head *entry;
 	struct list *start;
 	unsigned int align;
 	unsigned int best_diff;
 	int thr __maybe_unused;
 
-	pool = NULL;
-	reg = calloc(1, sizeof(*reg));
-	if (!reg)
-		goto fail;
-
-	strlcpy2(reg->name, name, sizeof(reg->name));
-	reg->size = size;
-	reg->flags = flags;
-	reg->align = 0;
+	/* extend alignment if needed */
+	if (alignment < sizeof(void*))
+		alignment = sizeof(void*);
+	else if (alignment & (alignment - 1)) {
+		/* not power of two! round up to next power of two by filling
+		 * all LSB in O(log(log(N))) then increment the result.
+		 */
+		int shift = 1;
+		do {
+			alignment |= alignment >> shift;
+			shift *= 2;
+		} while (alignment & (alignment + 1));
+		alignment++;
+	}
 
 	extra_mark = (pool_debugging & POOL_DBG_TAG) ? POOL_EXTRA_MARK : 0;
 	extra_caller = (pool_debugging & POOL_DBG_CALLER) ? POOL_EXTRA_CALLER : 0;
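
Note: the alignment round-up in create_pool_from_reg() above is the classic bit-smearing trick: ORing the value with progressively larger right-shifts of itself fills every bit below the top set bit, after which +1 yields the next power of two. Extracted and runnable:

	#include <stdio.h>

	static unsigned next_pow2(unsigned v)
	{
		int shift = 1;

		if (v & (v - 1)) {              /* not already a power of two */
			do {
				v |= v >> shift;
				shift *= 2;
			} while (v & (v + 1));  /* stop once all low bits are set */
			v++;
		}
		return v;
	}

	int main(void)
	{
		printf("%u %u %u\n", next_pow2(24), next_pow2(64), next_pow2(100));
		/* prints: 32 64 128 */
		return 0;
	}
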
@@ -407,6 +447,7 @@ struct pool_head *create_pool(char *name, unsigned int size, unsigned int flags)
 		strlcpy2(pool->name, name, sizeof(pool->name));
 		pool->alloc_sz = size + extra;
 		pool->size = size;
+		pool->align = alignment;
 		pool->flags = flags;
 		LIST_APPEND(start, &pool->list);
 		LIST_INIT(&pool->regs);
@@ -426,6 +467,8 @@ struct pool_head *create_pool(char *name, unsigned int size, unsigned int flags)
 			pool->size = size;
 			pool->alloc_sz = size + extra;
 		}
+		if (alignment > pool->align)
+			pool->align = alignment;
 		DPRINTF(stderr, "Sharing %s with %s\n", name, pool->name);
 	}
 
@@ -433,10 +476,8 @@ struct pool_head *create_pool(char *name, unsigned int size, unsigned int flags)
 	pool->users++;
 	pool->sum_size += size;
 
-	return pool;
- fail:
-	free(reg);
-	return NULL;
+	return pool;
 }
 
 /* Tries to allocate an object for the pool <pool> using the system's allocator
@@ -449,9 +490,9 @@ void *pool_get_from_os_noinc(struct pool_head *pool)
 	void *ptr;
 
 	if ((pool_debugging & POOL_DBG_UAF) || (pool->flags & MEM_F_UAF))
-		ptr = pool_alloc_area_uaf(pool->alloc_sz);
+		ptr = pool_alloc_area_uaf(pool->alloc_sz, pool->align);
 	else
-		ptr = pool_alloc_area(pool->alloc_sz);
+		ptr = pool_alloc_area(pool->alloc_sz, pool->align);
 	if (ptr)
 		return ptr;
 	_HA_ATOMIC_INC(&pool->buckets[pool_tbucket()].failed);
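
Note: pool_alloc_area()/pool_alloc_area_uaf() now receive the pool's alignment; the changelog introduces ha_aligned_alloc() for this purpose. HAProxy's implementation is its own, but the contract matches standard posix_memalign(): the alignment must be a power of two and a multiple of sizeof(void *), which create_pool_from_reg() guarantees by construction. A stand-in sketch, not the real allocator:

	#include <stdlib.h>

	static void *aligned_area_alloc(size_t size, size_t align)
	{
		void *ptr;

		if (align < sizeof(void *))
			align = sizeof(void *);            /* posix_memalign minimum */
		if (posix_memalign(&ptr, align, size) != 0)
			return NULL;
		return ptr;                                /* release with free() */
	}
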
@@ -1037,7 +1078,8 @@ void *pool_destroy(struct pool_head *pool)
 
 		list_for_each_entry_safe(reg, back, &pool->regs, list) {
 			LIST_DELETE(&reg->list);
-			free(reg);
+			if (!(reg->flags & MEM_F_STATREG))
+				free(reg);
 		}
 
 		LIST_DELETE(&pool->list);
@@ -1291,10 +1333,10 @@ void dump_pools_to_trash(int how, int max, const char *pfx)
 		chunk_appendf(&trash, ". Use SIGQUIT to flush them.\n");
 
 	for (i = 0; i < nbpools && i < max; i++) {
-		chunk_appendf(&trash, "  - Pool %s (%lu bytes) : %lu allocated (%lu bytes), %lu used"
+		chunk_appendf(&trash, "  - Pool %s (%u bytes/%u) : %lu allocated (%lu bytes), %lu used"
 			      " (~%lu by thread caches)"
 			      ", needed_avg %lu, %lu failures, %u users, @%p%s\n",
-			      pool_info[i].entry->name, (ulong)pool_info[i].entry->size,
+			      pool_info[i].entry->name, pool_info[i].entry->size, pool_info[i].entry->align,
 			      pool_info[i].alloc_items, pool_info[i].alloc_bytes,
 			      pool_info[i].used_items, pool_info[i].cached_items,
 			      pool_info[i].need_avg, pool_info[i].failed_items,
@@ -1307,8 +1349,12 @@ void dump_pools_to_trash(int how, int max, const char *pfx)
 
 		if (detailed) {
 			struct pool_registration *reg;
-			list_for_each_entry(reg, &pool_info[i].entry->regs, list)
-				chunk_appendf(&trash, "    > %-12s: size=%u flags=%#x align=%u\n", reg->name, reg->size, reg->flags, reg->align);
+			list_for_each_entry(reg, &pool_info[i].entry->regs, list) {
+				chunk_appendf(&trash, "    > %-12s: size=%u flags=%#x align=%u", reg->name, reg->size, reg->flags, reg->align);
+				if (reg->file && reg->line)
+					chunk_appendf(&trash, " [%s:%u]", reg->file, reg->line);
+				chunk_appendf(&trash, "\n");
+			}
 		}
 	}
 
@@ -1522,12 +1568,12 @@ static int cli_io_handler_dump_pools(struct appctx *appctx)
 * resulting pointer into <ptr>. If the allocation fails, it quits with after
 * emitting an error message.
 */
-void create_pool_callback(struct pool_head **ptr, char *name, unsigned int size)
+void create_pool_callback(struct pool_head **ptr, char *name, struct pool_registration *reg)
 {
-	*ptr = create_pool(name, size, MEM_F_SHARED);
+	*ptr = create_pool_from_reg(name, reg);
 	if (!*ptr) {
 		ha_alert("Failed to allocate pool '%s' of size %u : %s. Aborting.\n",
-			 name, size, strerror(errno));
+			 name, reg->size, strerror(errno));
 		exit(1);
 	}
 }
@@ -1768,7 +1768,7 @@ static int proxy_postcheck(struct proxy *px)
 	/* be_counters may be used even if the proxy lacks the backend
 	 * capability
 	 */
-	if (!counters_be_shared_init(&px->be_counters.shared, &px->guid)) {
+	if (!counters_be_shared_prepare(&px->be_counters.shared, &px->guid)) {
 		ha_alert("out of memory while setting up shared counters for %s %s\n",
 			 proxy_type_str(px), px->id);
 		err_code |= ERR_ALERT | ERR_FATAL;
@@ -747,7 +747,7 @@ int pendconn_dequeue(struct stream *strm)
 
 	if (p->target) {
 		/* a server picked this pendconn, it must skip LB */
-		strm->target = &p->target->obj_type;
+		stream_set_srv_target(strm, p->target);
 		strm->flags |= SF_ASSIGNED;
 	}
 
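
Note: per the 3.3-dev5 changelog entry ("also use stream_set_srv_target() for pendconns"), raw `strm->target = &srv->obj_type` stores are being funneled through a single helper so server-derived stream state cannot be left stale. A hedged sketch of the idea with stub types; the real helper lives in HAProxy's stream code and does more than this:

	enum sk_obj_type { SK_OBJ_SERVER };
	struct sk_server { enum sk_obj_type obj_type; };
	struct sk_stream { enum sk_obj_type *target; /* + server-derived bookkeeping */ };

	static void sk_stream_set_srv_target(struct sk_stream *s, struct sk_server *srv)
	{
		s->target = &srv->obj_type;
		/* single choke point: whatever else must track the chosen server
		 * is updated here rather than at every call site */
	}
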
41 src/sample.c

@@ -1983,7 +1983,7 @@ int sample_conv_var2smp_str(const struct arg *arg, struct sample *smp)
 	}
 }
 
-static int sample_conv_be2dec_check(struct arg *args, struct sample_conv *conv,
+static int sample_conv_2dec_check(struct arg *args, struct sample_conv *conv,
                                    const char *file, int line, char **err)
 {
 	if (args[1].data.sint <= 0 || args[1].data.sint > sizeof(unsigned long long)) {
@@ -1999,13 +1999,13 @@ static int sample_conv_be2dec_check(struct arg *args, struct sample_conv *conv,
 	return 1;
 }
 
-/* Converts big-endian binary input sample to a string containing an unsigned
+/* Converts big-endian/little-endian binary input sample to a string containing an unsigned
  * integer number per <chunk_size> input bytes separated with <separator>.
  * Optional <truncate> flag indicates if input is truncated at <chunk_size>
 * boundaries.
- * Arguments: separator (string), chunk_size (integer), truncate (0,1)
+ * Arguments: separator (string), chunk_size (integer), truncate (0,1), big endian (0,1)
 */
-static int sample_conv_be2dec(const struct arg *args, struct sample *smp, void *private)
+static int sample_conv_2dec(const struct arg *args, struct sample *smp, void *private, int be)
 {
 	struct buffer *trash = get_trash_chunk();
 	const int last = args[2].data.sint ? smp->data.u.str.data - args[1].data.sint + 1 : smp->data.u.str.data;
@@ -2029,8 +2029,12 @@ static int sample_conv_be2dec(const struct arg *args, struct sample *smp, void *private)
 		max_size -= args[0].data.str.data;
 
 		/* Add integer */
-		for (number = 0, i = 0; i < args[1].data.sint && ptr < smp->data.u.str.data; i++)
-			number = (number << 8) + (unsigned char)smp->data.u.str.area[ptr++];
+		for (number = 0, i = 0; i < args[1].data.sint && ptr < smp->data.u.str.data; i++) {
+			if (be)
+				number = (number << 8) + (unsigned char)smp->data.u.str.area[ptr++];
+			else
+				number |= (unsigned char)smp->data.u.str.area[ptr++] << (i*8);
+		}
 
 		pos = ulltoa(number, trash->area + trash->data, trash->size - trash->data);
 		if (pos)
@@ -2047,6 +2051,28 @@ static int sample_conv_2dec(const struct arg *args, struct sample *smp, void *private, int be)
 	return 1;
 }
 
+/* Converts big-endian binary input sample to a string containing an unsigned
+ * integer number per <chunk_size> input bytes separated with <separator>.
+ * Optional <truncate> flag indicates if input is truncated at <chunk_size>
+ * boundaries.
+ * Arguments: separator (string), chunk_size (integer), truncate (0,1)
+ */
+static int sample_conv_be2dec(const struct arg *args, struct sample *smp, void *private)
+{
+	return sample_conv_2dec(args, smp, private, 1);
+}
+
+/* Converts little-endian binary input sample to a string containing an unsigned
+ * integer number per <chunk_size> input bytes separated with <separator>.
+ * Optional <truncate> flag indicates if input is truncated at <chunk_size>
+ * boundaries.
+ * Arguments: separator (string), chunk_size (integer), truncate (0,1)
+ */
+static int sample_conv_le2dec(const struct arg *args, struct sample *smp, void *private)
+{
+	return sample_conv_2dec(args, smp, private, 0);
+}
+
 static int sample_conv_be2hex_check(struct arg *args, struct sample_conv *conv,
                                    const char *file, int line, char **err)
 {
@@ -5415,7 +5441,8 @@ static struct sample_conv_kw_list sample_conv_kws = {ILH, {
 	{ "upper",  sample_conv_str2upper, 0,                     NULL,                     SMP_T_STR, SMP_T_STR  },
 	{ "lower",  sample_conv_str2lower, 0,                     NULL,                     SMP_T_STR, SMP_T_STR  },
 	{ "length", sample_conv_length,    0,                     NULL,                     SMP_T_STR, SMP_T_SINT },
-	{ "be2dec", sample_conv_be2dec,    ARG3(1,STR,SINT,SINT), sample_conv_be2dec_check, SMP_T_BIN, SMP_T_STR  },
+	{ "be2dec", sample_conv_be2dec,    ARG3(1,STR,SINT,SINT), sample_conv_2dec_check,   SMP_T_BIN, SMP_T_STR  },
+	{ "le2dec", sample_conv_le2dec,    ARG3(1,STR,SINT,SINT), sample_conv_2dec_check,   SMP_T_BIN, SMP_T_STR  },
 	{ "be2hex", sample_conv_be2hex,    ARG3(1,STR,SINT,SINT), sample_conv_be2hex_check, SMP_T_BIN, SMP_T_STR  },
 	{ "hex",    sample_conv_bin2hex,   0,                     NULL,                     SMP_T_BIN, SMP_T_STR  },
 	{ "hex2i",  sample_conv_hex2int,   0,                     NULL,                     SMP_T_STR, SMP_T_SINT },
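
Note: a worked example for the new le2dec converter above, next to its big-endian sibling: the same four input bytes chunked by 2, decoded both ways. With separator "." this corresponds to be2dec(.,2) yielding "258.772" and le2dec(.,2) yielding "513.1027".

	#include <stdio.h>

	int main(void)
	{
		const unsigned char in[4] = { 0x01, 0x02, 0x03, 0x04 };
		const int chunk = 2;
		int i, j;

		for (j = 0; j < 4; j += chunk) {     /* big-endian chunks */
			unsigned long long n = 0;
			for (i = 0; i < chunk; i++)
				n = (n << 8) + in[j + i];
			printf("%llu ", n);          /* 258 772 */
		}
		putchar('\n');
		for (j = 0; j < 4; j += chunk) {     /* little-endian chunks */
			unsigned long long n = 0;
			for (i = 0; i < chunk; i++)
				n |= (unsigned long long)in[j + i] << (i * 8);
			printf("%llu ", n);          /* 513 1027 */
		}
		putchar('\n');
		return 0;
	}
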
@@ -3450,7 +3450,7 @@ int srv_init(struct server *srv)
 	if (err_code & ERR_CODE)
 		goto out;
 
-	if (!counters_be_shared_init(&srv->counters.shared, &srv->guid)) {
+	if (!counters_be_shared_prepare(&srv->counters.shared, &srv->guid)) {
 		ha_alert("memory error while setting up shared counters for %s/%s server\n", srv->proxy->id, srv->id);
 		err_code |= ERR_ALERT | ERR_FATAL;
 		goto out;
@@ -491,11 +491,11 @@ int is_inet6_reachable(void)
 	int fd;
 
 	if (tick_isset(last_check) &&
-	    !tick_is_expired(tick_add(last_check, INET6_CONNECTIVITY_CACHE_TIME), HA_ATOMIC_LOAD(&global_now_ms)))
+	    !tick_is_expired(tick_add(last_check, INET6_CONNECTIVITY_CACHE_TIME), HA_ATOMIC_LOAD(global_now_ms)))
 		return HA_ATOMIC_LOAD(&sock_inet6_seems_reachable);
 
 	/* update the test date to ensure nobody else does it in parallel */
-	HA_ATOMIC_STORE(&last_inet6_check, HA_ATOMIC_LOAD(&global_now_ms));
+	HA_ATOMIC_STORE(&last_inet6_check, HA_ATOMIC_LOAD(global_now_ms));
 
 	fd = socket(AF_INET6, SOCK_DGRAM, 0);
 	if (fd >= 0) {