mirror of https://git.haproxy.org/git/haproxy.git/ (synced 2025-08-06 15:17:01 +02:00)

Compare commits

24 Commits

ffbb3cc306
aeff2a3b2a
66f28dbd3f
8afd3e588d
9ee14ed2d9
3dde7626ba
365a69648c
09275fd549
a6e67e7b41
697f7d1142
2ecc5290f2
dd9645d6b9
57e9425dbc
ec1ab8d171
668c2cfb09
cfe9bec1ea
14966c856b
4a20b3835a
713ebd2750
2ffe515d97
83a335f925
cedb4f0461
7fa812a1ac
021a0681be
@@ -1,6 +1,10 @@
ChangeLog :
===========

2025/07/28 : 3.3-dev5
    - BUG/MEDIUM: queue/stats: also use stream_set_srv_target() for pendconns
    - DOC: list missing global QUIC settings

2025/07/26 : 3.3-dev4
    - CLEANUP: server: do not check for duplicates anymore in findserver()
    - REORG: server: move findserver() from proxy.c to server.c

@@ -3,7 +3,7 @@
Configuration Manual
----------------------
version 3.3
2025/07/26
2025/07/28


This document covers the configuration language as implemented in the version

@@ -1744,6 +1744,7 @@ The following keywords are supported in the "global" section :
- insecure-setuid-wanted
- issuers-chain-path
- key-base
- limited-quic
- localpeer
- log
- log-send-hostname

@@ -1753,6 +1754,7 @@ The following keywords are supported in the "global" section :
- lua-prepend-path
- mworker-max-reloads
- nbthread
- no-quic
- node
- numa-cpu-mapping
- ocsp-update.disable

@@ -1882,6 +1884,7 @@ The following keywords are supported in the "global" section :
- tune.pool-low-fd-ratio
- tune.pt.zero-copy-forwarding
- tune.quic.cc-hystart
- tune.quic.cc.cubic.min-losses
- tune.quic.disable-tx-pacing
- tune.quic.disable-udp-gso
- tune.quic.frontend.glitches-threshold

@@ -19898,6 +19901,7 @@ and(value)                                  integer      integer
b64dec                                       string       binary
base64                                       binary       string
be2dec(separator,chunk_size[,truncate])      binary       string
le2dec(separator,chunk_size[,truncate])      binary       string
be2hex([separator[,chunk_size[,truncate]]])  binary       string
bool                                         integer      boolean
bytes(offset[,length])                       binary       binary

@@ -20138,6 +20142,19 @@ be2dec(<separator>,<chunk_size>[,<truncate>])
      bin(01020304050607),be2dec(,2,1)   # 2587721286
      bin(7f000001),be2dec(.,1)          # 127.0.0.1

le2dec(<separator>,<chunk_size>[,<truncate>])
  Converts little-endian binary input sample to a string containing an unsigned
  integer number per <chunk_size> input bytes. <separator> is inserted every
  <chunk_size> binary input bytes if specified. The <truncate> flag indicates
  whether the binary input is truncated at <chunk_size> boundaries. The maximum
  value for <chunk_size> is limited by the size of long long int (8 bytes).

  Example:
      bin(01020304050607),le2dec(:,2)    # 513:1284:2055:7
      bin(01020304050607),le2dec(-,2,1)  # 513-1284-2055
      bin(01020304050607),le2dec(,2,1)   # 51312842055
      bin(7f000001),le2dec(.,1)          # 127.0.0.1

be2hex([<separator>[,<chunk_size>[,<truncate>]]])
  Converts big-endian binary input sample to a hex string containing two hex
  digits per input byte. It is used to log or transfer hex dumps of some

@@ -51,9 +51,11 @@ enum http_st {
};

struct acme_auth {
        struct ist dns;      /* dns entry */
        struct ist auth;     /* auth URI */
        struct ist chall;    /* challenge URI */
        struct ist token;    /* token */
        int ready;           /* is the challenge ready ? */
        void *next;
};

@@ -79,6 +81,20 @@ struct acme_ctx {
        X509_REQ *req;
        struct ist finalize;
        struct ist certificate;
        struct task *task;
        struct mt_list el;
};

#define ACME_EV_SCHED      (1ULL << 0)  /* scheduling wakeup */
#define ACME_EV_NEW        (1ULL << 1)  /* new task */
#define ACME_EV_TASK       (1ULL << 2)  /* Task handler */
#define ACME_EV_REQ        (1ULL << 3)  /* HTTP Request */
#define ACME_EV_RES        (1ULL << 4)  /* HTTP Response */

#define ACME_VERB_CLEAN    1
#define ACME_VERB_MINIMAL  2
#define ACME_VERB_SIMPLE   3
#define ACME_VERB_ADVANCED 4
#define ACME_VERB_COMPLETE 5

#endif

@@ -28,7 +28,7 @@
extern struct timeval start_date;        /* the process's start date in wall-clock time */
extern struct timeval ready_date;        /* date when the process was considered ready */
extern ullong start_time_ns;             /* the process's start date in internal monotonic time (ns) */
extern volatile ullong global_now_ns;    /* common monotonic date between all threads, in ns (wraps every 585 yr) */
extern volatile ullong *global_now_ns;   /* common monotonic date between all threads, in ns (wraps every 585 yr) */

extern THREAD_LOCAL ullong now_ns;       /* internal monotonic date derived from real clock, in ns (wraps every 585 yr) */
extern THREAD_LOCAL struct timeval date; /* the real current date (wall-clock time) */

@@ -28,7 +28,7 @@
#include <haproxy/guid-t.h>

int counters_fe_shared_prepare(struct fe_counters_shared *counters, const struct guid_node *guid);
int counters_be_shared_init(struct be_counters_shared *counters, const struct guid_node *guid);
int counters_be_shared_prepare(struct be_counters_shared *counters, const struct guid_node *guid);

void counters_fe_shared_drop(struct fe_counters_shared *counters);
void counters_be_shared_drop(struct be_counters_shared *counters);

@@ -284,10 +284,11 @@ static __inline void watcher_attach(struct watcher *w, void *target)
        MT_LIST_APPEND(list, &w->el);
}

/* Untracks target via <w> watcher. Invalid if <w> is not attached first. */
/* Untracks target via <w> watcher. Does nothing if <w> is not attached */
static __inline void watcher_detach(struct watcher *w)
{
        BUG_ON_HOT(!MT_LIST_INLIST(&w->el));
        if (!MT_LIST_INLIST(&w->el))
                return;
        *w->pptr = NULL;
        MT_LIST_DELETE(&w->el);
}

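The practical effect of this relaxation is that cleanup paths may now call watcher_detach() unconditionally. A minimal sketch, not part of the patch (the iterator context type and field name are hypothetical, modeled on the hlua server iterator further down):

        /* Sketch only: a cleanup handler that may run after the normal
         * iteration path has already detached the watcher. Thanks to the
         * added early return, the second detach is a harmless no-op
         * instead of being invalid as before.
         */
        static void iter_ctx_cleanup(struct iter_ctx *ctx)
        {
                watcher_detach(&ctx->srv_watch);
        }
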
@@ -171,25 +171,31 @@ static inline void session_unown_conn(struct session *sess, struct connection *c
        }
}

/* Add the connection <conn> to the private conns list of session <sess>. This
 * function is called only if the connection is private. Nothing is performed
 * if the connection is already in the session list or if the session does not
 * owned the connection.
/* Add the connection <conn> to the private conns list of session <sess>. Each
 * connection is indexed by their respective target in the session. Nothing is
 * performed if the connection is already in the session list.
 *
 * Returns true if conn is inserted or already present else false if a failure
 * occurs during insertion.
 */
static inline int session_add_conn(struct session *sess, struct connection *conn, void *target)
static inline int session_add_conn(struct session *sess, struct connection *conn)
{
        struct sess_priv_conns *pconns = NULL;
        struct server *srv = objt_server(conn->target);
        int found = 0;

        BUG_ON(objt_listener(conn->target));
        /* Connection target is used to index it in the session. Only BE conns are expected in session list. */
        BUG_ON(!conn->target || objt_listener(conn->target));

        /* Already attach to the session or not the connection owner */
        if (!LIST_ISEMPTY(&conn->sess_el) || (conn->owner && conn->owner != sess))
        /* A connection cannot be attached already to another session. */
        BUG_ON(conn->owner && conn->owner != sess);

        /* Already attach to the session */
        if (!LIST_ISEMPTY(&conn->sess_el))
                return 1;

        list_for_each_entry(pconns, &sess->priv_conns, sess_el) {
                if (pconns->target == target) {
                if (pconns->target == conn->target) {
                        found = 1;
                        break;
                }

@@ -199,7 +205,7 @@ static inline int session_add_conn(struct session *sess, struct connection *conn
        pconns = pool_alloc(pool_head_sess_priv_conns);
        if (!pconns)
                return 0;
        pconns->target = target;
        pconns->target = conn->target;
        LIST_INIT(&pconns->conn_list);
        LIST_APPEND(&sess->priv_conns, &pconns->sess_el);

@@ -219,25 +225,34 @@ static inline int session_add_conn(struct session *sess, struct connection *conn
        return 1;
}

/* Returns 0 if the session can keep the idle conn, -1 if it was destroyed. The
 * connection must be private.
/* Check that session <sess> is able to keep idle connection <conn>. This must
 * be called each time a connection stored in a session becomes idle.
 *
 * Returns 0 if the connection is kept, else non-zero if the connection was
 * explicitely removed from session.
 */
static inline int session_check_idle_conn(struct session *sess, struct connection *conn)
{
        /* Another session owns this connection */
        if (conn->owner != sess)
        /* Connection must be attached to session prior to this function call. */
        BUG_ON(!conn->owner || conn->owner != sess);

        /* Connection is not attached to a session. */
        if (!conn->owner)
                return 0;

        /* Ensure conn is not already accounted as idle to prevent sess idle count excess increment. */
        BUG_ON(conn->flags & CO_FL_SESS_IDLE);

        if (sess->idle_conns >= sess->fe->max_out_conns) {
                session_unown_conn(sess, conn);
                conn->owner = NULL;
                conn->flags &= ~CO_FL_SESS_IDLE;
                conn->mux->destroy(conn->ctx);
                return -1;
        } else {
        }
        else {
                conn->flags |= CO_FL_SESS_IDLE;
                sess->idle_conns++;
        }

        return 0;
}

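To see how the reworked API is meant to be driven, here is a condensed sketch of a private-connection detach path, mirroring the mux_h1/mux_h2/mux_spop changes later in this series; the surrounding function, the early returns and the assumption that the connection is idle at this point are illustrative, not part of the patch:

        /* Sketch only: detach path of a mux whose connection is private
         * and idle (as in mux_h1 after a stream detaches).
         */
        if (conn->flags & CO_FL_PRIVATE) {
                /* Attach to the session; a no-op when already attached. */
                if (!session_add_conn(sess, conn)) {
                        /* Insertion failed: orphan the connection and kill it. */
                        conn->owner = NULL;
                        conn->mux->destroy(conn->ctx);
                        return;
                }

                /* The connection just became idle: ask the session to keep it.
                 * A non-zero return means it was removed from the session, so
                 * the caller is now responsible for destroying it.
                 */
                if (session_check_idle_conn(sess, conn) != 0) {
                        conn->mux->destroy(conn->ctx);
                        return;
                }
        }
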
@@ -64,7 +64,7 @@

/* currently updated and stored in time.c */
extern THREAD_LOCAL unsigned int now_ms; /* internal date in milliseconds (may wrap) */
extern volatile unsigned int global_now_ms;
extern volatile unsigned int *global_now_ms;

/* return 1 if tick is set, otherwise 0 */
static inline int tick_isset(int expire)

56  reg-tests/converter/le2dec.vtc  (new file)
@@ -0,0 +1,56 @@
varnishtest "le2dec converter Test"

feature cmd "$HAPROXY_PROGRAM -cc 'version_atleast(3.0-dev0)'"
feature ignore_unknown_macro

server s1 {
    rxreq
    txresp -hdr "Connection: close"
} -repeat 3 -start

haproxy h1 -conf {
    defaults
        mode http
        timeout connect "${HAPROXY_TEST_TIMEOUT-5s}"
        timeout client  "${HAPROXY_TEST_TIMEOUT-5s}"
        timeout server  "${HAPROXY_TEST_TIMEOUT-5s}"

    frontend fe
        bind "fd@${fe}"

        #### requests
        http-request set-var(txn.input) req.hdr(input)

        http-response set-header le2dec-1 "%[var(txn.input),le2dec(:,1)]"
        http-response set-header le2dec-2 "%[var(txn.input),le2dec(-,3)]"
        http-response set-header le2dec-3 "%[var(txn.input),le2dec(::,3,1)]"

        default_backend be

    backend be
        server s1 ${s1_addr}:${s1_port}
} -start

client c1 -connect ${h1_fe_sock} {
    txreq -url "/" \
        -hdr "input:"
    rxresp
    expect resp.status == 200
    expect resp.http.le2dec-1 == ""
    expect resp.http.le2dec-2 == ""
    expect resp.http.le2dec-3 == ""
    txreq -url "/" \
        -hdr "input: 0123456789"
    rxresp
    expect resp.status == 200
    expect resp.http.le2dec-1 == "48:49:50:51:52:53:54:55:56:57"
    expect resp.http.le2dec-2 == "3289392-3486771-3684150-57"
    expect resp.http.le2dec-3 == "3289392::3486771::3684150"
    txreq -url "/" \
        -hdr "input: abcdefghijklmnopqrstuvwxyz"
    rxresp
    expect resp.status == 200
    expect resp.http.le2dec-1 == "97:98:99:100:101:102:103:104:105:106:107:108:109:110:111:112:113:114:115:116:117:118:119:120:121:122"
    expect resp.http.le2dec-2 == "6513249-6710628-6908007-7105386-7302765-7500144-7697523-7894902-31353"
    expect resp.http.le2dec-3 == "6513249::6710628::6908007::7105386::7302765::7500144::7697523::7894902"
} -run

293  src/acme.c
@ -34,9 +34,111 @@
|
||||
#include <haproxy/ssl_sock.h>
|
||||
#include <haproxy/ssl_utils.h>
|
||||
#include <haproxy/tools.h>
|
||||
#include <haproxy/trace.h>
|
||||
|
||||
#define TRACE_SOURCE &trace_acme
|
||||
|
||||
#if defined(HAVE_ACME)
|
||||
|
||||
static void acme_trace(enum trace_level level, uint64_t mask, const struct trace_source *src,
|
||||
const struct ist where, const struct ist func,
|
||||
const void *a1, const void *a2, const void *a3, const void *a4);
|
||||
|
||||
static const struct trace_event acme_trace_events[] = {
|
||||
{ .mask = ACME_EV_SCHED, .name = "acme_sched", .desc = "Wakeup scheduled ACME task" },
|
||||
{ .mask = ACME_EV_NEW, .name = "acme_new", .desc = "New ACME task" },
|
||||
{ .mask = ACME_EV_TASK, .name = "acme_task", .desc = "ACME task" },
|
||||
{ }
|
||||
};
|
||||
|
||||
|
||||
static const struct name_desc acme_trace_lockon_args[4] = {
|
||||
/* arg1 */ { .name="acme_ctx", .desc="ACME context" },
|
||||
/* arg2 */ { },
|
||||
/* arg3 */ { },
|
||||
/* arg4 */ { }
|
||||
};
|
||||
|
||||
static const struct name_desc acme_trace_decoding[] = {
|
||||
{ .name="clean", .desc="only user-friendly stuff, generally suitable for level \"user\"" },
|
||||
{ .name="minimal", .desc="report only conn, no real decoding" },
|
||||
{ .name="simple", .desc="add error messages" },
|
||||
{ .name="advanced", .desc="add handshake-related details" },
|
||||
{ .name="complete", .desc="add full data dump when available" },
|
||||
{ /* end */ }
|
||||
};
|
||||
|
||||
|
||||
struct trace_source trace_acme = {
|
||||
.name = IST("acme"),
|
||||
.desc = "ACME",
|
||||
.arg_def = TRC_ARG_PRIV,
|
||||
.default_cb = acme_trace,
|
||||
.known_events = acme_trace_events,
|
||||
.lockon_args = acme_trace_lockon_args,
|
||||
.decoding = acme_trace_decoding,
|
||||
.report_events = ~0, /* report everything by default */
|
||||
};
|
||||
|
||||
INITCALL1(STG_REGISTER, trace_register_source, &trace_acme);
|
||||
|
||||
static void acme_trace(enum trace_level level, uint64_t mask, const struct trace_source *src,
|
||||
const struct ist where, const struct ist func,
|
||||
const void *a1, const void *a2, const void *a3, const void *a4)
|
||||
{
|
||||
const struct acme_ctx *ctx = a1;
|
||||
|
||||
if (src->verbosity <= ACME_VERB_CLEAN)
|
||||
return;
|
||||
|
||||
chunk_appendf(&trace_buf, " :");
|
||||
|
||||
if (mask >= ACME_EV_NEW)
|
||||
chunk_appendf(&trace_buf, " acme_ctx=%p", ctx);
|
||||
|
||||
|
||||
if (mask == ACME_EV_NEW)
|
||||
chunk_appendf(&trace_buf, ", crt=%s", ctx->store->path);
|
||||
|
||||
if (mask >= ACME_EV_TASK) {
|
||||
|
||||
switch (ctx->http_state) {
|
||||
case ACME_HTTP_REQ:
|
||||
chunk_appendf(&trace_buf, ", http_st: ACME_HTTP_REQ");
|
||||
break;
|
||||
case ACME_HTTP_RES:
|
||||
chunk_appendf(&trace_buf, ", http_st: ACME_HTTP_RES");
|
||||
break;
|
||||
}
|
||||
chunk_appendf(&trace_buf, ", st: ");
|
||||
switch (ctx->state) {
|
||||
case ACME_RESOURCES: chunk_appendf(&trace_buf, "ACME_RESOURCES"); break;
|
||||
case ACME_NEWNONCE: chunk_appendf(&trace_buf, "ACME_NEWNONCE"); break;
|
||||
case ACME_CHKACCOUNT: chunk_appendf(&trace_buf, "ACME_CHKACCOUNT"); break;
|
||||
case ACME_NEWACCOUNT: chunk_appendf(&trace_buf, "ACME_NEWACCOUNT"); break;
|
||||
case ACME_NEWORDER: chunk_appendf(&trace_buf, "ACME_NEWORDER"); break;
|
||||
case ACME_AUTH: chunk_appendf(&trace_buf, "ACME_AUTH"); break;
|
||||
case ACME_CHALLENGE: chunk_appendf(&trace_buf, "ACME_CHALLENGE"); break;
|
||||
case ACME_CHKCHALLENGE: chunk_appendf(&trace_buf, "ACME_CHKCHALLENGE"); break;
|
||||
case ACME_FINALIZE: chunk_appendf(&trace_buf, "ACME_FINALIZE"); break;
|
||||
case ACME_CHKORDER: chunk_appendf(&trace_buf, "ACME_CHKORDER"); break;
|
||||
case ACME_CERTIFICATE: chunk_appendf(&trace_buf, "ACME_CERTIFICATE"); break;
|
||||
case ACME_END: chunk_appendf(&trace_buf, "ACME_END"); break;
|
||||
}
|
||||
}
|
||||
if (mask & (ACME_EV_REQ|ACME_EV_RES)) {
|
||||
const struct ist *url = a2;
|
||||
const struct buffer *buf = a3;
|
||||
|
||||
if (mask & ACME_EV_REQ)
|
||||
chunk_appendf(&trace_buf, " url: %.*s", (int)url->len, url->ptr);
|
||||
|
||||
if (src->verbosity >= ACME_VERB_COMPLETE && level >= TRACE_LEVEL_DATA) {
|
||||
chunk_appendf(&trace_buf, " Buffer Dump:\n");
|
||||
chunk_appendf(&trace_buf, "%.*s", (int)buf->data, buf->area);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
struct mt_list acme_tasks = MT_LIST_HEAD_INIT(acme_tasks);
|
||||
|
||||
@ -653,6 +755,7 @@ static void acme_ctx_destroy(struct acme_ctx *ctx)
|
||||
istfree(&auth->auth);
|
||||
istfree(&auth->chall);
|
||||
istfree(&auth->token);
|
||||
istfree(&auth->token);
|
||||
next = auth->next;
|
||||
free(auth);
|
||||
auth = next;
|
||||
@ -788,6 +891,43 @@ int acme_http_req(struct task *task, struct acme_ctx *ctx, struct ist url, enum
|
||||
|
||||
}
|
||||
|
||||
/*
|
||||
* compute a TXT record for DNS-01 challenge
|
||||
* base64url(sha256(token || '.' || base64url(Thumbprint(accountKey))))
|
||||
*
|
||||
* https://datatracker.ietf.org/doc/html/rfc8555/#section-8.4
|
||||
*
|
||||
*/
|
||||
unsigned int acme_txt_record(const struct ist thumbprint, const struct ist token, struct buffer *output)
|
||||
{
|
||||
unsigned char md[EVP_MAX_MD_SIZE];
|
||||
struct buffer *tmp = NULL;
|
||||
unsigned int size;
|
||||
int ret = 0;
|
||||
|
||||
|
||||
if ((tmp = alloc_trash_chunk()) == NULL)
|
||||
goto out;
|
||||
|
||||
chunk_istcat(tmp, token);
|
||||
chunk_appendf(tmp, ".");
|
||||
chunk_istcat(tmp, thumbprint);
|
||||
|
||||
if (EVP_Digest(tmp->area, tmp->data, md, &size, EVP_sha256(), NULL) == 0)
|
||||
goto out;
|
||||
|
||||
ret = a2base64url((const char *)md, size, output->area, output->size);
|
||||
if (ret < 0)
|
||||
ret = 0;
|
||||
output->data = ret;
|
||||
|
||||
out:
|
||||
free_trash_chunk(tmp);
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
|
||||
int acme_jws_payload(struct buffer *req, struct ist nonce, struct ist url, EVP_PKEY *pkey, struct ist kid, struct buffer *output, char **errmsg)
|
||||
{
|
||||
struct buffer *b64payload = NULL;
|
||||
@ -930,6 +1070,8 @@ int acme_res_certificate(struct task *task, struct acme_ctx *ctx, char **errmsg)
|
||||
}
|
||||
}
|
||||
|
||||
TRACE_DATA(__FUNCTION__, ACME_EV_RES, ctx, NULL, &hc->res.buf);
|
||||
|
||||
if (hc->res.status < 200 || hc->res.status >= 300) {
|
||||
if ((ret = mjson_get_string(hc->res.buf.area, hc->res.buf.data, "$.detail", t1->area, t1->size)) > -1)
|
||||
t1->data = ret;
|
||||
@ -1001,6 +1143,8 @@ int acme_res_chkorder(struct task *task, struct acme_ctx *ctx, char **errmsg)
|
||||
}
|
||||
}
|
||||
|
||||
TRACE_DATA(__FUNCTION__, ACME_EV_RES, ctx, NULL, &hc->res.buf);
|
||||
|
||||
if (hc->res.status < 200 || hc->res.status >= 300) {
|
||||
if ((ret = mjson_get_string(hc->res.buf.area, hc->res.buf.data, "$.detail", t1->area, t1->size)) > -1)
|
||||
t1->data = ret;
|
||||
@ -1130,6 +1274,8 @@ int acme_res_finalize(struct task *task, struct acme_ctx *ctx, char **errmsg)
|
||||
}
|
||||
}
|
||||
|
||||
TRACE_DATA(__FUNCTION__, ACME_EV_RES, ctx, NULL, &hc->res.buf);
|
||||
|
||||
if (hc->res.status < 200 || hc->res.status >= 300) {
|
||||
if ((ret = mjson_get_string(hc->res.buf.area, hc->res.buf.data, "$.detail", t1->area, t1->size)) > -1)
|
||||
t1->data = ret;
|
||||
@ -1174,9 +1320,13 @@ int acme_req_challenge(struct task *task, struct acme_ctx *ctx, struct acme_auth
|
||||
|
||||
chunk_printf(req_in, "{}");
|
||||
|
||||
TRACE_DATA("REQ challenge dec", ACME_EV_REQ, ctx, &auth->chall, req_in);
|
||||
|
||||
if (acme_jws_payload(req_in, ctx->nonce, auth->chall, ctx->cfg->account.pkey, ctx->kid, req_out, errmsg) != 0)
|
||||
goto error;
|
||||
|
||||
TRACE_DATA("REQ challenge enc", ACME_EV_REQ, ctx, &auth->chall, req_out);
|
||||
|
||||
if (acme_http_req(task, ctx, auth->chall, HTTP_METH_POST, hdrs, ist2(req_out->area, req_out->data)))
|
||||
goto error;
|
||||
|
||||
@ -1211,6 +1361,8 @@ enum acme_ret acme_res_challenge(struct task *task, struct acme_ctx *ctx, struct
|
||||
|
||||
hdrs = hc->res.hdrs;
|
||||
|
||||
TRACE_DATA(__FUNCTION__, ACME_EV_RES, ctx, NULL, &hc->res.buf);
|
||||
|
||||
for (hdr = hdrs; isttest(hdr->v); hdr++) {
|
||||
if (isteqi(hdr->n, ist("Replay-Nonce"))) {
|
||||
istfree(&ctx->nonce);
|
||||
@ -1284,10 +1436,14 @@ int acme_post_as_get(struct task *task, struct acme_ctx *ctx, struct ist url, ch
|
||||
if ((req_out = alloc_trash_chunk()) == NULL)
|
||||
goto error_alloc;
|
||||
|
||||
TRACE_USER("POST-as-GET ", ACME_EV_REQ, ctx, &url);
|
||||
|
||||
/* empty payload */
|
||||
if (acme_jws_payload(req_in, ctx->nonce, url, ctx->cfg->account.pkey, ctx->kid, req_out, errmsg) != 0)
|
||||
goto error_jws;
|
||||
|
||||
TRACE_DATA("POST-as-GET enc", ACME_EV_REQ, ctx, &url, req_out);
|
||||
|
||||
if (acme_http_req(task, ctx, url, HTTP_METH_POST, hdrs, ist2(req_out->area, req_out->data)))
|
||||
goto error_http;
|
||||
|
||||
@ -1342,6 +1498,7 @@ int acme_res_auth(struct task *task, struct acme_ctx *ctx, struct acme_auth *aut
|
||||
}
|
||||
|
||||
}
|
||||
TRACE_DATA(__FUNCTION__, ACME_EV_RES, ctx, NULL, &hc->res.buf);
|
||||
|
||||
if (hc->res.status < 200 || hc->res.status >= 300) {
|
||||
/* XXX: need a generic URN error parser */
|
||||
@ -1356,6 +1513,23 @@ int acme_res_auth(struct task *task, struct acme_ctx *ctx, struct acme_auth *aut
|
||||
goto error;
|
||||
}
|
||||
|
||||
/* check and save the DNS entry */
|
||||
ret = mjson_get_string(hc->res.buf.area, hc->res.buf.data, "$.identifier.type", t1->area, t1->size);
|
||||
if (ret == -1) {
|
||||
memprintf(errmsg, "couldn't get a type \"dns\" from Authorization URL \"%s\"", auth->auth.ptr);
|
||||
goto error;
|
||||
}
|
||||
t1->data = ret;
|
||||
|
||||
ret = mjson_get_string(hc->res.buf.area, hc->res.buf.data, "$.identifier.value", t2->area, t2->size);
|
||||
if (ret == -1) {
|
||||
memprintf(errmsg, "couldn't get a type \"dns\" from Authorization URL \"%s\"", auth->auth.ptr);
|
||||
goto error;
|
||||
}
|
||||
t2->data = ret;
|
||||
|
||||
auth->dns = istdup(ist2(t2->area, t2->data));
|
||||
|
||||
/* get the multiple challenges and select the one from the configuration */
|
||||
for (i = 0; ; i++) {
|
||||
int ret;
|
||||
@ -1405,6 +1579,35 @@ int acme_res_auth(struct task *task, struct acme_ctx *ctx, struct acme_auth *aut
|
||||
goto error;
|
||||
}
|
||||
|
||||
/* compute a response for the TXT entry */
|
||||
if (strcasecmp(ctx->cfg->challenge, "DNS-01") == 0) {
|
||||
struct sink *dpapi;
|
||||
struct ist line[7];
|
||||
|
||||
if (acme_txt_record(ist(ctx->cfg->account.thumbprint), auth->token, &trash) == 0) {
|
||||
memprintf(errmsg, "couldn't compute the DNS-01 challenge");
|
||||
goto error;
|
||||
}
|
||||
|
||||
send_log(NULL, LOG_NOTICE,"acme: %s: DNS-01 requires to set the \"_acme-challenge.%.*s\" TXT record to \"%.*s\" and use the \"acme challenge_ready\" command over the CLI\n",
|
||||
ctx->store->path, (int)auth->dns.len, auth->dns.ptr, (int)trash.data, trash.area);
|
||||
|
||||
/* dump to the "dpapi" sink */
|
||||
|
||||
line[0] = ist("acme deploy ");
|
||||
line[1] = ist(ctx->store->path);
|
||||
line[2] = ist(" thumbprint ");
|
||||
line[3] = ist(ctx->cfg->account.thumbprint);
|
||||
line[4] = ist("\n");
|
||||
line[5] = ist2( hc->res.buf.area, hc->res.buf.data); /* dump the HTTP response */
|
||||
line[6] = ist("\n\0");
|
||||
|
||||
dpapi = sink_find("dpapi");
|
||||
if (dpapi)
|
||||
sink_write(dpapi, LOG_HEADER_NONE, 0, line, 7);
|
||||
}
|
||||
|
||||
/* only useful for HTTP-01 */
|
||||
if (acme_add_challenge_map(ctx->cfg->map, auth->token.ptr, ctx->cfg->account.thumbprint, errmsg) != 0) {
|
||||
memprintf(errmsg, "couldn't add the token to the '%s' map: %s", ctx->cfg->map, *errmsg);
|
||||
goto error;
|
||||
@ -1455,10 +1658,13 @@ int acme_req_neworder(struct task *task, struct acme_ctx *ctx, char **errmsg)
|
||||
|
||||
chunk_appendf(req_in, " ] }");
|
||||
|
||||
TRACE_DATA("NewOrder Decode", ACME_EV_REQ, ctx, &ctx->resources.newOrder, req_in);
|
||||
|
||||
|
||||
if (acme_jws_payload(req_in, ctx->nonce, ctx->resources.newOrder, ctx->cfg->account.pkey, ctx->kid, req_out, errmsg) != 0)
|
||||
goto error;
|
||||
|
||||
TRACE_DATA("NewOrder JWS ", ACME_EV_REQ, ctx, &ctx->resources.newOrder, req_out);
|
||||
if (acme_http_req(task, ctx, ctx->resources.newOrder, HTTP_METH_POST, hdrs, ist2(req_out->area, req_out->data)))
|
||||
goto error;
|
||||
|
||||
@ -1507,6 +1713,7 @@ int acme_res_neworder(struct task *task, struct acme_ctx *ctx, char **errmsg)
|
||||
ctx->order = istdup(hdr->v);
|
||||
}
|
||||
}
|
||||
TRACE_DATA(__FUNCTION__, ACME_EV_RES, ctx, NULL, &hc->res.buf);
|
||||
|
||||
if (hc->res.status < 200 || hc->res.status >= 300) {
|
||||
if ((ret = mjson_get_string(hc->res.buf.area, hc->res.buf.data, "$.detail", t1->area, t1->size)) > -1)
|
||||
@ -1550,6 +1757,11 @@ int acme_res_neworder(struct task *task, struct acme_ctx *ctx, char **errmsg)
|
||||
goto error;
|
||||
}
|
||||
|
||||
/* if the challenge is not DNS-01, consider that the challenge
|
||||
* is ready because computed by HAProxy */
|
||||
if (strcasecmp(ctx->cfg->challenge, "DNS-01") != 0)
|
||||
auth->ready = 1;
|
||||
|
||||
auth->next = ctx->auths;
|
||||
ctx->auths = auth;
|
||||
ctx->next_auth = auth;
|
||||
@ -1610,6 +1822,8 @@ int acme_req_account(struct task *task, struct acme_ctx *ctx, int newaccount, ch
|
||||
else
|
||||
chunk_printf(req_in, "%s", accountreq);
|
||||
|
||||
TRACE_DATA("newAccount Decoded", ACME_EV_REQ, ctx, &ctx->resources.newAccount, req_in);
|
||||
|
||||
if (acme_jws_payload(req_in, ctx->nonce, ctx->resources.newAccount, ctx->cfg->account.pkey, ctx->kid, req_out, errmsg) != 0)
|
||||
goto error;
|
||||
|
||||
@ -1659,6 +1873,8 @@ int acme_res_account(struct task *task, struct acme_ctx *ctx, int newaccount, ch
|
||||
}
|
||||
}
|
||||
|
||||
TRACE_DATA(__FUNCTION__, ACME_EV_RES, ctx, NULL, &hc->res.buf);
|
||||
|
||||
if (hc->res.status < 200 || hc->res.status >= 300) {
|
||||
if ((ret = mjson_get_string(hc->res.buf.area, hc->res.buf.data, "$.detail", t1->area, t1->size)) > -1)
|
||||
t1->data = ret;
|
||||
@ -1705,6 +1921,8 @@ int acme_nonce(struct task *task, struct acme_ctx *ctx, char **errmsg)
|
||||
goto error;
|
||||
}
|
||||
|
||||
TRACE_DATA(__FUNCTION__, ACME_EV_RES, ctx, NULL, &hc->res.buf);
|
||||
|
||||
hdrs = hc->res.hdrs;
|
||||
|
||||
for (hdr = hdrs; isttest(hdr->v); hdr++) {
|
||||
@ -1743,6 +1961,8 @@ int acme_directory(struct task *task, struct acme_ctx *ctx, char **errmsg)
|
||||
goto error;
|
||||
}
|
||||
|
||||
TRACE_DATA(__FUNCTION__, ACME_EV_RES, ctx, NULL, &hc->res.buf);
|
||||
|
||||
if ((ret = mjson_get_string(hc->res.buf.area, hc->res.buf.data, "$.newNonce", trash.area, trash.size)) <= 0) {
|
||||
memprintf(errmsg, "couldn't get newNonce URL from the directory URL");
|
||||
goto error;
|
||||
@ -1806,6 +2026,7 @@ struct task *acme_process(struct task *task, void *context, unsigned int state)
|
||||
struct mt_list tmp = MT_LIST_LOCK_FULL(&ctx->el);
|
||||
|
||||
re:
|
||||
TRACE_USER("ACME Task Handle", ACME_EV_TASK, ctx, &st);
|
||||
|
||||
switch (st) {
|
||||
case ACME_RESOURCES:
|
||||
@ -1899,6 +2120,11 @@ struct task *acme_process(struct task *task, void *context, unsigned int state)
|
||||
break;
|
||||
case ACME_CHALLENGE:
|
||||
if (http_st == ACME_HTTP_REQ) {
|
||||
|
||||
/* if the challenge is not ready, wait to be wakeup */
|
||||
if (!ctx->next_auth->ready)
|
||||
goto wait;
|
||||
|
||||
if (acme_req_challenge(task, ctx, ctx->next_auth, &errmsg) != 0)
|
||||
goto retry;
|
||||
}
|
||||
@ -1999,6 +2225,8 @@ struct task *acme_process(struct task *task, void *context, unsigned int state)
|
||||
/* this is called when changing step in the state machine */
|
||||
http_st = ACME_HTTP_REQ;
|
||||
ctx->retries = ACME_RETRY; /* reinit the retries */
|
||||
ctx->http_state = http_st;
|
||||
ctx->state = st;
|
||||
|
||||
if (ctx->retryafter == 0)
|
||||
goto re; /* optimize by not leaving the task for the next httpreq to init */
|
||||
@ -2006,8 +2234,6 @@ struct task *acme_process(struct task *task, void *context, unsigned int state)
|
||||
/* if we have a retryafter, wait before next request (usually finalize) */
|
||||
task->expire = tick_add(now_ms, ctx->retryafter * 1000);
|
||||
ctx->retryafter = 0;
|
||||
ctx->http_state = http_st;
|
||||
ctx->state = st;
|
||||
|
||||
MT_LIST_UNLOCK_FULL(&ctx->el, tmp);
|
||||
return task;
|
||||
@ -2055,8 +2281,16 @@ struct task *acme_process(struct task *task, void *context, unsigned int state)
|
||||
task = NULL;
|
||||
|
||||
return task;
|
||||
}
|
||||
|
||||
wait:
|
||||
/* wait for a task_wakeup */
|
||||
ctx->http_state = ACME_HTTP_REQ;
|
||||
ctx->state = st;
|
||||
task->expire = TICK_ETERNITY;
|
||||
|
||||
MT_LIST_UNLOCK_FULL(&ctx->el, tmp);
|
||||
return task;
|
||||
}
|
||||
/*
|
||||
* Return 1 if the certificate must be regenerated
|
||||
* Check if the notAfter date will append in (validity period / 12) or 7 days per default
|
||||
@ -2133,6 +2367,7 @@ struct task *acme_scheduler(struct task *task, void *context, unsigned int state
|
||||
if (store->conf.acme.id) {
|
||||
|
||||
if (acme_will_expire(store)) {
|
||||
TRACE_USER("ACME Scheduling start", ACME_EV_SCHED);
|
||||
if (acme_start_task(store, &errmsg) != 0) {
|
||||
send_log(NULL, LOG_NOTICE,"acme: %s: %s Aborting.\n", store->path, errmsg ? errmsg : "");
|
||||
ha_free(&errmsg);
|
||||
@ -2321,12 +2556,14 @@ static int acme_start_task(struct ckch_store *store, char **errmsg)
|
||||
ctx->store = newstore;
|
||||
ctx->cfg = cfg;
|
||||
task->context = ctx;
|
||||
ctx->task = task;
|
||||
|
||||
MT_LIST_INIT(&ctx->el);
|
||||
MT_LIST_APPEND(&acme_tasks, &ctx->el);
|
||||
|
||||
send_log(NULL, LOG_NOTICE, "acme: %s: Starting update of the certificate.\n", ctx->store->path);
|
||||
|
||||
TRACE_USER("ACME Task start", ACME_EV_NEW, ctx);
|
||||
task_wakeup(task, TASK_WOKEN_INIT);
|
||||
|
||||
return 0;
|
||||
@ -2372,6 +2609,55 @@ static int cli_acme_renew_parse(char **args, char *payload, struct appctx *appct
|
||||
return cli_dynerr(appctx, errmsg);
|
||||
}
|
||||
|
||||
static int cli_acme_chall_ready_parse(char **args, char *payload, struct appctx *appctx, void *private)
|
||||
{
|
||||
char *errmsg = NULL;
|
||||
const char *crt;
|
||||
const char *dns;
|
||||
struct mt_list back;
|
||||
struct acme_ctx *ctx;
|
||||
struct acme_auth *auth;
|
||||
int found = 0;
|
||||
|
||||
if (!*args[2] && !*args[3] && !*args[4]) {
|
||||
memprintf(&errmsg, ": not enough parameters\n");
|
||||
goto err;
|
||||
}
|
||||
|
||||
crt = args[2];
|
||||
dns = args[4];
|
||||
|
||||
|
||||
MT_LIST_FOR_EACH_ENTRY_LOCKED(ctx, &acme_tasks, el, back) {
|
||||
|
||||
if (strcmp(ctx->store->path, crt) != 0)
|
||||
continue;
|
||||
|
||||
auth = ctx->auths;
|
||||
while (auth) {
|
||||
if (strncmp(dns, auth->dns.ptr, auth->dns.len) == 0) {
|
||||
if (!auth->ready) {
|
||||
auth->ready = 1;
|
||||
task_wakeup(ctx->task, TASK_WOKEN_MSG);
|
||||
found = 1;
|
||||
} else {
|
||||
memprintf(&errmsg, "ACME challenge for crt \"%s\" and dns \"%s\" was already READY !\n", crt, dns);
|
||||
}
|
||||
break;
|
||||
}
|
||||
auth = auth->next;
|
||||
}
|
||||
}
|
||||
if (!found) {
|
||||
memprintf(&errmsg, "Couldn't find the ACME task using crt \"%s\" and dns \"%s\" !\n", crt, dns);
|
||||
goto err;
|
||||
}
|
||||
|
||||
return cli_msg(appctx, LOG_INFO, "Challenge Ready!");
|
||||
err:
|
||||
return cli_dynerr(appctx, errmsg);
|
||||
}
|
||||
|
||||
static int cli_acme_status_io_handler(struct appctx *appctx)
|
||||
{
|
||||
struct ebmb_node *node = NULL;
|
||||
@ -2454,6 +2740,7 @@ static int cli_acme_ps(char **args, char *payload, struct appctx *appctx, void *
|
||||
static struct cli_kw_list cli_kws = {{ },{
|
||||
{ { "acme", "renew", NULL }, "acme renew <certfile> : renew a certificate using the ACME protocol", cli_acme_renew_parse, NULL, NULL, NULL, 0 },
|
||||
{ { "acme", "status", NULL }, "acme status : show status of certificates configured with ACME", cli_acme_ps, cli_acme_status_io_handler, NULL, NULL, 0 },
|
||||
{ { "acme", "challenge_ready", NULL }, "acme challenge_ready <certfile> domain <domain> : show status of certificates configured with ACME", cli_acme_chall_ready_parse, NULL, NULL, NULL, 0 },
|
||||
{ { NULL }, NULL, NULL, NULL }
|
||||
}};
|
||||
|
||||
|
@ -1425,7 +1425,7 @@ struct connection *conn_backend_get(int reuse_mode,
|
||||
if (reuse_mode == PR_O_REUSE_SAFE && conn->mux->flags & MX_FL_HOL_RISK) {
|
||||
/* attach the connection to the session private list */
|
||||
conn->owner = sess;
|
||||
session_add_conn(sess, conn, conn->target);
|
||||
session_add_conn(sess, conn);
|
||||
}
|
||||
else {
|
||||
srv_add_to_avail_list(srv, conn);
|
||||
@ -2159,7 +2159,7 @@ int connect_server(struct stream *s)
|
||||
(reuse_mode == PR_O_REUSE_SAFE &&
|
||||
srv_conn->mux->flags & MX_FL_HOL_RISK)) {
|
||||
/* If it fail now, the same will be done in mux->detach() callback */
|
||||
session_add_conn(s->sess, srv_conn, srv_conn->target);
|
||||
session_add_conn(s->sess, srv_conn);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
42  src/clock.c
@@ -29,8 +29,10 @@
struct timeval start_date;      /* the process's start date in wall-clock time */
struct timeval ready_date;      /* date when the process was considered ready */
ullong start_time_ns;           /* the process's start date in internal monotonic time (ns) */
volatile ullong global_now_ns;  /* common monotonic date between all threads, in ns (wraps every 585 yr) */
volatile uint global_now_ms;    /* common monotonic date in milliseconds (may wrap) */
volatile ullong _global_now_ns; /* locally stored common monotonic date between all threads, in ns (wraps every 585 yr) */
volatile ullong *global_now_ns; /* common monotonic date, may point to _global_now_ns or shared memory */
volatile uint _global_now_ms;   /* locally stored common monotonic date in milliseconds (may wrap) */
volatile uint *global_now_ms;   /* common monotonic date in milliseconds (may wrap), may point to _global_now_ms or shared memory */

/* when CLOCK_MONOTONIC is supported, the offset is applied from th_ctx->prev_mono_time instead */
THREAD_ALIGNED(64) static llong now_offset; /* global offset between system time and global time in ns */

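In short, readers of the shared clock now go through one extra indirection. A minimal sketch, not taken from the patch, of what a call site looks like after this change (the wrapper function is illustrative; see the freq_ctr and sock_inet hunks below for the real call sites):

        /* Sketch only: global_now_ms is a pointer now, so callers load the
         * shared millisecond clock through it rather than taking its address.
         */
        static inline uint current_global_ms(void)
        {
                return HA_ATOMIC_LOAD(global_now_ms); /* formerly HA_ATOMIC_LOAD(&global_now_ms) */
        }
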
@ -238,7 +240,7 @@ void clock_update_local_date(int max_wait, int interrupted)
|
||||
now_ns += ms_to_ns(max_wait);
|
||||
|
||||
/* consider the most recent known date */
|
||||
now_ns = MAX(now_ns, HA_ATOMIC_LOAD(&global_now_ns));
|
||||
now_ns = MAX(now_ns, HA_ATOMIC_LOAD(global_now_ns));
|
||||
|
||||
/* this event is rare, but it requires proper handling because if
|
||||
* we just left now_ns where it was, the date will not be updated
|
||||
@ -269,8 +271,8 @@ void clock_update_global_date()
|
||||
* realistic regarding the global date, which only moves forward,
|
||||
* otherwise catch up.
|
||||
*/
|
||||
old_now_ns = _HA_ATOMIC_LOAD(&global_now_ns);
|
||||
old_now_ms = _HA_ATOMIC_LOAD(&global_now_ms);
|
||||
old_now_ns = _HA_ATOMIC_LOAD(global_now_ns);
|
||||
old_now_ms = _HA_ATOMIC_LOAD(global_now_ms);
|
||||
|
||||
do {
|
||||
if (now_ns < old_now_ns)
|
||||
@ -299,8 +301,8 @@ void clock_update_global_date()
|
||||
/* let's try to update the global_now_ns (both in nanoseconds
|
||||
* and ms forms) or loop again.
|
||||
*/
|
||||
} while ((!_HA_ATOMIC_CAS(&global_now_ns, &old_now_ns, now_ns) ||
|
||||
(now_ms != old_now_ms && !_HA_ATOMIC_CAS(&global_now_ms, &old_now_ms, now_ms))) &&
|
||||
} while ((!_HA_ATOMIC_CAS(global_now_ns, &old_now_ns, now_ns) ||
|
||||
(now_ms != old_now_ms && !_HA_ATOMIC_CAS(global_now_ms, &old_now_ms, now_ms))) &&
|
||||
__ha_cpu_relax());
|
||||
|
||||
if (!th_ctx->curr_mono_time) {
|
||||
@ -322,11 +324,12 @@ void clock_init_process_date(void)
|
||||
th_ctx->prev_mono_time = th_ctx->curr_mono_time = before_poll_mono_ns;
|
||||
gettimeofday(&date, NULL);
|
||||
after_poll = before_poll = date;
|
||||
global_now_ns = th_ctx->curr_mono_time;
|
||||
if (!global_now_ns) // CLOCK_MONOTONIC not supported
|
||||
global_now_ns = tv_to_ns(&date);
|
||||
now_ns = global_now_ns;
|
||||
global_now_ms = ns_to_ms(now_ns);
|
||||
_global_now_ns = th_ctx->curr_mono_time;
|
||||
if (!_global_now_ns) // CLOCK_MONOTONIC not supported
|
||||
_global_now_ns = tv_to_ns(&date);
|
||||
now_ns = _global_now_ns;
|
||||
|
||||
_global_now_ms = ns_to_ms(now_ns);
|
||||
|
||||
/* force time to wrap 20s after boot: we first compute the time offset
|
||||
* that once applied to the wall-clock date will make the local time
|
||||
@ -334,14 +337,19 @@ void clock_init_process_date(void)
|
||||
* and will be used to recompute the local time, both of which will
|
||||
* match and continue from this shifted date.
|
||||
*/
|
||||
now_offset = sec_to_ns((uint)((uint)(-global_now_ms) / 1000U - BOOT_TIME_WRAP_SEC));
|
||||
global_now_ns += now_offset;
|
||||
now_ns = global_now_ns;
|
||||
now_offset = sec_to_ns((uint)((uint)(-_global_now_ms) / 1000U - BOOT_TIME_WRAP_SEC));
|
||||
_global_now_ns += now_offset;
|
||||
now_ns = _global_now_ns;
|
||||
now_ms = ns_to_ms(now_ns);
|
||||
/* correct for TICK_ETNERITY (0) */
|
||||
if (now_ms == TICK_ETERNITY)
|
||||
now_ms++;
|
||||
global_now_ms = now_ms;
|
||||
_global_now_ms = now_ms;
|
||||
|
||||
/* for now global_now_ms points to the process-local _global_now_ms */
|
||||
global_now_ms = &_global_now_ms;
|
||||
/* same goes for global_ns_ns */
|
||||
global_now_ns = &_global_now_ns;
|
||||
|
||||
th_ctx->idle_pct = 100;
|
||||
clock_update_date(0, 1);
|
||||
@ -364,7 +372,7 @@ void clock_init_thread_date(void)
|
||||
gettimeofday(&date, NULL);
|
||||
after_poll = before_poll = date;
|
||||
|
||||
now_ns = _HA_ATOMIC_LOAD(&global_now_ns);
|
||||
now_ns = _HA_ATOMIC_LOAD(global_now_ns);
|
||||
th_ctx->idle_pct = 100;
|
||||
th_ctx->prev_cpu_time = now_cpu_time();
|
||||
th_ctx->prev_mono_time = now_mono_time();
|
||||
|
@ -117,7 +117,7 @@ int conn_create_mux(struct connection *conn, int *closed_connection)
|
||||
}
|
||||
else if (conn->flags & CO_FL_PRIVATE) {
|
||||
/* If it fail now, the same will be done in mux->detach() callback */
|
||||
session_add_conn(sess, conn, conn->target);
|
||||
session_add_conn(sess, conn);
|
||||
}
|
||||
return 0;
|
||||
fail:
|
||||
|
@ -52,12 +52,12 @@ void counters_be_shared_drop(struct be_counters_shared *counters)
|
||||
_counters_shared_drop(counters);
|
||||
}
|
||||
|
||||
/* retrieved shared counters pointer for a given <guid> object
|
||||
/* prepare shared counters pointer for a given <guid> object
|
||||
* <size> hint is expected to reflect the actual tg member size (fe/be)
|
||||
* if <guid> is not set, then sharing is disabled
|
||||
* Returns the pointer on success or NULL on failure
|
||||
*/
|
||||
static int _counters_shared_init(struct counters_shared *shared, const struct guid_node *guid, size_t size)
|
||||
static int _counters_shared_prepare(struct counters_shared *shared, const struct guid_node *guid, size_t size)
|
||||
{
|
||||
int it = 0;
|
||||
|
||||
@ -85,11 +85,11 @@ static int _counters_shared_init(struct counters_shared *shared, const struct gu
|
||||
/* prepare shared fe counters pointer for a given <guid> object */
|
||||
int counters_fe_shared_prepare(struct fe_counters_shared *shared, const struct guid_node *guid)
|
||||
{
|
||||
return _counters_shared_init((struct counters_shared *)shared, guid, sizeof(struct fe_counters_shared_tg));
|
||||
return _counters_shared_prepare((struct counters_shared *)shared, guid, sizeof(struct fe_counters_shared_tg));
|
||||
}
|
||||
|
||||
/* prepare shared be counters pointer for a given <guid> object */
|
||||
int counters_be_shared_init(struct be_counters_shared *shared, const struct guid_node *guid)
|
||||
int counters_be_shared_prepare(struct be_counters_shared *shared, const struct guid_node *guid)
|
||||
{
|
||||
return _counters_shared_init((struct counters_shared *)shared, guid, sizeof(struct be_counters_shared_tg));
|
||||
return _counters_shared_prepare((struct counters_shared *)shared, guid, sizeof(struct be_counters_shared_tg));
|
||||
}
|
||||
|
@ -33,7 +33,7 @@ uint update_freq_ctr_period_slow(struct freq_ctr *ctr, uint period, uint inc)
|
||||
*/
|
||||
for (;; __ha_cpu_relax()) {
|
||||
curr_tick = HA_ATOMIC_LOAD(&ctr->curr_tick);
|
||||
now_ms_tmp = HA_ATOMIC_LOAD(&global_now_ms);
|
||||
now_ms_tmp = HA_ATOMIC_LOAD(global_now_ms);
|
||||
|
||||
if (now_ms_tmp - curr_tick < period)
|
||||
return HA_ATOMIC_ADD_FETCH(&ctr->curr_ctr, inc);
|
||||
@ -81,7 +81,7 @@ ullong _freq_ctr_total_from_values(uint period, int pend,
|
||||
{
|
||||
int remain;
|
||||
|
||||
remain = tick + period - HA_ATOMIC_LOAD(&global_now_ms);
|
||||
remain = tick + period - HA_ATOMIC_LOAD(global_now_ms);
|
||||
if (unlikely(remain < 0)) {
|
||||
/* We're past the first period, check if we can still report a
|
||||
* part of last period or if we're too far away.
|
||||
@ -239,7 +239,7 @@ int freq_ctr_overshoot_period(const struct freq_ctr *ctr, uint period, uint freq
|
||||
return 0;
|
||||
}
|
||||
|
||||
elapsed = HA_ATOMIC_LOAD(&global_now_ms) - tick;
|
||||
elapsed = HA_ATOMIC_LOAD(global_now_ms) - tick;
|
||||
if (unlikely(elapsed < 0 || elapsed > period)) {
|
||||
/* The counter is in the future or the elapsed time is higher than the period, there is no overshoot */
|
||||
return 0;
|
||||
|
17  src/hlua.c
@ -13363,6 +13363,23 @@ static int hlua_load_per_thread(char **args, int section_type, struct proxy *cur
|
||||
return -1;
|
||||
}
|
||||
for (i = 1; *(args[i]) != 0; i++) {
|
||||
/* first arg is filename */
|
||||
if (i == 1 && args[1][0] != '/') {
|
||||
char *curpath;
|
||||
char *fullpath = NULL;
|
||||
|
||||
/* filename is provided using relative path, store the absolute path
|
||||
* to take current chdir into account for other threads file load
|
||||
* which occur later
|
||||
*/
|
||||
curpath = getcwd(trash.area, trash.size);
|
||||
if (!curpath) {
|
||||
memprintf(err, "failed to retrieve cur path");
|
||||
return -1;
|
||||
}
|
||||
per_thread_load[len][i - 1] = memprintf(&fullpath, "%s/%s", curpath, args[1]);
|
||||
}
|
||||
else
|
||||
per_thread_load[len][i - 1] = strdup(args[i]);
|
||||
if (per_thread_load[len][i - 1] == NULL) {
|
||||
memprintf(err, "out of memory error");
|
||||
|
@ -1913,6 +1913,21 @@ int hlua_listable_servers_pairs_iterator(lua_State *L)
|
||||
return 2;
|
||||
}
|
||||
|
||||
/* ensure proper cleanup for listable_servers_pairs */
|
||||
int hlua_listable_servers_pairs_gc(lua_State *L)
|
||||
{
|
||||
struct hlua_server_list_iterator_context *ctx;
|
||||
|
||||
ctx = lua_touserdata(L, 1);
|
||||
|
||||
/* we need to make sure that the watcher leaves in detached state even
|
||||
* if the iterator was interrupted (ie: "break" from the loop), else
|
||||
* the server watcher list will become corrupted
|
||||
*/
|
||||
watcher_detach(&ctx->srv_watch);
|
||||
return 0;
|
||||
}
|
||||
|
||||
/* init the iterator context, return iterator function
|
||||
* with context as closure. The only argument is a
|
||||
* server list object.
|
||||
@ -1925,6 +1940,12 @@ int hlua_listable_servers_pairs(lua_State *L)
|
||||
hlua_srv_list = hlua_check_server_list(L, 1);
|
||||
|
||||
ctx = lua_newuserdata(L, sizeof(*ctx));
|
||||
|
||||
/* add gc metamethod to the newly created userdata */
|
||||
lua_newtable(L);
|
||||
hlua_class_function(L, "__gc", hlua_listable_servers_pairs_gc);
|
||||
lua_setmetatable(L, -2);
|
||||
|
||||
ctx->px = hlua_srv_list->px;
|
||||
ctx->next = NULL;
|
||||
watcher_init(&ctx->srv_watch, &ctx->next, offsetof(struct server, watcher_list));
|
||||
|
@ -1641,7 +1641,7 @@ int http_wait_for_response(struct stream *s, struct channel *rep, int an_bit)
|
||||
conn_set_owner(srv_conn, sess, NULL);
|
||||
conn_set_private(srv_conn);
|
||||
/* If it fail now, the same will be done in mux->detach() callback */
|
||||
session_add_conn(srv_conn->owner, srv_conn, srv_conn->target);
|
||||
session_add_conn(srv_conn->owner, srv_conn);
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
@ -3723,22 +3723,25 @@ static void fcgi_detach(struct sedesc *sd)
|
||||
(fconn->flags & FCGI_CF_KEEP_CONN)) {
|
||||
if (fconn->conn->flags & CO_FL_PRIVATE) {
|
||||
/* Add the connection in the session serverlist, if not already done */
|
||||
if (!session_add_conn(sess, fconn->conn, fconn->conn->target)) {
|
||||
if (!session_add_conn(sess, fconn->conn))
|
||||
fconn->conn->owner = NULL;
|
||||
|
||||
if (eb_is_empty(&fconn->streams_by_id)) {
|
||||
/* let's kill the connection right away */
|
||||
if (!fconn->conn->owner) {
|
||||
/* Session insertion above has failed and connection is idle, remove it. */
|
||||
fconn->conn->mux->destroy(fconn);
|
||||
TRACE_DEVEL("outgoing connection killed", FCGI_EV_STRM_END|FCGI_EV_FCONN_ERR);
|
||||
return;
|
||||
}
|
||||
}
|
||||
if (eb_is_empty(&fconn->streams_by_id)) {
|
||||
|
||||
/* mark that the tasklet may lose its context to another thread and
|
||||
* that the handler needs to check it under the idle conns lock.
|
||||
*/
|
||||
HA_ATOMIC_OR(&fconn->wait_event.tasklet->state, TASK_F_USR1);
|
||||
if (session_check_idle_conn(fconn->conn->owner, fconn->conn) != 0) {
|
||||
/* The connection is destroyed, let's leave */
|
||||
|
||||
/* Ensure session can keep a new idle connection. */
|
||||
if (session_check_idle_conn(sess, fconn->conn) != 0) {
|
||||
fconn->conn->mux->destroy(fconn);
|
||||
TRACE_DEVEL("outgoing connection killed", FCGI_EV_STRM_END|FCGI_EV_FCONN_ERR);
|
||||
return;
|
||||
}
|
||||
|
12  src/mux_h1.c
@ -1138,20 +1138,24 @@ static int h1s_finish_detach(struct h1s *h1s)
|
||||
|
||||
if (h1c->conn->flags & CO_FL_PRIVATE) {
|
||||
/* Add the connection in the session server list, if not already done */
|
||||
if (!session_add_conn(sess, h1c->conn, h1c->conn->target)) {
|
||||
if (!session_add_conn(sess, h1c->conn)) {
|
||||
/* HTTP/1.1 conn is always idle after detach, can be removed if session insert failed. */
|
||||
h1c->conn->owner = NULL;
|
||||
h1c->conn->mux->destroy(h1c);
|
||||
goto released;
|
||||
}
|
||||
/* Always idle at this step */
|
||||
|
||||
/* HTTP/1.1 conn is always idle after detach. */
|
||||
|
||||
/* mark that the tasklet may lose its context to another thread and
|
||||
* that the handler needs to check it under the idle conns lock.
|
||||
*/
|
||||
HA_ATOMIC_OR(&h1c->wait_event.tasklet->state, TASK_F_USR1);
|
||||
|
||||
/* Ensure session can keep a new idle connection. */
|
||||
if (session_check_idle_conn(sess, h1c->conn)) {
|
||||
/* The connection got destroyed, let's leave */
|
||||
TRACE_DEVEL("outgoing connection killed", H1_EV_STRM_END|H1_EV_H1C_END);
|
||||
TRACE_DEVEL("outgoing connection rejected", H1_EV_STRM_END|H1_EV_H1C_END, h1c->conn);
|
||||
h1c->conn->mux->destroy(h1c);
|
||||
goto released;
|
||||
}
|
||||
}
|
||||
|
14  src/mux_h2.c
@ -5533,21 +5533,25 @@ static void h2_detach(struct sedesc *sd)
|
||||
|
||||
if (h2c->conn->flags & CO_FL_PRIVATE) {
|
||||
/* Add the connection in the session server list, if not already done */
|
||||
if (!session_add_conn(sess, h2c->conn, h2c->conn->target)) {
|
||||
if (!session_add_conn(sess, h2c->conn))
|
||||
h2c->conn->owner = NULL;
|
||||
|
||||
if (eb_is_empty(&h2c->streams_by_id)) {
|
||||
if (!h2c->conn->owner) {
|
||||
/* Session insertion above has failed and connection is idle, remove it. */
|
||||
h2c->conn->mux->destroy(h2c);
|
||||
TRACE_DEVEL("leaving on error after killing outgoing connection", H2_EV_STRM_END|H2_EV_H2C_ERR);
|
||||
return;
|
||||
}
|
||||
}
|
||||
if (eb_is_empty(&h2c->streams_by_id)) {
|
||||
|
||||
/* mark that the tasklet may lose its context to another thread and
|
||||
* that the handler needs to check it under the idle conns lock.
|
||||
*/
|
||||
HA_ATOMIC_OR(&h2c->wait_event.tasklet->state, TASK_F_USR1);
|
||||
if (session_check_idle_conn(h2c->conn->owner, h2c->conn) != 0) {
|
||||
/* At this point either the connection is destroyed, or it's been added to the server idle list, just stop */
|
||||
|
||||
/* Ensure session can keep a new idle connection. */
|
||||
if (session_check_idle_conn(sess, h2c->conn) != 0) {
|
||||
h2c->conn->mux->destroy(h2c);
|
||||
TRACE_DEVEL("leaving without reusable idle connection", H2_EV_STRM_END);
|
||||
return;
|
||||
}
|
||||
|
@ -1857,6 +1857,14 @@ int qcc_recv(struct qcc *qcc, uint64_t id, uint64_t len, uint64_t offset,
|
||||
offset = qcs->rx.offset;
|
||||
}
|
||||
|
||||
if (len && (qcc->flags & QC_CF_WAIT_HS)) {
|
||||
if (!(qcc->conn->flags & CO_FL_EARLY_DATA)) {
|
||||
/* Ensure 'Early-data: 1' will be set on the request. */
|
||||
TRACE_PROTO("received early data", QMUX_EV_QCC_RECV|QMUX_EV_QCS_RECV, qcc->conn, qcs);
|
||||
qcc->conn->flags |= CO_FL_EARLY_DATA;
|
||||
}
|
||||
}
|
||||
|
||||
left = len;
|
||||
while (left) {
|
||||
struct qc_stream_rxbuf *buf;
|
||||
@ -3784,26 +3792,25 @@ static void qmux_strm_detach(struct sedesc *sd)
|
||||
if (conn->flags & CO_FL_PRIVATE) {
|
||||
TRACE_DEVEL("handle private connection reuse", QMUX_EV_STRM_END, conn);
|
||||
|
||||
/* Add connection into session. If an error occured,
|
||||
* conn will be closed if idle, or insert will be
|
||||
* retried on next detach.
|
||||
/* Ensure conn is attached into session. Most of the times
|
||||
* this is already done during connect so this is a no-op.
|
||||
*/
|
||||
if (!session_add_conn(sess, conn, conn->target)) {
|
||||
if (!session_add_conn(sess, conn)) {
|
||||
TRACE_ERROR("error during connection insert into session list", QMUX_EV_STRM_END, conn);
|
||||
conn->owner = NULL;
|
||||
if (!qcc->nb_sc) {
|
||||
qcc_shutdown(qcc);
|
||||
goto end;
|
||||
}
|
||||
}
|
||||
|
||||
/* If conn is idle, check if session can keep it. Conn is freed if this is not the case.
|
||||
* TODO graceful shutdown should be preferable instead of plain mux->destroy().
|
||||
*/
|
||||
if (!qcc->nb_sc && session_check_idle_conn(sess, conn)) {
|
||||
TRACE_DEVEL("idle conn rejected by session", QMUX_EV_STRM_END);
|
||||
conn = NULL;
|
||||
goto end;
|
||||
if (!qcc->nb_sc) {
|
||||
if (!conn->owner) {
|
||||
/* Session insertion above has failed and connection is idle, remove it. */
|
||||
goto release;
|
||||
}
|
||||
|
||||
/* Ensure session can keep a new idle connection. */
|
||||
if (session_check_idle_conn(sess, conn)) {
|
||||
TRACE_DEVEL("idle conn rejected by session", QMUX_EV_STRM_END, conn);
|
||||
goto release;
|
||||
}
|
||||
}
|
||||
}
|
||||
else {
|
||||
@ -3812,8 +3819,9 @@ static void qmux_strm_detach(struct sedesc *sd)
|
||||
if (!srv_add_to_idle_list(objt_server(conn->target), conn, 1)) {
|
||||
/* Idle conn insert failure, gracefully close the connection. */
|
||||
TRACE_DEVEL("idle connection cannot be kept on the server", QMUX_EV_STRM_END, conn);
|
||||
qcc_shutdown(qcc);
|
||||
goto release;
|
||||
}
|
||||
|
||||
goto end;
|
||||
}
|
||||
else if (!conn->hash_node->node.node.leaf_p &&
|
||||
|
@ -2977,21 +2977,25 @@ static void spop_detach(struct sedesc *sd)
|
||||
if (!(spop_conn->flags & (SPOP_CF_RCVD_SHUT|SPOP_CF_ERR_PENDING|SPOP_CF_ERROR))) {
|
||||
if (spop_conn->conn->flags & CO_FL_PRIVATE) {
|
||||
/* Add the connection in the session server list, if not already done */
|
||||
if (!session_add_conn(sess, spop_conn->conn, spop_conn->conn->target)) {
|
||||
if (!session_add_conn(sess, spop_conn->conn))
|
||||
spop_conn->conn->owner = NULL;
|
||||
|
||||
if (eb_is_empty(&spop_conn->streams_by_id)) {
|
||||
if (!spop_conn->conn->owner) {
|
||||
/* Session insertion above has failed and connection is idle, remove it. */
|
||||
spop_conn->conn->mux->destroy(spop_conn);
|
||||
TRACE_DEVEL("leaving on error after killing outgoing connection", SPOP_EV_STRM_END|SPOP_EV_SPOP_CONN_ERR);
|
||||
return;
|
||||
}
|
||||
}
|
||||
if (eb_is_empty(&spop_conn->streams_by_id)) {
|
||||
|
||||
/* mark that the tasklet may lose its context to another thread and
|
||||
* that the handler needs to check it under the idle conns lock.
|
||||
*/
|
||||
HA_ATOMIC_OR(&spop_conn->wait_event.tasklet->state, TASK_F_USR1);
|
||||
if (session_check_idle_conn(spop_conn->conn->owner, spop_conn->conn) != 0) {
|
||||
/* At this point either the connection is destroyed, or it's been added to the server idle list, just stop */
|
||||
|
||||
/* Ensure session can keep a new idle connection. */
|
||||
if (session_check_idle_conn(sess, spop_conn->conn) != 0) {
|
||||
spop_conn->conn->mux->destroy(spop_conn);
|
||||
TRACE_DEVEL("leaving without reusable idle connection", SPOP_EV_STRM_END);
|
||||
return;
|
||||
}
|
||||
|
@@ -1768,7 +1768,7 @@ static int proxy_postcheck(struct proxy *px)
         * be_counters may be used even if the proxy lacks the backend
         * capability
         */
        if (!counters_be_shared_init(&px->be_counters.shared, &px->guid)) {
        if (!counters_be_shared_prepare(&px->be_counters.shared, &px->guid)) {
                ha_alert("out of memory while setting up shared counters for %s %s\n",
                         proxy_type_str(px), px->id);
                err_code |= ERR_ALERT | ERR_FATAL;

@@ -747,7 +747,7 @@ int pendconn_dequeue(struct stream *strm)

        if (p->target) {
                /* a server picked this pendconn, it must skip LB */
                strm->target = &p->target->obj_type;
                stream_set_srv_target(strm, p->target);
                strm->flags |= SF_ASSIGNED;
        }

39  src/sample.c
@ -1983,7 +1983,7 @@ int sample_conv_var2smp_str(const struct arg *arg, struct sample *smp)
|
||||
}
|
||||
}
|
||||
|
||||
static int sample_conv_be2dec_check(struct arg *args, struct sample_conv *conv,
|
||||
static int sample_conv_2dec_check(struct arg *args, struct sample_conv *conv,
|
||||
const char *file, int line, char **err)
|
||||
{
|
||||
if (args[1].data.sint <= 0 || args[1].data.sint > sizeof(unsigned long long)) {
|
||||
@ -1999,13 +1999,13 @@ static int sample_conv_be2dec_check(struct arg *args, struct sample_conv *conv,
|
||||
return 1;
|
||||
}
|
||||
|
||||
/* Converts big-endian binary input sample to a string containing an unsigned
|
||||
/* Converts big-endian/little-endian binary input sample to a string containing an unsigned
|
||||
* integer number per <chunk_size> input bytes separated with <separator>.
|
||||
* Optional <truncate> flag indicates if input is truncated at <chunk_size>
|
||||
* boundaries.
|
||||
* Arguments: separator (string), chunk_size (integer), truncate (0,1)
|
||||
* Arguments: separator (string), chunk_size (integer), truncate (0,1), big endian (0,1)
|
||||
*/
|
||||
static int sample_conv_be2dec(const struct arg *args, struct sample *smp, void *private)
|
||||
static int sample_conv_2dec(const struct arg *args, struct sample *smp, void *private, int be)
|
||||
{
|
||||
struct buffer *trash = get_trash_chunk();
|
||||
const int last = args[2].data.sint ? smp->data.u.str.data - args[1].data.sint + 1 : smp->data.u.str.data;
|
||||
@ -2029,8 +2029,12 @@ static int sample_conv_be2dec(const struct arg *args, struct sample *smp, void *
|
||||
max_size -= args[0].data.str.data;
|
||||
|
||||
/* Add integer */
|
||||
for (number = 0, i = 0; i < args[1].data.sint && ptr < smp->data.u.str.data; i++)
|
||||
for (number = 0, i = 0; i < args[1].data.sint && ptr < smp->data.u.str.data; i++) {
|
||||
if (be)
|
||||
number = (number << 8) + (unsigned char)smp->data.u.str.area[ptr++];
|
||||
else
|
||||
number |= (unsigned char)smp->data.u.str.area[ptr++] << (i*8);
|
||||
}
|
||||
|
||||
pos = ulltoa(number, trash->area + trash->data, trash->size - trash->data);
|
||||
if (pos)
|
||||
@ -2047,6 +2051,28 @@ static int sample_conv_be2dec(const struct arg *args, struct sample *smp, void *
|
||||
return 1;
|
||||
}
|
||||
|
||||
/* Converts big-endian binary input sample to a string containing an unsigned
|
||||
* integer number per <chunk_size> input bytes separated with <separator>.
|
||||
* Optional <truncate> flag indicates if input is truncated at <chunk_size>
|
||||
* boundaries.
|
||||
* Arguments: separator (string), chunk_size (integer), truncate (0,1)
|
||||
*/
|
||||
static int sample_conv_be2dec(const struct arg *args, struct sample *smp, void *private)
|
||||
{
|
||||
return sample_conv_2dec(args, smp, private, 1);
|
||||
}
|
||||
|
||||
/* Converts little-endian binary input sample to a string containing an unsigned
|
||||
* integer number per <chunk_size> input bytes separated with <separator>.
|
||||
* Optional <truncate> flag indicates if input is truncated at <chunk_size>
|
||||
* boundaries.
|
||||
* Arguments: separator (string), chunk_size (integer), truncate (0,1)
|
||||
*/
|
||||
static int sample_conv_le2dec(const struct arg *args, struct sample *smp, void *private)
|
||||
{
|
||||
return sample_conv_2dec(args, smp, private, 0);
|
||||
}
|
||||
|
||||
static int sample_conv_be2hex_check(struct arg *args, struct sample_conv *conv,
|
||||
const char *file, int line, char **err)
|
||||
{
|
||||
@ -5415,7 +5441,8 @@ static struct sample_conv_kw_list sample_conv_kws = {ILH, {
|
||||
{ "upper", sample_conv_str2upper, 0, NULL, SMP_T_STR, SMP_T_STR },
|
||||
{ "lower", sample_conv_str2lower, 0, NULL, SMP_T_STR, SMP_T_STR },
|
||||
{ "length", sample_conv_length, 0, NULL, SMP_T_STR, SMP_T_SINT },
|
||||
{ "be2dec", sample_conv_be2dec, ARG3(1,STR,SINT,SINT), sample_conv_be2dec_check, SMP_T_BIN, SMP_T_STR },
|
||||
{ "be2dec", sample_conv_be2dec, ARG3(1,STR,SINT,SINT), sample_conv_2dec_check, SMP_T_BIN, SMP_T_STR },
|
||||
{ "le2dec", sample_conv_le2dec, ARG3(1,STR,SINT,SINT), sample_conv_2dec_check, SMP_T_BIN, SMP_T_STR },
|
||||
{ "be2hex", sample_conv_be2hex, ARG3(1,STR,SINT,SINT), sample_conv_be2hex_check, SMP_T_BIN, SMP_T_STR },
|
||||
{ "hex", sample_conv_bin2hex, 0, NULL, SMP_T_BIN, SMP_T_STR },
|
||||
{ "hex2i", sample_conv_hex2int, 0, NULL, SMP_T_STR, SMP_T_SINT },
|
||||
|
@@ -3450,7 +3450,7 @@ int srv_init(struct server *srv)
        if (err_code & ERR_CODE)
                goto out;

        if (!counters_be_shared_init(&srv->counters.shared, &srv->guid)) {
        if (!counters_be_shared_prepare(&srv->counters.shared, &srv->guid)) {
                ha_alert("memory error while setting up shared counters for %s/%s server\n", srv->proxy->id, srv->id);
                err_code |= ERR_ALERT | ERR_FATAL;
                goto out;

@@ -491,11 +491,11 @@ int is_inet6_reachable(void)
        int fd;

        if (tick_isset(last_check) &&
            !tick_is_expired(tick_add(last_check, INET6_CONNECTIVITY_CACHE_TIME), HA_ATOMIC_LOAD(&global_now_ms)))
            !tick_is_expired(tick_add(last_check, INET6_CONNECTIVITY_CACHE_TIME), HA_ATOMIC_LOAD(global_now_ms)))
                return HA_ATOMIC_LOAD(&sock_inet6_seems_reachable);

        /* update the test date to ensure nobody else does it in parallel */
        HA_ATOMIC_STORE(&last_inet6_check, HA_ATOMIC_LOAD(&global_now_ms));
        HA_ATOMIC_STORE(&last_inet6_check, HA_ATOMIC_LOAD(global_now_ms));

        fd = socket(AF_INET6, SOCK_DGRAM, 0);
        if (fd >= 0) {