Compare commits

..

No commits in common. "master" and "v3.4-dev7" have entirely different histories.

122 changed files with 1595 additions and 6392 deletions

33
.github/matrix.py vendored
View File

@ -12,7 +12,6 @@ import functools
import json
import re
import sys
import urllib.error
import urllib.request
from os import environ
from packaging import version
@ -34,24 +33,13 @@ def get_all_github_tags(url):
headers = {}
if environ.get("GITHUB_TOKEN") is not None:
headers["Authorization"] = "token {}".format(environ.get("GITHUB_TOKEN"))
all_tags = []
page = 1
sep = "&" if "?" in url else "?"
while True:
paginated_url = "{}{}per_page=100&page={}".format(url, sep, page)
request = urllib.request.Request(paginated_url, headers=headers)
try:
response = urllib.request.urlopen(request)
except urllib.error.URLError:
return all_tags if all_tags else None
tags = json.loads(response.read().decode("utf-8"))
if not tags:
break
all_tags.extend([tag['name'] for tag in tags])
if len(tags) < 100:
break
page += 1
return all_tags if all_tags else None
request = urllib.request.Request(url, headers=headers)
try:
tags = urllib.request.urlopen(request)
except:
return None
tags = json.loads(tags.read().decode("utf-8"))
return [tag['name'] for tag in tags]
@functools.lru_cache(5)
def determine_latest_openssl(ssl):
@ -77,8 +65,6 @@ def determine_latest_aws_lc(ssl):
if not tags:
return "AWS_LC_VERSION=failed_to_detect"
valid_tags = list(filter(aws_lc_version_valid, tags))
if not valid_tags:
return "AWS_LC_VERSION=failed_to_detect"
latest_tag = max(valid_tags, key=aws_lc_version_string_to_num)
return "AWS_LC_VERSION={}".format(latest_tag[1:])
@ -90,12 +76,11 @@ def aws_lc_fips_version_valid(version_string):
@functools.lru_cache(5)
def determine_latest_aws_lc_fips(ssl):
tags = get_all_github_tags("https://api.github.com/repos/aws/aws-lc/tags")
# the AWS-LC-FIPS tags are at the end of the list, so let's get a lot
tags = get_all_github_tags("https://api.github.com/repos/aws/aws-lc/tags?per_page=200")
if not tags:
return "AWS_LC_FIPS_VERSION=failed_to_detect"
valid_tags = list(filter(aws_lc_fips_version_valid, tags))
if not valid_tags:
return "AWS_LC_FIPS_VERSION=failed_to_detect"
latest_tag = max(valid_tags, key=aws_lc_fips_version_string_to_num)
return "AWS_LC_FIPS_VERSION={}".format(latest_tag[12:])

161
CHANGELOG
View File

@ -1,167 +1,6 @@
ChangeLog :
===========
2026/04/03 : 3.4-dev8
- MINOR: log: split do_log() in do_log() + do_log_ctx()
- MINOR: log: provide a way to override logger->profile from process_send_log_ctx
- MINOR: log: support optional 'profile <log_profile_name>' argument to do-log action
- BUG/MINOR: sock: adjust accept() error messages for ENFILE and ENOMEM
- BUG/MINOR: qpack: fix 62-bit overflow and 1-byte OOB reads in decoding
- MEDIUM: sched: do not run a same task multiple times in series
- MINOR: sched: do not requeue a tasklet into the current queue
- MINOR: sched: do not punish self-waking tasklets anymore
- MEDIUM: sched: do not punish self-waking tasklets if TASK_WOKEN_ANY
- MEDIUM: sched: change scheduler budgets to lower TL_BULK
- MINOR: mux-h2: assign a limited frames processing budget
- BUILD: sched: fix leftover of debugging test in single-run changes
- BUG/MEDIUM: acme: fix multiple resource leaks in acme_x509_req()
- MINOR: http_htx: use enum for arbitrary values in conf_errors
- MINOR: http_htx: rename fields in struct conf_errors
- MINOR: http_htx: split check/init of http_errors
- MINOR/OPTIM: http_htx: lookup once http_errors section on check/init
- MEDIUM: proxy: remove http-errors limitation for dynamic backends
- BUG/MINOR: acme: leak of ext_san upon insertion error
- BUG/MINOR: acme: wrong error when checking for duplicate section
- BUG/MINOR: acme/cli: wrong argument check in 'acme renew'
- BUG/MINOR: http_htx: fix null deref in http-errors config check
- MINOR: buffers: Move small buffers management from quic to dynbuf part
- MINOR: dynbuf: Add helper functions to alloc large and small buffers
- MINOR: quic: Use b_alloc_small() to allocate a small buffer
- MINOR: config: Relax tests on the configured size of small buffers
- MINOR: config: Report the warning when invalid large buffer size is set
- MEDIUM: htx: Add htx_xfer function to replace htx_xfer_blks
- MINOR: htx: Add helper functions to xfer a message to smaller or larger one
- MINOR: http-ana: Use HTX API to move to a large buffer
- MEDIUM: chunk: Add support for small chunks
- MEDIUM: stream: Try to use a small buffer for HTTP request on queuing
- MEDIUM: stream: Try to use small buffer when TCP stream is queued
- MEDIUM: stconn: Use a small buffer if possible for L7 retries
- MEDIUM: tree-wide: Rely on htx_xfer() instead of htx_xfer_blks()
- Revert "BUG/MEDIUM: mux-h2: make sure to always report pending errors to the stream"
- MEDIUM: mux-h2: Stop dealing with HTX flags transfer in h2_rcv_buf()
- MEDIUM: tcpcheck: Use small buffer if possible for healthchecks
- MINOR: proxy: Review options flags used to configure healthchecks
- DOC: config: Fix alphabetical ordering of proxy options
- DOC: config: Fix alphabetical ordering of external-check directives
- MINOR: proxy: Add use-small-buffers option to set where to use small buffers
- DOC: config: Add missing 'status-code' param for 'http-check expect' directive
- DOC: config: Reorder params for 'tcp-check expect' directive
- BUG/MINOR: acme: NULL check on my_strndup()
- BUG/MINOR: acme: free() DER buffer on a2base64url error path
- BUG/MINOR: acme: replace atol with len-bounded __strl2uic() for retry-after
- BUG/MINOR: acme/cli: fix argument check and error in 'acme challenge_ready'
- BUILD: tools: potential null pointer dereference in dl_collect_libs_cb
- BUG/MINOR: ech: permission checks on the CLI
- BUG/MINOR: acme: permission checks on the CLI
- BUG/MEDIUM: check: Don't reuse the server xprt if we should not
- MINOR: checks: Store the protocol to be used in struct check
- MINOR: protocols: Add a new proto_is_quic() function
- MEDIUM: connections: Enforce mux protocol requirements
- MEDIUM: server: remove a useless memset() in srv_update_check_addr_port.
- BUG/MINOR: config: Warn only if warnif_cond_conflicts report a conflict
- BUG/MINOR: config: Properly test warnif_misplaced_* return values
- BUG/MINOR: http-ana: Only consider client abort for abortonclose
- BUG/MEDIUM: acme: skip doing challenge if it is already valid
- MINOR: connections: Enhance tune.idle-pool.shared
- BUG/MINOR: acme: fix task allocation leaked upon error
- BUG/MEDIUM: htx: Fix htx_xfer() to consume more data than expected
- CI: github: fix tag listing by implementing proper API pagination
- CLEANUP: fix typos and spelling in comments and documentation
- BUG/MINOR: quic: close conn on packet reception with incompatible frame
- CLEANUP: stconn: Remove usless sc_new_from_haterm() declaration
- BUG/MINOR: stconn: Always declare the SC created from healthchecks as a back SC
- MINOR: stconn: flag the stream endpoint descriptor when the app has started
- MINOR: mux-h2: report glitches on early RST_STREAM
- BUG/MINOR: net_helper: fix length controls on ip.fp tcp options parsing
- BUILD: net_helper: fix unterminated comment that broke the build
- MINOR: resolvers: basic TXT record implementation
- MINOR: acme: store the TXT record in auth->token
- MEDIUM: acme: add dns-01 DNS propagation pre-check
- MEDIUM: acme: new 'challenge-ready' option
- DOC: configuration: document challenge-ready and dns-delay options for ACME
- SCRIPTS: git-show-backports: list new commits and how to review them with -L
- BUG/MEDIUM: ssl/cli: tls-keys commands warn when accessed without admin level
- BUG/MEDIUM: ssl/ocsp: ocsp commands warn when accessed without admin level
- BUG/MEDIUM: map/cli: map/acl commands warn when accessed without admin level
- BUG/MEDIUM: ssl/cli: tls-keys commands are missing permission checks
- BUG/MEDIUM: ssl/ocsp: ocsp commands are missing permission checks
- BUG/MEDIUM: map/cli: CLI commands lack admin permission checks
- DOC: configuration: mention QUIC server support
- MEDIUM: Add set-headers-bin, add-headers-bin and del-headers-bin actions
- BUG/MEDIUM: mux-h1: Don't set MSG_MORE on bodyless responses forwarded to client
- BUG/MINOR: http_act: Properly handle decoding errors in *-headers-bin actions
- MEDIUM: stats: Hide the version by default and add stats-showversion
- MINOR: backends: Don't update last_sess if it did not change
- MINOR: servers: Don't update last_sess if it did not change
- MINOR: ssl/log: add keylog format variables and env vars
- DOC: configuration: update tune.ssl.keylog URL to IETF draft
- BUG/MINOR: http_act: Make set/add-headers-bin compatible with ACL conditions
- MINOR: action: Add a sample expression field in arguments used by HTTP actions
- MEDIUM: http_act: Rework *-headers-bin actions
- BUG/MINOR: tcpcheck: Remove unexpected flag on tcpcheck rules for httchck option
- MEDIUM: tcpcheck: Refactor how tcp-check rulesets are stored
- MINOR: tcpcheck: Deal with disable-on-404 and send-state in the tcp-check itself
- BUG/MINOR: tcpcheck: Don't enable http_needed when parsing HTTP samples
- MINOR: tcpcheck: Use tcpcheck flags to know a healthcheck uses SSL connections
- BUG/MINOR: tcpcheck: Use tcpcheck context for expressions parsing
- CLEANUP: tcpcheck: Don't needlessly expose proxy_parse_tcpcheck()
- MINOR: tcpcheck: Add a function to stringify the healthcheck type
- MEDIUM: tcpcheck: Split parsing functions to prepare healthcheck sections parsing
- MEDIUM: tcpcheck: Add parsing support for healthcheck sections
- MINOR: tcpcheck: Extract tcpheck ruleset post-config in a dedicated function
- MEDIUM: tcpcheck/server: Add healthcheck server keyword
- REGTESTS: tcpcheck: Add a script to check healthcheck section
- MINOR: acme: add 'dns-timeout' keyword for dns-01 challenge
- CLEANUP: net_helper: fix typo in comment
- MINOR: acme: set the default dns-delay to 30s
- MINOR: connection: add function to identify a QUIC connection
- MINOR: quic: refactor frame parsing
- MINOR: quic: refactor frame encoding
- BUG/MINOR: quic: fix documentation for transport params decoding
- MINOR: quic: split transport params decoding/check
- MINOR: quic: remove useless quic_tp_dec_err type
- MINOR: quic: define QMux transport parameters frame type
- MINOR: quic: implement QMux transport params frame parser/builder
- MINOR: mux-quic: move qcs stream member into tx inner struct
- MINOR: mux-quic: prepare Tx support for QMux
- MINOR: mux-quic: convert init/closure for QMux compatibility
- MINOR: mux-quic: protect qcc_io_process for QMux
- MINOR: mux-quic: prepare traces support for QMux
- MINOR: quic: abstract stream type in qf_stream frame
- MEDIUM: mux-quic: implement QMux receive
- MINOR: mux-quic: handle flow-control frame on qstream read
- MINOR: mux-quic: define Rx connection buffer for QMux
- MINOR: mux_quic: implement qstrm rx buffer realign
- MEDIUM: mux-quic: implement QMux send
- MINOR: mux-quic: implement qstream send callback
- MINOR: mux-quic: define Tx connection buffer for QMux
- MINOR: xprt_qstrm: define new xprt module for QMux protocol
- MINOR: xprt_qstrm: define callback for ALPN retrieval
- MINOR: xprt_qstrm: implement reception of transport parameters
- MINOR: xprt_qstrm: implement sending of transport parameters
- MEDIUM: ssl: load xprt_qstrm after handshake completion
- MINOR: mux-quic: use QMux transport parameters from qstrm xprt
- MAJOR: mux-quic: activate QMux for frontend side
- MAJOR: mux-quic: activate QMux on the backend side
- MINOR: acme: split the CLI wait from the resolve wait
- MEDIUM: acme: initialize the dns timer starting from the first DNS request
- DEBUG: connection/flags: add QSTRM flags for the decoder
- BUG/MINOR: mux_quic: fix uninit for QMux emission
- MINOR: acme: remove remaining CLI wait in ACME_RSLV_TRIGGER
- MEDIUM: acme: split the initial delay from the retry DNS delay
- BUG/MINOR: cfgcond: properly set the error pointer on evaluation error
- BUG/MINOR: cfgcond: always set the error string on openssl_version checks
- BUG/MINOR: cfgcond: always set the error string on awslc_api checks
- BUG/MINOR: cfgcond: fail cleanly on missing argument for "feature"
- MINOR: ssl: add the ssl_fc_crtname sample fetch
- MINOR: hasterm: Change hstream_add_data() to prepare zero-copy data forwarding
- MEDIUM: haterm: Add support for 0-copy data forwading and option to disable it
- MEDIUM: haterm: Prepare support for splicing by initializing a master pipe
- MEDIUM: haterm: Add support for splicing and option to disable it
- MINOR: haterm: Handle boolean request options as flags
- MINOR: haterm: Add an request option to disable splicing
- BUG/MINOR: ssl: fix memory leak in ssl_fc_crtname by using SSL_CTX ex_data index
2026/03/20 : 3.4-dev7
- BUG/MINOR: stconn: Increase SC bytes_out value in se_done_ff()
- BUG/MINOR: ssl-sample: Fix sample_conv_sha2() by checking EVP_Digest* failures

View File

@ -643,7 +643,7 @@ ifneq ($(USE_OPENSSL:0=),)
OPTIONS_OBJS += src/ssl_sock.o src/ssl_ckch.o src/ssl_ocsp.o src/ssl_crtlist.o \
src/ssl_sample.o src/cfgparse-ssl.o src/ssl_gencert.o \
src/ssl_utils.o src/jwt.o src/ssl_clienthello.o src/jws.o src/acme.o \
src/acme_resolvers.o src/ssl_trace.o src/jwe.o
src/ssl_trace.o src/jwe.o
endif
ifneq ($(USE_ENGINE:0=),)
@ -670,7 +670,7 @@ OPTIONS_OBJS += src/mux_quic.o src/h3.o src/quic_rx.o src/quic_tx.o \
src/quic_cc_nocc.o src/quic_cc.o src/quic_pacing.o \
src/h3_stats.o src/quic_stats.o src/qpack-enc.o \
src/qpack-tbl.o src/quic_cc_drs.o src/quic_fctl.o \
src/quic_enc.o src/mux_quic_qstrm.o src/xprt_qstrm.o
src/quic_enc.o
endif
ifneq ($(USE_QUIC_OPENSSL_COMPAT:0=),)

View File

@ -1,2 +1,2 @@
$Format:%ci$
2026/04/03
2026/03/20

View File

@ -1 +1 @@
3.4-dev8
3.4-dev7

View File

@ -149,7 +149,7 @@ usage() {
echo "Options:"
echo " -S, --master-socket <path> Use the master socket at <path> (default: ${SOCKET})"
echo " -s, --socket <path> Use the stats socket at <path>"
echo " -p, --path <path> Specify a base path for relative files (default: ${BASEPATH})"
echo " -p, --path <path> Specifiy a base path for relative files (default: ${BASEPATH})"
echo " -n, --dry-run Read certificates on the socket but don't dump them"
echo " -d, --debug Debug mode, set -x"
echo " -v, --verbose Verbose mode"

View File

@ -86,7 +86,7 @@ maintenance model and what the user wants is passed, then the LLM is invited to
provide its opinion on the need for a backport and an explanation of the reason
for its choice. This often helps the user to find a quick summary about the
patch. All these outputs are then converted to a long HTML page with colors and
radio buttons, where patches are preselected based on this classification,
radio buttons, where patches are pre-selected based on this classification,
that the user can consult and adjust, read the commits if needed, and the
selected patches finally provide some copy-pastable commands in a text-area to
select commit IDs to work on, typically in a form that's suitable for a simple

File diff suppressed because it is too large Load Diff

View File

@ -134,7 +134,7 @@ HATerm:
- /?R=<enable> Enable sending random data if >0.
Note that those arguments may be cumulated on one line separated by a set of
delimiters among [&?,;/] :
delimitors among [&?,;/] :
- GET /?s=20k&c=1&t=700&K=30r HTTP/1.0
- GET /?r=500?s=0?c=0?t=1000 HTTP/1.0

View File

@ -539,22 +539,10 @@ message. These functions are used by HTX analyzers or by multiplexers.
with the first block not removed, or NULL if everything was removed, and
the amount of data drained.
- htx_xfer() transfers HTX blocks from an HTX message to another, stopping
when a specific amount of bytes, including meta-data, was copied. If the
tail block is a DATA block, it may be partially copied. All other block
are transferred at once. By default, copied blocks are removed from the
original HTX message and headers and trailers parts cannot be partially
copied. But flags can be set to change the default behavior:
- HTX_XFER_KEEP_SRC_BLKS: source blocks are not removed
- HTX_XFER_PARTIAL_HDRS_COPY: partial headers and trailers
part can be xferred
- HTX_XFER_HDRS_ONLY: Only the headers part is xferred
- htx_xfer_blks() [DEPRECATED] transfers HTX blocks from an HTX message to
another, stopping after the first block of a specified type is transferred
or when a specific amount of bytes, including meta-data, was moved. If the
tail block is a DATA block, it may be partially moved. All other block are
- htx_xfer_blks() transfers HTX blocks from an HTX message to another,
stopping after the first block of a specified type is transferred or when
a specific amount of bytes, including meta-data, was moved. If the tail
block is a DATA block, it may be partially moved. All other block are
transferred at once or kept. This function returns a mixed value, with the
last block moved, or NULL if nothing was moved, and the amount of data
transferred. When HEADERS or TRAILERS blocks must be transferred, this

View File

@ -114,7 +114,7 @@ SHUT RDY ACT
1 1 1 => shut pending
PB: we can land into final shut if one thread disables the FD while another
one that was waiting on it reports it as shut. Theoretically it should be
one that was waiting on it reports it as shut. Theorically it should be
implicitly ready though, since reported. But if no data is reported, it
will be reportedly shut only. And no event will be reported then. This
might still make sense since it's not active, thus we don't want events.

View File

@ -1731,7 +1731,7 @@ add backend <name> from <defproxy> [mode <mode>] [guid <guid>] [ EXPERIMENTAL ]
Only TCP or HTTP proxies can be created. All of the settings are inherited
from <defproxy> default proxy instance. By default, it is mandatory to
specify the backend mode via the argument of the same name, unless <defproxy>
already defines it explicitly. It is also possible to use an optional GUID
already defines it explicitely. It is also possible to use an optional GUID
argument if wanted.
Servers can be added via the command "add server". The backend is initialized
@ -1740,7 +1740,10 @@ add backend <name> from <defproxy> [mode <mode>] [guid <guid>] [ EXPERIMENTAL ]
All named default proxies can be used, given that they validate the same
inheritance rules applied during configuration parsing. There is some
exceptions though, for example when the mode is neither TCP nor HTTP.
exceptions though, for example when the mode is neither TCP nor HTTP. Another
exception is that it is not yet possible to use a default proxies which
reference custom HTTP errors, for example via the errorfiles or http-rules
keywords.
This command is restricted and can only be issued on sockets configured for
level "admin". Moreover, this feature is still considered in development so it
@ -2130,7 +2133,7 @@ del backend <name>
be attached to the backend instance.
There is additional restrictions which prevent backend removal. First, a
backend cannot be removed if it is explicitly referenced by config elements,
backend cannot be removed if it is explicitely referenced by config elements,
for example via a use_backend rule or in sample expressions. Some proxies
options are also incompatible with runtime deletion. Currently, this is the
case when deprecated dispatch or option transparent are used. Also, a backend
@ -2138,7 +2141,7 @@ del backend <name>
impossible for now to remove a backend if QUIC servers were present in it.
It can be useful to use "wait be-removable" prior to this command to check
for the aforementioned requisites. This also provides a method to wait for
for the aformentioned requisites. This also provides a methode to wait for
the final closure of the streams attached to the target backend.
This command is restricted and can only be issued on sockets configured for

View File

@ -1,69 +0,0 @@
# Example: log HTTP traffic and TLS session keys to separate destinations
#
# "option httpslog" sends HTTP access logs to the /dev/log syslog server.
# TLS session keys are written to 2 ring buffers.
#
# Requirements:
# - HAProxy built with OpenSSL support
# - "tune.ssl.keylog on" in the global section
#
# Retrieve TLS session keys from the ring buffer via the CLI:
# For frontend connections:
#
# (echo "show events keylog-fc -w"; read) | socat /tmp/worker.socket -
#
# For backend connections:
#
# (echo "show events keylog-bc -w"; read) | socat /tmp/worker.socket -
#
# The result is in SSLKEYLOGFILE format and can be saved to a file and loaded
# into Wireshark to decrypt captured TLS traffic.
global
stats socket /tmp/worker.socket mode 0660
tune.ssl.keylog on
# Ring buffer for TLS session keys.
# "format raw" stores only the log message text, without any syslog envelope,
# producing output in the SSLKEYLOGFILE format directly.
ring keylog-fc
description "TLS session key frontend log"
format raw
maxlen 2000
size 1M
ring keylog-bc
description "TLS session key backend log"
format raw
maxlen 2000
size 1M
defaults
mode http
timeout client 30s
timeout server 30s
timeout connect 5s
log-profile keylog-fc
on any format "${HAPROXY_KEYLOG_FC_LOG_FMT}"
log-profile keylog-bc
on any format "${HAPROXY_KEYLOG_BC_LOG_FMT}"
frontend https-in
bind :443 ssl crt "common.pem"
option httpslog
# HTTPs access logs sent to the syslog server
log /dev/log format raw local0
# TLS session keys written to the ring buffer
log ring@keylog-fc profile keylog-fc local1
log ring@keylog-bc profile keylog-bc local1
default_backend be1
backend be1
server s1 10.0.0.123:443 ssl verify none

View File

@ -2,29 +2,17 @@
#ifndef _ACME_T_H_
#define _ACME_T_H_
#include <haproxy/acme_resolvers-t.h>
#include <haproxy/istbuf.h>
#include <haproxy/openssl-compat.h>
#if defined(HAVE_ACME)
#define ACME_RETRY 5
/* Readiness requirements for challenge */
#define ACME_RDY_NONE 0x00
#define ACME_RDY_CLI 0x01
#define ACME_RDY_DNS 0x02
#define ACME_RDY_DELAY 0x04
/* acme section configuration */
struct acme_cfg {
char *filename; /* config filename */
int linenum; /* config linenum */
char *name; /* section name */
int reuse_key; /* do we need to renew the private key */
int cond_ready; /* ready condition */
unsigned int dns_delay; /* delay in seconds before re-triggering DNS resolution (default: 300) */
unsigned int dns_timeout; /* time after which the DNS check shouldn't be retried (default: 600) */
char *directory; /* directory URL */
char *map; /* storage for tokens + thumbprint */
struct {
@ -52,11 +40,6 @@ enum acme_st {
ACME_NEWACCOUNT,
ACME_NEWORDER,
ACME_AUTH,
ACME_CLI_WAIT, /* wait for the ACME_RDY_CLI */
ACME_INITIAL_DELAY,
ACME_RSLV_RETRY_DELAY,
ACME_RSLV_TRIGGER,
ACME_RSLV_READY,
ACME_CHALLENGE,
ACME_CHKCHALLENGE,
ACME_FINALIZE,
@ -75,8 +58,6 @@ struct acme_auth {
struct ist auth; /* auth URI */
struct ist chall; /* challenge URI */
struct ist token; /* token */
int validated; /* already validated */
struct acme_rslv *rslv; /* acme dns-01 resolver */
int ready; /* is the challenge ready ? */
void *next;
};
@ -103,8 +84,6 @@ struct acme_ctx {
X509_REQ *req;
struct ist finalize;
struct ist certificate;
unsigned int dnstasks; /* number of DNS tasks running for this ctx */
unsigned int dnsstarttime; /* time at which we started the DNS checks */
struct task *task;
struct ebmb_node node;
char name[VAR_ARRAY];
@ -122,6 +101,4 @@ struct acme_ctx {
#define ACME_VERB_ADVANCED 4
#define ACME_VERB_COMPLETE 5
#endif /* ! HAVE_ACME */
#endif

View File

@ -1,27 +0,0 @@
/* SPDX-License-Identifier: LGPL-2.1-or-later */
#ifndef _HAPROXY_ACME_RESOLVERS_T_H
#define _HAPROXY_ACME_RESOLVERS_T_H
#include <haproxy/obj_type-t.h>
#include <haproxy/resolvers-t.h>
struct dns_counters;
/* TXT records for dns-01 */
struct acme_rslv {
enum obj_type obj_type; /* OBJ_TYPE_ACME_RSLV */
unsigned int *dnstasks; /* number of running DNS resolution for the same acme_task */
char *hostname_dn;
int hostname_dn_len;
struct resolvers *resolvers;
struct resolv_requester *requester;
int result; /* RSLV_STATUS_* — NONE until done */
int error_code; /* RSLV_RESP_* from the error callback */
struct task *acme_task; /* ACME task to wake on completion, or NULL */
struct ist txt; /* first TXT record found */
int (*success_cb)(struct resolv_requester *, struct dns_counters *);
int (*error_cb)(struct resolv_requester *, int);
};
#endif /* _HAPROXY_ACME_RESOLVERS_T_H */

View File

@ -1,18 +0,0 @@
/* SPDX-License-Identifier: LGPL-2.1-or-later */
#ifndef _HAPROXY_ACME_RESOLVERS_H
#define _HAPROXY_ACME_RESOLVERS_H
#include <haproxy/openssl-compat.h>
#if defined(HAVE_ACME)
#include <haproxy/acme_resolvers-t.h>
#include <haproxy/acme-t.h>
#include <haproxy/resolvers-t.h>
struct acme_rslv *acme_rslv_start(struct acme_auth *auth, unsigned int *dnstasks, char **errmsg);
void acme_rslv_free(struct acme_rslv *rslv);
#endif
#endif /* _HAPROXY_ACME_RESOLVERS_H */

View File

@ -151,7 +151,6 @@ struct act_rule {
struct ist str; /* string param (reason, header name, ...) */
struct lf_expr fmt; /* log-format compatible expression */
struct my_regex *re; /* used by replace-header/value/uri/path */
struct sample_expr *expr; /* sample expression used by HTTP action */
} http; /* args used by some HTTP rules */
struct http_reply *http_reply; /* HTTP response to be used by return/deny/tarpit rules */
struct redirect_rule *redir; /* redirect rule or "http-request redirect" */
@ -199,11 +198,6 @@ struct act_rule {
struct server *srv; /* target server to attach the connection */
struct sample_expr *name; /* used to differentiate idle connections */
} attach_srv; /* 'attach-srv' rule */
struct {
enum log_orig_id orig;
char *profile_name;
struct log_profile *profile;
} do_log; /* 'do-log' action */
struct {
int value;
struct sample_expr *expr;

View File

@ -99,11 +99,8 @@ static inline int be_is_eligible(const struct proxy *be)
/* set the time of last session on the backend */
static inline void be_set_sess_last(struct proxy *be)
{
uint now_sec = ns_to_sec(now_ns);
if (be->be_counters.shared.tg)
if (HA_ATOMIC_LOAD(&be->be_counters.shared.tg[tgid - 1]->last_sess) != now_sec)
HA_ATOMIC_STORE(&be->be_counters.shared.tg[tgid - 1]->last_sess, now_sec);
HA_ATOMIC_STORE(&be->be_counters.shared.tg[tgid - 1]->last_sess, ns_to_sec(now_ns));
}
/* This function returns non-zero if the designated server will be

View File

@ -59,7 +59,6 @@ enum chk_result {
#define CHK_ST_FASTINTER 0x0400 /* force fastinter check */
#define CHK_ST_READY 0x0800 /* check ready to migrate or run, see below */
#define CHK_ST_SLEEPING 0x1000 /* check was sleeping, i.e. not currently bound to a thread, see below */
#define CHK_ST_USE_SMALL_BUFF 0x2000 /* Use small buffers if possible for the request */
/* 4 possible states for CHK_ST_SLEEPING and CHK_ST_READY:
* SLP RDY State Description
@ -155,7 +154,7 @@ enum {
};
struct tcpcheck_rule;
struct tcpcheck;
struct tcpcheck_rules;
struct check {
enum obj_type obj_type; /* object type == OBJ_TYPE_CHECK */
@ -174,7 +173,7 @@ struct check {
signed char use_ssl; /* use SSL for health checks (1: on, 0: server mode, -1: off) */
int send_proxy; /* send a PROXY protocol header with checks */
int reuse_pool; /* try to reuse idle connections */
struct tcpcheck *tcpcheck; /* tcp-check to use to perform a health-check */
struct tcpcheck_rules *tcpcheck_rules; /* tcp-check send / expect rules */
struct tcpcheck_rule *current_step; /* current step when using tcpcheck */
int inter, fastinter, downinter; /* checks: time in milliseconds */
enum chk_result result; /* health-check result : CHK_RES_* */
@ -189,7 +188,6 @@ struct check {
char **envp; /* the environment to use if running a process-based check */
struct pid_list *curpid; /* entry in pid_list used for current process-based test, or -1 if not in test */
struct sockaddr_storage addr; /* the address to check */
struct protocol *proto; /* protocol used for check, may be different from the server's one */
char *pool_conn_name; /* conn name used on reuse */
char *sni; /* Server name */
char *alpn_str; /* ALPN to use for checks */

View File

@ -78,7 +78,7 @@ struct task *process_chk(struct task *t, void *context, unsigned int state);
struct task *srv_chk_io_cb(struct task *t, void *ctx, unsigned int state);
int check_buf_available(void *target);
struct buffer *check_get_buf(struct check *check, struct buffer *bptr, unsigned int small_buffer);
struct buffer *check_get_buf(struct check *check, struct buffer *bptr);
void check_release_buf(struct check *check, struct buffer *bptr);
const char *init_check(struct check *check, int type);
void free_check(struct check *check);

View File

@ -33,7 +33,6 @@
extern struct pool_head *pool_head_trash;
extern struct pool_head *pool_head_large_trash;
extern struct pool_head *pool_head_small_trash;
/* function prototypes */
@ -49,7 +48,6 @@ int chunk_strcmp(const struct buffer *chk, const char *str);
int chunk_strcasecmp(const struct buffer *chk, const char *str);
struct buffer *get_trash_chunk(void);
struct buffer *get_large_trash_chunk(void);
struct buffer *get_small_trash_chunk(void);
struct buffer *get_trash_chunk_sz(size_t size);
struct buffer *get_larger_trash_chunk(struct buffer *chunk);
int init_trash_buffers(int first);
@ -135,29 +133,6 @@ static forceinline struct buffer *alloc_large_trash_chunk(void)
return chunk;
}
/*
* Allocate a small trash chunk from the reentrant pool. The buffer starts at
* the end of the chunk. This chunk must be freed using free_trash_chunk(). This
* call may fail and the caller is responsible for checking that the returned
* pointer is not NULL.
*/
static forceinline struct buffer *alloc_small_trash_chunk(void)
{
struct buffer *chunk;
if (!pool_head_small_trash)
return NULL;
chunk = pool_alloc(pool_head_small_trash);
if (chunk) {
char *buf = (char *)chunk + sizeof(struct buffer);
*buf = 0;
chunk_init(chunk, buf,
pool_head_small_trash->size - sizeof(struct buffer));
}
return chunk;
}
/*
* Allocate a trash chunk accordingly to the requested size. This chunk must be
* freed using free_trash_chunk(). This call may fail and the caller is
@ -165,9 +140,7 @@ static forceinline struct buffer *alloc_small_trash_chunk(void)
*/
static forceinline struct buffer *alloc_trash_chunk_sz(size_t size)
{
if (pool_head_small_trash && size <= pool_head_small_trash->size)
return alloc_small_trash_chunk();
else if (size <= pool_head_trash->size)
if (likely(size <= pool_head_trash->size))
return alloc_trash_chunk();
else if (pool_head_large_trash && size <= pool_head_large_trash->size)
return alloc_large_trash_chunk();
@ -180,12 +153,10 @@ static forceinline struct buffer *alloc_trash_chunk_sz(size_t size)
*/
static forceinline void free_trash_chunk(struct buffer *chunk)
{
if (pool_head_small_trash && chunk && chunk->size == pool_head_small_trash->size - sizeof(struct buffer))
pool_free(pool_head_small_trash, chunk);
else if (pool_head_large_trash && chunk && chunk->size == pool_head_large_trash->size - sizeof(struct buffer))
pool_free(pool_head_large_trash, chunk);
else
if (likely(chunk && chunk->size == pool_head_trash->size - sizeof(struct buffer)))
pool_free(pool_head_trash, chunk);
else
pool_free(pool_head_large_trash, chunk);
}
/* copies chunk <src> into <chk>. Returns 0 in case of failure. */

View File

@ -130,8 +130,7 @@ enum {
CO_FL_OPT_TOS = 0x00000020, /* connection has a special sockopt tos */
CO_FL_QSTRM_SEND = 0x00000040, /* connection uses QMux protocol, needs to exchange transport parameters before starting mux layer */
CO_FL_QSTRM_RECV = 0x00000080, /* connection uses QMux protocol, needs to exchange transport parameters before starting mux layer */
/* unused : 0x00000040, 0x00000080 */
/* These flags indicate whether the Control and Transport layers are initialized */
CO_FL_CTRL_READY = 0x00000100, /* FD was registered, fd_delete() needed */
@ -213,14 +212,13 @@ static forceinline char *conn_show_flags(char *buf, size_t len, const char *deli
/* flags */
_(CO_FL_SAFE_LIST, _(CO_FL_IDLE_LIST, _(CO_FL_CTRL_READY,
_(CO_FL_REVERSED, _(CO_FL_ACT_REVERSING, _(CO_FL_OPT_MARK, _(CO_FL_OPT_TOS,
_(CO_FL_QSTRM_SEND, _(CO_FL_QSTRM_RECV,
_(CO_FL_XPRT_READY, _(CO_FL_WANT_DRAIN, _(CO_FL_WAIT_ROOM, _(CO_FL_SSL_NO_CACHED_INFO, _(CO_FL_EARLY_SSL_HS,
_(CO_FL_EARLY_DATA, _(CO_FL_SOCKS4_SEND, _(CO_FL_SOCKS4_RECV, _(CO_FL_SOCK_RD_SH,
_(CO_FL_SOCK_WR_SH, _(CO_FL_ERROR, _(CO_FL_FDLESS, _(CO_FL_WAIT_L4_CONN,
_(CO_FL_WAIT_L6_CONN, _(CO_FL_SEND_PROXY, _(CO_FL_ACCEPT_PROXY, _(CO_FL_ACCEPT_CIP,
_(CO_FL_SSL_WAIT_HS, _(CO_FL_PRIVATE, _(CO_FL_RCVD_PROXY, _(CO_FL_SESS_IDLE,
_(CO_FL_XPRT_TRACKED
)))))))))))))))))))))))))))))));
)))))))))))))))))))))))))))));
/* epilogue */
_(~0U);
return buf;
@ -347,7 +345,6 @@ enum {
XPRT_SSL = 1,
XPRT_HANDSHAKE = 2,
XPRT_QUIC = 3,
XPRT_QSTRM = 4,
XPRT_ENTRIES /* must be last one */
};
@ -359,7 +356,6 @@ enum {
MX_FL_NO_UPG = 0x00000004, /* set if mux does not support any upgrade */
MX_FL_FRAMED = 0x00000008, /* mux working on top of a framed transport layer (QUIC) */
MX_FL_REVERSABLE = 0x00000010, /* mux supports connection reversal */
MX_FL_EXPERIMENTAL = 0x00000020, /* requires experimental support directives */
};
/* PROTO token registration */

View File

@ -34,7 +34,6 @@
#include <haproxy/listener-t.h>
#include <haproxy/obj_type.h>
#include <haproxy/pool-t.h>
#include <haproxy/protocol.h>
#include <haproxy/server.h>
#include <haproxy/session-t.h>
#include <haproxy/task-t.h>
@ -610,13 +609,13 @@ void list_mux_proto(FILE *out);
*/
static inline const struct mux_proto_list *conn_get_best_mux_entry(
const struct ist mux_proto,
int proto_side, int proto_is_quic, int proto_mode)
int proto_side, int proto_mode)
{
struct mux_proto_list *item;
struct mux_proto_list *fallback = NULL;
list_for_each_entry(item, &mux_proto_list.list, list) {
if (!(item->side & proto_side) || !(item->mode & proto_mode) || (proto_is_quic && !(item->mux->flags & MX_FL_FRAMED)))
if (!(item->side & proto_side) || !(item->mode & proto_mode))
continue;
if (istlen(mux_proto) && isteq(mux_proto, item->token))
return item;
@ -641,7 +640,7 @@ static inline const struct mux_ops *conn_get_best_mux(struct connection *conn,
{
const struct mux_proto_list *item;
item = conn_get_best_mux_entry(mux_proto, proto_side, proto_is_quic(conn->ctrl), proto_mode);
item = conn_get_best_mux_entry(mux_proto, proto_side, proto_mode);
return item ? item->mux : NULL;
}
@ -691,12 +690,6 @@ static inline int conn_is_ssl(struct connection *conn)
return !!conn_get_ssl_sock_ctx(conn);
}
/* Returns true if connection runs over QUIC. */
static inline int conn_is_quic(const struct connection *conn)
{
return conn->flags & CO_FL_FDLESS;
}
/* Returns true if connection must be reversed. */
static inline int conn_is_reverse(const struct connection *conn)
{

View File

@ -37,7 +37,6 @@
extern struct pool_head *pool_head_buffer;
extern struct pool_head *pool_head_large_buffer;
extern struct pool_head *pool_head_small_buffer;
int init_buffer(void);
void buffer_dump(FILE *o, struct buffer *b, int from, int to);
@ -67,12 +66,6 @@ static inline int b_is_large_sz(size_t sz)
return (pool_head_large_buffer && sz == pool_head_large_buffer->size);
}
/* Return 1 if <sz> is the size of a small buffer */
static inline int b_is_small_sz(size_t sz)
{
return (pool_head_small_buffer && sz == pool_head_small_buffer->size);
}
/* Return 1 if <bug> is a default buffer */
static inline int b_is_default(struct buffer *buf)
{
@ -85,12 +78,6 @@ static inline int b_is_large(struct buffer *buf)
return b_is_large_sz(b_size(buf));
}
/* Return 1 if <buf> is a small buffer */
static inline int b_is_small(struct buffer *buf)
{
return b_is_small_sz(b_size(buf));
}
/**************************************************/
/* Functions below are used for buffer allocation */
/**************************************************/
@ -185,8 +172,6 @@ static inline char *__b_get_emergency_buf(void)
* than the default buffers */ \
if (unlikely(b_is_large_sz(sz))) \
pool_free(pool_head_large_buffer, area); \
else if (unlikely(b_is_small_sz(sz))) \
pool_free(pool_head_small_buffer, area); \
else if (th_ctx->emergency_bufs_left < global.tune.reserved_bufs) \
th_ctx->emergency_bufs[th_ctx->emergency_bufs_left++] = area; \
else \
@ -200,35 +185,6 @@ static inline char *__b_get_emergency_buf(void)
__b_free((_buf)); \
} while (0)
static inline struct buffer *b_alloc_small(struct buffer *buf)
{
char *area = NULL;
if (!buf->size) {
area = pool_alloc(pool_head_small_buffer);
if (!area)
return NULL;
buf->area = area;
buf->size = global.tune.bufsize_small;
}
return buf;
}
static inline struct buffer *b_alloc_large(struct buffer *buf)
{
char *area = NULL;
if (!buf->size) {
area = pool_alloc(pool_head_large_buffer);
if (!area)
return NULL;
buf->area = area;
buf->size = global.tune.bufsize_large;
}
return buf;
}
/* Offer one or multiple buffer currently belonging to target <from> to whoever
* needs one. Any pointer is valid for <from>, including NULL. Its purpose is
* to avoid passing a buffer to oneself in case of failed allocations (e.g.

View File

@ -207,7 +207,6 @@ struct flt_ops {
* accessible from a filter when instantiated in a stream
*/
struct flt_conf {
const char *name; /* The filter name (same name used to select the filter from config) */
const char *id; /* The filter id */
struct flt_ops *ops; /* The filter callbacks */
void *conf; /* The filter configuration */
@ -215,12 +214,6 @@ struct flt_conf {
unsigned int flags; /* FLT_CFG_FL_* */
};
struct filter_sequence_elt {
char *flt_name; /* filter name (set during parsing) */
struct flt_conf *flt_conf; /* associated filter conf (set after parsing) */
struct list list; /* list element */
};
/*
* Structure reprensenting a filter instance attached to a stream
*

View File

@ -5,11 +5,6 @@
#include <haproxy/http-t.h>
#include <haproxy/obj_type-t.h>
/* Size in bytes of the prebuilts response buffers */
#define RESPSIZE 16384
/* Number of bytes by body response line */
#define HS_COMMON_RESPONSE_LINE_SZ 50
/* hastream stream */
struct hstream {
enum obj_type obj_type;
@ -26,11 +21,15 @@ struct hstream {
int flags;
int ka; /* .0: keep-alive .1: forced .2: http/1.1, .3: was_reused */
int req_cache;
unsigned long long req_size; /* values passed in the URI to override the server's */
unsigned long long req_body; /* remaining body to be consumed from the request */
int req_code;
int res_wait; /* time to wait before replying in ms */
int res_time;
int req_chunked;
int req_random;
int req_after_res; /* Drain the request body after having sent the response */
enum http_meth_t req_meth;
};

View File

@ -5,6 +5,7 @@
#include <haproxy/hstream-t.h>
struct task *sc_hstream_io_cb(struct task *t, void *ctx, unsigned int state);
void hstream_shutdown(struct stconn *sc);
void *hstream_new(struct session *sess, struct stconn *sc, struct buffer *input);
#endif /* _HAPROXY_HSTREAM_H */

View File

@ -290,36 +290,6 @@ static inline int http_status_matches(const long *array, uint status)
return ha_bit_test(status - 100, array);
}
/* This function returns 1 if the header is one of the immutable headers.
* Forbidden headers are the ones that must not be rewritten. Function returns
* 0 if a header can be rewritten
*/
static inline int is_immutable_header(struct ist hdr)
{
switch (hdr.len) {
case 6:
return isteqi(hdr, ist("expect"));
case 7:
return isteqi(hdr, ist("trailer")) ||
isteqi(hdr, ist("upgrade"));
case 10:
return isteqi(hdr, ist("connection")) ||
isteqi(hdr, ist("keep-alive"));
case 14:
return isteqi(hdr, ist("content-length"));
case 16:
return isteqi(hdr, ist("proxy-connection"));
case 17:
return isteqi(hdr, ist("transfer-encoding"));
case 18:
return isteqi(hdr, ist("proxy-authenticate"));
case 19:
return isteqi(hdr, ist("proxy-authorization"));
default:
return 0;
}
}
#endif /* _HAPROXY_HTTP_H */
/*

View File

@ -93,22 +93,4 @@ struct http_errors {
struct list list; /* http-errors list */
};
/* Indicates the keyword origin of an http-error definition. This is used in
* <conf_errors> type to indicate which part of the internal union should be
* manipulated.
*/
enum http_err_directive {
HTTP_ERR_DIRECTIVE_SECTION = 0, /* "errorfiles" keyword referencing a http-errors section */
HTTP_ERR_DIRECTIVE_INLINE, /* "errorfile" keyword with inline error definition */
};
/* Used with "errorfiles" directives. It indicates for each known HTTP error
* status codes if they are defined in the target http-errors section.
*/
enum http_err_import {
HTTP_ERR_IMPORT_NO = 0,
HTTP_ERR_IMPORT_IMPLICIT, /* import every errcode defined in a section */
HTTP_ERR_IMPORT_EXPLICIT, /* import a specific errcode from a section */
};
#endif /* _HAPROXY_HTTP_HTX_T_H */

View File

@ -78,7 +78,6 @@ struct buffer *http_load_errorfile(const char *file, char **errmsg);
struct buffer *http_load_errormsg(const char *key, const struct ist msg, char **errmsg);
struct buffer *http_parse_errorfile(int status, const char *file, char **errmsg);
struct buffer *http_parse_errorloc(int errloc, int status, const char *url, char **errmsg);
int proxy_check_http_errors(struct proxy *px);
int proxy_dup_default_conf_errors(struct proxy *curpx, const struct proxy *defpx, char **errmsg);
void proxy_release_conf_errors(struct proxy *px);

View File

@ -57,16 +57,6 @@ size_t htx_add_data(struct htx *htx, const struct ist data);
struct htx_blk *htx_add_last_data(struct htx *htx, struct ist data);
void htx_move_blk_before(struct htx *htx, struct htx_blk **blk, struct htx_blk **ref);
int htx_append_msg(struct htx *dst, const struct htx *src);
struct buffer *htx_move_to_small_buffer(struct buffer *dst, struct buffer *src);
struct buffer *htx_move_to_large_buffer(struct buffer *dst, struct buffer *src);
struct buffer *htx_copy_to_small_buffer(struct buffer *dst, struct buffer *src);
struct buffer *htx_copy_to_large_buffer(struct buffer *dst, struct buffer *src);
#define HTX_XFER_DEFAULT 0x00000000 /* Default XFER: no partial xfer / remove blocks from source */
#define HTX_XFER_KEEP_SRC_BLKS 0x00000001 /* Don't remove xfer blocks from source messages during xfer */
#define HTX_XFER_PARTIAL_HDRS_COPY 0x00000002 /* Allow partial copy of headers and trailers part */
#define HTX_XFER_HDRS_ONLY 0x00000003 /* Only Transfer header blocks (start-line, header and EOH) */
size_t htx_xfer(struct htx *dst, struct htx *src, size_t count, unsigned int flags);
/* Functions and macros to get parts of the start-line or length of these
* parts. Request and response start-lines are both composed of 3 parts.

View File

@ -42,8 +42,6 @@ extern char clf_tcp_log_format[];
extern char default_http_log_format[];
extern char clf_http_log_format[];
extern char default_https_log_format[];
extern char keylog_format_fc[];
extern char keylog_format_bc[];
extern char default_rfc5424_sd_log_format[];

View File

@ -81,19 +81,9 @@ struct qcc {
struct quic_fctl fc; /* stream flow control applied on sending */
uint64_t buf_in_flight; /* sum of currently allocated Tx buffer sizes */
struct list frms; /* list of STREAM frames ready for sent */
union {
struct {
/* quic */
struct quic_pacer pacer; /* engine used to pace emission */
int paced_sent_ctr; /* counter for when emission is interrupted due to pacing */
};
/* qstrm */
struct buffer qstrm_buf;
};
struct quic_pacer pacer; /* engine used to pace emission */
int paced_sent_ctr; /* counter for when emission is interrupted due to pacing */
} tx;
struct {
struct buffer qstrm_buf;
} rx;
uint64_t largest_bidi_r; /* largest remote bidi stream ID opened. */
uint64_t largest_uni_r; /* largest remote uni stream ID opened. */
@ -174,16 +164,13 @@ struct qcs {
struct bdata_ctr data; /* data utilization counter. Note that <tot> is now used for now as accounting may be difficult with ncbuf. */
} rx;
struct {
union {
struct qc_stream_desc *stream; /* quic */
struct buffer qstrm_buf; /* qstrm */
};
struct quic_fctl fc; /* stream flow control applied on sending */
struct quic_frame *msd_frm; /* MAX_STREAM_DATA frame prepared */
} tx;
struct eb64_node by_id;
uint64_t id;
struct qc_stream_desc *stream;
struct list el_recv; /* element of qcc.recv_list */
struct list el_send; /* element of qcc.send_list */

View File

@ -1,14 +0,0 @@
#ifndef _HAPROXY_MUX_QUIC_PRIV_H
#define _HAPROXY_MUX_QUIC_PRIV_H
/* This header file should only be used by QUIC-MUX layer internally. */
#include <haproxy/mux_quic-t.h>
void qcs_idle_open(struct qcs *qcs);
void qcs_close_local(struct qcs *qcs);
int qcs_is_completed(struct qcs *qcs);
uint64_t qcs_prep_bytes(const struct qcs *qcs);
#endif /* _HAPROXY_MUX_QUIC_PRIV_H */

View File

@ -1,10 +0,0 @@
#ifndef _HAPROXY_MUX_QUIC_QSTRM_H
#define _HAPROXY_MUX_QUIC_QSTRM_H
#include <haproxy/mux_quic.h>
int qcc_qstrm_recv(struct qcc *qcc);
int qcc_qstrm_send_frames(struct qcc *qcc, struct list *frms);
#endif /* _HAPROXY_MUX_QUIC_QSTRM_H */

View File

@ -47,7 +47,6 @@ enum obj_type {
OBJ_TYPE_DGRAM, /* object is a struct quic_dgram */
#endif
OBJ_TYPE_HATERM, /* object is a struct hstream */
OBJ_TYPE_ACME_RSLV, /* object is a struct acme_rslv */
OBJ_TYPE_ENTRIES /* last one : number of entries */
} __attribute__((packed)) ;

View File

@ -22,7 +22,6 @@
#ifndef _HAPROXY_OBJ_TYPE_H
#define _HAPROXY_OBJ_TYPE_H
#include <haproxy/acme_resolvers-t.h>
#include <haproxy/api.h>
#include <haproxy/applet-t.h>
#include <haproxy/check-t.h>
@ -46,18 +45,17 @@ static inline enum obj_type obj_type(const enum obj_type *t)
static inline const char *obj_type_name(const enum obj_type *t)
{
switch (obj_type(t)) {
case OBJ_TYPE_NONE: return "NONE";
case OBJ_TYPE_LISTENER: return "LISTENER";
case OBJ_TYPE_PROXY: return "PROXY";
case OBJ_TYPE_SERVER: return "SERVER";
case OBJ_TYPE_APPLET: return "APPLET";
case OBJ_TYPE_APPCTX: return "APPCTX";
case OBJ_TYPE_CONN: return "CONN";
case OBJ_TYPE_SRVRQ: return "SRVRQ";
case OBJ_TYPE_SC: return "SC";
case OBJ_TYPE_STREAM: return "STREAM";
case OBJ_TYPE_CHECK: return "CHECK";
case OBJ_TYPE_ACME_RSLV: return "ACME_RSLV";
case OBJ_TYPE_NONE: return "NONE";
case OBJ_TYPE_LISTENER: return "LISTENER";
case OBJ_TYPE_PROXY: return "PROXY";
case OBJ_TYPE_SERVER: return "SERVER";
case OBJ_TYPE_APPLET: return "APPLET";
case OBJ_TYPE_APPCTX: return "APPCTX";
case OBJ_TYPE_CONN: return "CONN";
case OBJ_TYPE_SRVRQ: return "SRVRQ";
case OBJ_TYPE_SC: return "SC";
case OBJ_TYPE_STREAM: return "STREAM";
case OBJ_TYPE_CHECK: return "CHECK";
#ifdef USE_QUIC
case OBJ_TYPE_DGRAM: return "DGRAM";
#endif
@ -205,18 +203,6 @@ static inline struct hstream *objt_hstream(enum obj_type *t)
return __objt_hstream(t);
}
static inline struct acme_rslv *__objt_acme_rslv(enum obj_type *t)
{
return container_of(t, struct acme_rslv, obj_type);
}
static inline struct acme_rslv *objt_acme_rslv(enum obj_type *t)
{
if (!t || *t != OBJ_TYPE_ACME_RSLV)
return NULL;
return __objt_acme_rslv(t);
}
#ifdef USE_QUIC
static inline struct quic_dgram *__objt_dgram(enum obj_type *t)
{
@ -234,18 +220,17 @@ static inline struct quic_dgram *objt_dgram(enum obj_type *t)
static inline void *obj_base_ptr(enum obj_type *t)
{
switch (obj_type(t)) {
case OBJ_TYPE_NONE: return NULL;
case OBJ_TYPE_LISTENER: return __objt_listener(t);
case OBJ_TYPE_PROXY: return __objt_proxy(t);
case OBJ_TYPE_SERVER: return __objt_server(t);
case OBJ_TYPE_APPLET: return __objt_applet(t);
case OBJ_TYPE_APPCTX: return __objt_appctx(t);
case OBJ_TYPE_CONN: return __objt_conn(t);
case OBJ_TYPE_SRVRQ: return __objt_resolv_srvrq(t);
case OBJ_TYPE_SC: return __objt_sc(t);
case OBJ_TYPE_STREAM: return __objt_stream(t);
case OBJ_TYPE_CHECK: return __objt_check(t);
case OBJ_TYPE_ACME_RSLV: return __objt_acme_rslv(t);
case OBJ_TYPE_NONE: return NULL;
case OBJ_TYPE_LISTENER: return __objt_listener(t);
case OBJ_TYPE_PROXY: return __objt_proxy(t);
case OBJ_TYPE_SERVER: return __objt_server(t);
case OBJ_TYPE_APPLET: return __objt_applet(t);
case OBJ_TYPE_APPCTX: return __objt_appctx(t);
case OBJ_TYPE_CONN: return __objt_conn(t);
case OBJ_TYPE_SRVRQ: return __objt_resolv_srvrq(t);
case OBJ_TYPE_SC: return __objt_sc(t);
case OBJ_TYPE_STREAM: return __objt_stream(t);
case OBJ_TYPE_CHECK: return __objt_check(t);
#ifdef USE_QUIC
case OBJ_TYPE_DGRAM: return __objt_dgram(t);
#endif

View File

@ -124,12 +124,6 @@ static inline int real_family(int ss_family)
return fam ? fam->real_family : AF_UNSPEC;
}
static inline int proto_is_quic(const struct protocol *proto)
{
return (proto->proto_type == PROTO_TYPE_DGRAM &&
proto->xprt_type == PROTO_TYPE_STREAM);
}
#endif /* _HAPROXY_PROTOCOL_H */
/*

View File

@ -117,9 +117,10 @@ enum PR_SRV_STATE_FILE {
#define PR_O_HTTP_DROP_REQ_TRLS 0x04000000 /* Drop the request trailers when forwarding to the server */
#define PR_O_HTTP_DROP_RES_TRLS 0x08000000 /* Drop response trailers when forwarding to the client */
/* unused: 0x10000000 */
#define PR_O_TCPCHK_SSL 0x10000000 /* at least one TCPCHECK connect rule requires SSL */
#define PR_O_CONTSTATS 0x20000000 /* continuous counters */
/* unused: 0x40000000..0x80000000 */
#define PR_O_DISABLE404 0x40000000 /* Disable a server on a 404 response to a health-check */
/* unused: 0x80000000 */
/* bits for proxy->options2 */
#define PR_O2_SPLIC_REQ 0x00000001 /* transfer requests using linux kernel's splice() */
@ -144,7 +145,7 @@ enum PR_SRV_STATE_FILE {
#define PR_O2_NODELAY 0x00020000 /* fully interactive mode, never delay outgoing data */
#define PR_O2_USE_PXHDR 0x00040000 /* use Proxy-Connection for proxy requests */
/* unused: 0x00080000 */
#define PR_O2_CHK_SNDST 0x00080000 /* send the state of each server along with HTTP health checks */
#define PR_O2_SRC_ADDR 0x00100000 /* get the source ip and port for logs */
@ -155,17 +156,14 @@ enum PR_SRV_STATE_FILE {
#define PR_O2_RSTRICT_REQ_HDR_NAMES_NOOP 0x01000000 /* preserve request header names containing chars outside of [0-9a-zA-Z-] charset */
#define PR_O2_RSTRICT_REQ_HDR_NAMES_MASK 0x01c00000 /* mask for restrict-http-header-names option */
/* server health checks */
#define PR_O2_CHK_NONE 0x00000000 /* no L7 health checks configured (TCP by default) */
#define PR_O2_TCPCHK_CHK 0x02000000 /* use TCPCHK check for server health */
#define PR_O2_EXT_CHK 0x04000000 /* use external command for server health */
#define PR_O2_CHK_ANY 0x06000000 /* Mask to cover any check */
/* unused : 0x02000000 ... 0x08000000 */
#define PR_O2_USE_SBUF_QUEUE 0x08000000 /* use small buffer for request when stream are queued*/
#define PR_O2_USE_SBUF_L7_RETRY 0x10000000 /* use small buffer for request when L7 retires are enabled */
#define PR_O2_USE_SBUF_CHECK 0x20000000 /* use small buffer for request's healthchecks */
#define PR_O2_USE_SBUF_ALL 0x38000000 /* all flags for use-large-buffer option */
/* unused : 0x40000000 ... 0x80000000 */
/* server health checks */
#define PR_O2_CHK_NONE 0x00000000 /* no L7 health checks configured (TCP by default) */
#define PR_O2_TCPCHK_CHK 0x90000000 /* use TCPCHK check for server health */
#define PR_O2_EXT_CHK 0xA0000000 /* use external command for server health */
/* unused: 0xB0000000 to 0xF000000, reserved for health checks */
#define PR_O2_CHK_ANY 0xF0000000 /* Mask to cover any check */
/* end of proxy->options2 */
/* bits for proxy->options3 */
@ -243,12 +241,12 @@ enum PR_SRV_STATE_FILE {
/* Proxy flags */
#define PR_FL_DISABLED 0x00000001 /* The proxy was disabled in the configuration (not at runtime) */
#define PR_FL_STOPPED 0x00000002 /* The proxy was stopped */
#define PR_FL_DEF_EXPLICIT_MODE 0x00000004 /* Proxy mode is explicitly defined - only used for defaults instance */
#define PR_FL_DEF_EXPLICIT_MODE 0x00000004 /* Proxy mode is explicitely defined - only used for defaults instance */
#define PR_FL_EXPLICIT_REF 0x00000008 /* The default proxy is explicitly referenced by another proxy */
#define PR_FL_IMPLICIT_REF 0x00000010 /* The default proxy is implicitly referenced by another proxy */
#define PR_FL_PAUSED 0x00000020 /* The proxy was paused at run time (reversible) */
#define PR_FL_CHECKED 0x00000040 /* The proxy configuration was fully checked (including postparsing checks) */
#define PR_FL_BE_UNPUBLISHED 0x00000080 /* The proxy cannot be targeted by content switching rules */
#define PR_FL_BE_UNPUBLISHED 0x00000080 /* The proxy cannot be targetted by content switching rules */
#define PR_FL_DELETED 0x00000100 /* Proxy has been deleted and must be manipulated with care */
#define PR_FL_NON_PURGEABLE 0x00000200 /* Proxy referenced by config elements which prevent its runtime removal. */
@ -444,7 +442,7 @@ struct proxy {
struct stktable *table; /* table for storing sticking streams */
struct task *task; /* the associated task, mandatory to manage rate limiting, stopping and resource shortage, NULL if disabled */
struct tcpcheck tcpcheck; /* tcp-check to use to perform a health-check */
struct tcpcheck_rules tcpcheck_rules; /* tcp-check send / expect rules */
char *check_command; /* Command to use for external agent checks */
char *check_path; /* PATH environment to use for external agent checks */
struct http_reply *replies[HTTP_ERR_SIZE]; /* HTTP replies for known errors */
@ -509,12 +507,6 @@ struct proxy {
* name is used
*/
struct list filter_configs; /* list of the filters that are declared on this proxy */
struct { /* sequence in which declared filters on the proxy should be execute
* (list of filter_sequence_elt)
*/
struct list req; /* during request handling */
struct list res; /* during response handling */
} filter_sequence;
struct guid_node guid; /* GUID global tree node */
struct mt_list watcher_list; /* list of elems which currently references this proxy instance (currently only used with backends) */

View File

@ -32,6 +32,7 @@
#include <import/ebtree-t.h>
#include <haproxy/buf-t.h>
#include <haproxy/list.h>
#include <haproxy/quic_stream-t.h>
#include <haproxy/quic_token.h>
extern struct pool_head *pool_head_quic_frame;
@ -86,7 +87,6 @@ enum quic_frame_type {
* defined in quic_frame.c. Do not forget to complete the associated function
* quic_frame_type_is_known() and both qf_builder()/qf_parser().
*/
extern const uint64_t QUIC_FT_QX_TRANSPORT_PARAMETERS;
#define QUIC_FT_PKT_TYPE_I_BITMASK (1 << QUIC_PACKET_TYPE_INITIAL)
#define QUIC_FT_PKT_TYPE_0_BITMASK (1 << QUIC_PACKET_TYPE_0RTT)
@ -169,7 +169,7 @@ struct qf_new_token {
struct qf_stream {
uint64_t id;
void *stream;
struct qc_stream_desc *stream;
/* used only on TX when constructing frames.
* Data cleared when processing ACK related to this STREAM frame.
@ -252,10 +252,6 @@ struct qf_connection_close_app {
unsigned char reason_phrase[QUIC_CC_REASON_PHRASE_MAXLEN];
};
struct qf_qx_transport_parameters {
struct quic_transport_params params;
};
struct quic_frame {
struct list list; /* List elem from parent elem (typically a Tx packet instance, a PKTNS or a MUX element). */
struct quic_tx_packet *pkt; /* Last Tx packet used to send the frame. */
@ -283,7 +279,6 @@ struct quic_frame {
struct qf_path_challenge_response path_challenge_response;
struct qf_connection_close connection_close;
struct qf_connection_close_app connection_close_app;
struct qf_qx_transport_parameters qmux_transport_params;
};
struct quic_frame *origin; /* Parent frame. Set if frame is a duplicate (used for retransmission). */
struct list reflist; /* List head containing duplicated children frames. */

View File

@ -34,24 +34,13 @@
const char *quic_frame_type_string(enum quic_frame_type ft);
int qc_build_frm(struct quic_frame *frm,
unsigned char **pos, const unsigned char *end,
int qc_build_frm(unsigned char **pos, const unsigned char *end,
struct quic_frame *frm, struct quic_tx_packet *pkt,
struct quic_conn *conn);
int qc_build_frm_pkt(struct quic_frame *frm, struct quic_tx_packet *pkt,
unsigned char **pos, const unsigned char *end,
struct quic_conn *qc);
int qc_parse_frm_type(struct quic_frame *frm,
const unsigned char **pos, const unsigned char *end,
struct quic_conn *conn);
int qc_parse_frm_pkt(const struct quic_frame *frm,
const struct quic_rx_packet *pkt, int *flags);
int qc_parse_frm_payload(struct quic_frame *frm,
const unsigned char **pos, const unsigned char *end,
struct quic_conn *qc);
int qc_parse_frm(struct quic_frame *frm, struct quic_rx_packet *pkt,
const unsigned char **pos, const unsigned char *end,
struct quic_conn *conn);
void qc_release_frm(struct quic_conn *qc, struct quic_frame *frm);

View File

@ -124,5 +124,12 @@ struct quic_early_transport_params {
uint64_t initial_max_streams_uni;
};
/* Return type for QUIC TP decode function */
enum quic_tp_dec_err {
QUIC_TP_DEC_ERR_NONE = 0, /* no error */
QUIC_TP_DEC_ERR_INVAL, /* invalid value as per RFC 9000 */
QUIC_TP_DEC_ERR_TRUNC, /* field encoding too small or too large */
};
#endif /* USE_QUIC */
#endif /* _HAPROXY_QUIC_TP_T_H */

View File

@ -129,12 +129,6 @@ static inline void quic_transport_params_dump(struct buffer *b,
quic_tp_version_info_dump(b, &p->version_information, local);
}
int quic_transport_param_enc_int(unsigned char **buf,
const unsigned char *end,
uint64_t type, uint64_t val);
int quic_transport_params_decode(struct quic_transport_params *p, int server,
const unsigned char *buf, const unsigned char *end);
static inline void quic_early_transport_params_dump(struct buffer *b,
const struct quic_conn *qc,
const struct quic_early_transport_params *p)

View File

@ -72,7 +72,6 @@ extern struct pool_head *resolv_requester_pool;
/* dns record types (non exhaustive list) */
#define DNS_RTYPE_A 1 /* IPv4 address */
#define DNS_RTYPE_CNAME 5 /* canonical name */
#define DNS_RTYPE_TXT 16 /* TXT */
#define DNS_RTYPE_AAAA 28 /* IPv6 address */
#define DNS_RTYPE_SRV 33 /* SRV record */
#define DNS_RTYPE_OPT 41 /* OPT */

View File

@ -220,12 +220,8 @@ static inline void srv_inc_sess_ctr(struct server *s)
/* set the time of last session on the designated server */
static inline void srv_set_sess_last(struct server *s)
{
if (s->counters.shared.tg) {
uint now_sec = ns_to_sec(now_ms);
if (HA_ATOMIC_LOAD(&s->counters.shared.tg[tgid - 1]->last_sess) != now_sec)
HA_ATOMIC_STORE(&s->counters.shared.tg[tgid - 1]->last_sess, now_sec);
}
if (s->counters.shared.tg)
HA_ATOMIC_STORE(&s->counters.shared.tg[tgid - 1]->last_sess, ns_to_sec(now_ns));
}
/* returns the current server throttle rate between 0 and 100% */

View File

@ -339,7 +339,7 @@ struct global_ssl {
char **passphrase_cmd;
int passphrase_cmd_args_cnt;
unsigned int certificate_compression:1; /* allow to explicitly disable certificate compression */
unsigned int certificate_compression:1; /* allow to explicitely disable certificate compression */
};
/* The order here matters for picking a default context,

View File

@ -54,7 +54,6 @@ extern struct xprt_ops ssl_sock;
extern int ssl_capture_ptr_index;
extern int ssl_keylog_index;
extern int ssl_client_sni_index;
extern int ssl_crtname_index;
extern struct pool_head *pool_head_ssl_keylog;
extern struct pool_head *pool_head_ssl_keylog_str;
extern struct list openssl_providers;

View File

@ -37,7 +37,7 @@
#define STAT_F_CHUNKED 0x00000040 /* use chunked encoding (HTTP/1.1) */
#define STAT_F_JSON_SCHM 0x00000080 /* dump the json schema */
#define STAT_F_SHOWVER 0x00000100 /* conf: report the version and reldate */
#define STAT_F_HIDEVER 0x00000100 /* conf: do not report the version and reldate */
#define STAT_F_SHNODE 0x00000200 /* conf: show node name */
#define STAT_F_SHDESC 0x00000400 /* conf: show description */
#define STAT_F_SHLGNDS 0x00000800 /* conf: show legends */

View File

@ -73,9 +73,7 @@ enum se_flags {
SE_FL_DETACHED = 0x00000010, /* The endpoint is detached (no mux/no applet) */
SE_FL_ORPHAN = 0x00000020, /* The endpoint is orphan (no stream connector) */
SE_FL_APP_STARTED= 0x00000040, /* the application layer has really started */
/* unused: 0x00000080 */
/* unused: 0x00000040 .. 0x00000080 */
SE_FL_SHRD = 0x00000100, /* read shut, draining extra data */
SE_FL_SHRR = 0x00000200, /* read shut, resetting extra data */
@ -137,12 +135,12 @@ static forceinline char *se_show_flags(char *buf, size_t len, const char *delim,
_(0);
/* flags */
_(SE_FL_T_MUX, _(SE_FL_T_APPLET, _(SE_FL_DETACHED, _(SE_FL_ORPHAN,
_(SE_FL_APP_STARTED, _(SE_FL_SHRD, _(SE_FL_SHRR, _(SE_FL_SHWN, _(SE_FL_SHWS,
_(SE_FL_SHRD, _(SE_FL_SHRR, _(SE_FL_SHWN, _(SE_FL_SHWS,
_(SE_FL_NOT_FIRST, _(SE_FL_WEBSOCKET, _(SE_FL_EOI, _(SE_FL_EOS,
_(SE_FL_ERROR, _(SE_FL_ERR_PENDING, _(SE_FL_RCV_MORE,
_(SE_FL_WANT_ROOM, _(SE_FL_EXP_NO_DATA, _(SE_FL_MAY_FASTFWD_PROD, _(SE_FL_MAY_FASTFWD_CONS,
_(SE_FL_WAIT_FOR_HS, _(SE_FL_KILL_CONN, _(SE_FL_WAIT_DATA,
_(SE_FL_WONT_CONSUME, _(SE_FL_HAVE_NO_DATA, _(SE_FL_APPLET_NEED_CONN))))))))))))))))))))))))));
_(SE_FL_WONT_CONSUME, _(SE_FL_HAVE_NO_DATA, _(SE_FL_APPLET_NEED_CONN)))))))))))))))))))))))));
/* epilogue */
_(~0U);
return buf;

View File

@ -45,7 +45,8 @@ void se_shutdown(struct sedesc *sedesc, enum se_shut_mode mode);
struct stconn *sc_new_from_endp(struct sedesc *sedesc, struct session *sess, struct buffer *input);
struct stconn *sc_new_from_strm(struct stream *strm, unsigned int flags);
struct stconn *sc_new_from_check(struct check *check);
struct stconn *sc_new_from_check(struct check *check, unsigned int flags);
struct stconn *sc_new_from_haterm(struct sedesc *sd, struct session *sess, struct buffer *input);
void sc_free(struct stconn *sc);
int sc_attach_mux(struct stconn *sc, void *target, void *ctx);

View File

@ -130,22 +130,20 @@ struct notification {
* on return.
*/
#define TASK_COMMON \
unsigned int state; /* task state : bitfield of TASK_ */ \
int tid; /* tid of task/tasklet. <0 = local for tasklet, unbound for task */ \
struct task *(*process)(struct task *t, void *ctx, unsigned int state); /* the function which processes the task */ \
void *context; /* the task's context */ \
const struct ha_caller *caller; /* call place of last wakeup(); 0 on init, -1 on free */ \
uint32_t wake_date; /* date of the last task wakeup */ \
unsigned int calls; /* number of times process was called */ \
TASK_DEBUG_STORAGE; \
short last_run; /* 16-bit now_ms of last run */
/* a 16- or 48-bit hole remains here and is used by task */
struct { \
unsigned int state; /* task state : bitfield of TASK_ */ \
int tid; /* tid of task/tasklet. <0 = local for tasklet, unbound for task */ \
struct task *(*process)(struct task *t, void *ctx, unsigned int state); /* the function which processes the task */ \
void *context; /* the task's context */ \
const struct ha_caller *caller; /* call place of last wakeup(); 0 on init, -1 on free */ \
uint32_t wake_date; /* date of the last task wakeup */ \
unsigned int calls; /* number of times process was called */ \
TASK_DEBUG_STORAGE; \
}
/* The base for all tasks */
struct task {
TASK_COMMON; /* must be at the beginning! */
short nice; /* task prio from -1024 to +1024 */
int expire; /* next expiration date for this task, in ticks */
struct eb32_node rq; /* ebtree node used to hold the task in the run queue */
/* WARNING: the struct task is often aliased as a struct tasklet when
* it is NOT in the run queue. The tasklet has its struct list here
@ -153,12 +151,14 @@ struct task {
* ever reorder these fields without taking this into account!
*/
struct eb32_node wq; /* ebtree node used to hold the task in the wait queue */
int expire; /* next expiration date for this task, in ticks */
short nice; /* task prio from -1024 to +1024 */
/* 16-bit hole here */
};
/* lightweight tasks, without priority, mainly used for I/Os */
struct tasklet {
TASK_COMMON; /* must be at the beginning! */
/* 48-bit hole here */
struct list list;
/* WARNING: the struct task is often aliased as a struct tasklet when
* it is not in the run queue. The task has its struct rq here where

View File

@ -104,15 +104,10 @@ enum tcpcheck_rule_type {
TCPCHK_ACT_ACTION_KW, /* custom registered action_kw rule. */
};
#define TCPCHK_FL_NONE 0x00000000
#define TCPCHK_FL_UNUSED_TCP_RS 0x00000001 /* An unused tcp-check ruleset exists for the current proxy */
#define TCPCHK_FL_UNUSED_HTTP_RS 0x00000002 /* An unused http-check ruleset exists for the current proxy */
#define TCPCHK_FL_UNUSED_RS 0x00000003 /* Mask for unused ruleset */
#define TCPCHK_FL_USE_SSL 0x00000004 /* tcp-check uses SSL connection */
#define TCPCHK_RULES_NONE 0x00000000
#define TCPCHK_RULES_DISABLE404 0x00000001 /* Disable a server on a 404 response wht HTTP health checks */
#define TCPCHK_RULES_SNDST 0x00000002 /* send the state of each server along with HTTP health checks */
#define TCPCHK_RULES_NONE 0x00000000
#define TCPCHK_RULES_UNUSED_TCP_RS 0x00000001 /* An unused tcp-check ruleset exists */
#define TCPCHK_RULES_UNUSED_HTTP_RS 0x00000002 /* An unused http-check ruleset exists */
#define TCPCHK_RULES_UNUSED_RS 0x00000003 /* Mask for unused ruleset */
#define TCPCHK_RULES_PGSQL_CHK 0x00000010
#define TCPCHK_RULES_REDIS_CHK 0x00000020
@ -126,7 +121,6 @@ enum tcpcheck_rule_type {
/* Unused 0x000000A0..0x00000FF0 (reserved for future proto) */
#define TCPCHK_RULES_TCP_CHK 0x00000FF0
#define TCPCHK_RULES_PROTO_CHK 0x00000FF0 /* Mask to cover protocol check */
#define TCPCHK_RULES_MAY_USE_SBUF 0x00001000 /* checks may try to use small buffers if possible for the request */
struct check;
struct tcpcheck_connect {
@ -233,24 +227,18 @@ struct tcpcheck_var {
struct list list; /* element to chain tcp-check vars */
};
/* A per-proxy view of a tcp-check ruleset: it references a shared rule list
 * (it does not own it), plus the flags and the variables to preset before the
 * ruleset is executed.
 */
struct tcpcheck_rules {
unsigned int flags; /* flags applied to the rules — presumably TCPCHK_RULES_* bits defined above; confirm against users */
struct list *list; /* the shared list of tcpcheck_rule entries (pointer, not owned) */
struct list preset_vars; /* The list of variable to preset before executing the ruleset */
};
/* A list of tcp-check rules with a name */
struct tcpcheck_ruleset {
struct list rules; /* the list of tcpcheck_rule */
unsigned int flags; /* flags applied to the rules */
struct ebpt_node node; /* node in the shared tree */
struct {
struct list preset_vars; /* The list of variable to preset for healthcheck sections */
unsigned int flags; /* TCPCHECK_FL_* for healthcheck sections */
const char *file; /* file where the section appears */
int line; /* line where the section appears */
} conf; /* config information */
};
struct tcpcheck {
struct tcpcheck_ruleset *rs; /* The tcp-check ruleset to use */
char *healthcheck; /* name of the healthcheck section (NULL if not used) */
struct list preset_vars; /* The list of variable to preset before executing the ruleset */
unsigned int flags; /* TCPCHECK_FL_* */
};
#endif /* _HAPROXY_CHECKS_T_H */

View File

@ -36,7 +36,7 @@ extern struct action_kw_list tcp_check_keywords;
extern struct pool_head *pool_head_tcpcheck_rule;
int tcpcheck_get_step_id(const struct check *check, const struct tcpcheck_rule *rule);
struct tcpcheck_rule *get_first_tcpcheck_rule(const struct tcpcheck_ruleset *rs);
struct tcpcheck_rule *get_first_tcpcheck_rule(const struct tcpcheck_rules *rules);
struct tcpcheck_ruleset *create_tcpcheck_ruleset(const char *name);
struct tcpcheck_ruleset *find_tcpcheck_ruleset(const char *name);
@ -50,9 +50,9 @@ void free_tcpcheck_var(struct tcpcheck_var *var);
int dup_tcpcheck_vars(struct list *dst, const struct list *src);
void free_tcpcheck_vars(struct list *vars);
int add_tcpcheck_expect_str(struct tcpcheck_ruleset *rs, const char *str);
int add_tcpcheck_send_strs(struct tcpcheck_ruleset *rs, const char * const *strs);
int tcpcheck_add_http_rule(struct tcpcheck_rule *chk, struct tcpcheck_ruleset *rs, char **errmsg);
int add_tcpcheck_expect_str(struct tcpcheck_rules *rules, const char *str);
int add_tcpcheck_send_strs(struct tcpcheck_rules *rules, const char * const *strs);
int tcpcheck_add_http_rule(struct tcpcheck_rule *chk, struct tcpcheck_rules *rules, char **errmsg);
void free_tcpcheck_http_hdr(struct tcpcheck_http_hdr *hdr);
@ -106,8 +106,6 @@ int proxy_parse_spop_check_opt(char **args, int cur_arg, struct proxy *curpx, co
int proxy_parse_httpchk_opt(char **args, int cur_arg, struct proxy *curpx, const struct proxy *defpx,
const char *file, int line);
int check_server_tcpcheck(struct server *srv);
void tcp_check_keywords_register(struct action_kw_list *kw_list);
/* Return the struct action_kw associated to a keyword */
@ -135,22 +133,6 @@ static inline int tcpchk_rules_type_to_proto_mode(int tcpchk_rules_type)
return mode;
}
static inline const char *tcpcheck_ruleset_type_to_str(struct tcpcheck_ruleset *rs)
{
switch (rs->flags & TCPCHK_RULES_PROTO_CHK) {
case TCPCHK_RULES_PGSQL_CHK: return "PGSQL"; break;
case TCPCHK_RULES_REDIS_CHK: return "REDIS"; break;
case TCPCHK_RULES_SMTP_CHK: return "SMTP"; break;
case TCPCHK_RULES_HTTP_CHK: return "HTTP"; break;
case TCPCHK_RULES_MYSQL_CHK: return "MYSQL"; break;
case TCPCHK_RULES_LDAP_CHK: return "LDAP"; break;
case TCPCHK_RULES_SSL3_CHK: return "SSL3"; break;
case TCPCHK_RULES_AGENT_CHK: return "AGENT"; break;
case TCPCHK_RULES_SPOP_CHK: return "SPOP"; break;
case TCPCHK_RULES_TCP_CHK: return "TCP"; break;
default: return "???"; break;
}
}
#endif /* _HAPROXY_TCPCHECK_H */
/*

View File

@ -1,7 +0,0 @@
/* Accessors exposing the QUIC transport parameters of a stream transport
 * context. Prototypes only — exact semantics of <context> are assumed from
 * naming (l = local, r = remote); NOTE(review): confirm against the
 * implementation file.
 */
#ifndef _HAPROXY_XPRT_QSTRM_H
#define _HAPROXY_XPRT_QSTRM_H
/* Return the local QUIC transport parameters for <context>. */
const struct quic_transport_params *xprt_qstrm_lparams(const void *context);
/* Return the remote (peer) QUIC transport parameters for <context>. */
const struct quic_transport_params *xprt_qstrm_rparams(const void *context);
#endif /* _HAPROXY_XPRT_QSTRM_H */

View File

@ -1587,7 +1587,7 @@ struct XXH3_state_s {
/*!
* simple alias to preselected XXH3_128bits variant
* simple alias to pre-selected XXH3_128bits variant
*/
XXH_PUBLIC_API XXH_PUREF XXH128_hash_t XXH128(XXH_NOESCAPE const void* data, size_t len, XXH64_hash_t seed);

View File

@ -1,267 +0,0 @@
varnishtest "Health-checks: tests of healthcheck sections"
feature ignore_unknown_macro
#REQUIRE_OPTION=OPENSSL
#REGTEST_TYPE=slow
server s1 {
rxreq
expect req.method == OPTIONS
expect req.url == /
expect req.proto == HTTP/1.0
expect req.http.host == <undef>
txresp
} -start
server s2 {
rxreq
expect req.method == GET
expect req.url == /status
expect req.proto == HTTP/1.1
expect req.http.host == "www.haproxy.org"
txresp
} -start
server s3 {
rxreq
expect req.method == GET
expect req.url == /health
expect req.proto == HTTP/1.1
txresp
} -start
server s4 {
rxreq
expect req.method == GET
expect req.url == /req1
expect req.proto == HTTP/1.1
expect req.http.x-test == "server=srv"
expect req.http.x-haproxy-server-state ~ "UP.+name=be1/srv4"
expect req.bodylen == 0
txresp
accept
rxreq
expect req.method == GET
expect req.url == /req2
expect req.proto == HTTP/1.1
expect req.http.x-test == "server="
expect req.http.x-haproxy-server-state ~ "UP.+name=be1/srv4"
expect req.http.content-length == 17
expect req.bodylen == 17
expect req.body == "health-check body"
txresp
accept
rxreq
expect req.method == GET
expect req.url == /req3
expect req.proto == HTTP/1.0
expect req.http.x-test == <undef>
expect req.http.x-haproxy-server-state ~ "UP.+name=be1/srv4"
expect req.bodylen == 0
txresp
accept
rxreq
expect req.method == GET
expect req.url == /
expect req.proto == HTTP/1.0
expect req.http.x-test == <undef>
expect req.http.x-haproxy-server-state ~ "UP.+name=be1/srv4"
expect req.bodylen == 24
expect req.body == "health-check on be1-srv4"
txresp
} -start
# REDIS
server s5 {
recv 14
send "+PONG\r\n"
} -start
# TCP-CHECK
server s6 {
rxreq
expect req.method == GET
expect req.url == /
expect req.proto == HTTP/1.0
expect req.http.host == "www.haproxy.org"
txresp
} -start
# PgSQL
server s8 {
recv 23
sendhex "52000000170000000A534352414D2D5348412D3235360000"
} -start
# SMTP
server s9 {
send "220 smtp-check.vtc SMTP Server\r\n"
recv 17
send "250-smtp-check.vtc\r\n"
send "250-KEYWORD\r\n"
send "250 LAST KEYWORD\r\n"
recv 6
send "221 smtp-check.vtc closing\r\n"
} -start
# MySQL
server s10 {
sendhex "4A0000000A382E302E3139000A0000006F3C025E6249410D00FFFFFF0200FFC715000000000000000000007C182159106E2761144322200063616368696E675F736861325F70617373776F726400"
recv 47
sendhex "0700000200000002000000"
} -start
# LDAP
server s11 {
recv 14
sendhex "308400000010020101 61 84000000070A01"
sendhex "00 04000400"
} -start
# SPOP
server s12 {
recv 82
sendhex "00000036 65 00000001 0000 0776657273696F6E 0803322E30 0E6D61782D6672616D652D73697A65 03FCF0 060C6361706162696C6974696573 0800"
} -start
syslog S1 -level notice {
recv
expect ~ "[^:\\[ ]\\[${h1_pid}\\]: Health check for server be1/srv[1-4] succeeded"
recv
expect ~ "[^:\\[ ]\\[${h1_pid}\\]: Health check for server be1/srv[1-4] succeeded"
recv
expect ~ "[^:\\[ ]\\[${h1_pid}\\]: Health check for server be1/srv[1-4] succeeded"
recv
expect ~ "[^:\\[ ]\\[${h1_pid}\\]: Health check for server be1/srv[1-4] succeeded"
} -start
syslog S2 -level notice {
recv
expect ~ "[^:\\[ ]\\[${h1_pid}\\]: Health check for server be2/srv[0-9]+ succeeded"
recv
expect ~ "[^:\\[ ]\\[${h1_pid}\\]: Health check for server be2/srv[0-9]+ succeeded"
recv
expect ~ "[^:\\[ ]\\[${h1_pid}\\]: Health check for server be2/srv[0-9] succeeded"
recv
expect ~ "[^:\\[ ]\\[${h1_pid}\\]: Health check for server be2/srv[0-9]+ succeeded"
recv
expect ~ "[^:\\[ ]\\[${h1_pid}\\]: Health check for server be2/srv[0-9]+ succeeded"
recv
expect ~ "[^:\\[ ]\\[${h1_pid}\\]: Health check for server be2/srv[0-9]+ succeeded"
recv
expect ~ "[^:\\[ ]\\[${h1_pid}\\]: Health check for server be2/srv[0-9]+ succeeded"
recv
expect ~ "[^:\\[ ]\\[${h1_pid}\\]: Health check for server be2/srv[0-9]+ succeeded"
} -start
haproxy h1 -conf {
global
.if feature(THREAD)
thread-groups 1
.endif
.if !ssllib_name_startswith(AWS-LC)
tune.ssl.default-dh-param 2048
.endif
defaults
mode tcp
timeout client "${HAPROXY_TEST_TIMEOUT-5s}"
timeout server "${HAPROXY_TEST_TIMEOUT-5s}"
timeout connect "${HAPROXY_TEST_TIMEOUT-5s}"
healthcheck http-status
type httpchk GET /status HTTP/1.1 www.haproxy.org
healthcheck http-health
type httpchk
http-check send meth GET uri /health ver HTTP/1.1
healthcheck http-complex
http-check send-state
http-check connect addr ${s4_addr}:${s4_port}
http-check set-var(check.server) "str(srv)"
http-check set-var(check.path) "str(/req1)"
http-check send meth GET uri-lf "%[var(check.path)]" ver HTTP/1.1 hdr x-test "server=%[var(check.server)]"
http-check expect status 200
http-check connect addr ${s4_addr} port ${s4_port}
http-check unset-var(check.server)
http-check set-var(check.path) "str(/req2)"
http-check send meth GET uri-lf "%[var(check.path)]" ver HTTP/1.1 hdr x-test "server=%[var(check.server)]" body "health-check body"
http-check expect rstatus "^2[0-9]{2}"
http-check connect addr ${s4_addr} port ${s4_port}
http-check set-var(check.path) "str(/req3)"
http-check send meth GET uri-lf "%[var(check.path)]"
http-check expect rstatus "^2[0-9]{2}"
http-check connect addr ${s4_addr} port ${s4_port}
http-check unset-var(check.path)
http-check send meth GET uri-lf "%[var(check.path)]" body-lf "health-check on %[be_name]-%[srv_name]"
## implicit expect rule
type httpchk
healthcheck tcpchk
type tcp-check
tcp-check connect
tcp-check send GET\ /\ HTTP/1.0\r\n
tcp-check send Host:\ www.haproxy.org\r\n
tcp-check send \r\n
tcp-check expect rstring (2..|3..)
tcp-check connect addr ${h1_li6_addr} port ${h1_li6_port} ssl
tcp-check send GET\ /\ HTTP/1.0\r\n
tcp-check send Host:\ www.haproxy.org\r\n
tcp-check send \r\n
tcp-check expect rstring (2..|3..)
healthcheck redis
type redis-check
healthcheck sslchk
type ssl-hello-chk
healthcheck pgchk
type pgsql-check user postgres
healthcheck smtpchk
type smtpchk EHLO domain.tld
healthcheck mysqlchk
type mysql-check user user
healthcheck ldapchk
type ldap-check
healthcheck spopchk
type spop-check
listen li6
mode http
bind "fd@${li6}" ssl crt ${testdir}/certs/common.pem
http-request return status 200
backend be1
log ${S1_addr}:${S1_port} daemon
option log-health-checks
option httpchk
server srv1 ${s1_addr}:${s1_port} check inter 100ms rise 1 fall 1
server srv2 ${s2_addr}:${s2_port} check inter 100ms rise 1 fall 1 healthcheck http-status
server srv3 ${s3_addr}:${s3_port} check inter 100ms rise 1 fall 1 healthcheck http-health
server srv4 ${s4_addr}:${s4_port} check inter 100ms rise 1 fall 1 healthcheck http-complex
backend be2
log ${S2_addr}:${S2_port} daemon
option log-health-checks
server srv5 ${s5_addr}:${s5_port} check inter 100ms rise 1 fall 1 healthcheck redis
server srv6 ${s6_addr}:${s6_port} check inter 100ms rise 1 fall 1 healthcheck tcpchk verify none
server srv7 ${h1_li6_addr}:${h1_li6_port} check inter 100ms rise 1 fall 1 healthcheck sslchk
server srv8 ${s8_addr}:${s8_port} check inter 100ms rise 1 fall 1 healthcheck pgchk
server srv9 ${s9_addr}:${s9_port} check inter 100ms rise 1 fall 1 healthcheck smtpchk
server srv10 ${s10_addr}:${s10_port} check inter 100ms rise 1 fall 1 healthcheck mysqlchk
server srv11 ${s11_addr}:${s11_port} check inter 100ms rise 1 fall 1 healthcheck ldapchk
server srv12 ${s12_addr}:${s12_port} check inter 100ms rise 1 fall 1 healthcheck spopchk
} -start
syslog S1 -wait
syslog S2 -wait

View File

@ -1,46 +0,0 @@
-- FilterA: trivial HTX-capable dummy filter that logs which side of the
-- stream (request or response) is entering analysis. External contract is
-- unchanged: same id, same flags, same log strings, same registration name.
FilterA = {}
FilterA.id = "A filter"
FilterA.flags = filter.FLT_CFG_FL_HTX
FilterA.__index = FilterA

-- Allocate a fresh per-stream instance of the filter.
function FilterA:new()
    return setmetatable({}, FilterA)
end

-- Invoked when channel analysis starts; emit a direction tag to the logs.
function FilterA:start_analyze(txn, chn)
    local tag = chn:is_resp() and "FilterA.resp" or "FilterA.req"
    core.Info(tag)
end

-- Register the filter; the factory callback simply hands back the instance.
core.register_filter("FilterA", FilterA, function(flt, args)
    return flt
end)
-- FilterB: counterpart of FilterA, logging the analyzed channel direction
-- with a "FilterB." prefix.
FilterB = {}
-- BUG FIX: the id was copy-pasted from FilterA as "A filter", leaving two
-- distinct filters sharing the same identifier; FilterB now has its own id.
FilterB.id = "B filter"
FilterB.flags = filter.FLT_CFG_FL_HTX
FilterB.__index = FilterB

-- Allocate a fresh per-stream instance of the filter.
function FilterB:new()
    local flt_instance = {}
    setmetatable(flt_instance, FilterB)
    return flt_instance
end

-- Log whether the request or the response channel is being analyzed.
function FilterB:start_analyze(txn, chn)
    if chn:is_resp() then
        core.Info("FilterB.resp")
    else
        core.Info("FilterB.req")
    end
end

core.register_filter("FilterB", FilterB, function(flt, args)
    return flt
end)

View File

@ -1,86 +0,0 @@
varnishtest "Test filter-sequence feature"
feature cmd "$HAPROXY_PROGRAM -cc 'version_atleast(3.4-dev0)'"
feature ignore_unknown_macro
#REQUIRE_OPTIONS=LUA
barrier b1 cond 2 -cyclic
server s1 {
rxreq
txresp
} -repeat 2 -start
syslog Slg1 -level info {
recv
expect ~ ".* haproxy ${h1_pid} .* FilterA.req"
recv
expect ~ ".* haproxy ${h1_pid} .* FilterB.req"
recv
expect ~ ".* haproxy ${h1_pid} .* FilterA.resp"
recv
expect ~ ".* haproxy ${h1_pid} .* FilterB.resp"
barrier b1 sync
recv
expect ~ ".* haproxy ${h1_pid} .* FilterB.req"
recv
expect ~ ".* haproxy ${h1_pid} .* FilterA.req"
recv
expect ~ ".* haproxy ${h1_pid} .* FilterB.resp"
} -start
haproxy h1 -conf {
global
tune.lua.bool-sample-conversion normal
lua-load ${testdir}/dummy_filters.lua
log udp@${Slg1_addr}:${Slg1_port} format rfc5424 local0
defaults
timeout connect "${HAPROXY_TEST_TIMEOUT-5s}"
timeout client "${HAPROXY_TEST_TIMEOUT-5s}"
timeout server "${HAPROXY_TEST_TIMEOUT-5s}"
listen fe1
mode http
filter lua.FilterA
filter lua.FilterB
bind "fd@${fe1}"
# test normal sequence (inherited from filter ordering)
server app1 ${s1_addr}:${s1_port}
listen fe2
mode http
filter lua.FilterA
filter lua.FilterB
# test forced filter-sequence on request
filter-sequence request lua.FilterB,lua.FilterA
# ignore filterA on response
filter-sequence response lua.FilterB
bind "fd@${fe2}"
server app1 ${s1_addr}:${s1_port}
} -start
client c0 -connect ${h1_fe1_sock} {
txreq -url "/"
rxresp
expect resp.status == 200
} -start -wait
# Wait matching log messages
barrier b1 sync
client c1 -connect ${h1_fe2_sock} {
txreq -url "/"
rxresp
expect resp.status == 200
} -start -wait
syslog Slg1 -wait

View File

@ -336,16 +336,6 @@ dump_commit_matrix | column -t | \
elif [ -n "$SINCELAST" ]; then
echo "Found ${#since_last[@]} commit(s) added to branch $REF since last backported commit $last_bkp:"
echo
if [ -z "$QUIET" ]; then
for c in "${since_last[@]}"; do
echo "$(git log -1 --pretty=" %h | %s" "$c")"
done
echo
echo "In order to show and/or apply them all to current branch :"
echo
echo " git show --pretty=format:'%C(yellow)commit %H%C(normal)%nAuthor: %an <%ae>%nDate: %aD%n%n%C(green)%C(bold)git cherry-pick -sx %h%n%n%w(0,4,4)%B%N' ${since_last[@]}"
echo
fi
echo " git cherry-pick -sx ${since_last[@]}"
echo
elif [ -n "$MISSING" -a ${#left_commits[@]} -eq 0 ]; then

View File

@ -4,7 +4,6 @@
* Implements the ACMEv2 RFC 8555 protocol
*/
#include "haproxy/ticks.h"
#include <stddef.h>
#include <stdlib.h>
#include <string.h>
@ -15,9 +14,7 @@
#include <haproxy/acme-t.h>
#include <haproxy/acme_resolvers.h>
#include <haproxy/base64.h>
#include <haproxy/intops.h>
#include <haproxy/cfgparse.h>
#include <haproxy/cli.h>
#include <haproxy/errors.h>
@ -26,7 +23,6 @@
#include <haproxy/list.h>
#include <haproxy/log.h>
#include <haproxy/pattern.h>
#include <haproxy/resolvers.h>
#include <haproxy/sink.h>
#include <haproxy/ssl_ckch.h>
#include <haproxy/ssl_gencert.h>
@ -115,23 +111,18 @@ static void acme_trace(enum trace_level level, uint64_t mask, const struct trace
}
chunk_appendf(&trace_buf, ", st: ");
switch (ctx->state) {
case ACME_RESOURCES: chunk_appendf(&trace_buf, "ACME_RESOURCES"); break;
case ACME_NEWNONCE: chunk_appendf(&trace_buf, "ACME_NEWNONCE"); break;
case ACME_CHKACCOUNT: chunk_appendf(&trace_buf, "ACME_CHKACCOUNT"); break;
case ACME_NEWACCOUNT: chunk_appendf(&trace_buf, "ACME_NEWACCOUNT"); break;
case ACME_NEWORDER: chunk_appendf(&trace_buf, "ACME_NEWORDER"); break;
case ACME_AUTH: chunk_appendf(&trace_buf, "ACME_AUTH"); break;
case ACME_CLI_WAIT : chunk_appendf(&trace_buf, "ACME_CLI_WAIT"); break;
case ACME_INITIAL_DELAY: chunk_appendf(&trace_buf, "ACME_INITIAL_DELAY"); break;
case ACME_RSLV_RETRY_DELAY: chunk_appendf(&trace_buf, "ACME_RSLV_RETRY_DELAY"); break;
case ACME_RSLV_TRIGGER: chunk_appendf(&trace_buf, "ACME_RSLV_TRIGGER"); break;
case ACME_RSLV_READY: chunk_appendf(&trace_buf, "ACME_RSLV_READY"); break;
case ACME_CHALLENGE: chunk_appendf(&trace_buf, "ACME_CHALLENGE"); break;
case ACME_CHKCHALLENGE: chunk_appendf(&trace_buf, "ACME_CHKCHALLENGE"); break;
case ACME_FINALIZE: chunk_appendf(&trace_buf, "ACME_FINALIZE"); break;
case ACME_CHKORDER: chunk_appendf(&trace_buf, "ACME_CHKORDER"); break;
case ACME_CERTIFICATE: chunk_appendf(&trace_buf, "ACME_CERTIFICATE"); break;
case ACME_END: chunk_appendf(&trace_buf, "ACME_END"); break;
case ACME_RESOURCES: chunk_appendf(&trace_buf, "ACME_RESOURCES"); break;
case ACME_NEWNONCE: chunk_appendf(&trace_buf, "ACME_NEWNONCE"); break;
case ACME_CHKACCOUNT: chunk_appendf(&trace_buf, "ACME_CHKACCOUNT"); break;
case ACME_NEWACCOUNT: chunk_appendf(&trace_buf, "ACME_NEWACCOUNT"); break;
case ACME_NEWORDER: chunk_appendf(&trace_buf, "ACME_NEWORDER"); break;
case ACME_AUTH: chunk_appendf(&trace_buf, "ACME_AUTH"); break;
case ACME_CHALLENGE: chunk_appendf(&trace_buf, "ACME_CHALLENGE"); break;
case ACME_CHKCHALLENGE: chunk_appendf(&trace_buf, "ACME_CHKCHALLENGE"); break;
case ACME_FINALIZE: chunk_appendf(&trace_buf, "ACME_FINALIZE"); break;
case ACME_CHKORDER: chunk_appendf(&trace_buf, "ACME_CHKORDER"); break;
case ACME_CERTIFICATE: chunk_appendf(&trace_buf, "ACME_CERTIFICATE"); break;
case ACME_END: chunk_appendf(&trace_buf, "ACME_END"); break;
}
}
if (mask & (ACME_EV_REQ|ACME_EV_RES)) {
@ -199,8 +190,6 @@ struct acme_cfg *new_acme_cfg(const char *name)
ret->linenum = 0;
ret->challenge = strdup("http-01"); /* default value */
ret->dns_delay = 30; /* default DNS re-trigger delay in seconds */
ret->dns_timeout = 600; /* default DNS retry timeout */
/* The default generated keys are EC-384 */
ret->key.type = EVP_PKEY_EC;
@ -277,6 +266,7 @@ static int cfg_parse_acme(const char *file, int linenum, char **args, int kwm)
mark_tainted(TAINTED_CONFIG_EXP_KW_DECLARED);
if (strcmp(args[0], "acme") == 0) {
struct acme_cfg *tmp_acme = acme_cfgs;
if (alertif_too_many_args(1, file, linenum, args, &err_code))
goto out;
@ -302,7 +292,7 @@ static int cfg_parse_acme(const char *file, int linenum, char **args, int kwm)
* name */
err_code |= ERR_ALERT | ERR_FATAL;
ha_alert("parsing [%s:%d]: acme section '%s' already exists (%s:%d).\n",
file, linenum, args[1], cur_acme->filename, cur_acme->linenum);
file, linenum, args[1], tmp_acme->filename, tmp_acme->linenum);
goto out;
}
@ -430,18 +420,6 @@ static int cfg_parse_acme_kws(char **args, int section_type, struct proxy *curpx
ha_alert("parsing [%s:%d]: out of memory.\n", file, linenum);
goto out;
}
/* require the CLI by default */
if ((strcasecmp("dns-01", args[1]) == 0) && (cur_acme->cond_ready == 0)) {
cur_acme->cond_ready = ACME_RDY_CLI;
}
if ((strcasecmp("http-01", args[1]) == 0) && (cur_acme->cond_ready != 0)) {
ha_alert("parsing [%s:%d]: keyword '%s' in '%s' section, \"http-01\" is not compatible with the \"challenge-ready\" option\n", file, linenum, args[0], cursection);
err_code |= ERR_ALERT | ERR_FATAL;
goto out;
}
} else if (strcmp(args[0], "map") == 0) {
/* save the map name for thumbprint + token storage */
if (!*args[1]) {
@ -459,102 +437,6 @@ static int cfg_parse_acme_kws(char **args, int section_type, struct proxy *curpx
ha_alert("parsing [%s:%d]: out of memory.\n", file, linenum);
goto out;
}
} else if (strcmp(args[0], "challenge-ready") == 0) {
char *str = args[1];
char *saveptr;
if (!*args[1]) {
ha_alert("parsing [%s:%d]: keyword '%s' in '%s' section requires an argument\n", file, linenum, args[0], cursection);
err_code |= ERR_ALERT | ERR_FATAL;
goto out;
}
if (alertif_too_many_args(1, file, linenum, args, &err_code))
goto out;
cur_acme->cond_ready = 0;
while ((str = strtok_r(str, ",", &saveptr))) {
if (strcmp(str, "cli") == 0) {
/* wait for the CLI-ready to run the challenge */
cur_acme->cond_ready |= ACME_RDY_CLI;
} else if (strcmp(str, "dns") == 0) {
/* wait for the DNS-check to run the challenge */
cur_acme->cond_ready |= ACME_RDY_DNS;
} else if (strcmp(str, "delay") == 0) {
/* wait for the DNS-check to run the challenge */
cur_acme->cond_ready |= ACME_RDY_DELAY;
} else if (strcmp(str, "none") == 0) {
if (cur_acme->cond_ready || (saveptr && *saveptr)) {
err_code |= ERR_ALERT | ERR_FATAL;
ha_alert("parsing [%s:%d]: keyword '%s' in '%s' can't combine 'none' with other keywords.\n", file, linenum, args[0], cursection);
goto out;
}
cur_acme->cond_ready = ACME_RDY_NONE;
} else {
err_code |= ERR_ALERT | ERR_FATAL;
ha_alert("parsing [%s:%d]: keyword '%s' in '%s' section requires parameter separated by commas: 'cli', 'dns' or 'none'\n", file, linenum, args[0], cursection);
goto out;
}
str = NULL;
}
if ((strcasecmp("http-01", cur_acme->challenge) == 0) && (cur_acme->cond_ready != 0)) {
ha_alert("parsing [%s:%d]: keyword '%s' in '%s' section, \"http-01\" is not compatible with the \"challenge-ready\" option\n", file, linenum, args[0], cursection);
err_code |= ERR_ALERT | ERR_FATAL;
goto out;
}
} else if (strcmp(args[0], "dns-delay") == 0) {
const char *res;
if (!*args[1]) {
ha_alert("parsing [%s:%d]: keyword '%s' in '%s' section requires an argument\n", file, linenum, args[0], cursection);
err_code |= ERR_ALERT | ERR_FATAL;
goto out;
}
if (alertif_too_many_args(1, file, linenum, args, &err_code))
goto out;
res = parse_time_err(args[1], &cur_acme->dns_delay, TIME_UNIT_S);
if (res == PARSE_TIME_OVER) {
ha_alert("parsing [%s:%d]: timer overflow in argument <%s> to '%s'\n", file, linenum, args[1], args[0]);
err_code |= ERR_ALERT | ERR_FATAL;
goto out;
} else if (res == PARSE_TIME_UNDER) {
ha_alert("parsing [%s:%d]: timer underflow in argument <%s> to '%s'\n", file, linenum, args[1], args[0]);
err_code |= ERR_ALERT | ERR_FATAL;
goto out;
} else if (res) {
ha_alert("parsing [%s:%d]: unexpected character '%c' in argument to '%s'\n", file, linenum, *res, args[0]);
err_code |= ERR_ALERT | ERR_FATAL;
goto out;
}
} else if (strcmp(args[0], "dns-timeout") == 0) {
const char *res;
if (!*args[1]) {
ha_alert("parsing [%s:%d]: keyword '%s' in '%s' section requires an argument\n", file, linenum, args[0], cursection);
err_code |= ERR_ALERT | ERR_FATAL;
goto out;
}
if (alertif_too_many_args(1, file, linenum, args, &err_code))
goto out;
res = parse_time_err(args[1], &cur_acme->dns_timeout, TIME_UNIT_S);
if (res == PARSE_TIME_OVER) {
ha_alert("parsing [%s:%d]: timer overflow in argument <%s> to '%s'\n", file, linenum, args[1], args[0]);
err_code |= ERR_ALERT | ERR_FATAL;
goto out;
} else if (res == PARSE_TIME_UNDER) {
ha_alert("parsing [%s:%d]: timer underflow in argument <%s> to '%s'\n", file, linenum, args[1], args[0]);
err_code |= ERR_ALERT | ERR_FATAL;
goto out;
} else if (res) {
ha_alert("parsing [%s:%d]: unexpected character '%c' in argument to '%s'\n", file, linenum, *res, args[0]);
err_code |= ERR_ALERT | ERR_FATAL;
goto out;
}
} else if (strcmp(args[0], "reuse-key") == 0) {
if (!*args[1]) {
ha_alert("parsing [%s:%d]: keyword '%s' in '%s' section requires an argument\n", file, linenum, args[0], cursection);
@ -959,9 +841,6 @@ static struct cfg_kw_list cfg_kws_acme = {ILH, {
{ CFG_ACME, "curves", cfg_parse_acme_cfg_key },
{ CFG_ACME, "map", cfg_parse_acme_kws },
{ CFG_ACME, "reuse-key", cfg_parse_acme_kws },
{ CFG_ACME, "challenge-ready", cfg_parse_acme_kws },
{ CFG_ACME, "dns-delay", cfg_parse_acme_kws },
{ CFG_ACME, "dns-timeout", cfg_parse_acme_kws },
{ CFG_ACME, "acme-vars", cfg_parse_acme_vars_provider },
{ CFG_ACME, "provider-name", cfg_parse_acme_vars_provider },
{ CFG_GLOBAL, "acme.scheduler", cfg_parse_global_acme_sched },
@ -1000,7 +879,6 @@ static void acme_ctx_destroy(struct acme_ctx *ctx)
istfree(&auth->chall);
istfree(&auth->token);
istfree(&auth->dns);
acme_rslv_free(auth->rslv);
next = auth->next;
free(auth);
auth = next;
@ -1310,7 +1188,7 @@ int acme_res_certificate(struct task *task, struct acme_ctx *ctx, char **errmsg)
}
/* get the next retry timing */
if (isteqi(hdr->n, ist("Retry-After"))) {
ctx->retryafter = __strl2uic(hdr->v.ptr, hdr->v.len);
ctx->retryafter = atol(hdr->v.ptr);
}
}
@ -1383,7 +1261,7 @@ int acme_res_chkorder(struct task *task, struct acme_ctx *ctx, char **errmsg)
}
/* get the next retry timing */
if (isteqi(hdr->n, ist("Retry-After"))) {
ctx->retryafter = __strl2uic(hdr->v.ptr, hdr->v.len);
ctx->retryafter = atol(hdr->v.ptr);
}
}
@ -1466,6 +1344,7 @@ int acme_req_finalize(struct task *task, struct acme_ctx *ctx, char **errmsg)
csr->data = ret;
chunk_printf(req_in, "{ \"csr\": \"%.*s\" }", (int)csr->data, csr->area);
OPENSSL_free(data);
if (acme_jws_payload(req_in, ctx->nonce, ctx->finalize, ctx->cfg->account.pkey, ctx->kid, req_out, errmsg) != 0)
@ -1479,7 +1358,6 @@ int acme_req_finalize(struct task *task, struct acme_ctx *ctx, char **errmsg)
error:
memprintf(errmsg, "couldn't request the finalize URL");
out:
OPENSSL_free(data);
free_trash_chunk(req_in);
free_trash_chunk(req_out);
free_trash_chunk(csr);
@ -1513,7 +1391,7 @@ int acme_res_finalize(struct task *task, struct acme_ctx *ctx, char **errmsg)
}
/* get the next retry timing */
if (isteqi(hdr->n, ist("Retry-After"))) {
ctx->retryafter = __strl2uic(hdr->v.ptr, hdr->v.len);
ctx->retryafter = atol(hdr->v.ptr);
}
}
@ -1614,7 +1492,7 @@ enum acme_ret acme_res_challenge(struct task *task, struct acme_ctx *ctx, struct
}
/* get the next retry timing */
if (isteqi(hdr->n, ist("Retry-After"))) {
ctx->retryafter = __strl2uic(hdr->v.ptr, hdr->v.len);
ctx->retryafter = atol(hdr->v.ptr);
}
}
@ -1740,7 +1618,7 @@ int acme_res_auth(struct task *task, struct acme_ctx *ctx, struct acme_auth *aut
}
/* get the next retry timing */
if (isteqi(hdr->n, ist("Retry-After"))) {
ctx->retryafter = __strl2uic(hdr->v.ptr, hdr->v.len);
ctx->retryafter = atol(hdr->v.ptr);
}
}
@ -1776,19 +1654,6 @@ int acme_res_auth(struct task *task, struct acme_ctx *ctx, struct acme_auth *aut
auth->dns = istdup(ist2(t2->area, t2->data));
ret = mjson_get_string(hc->res.buf.area, hc->res.buf.data, "$.status", trash.area, trash.size);
if (ret == -1) {
memprintf(errmsg, "couldn't get a \"status\" from Authorization URL \"%s\"", auth->auth.ptr);
goto error;
}
trash.data = ret;
/* if auth is already valid we need to skip solving challenges */
if (strncasecmp("valid", trash.area, trash.data) == 0) {
auth->validated = 1;
goto out;
}
/* get the multiple challenges and select the one from the configuration */
for (i = 0; ; i++) {
int ret;
@ -1852,13 +1717,8 @@ int acme_res_auth(struct task *task, struct acme_ctx *ctx, struct acme_auth *aut
goto error;
}
/* replace the token by the TXT entry */
istfree(&auth->token);
auth->token = istdup(ist2(dns_record->area, dns_record->data));
if (ctx->cfg->cond_ready & ACME_RDY_CLI)
send_log(NULL, LOG_NOTICE,"acme: %s: dns-01 requires to set the \"_acme-challenge.%.*s\" TXT record to \"%.*s\" and use the \"acme challenge_ready %s domain %.*s\" command over the CLI\n",
ctx->store->path, (int)auth->dns.len, auth->dns.ptr, (int)auth->token.len, auth->token.ptr, ctx->store->path, (int)auth->dns.len, auth->dns.ptr);
send_log(NULL, LOG_NOTICE,"acme: %s: dns-01 requires to set the \"_acme-challenge.%.*s\" TXT record to \"%.*s\" and use the \"acme challenge_ready %s domain %.*s\" command over the CLI\n",
ctx->store->path, (int)auth->dns.len, auth->dns.ptr, (int)dns_record->data, dns_record->area, ctx->store->path, (int)auth->dns.len, auth->dns.ptr);
/* dump to the "dpapi" sink */
line[nmsg++] = ist("acme deploy ");
@ -1901,7 +1761,6 @@ int acme_res_auth(struct task *task, struct acme_ctx *ctx, struct acme_auth *aut
break;
}
out:
ret = 0;
error:
@ -1990,7 +1849,7 @@ int acme_res_neworder(struct task *task, struct acme_ctx *ctx, char **errmsg)
}
/* get the next retry timing */
if (isteqi(hdr->n, ist("Retry-After"))) {
ctx->retryafter = __strl2uic(hdr->v.ptr, hdr->v.len);
ctx->retryafter = atol(hdr->v.ptr);
}
/* get the order URL */
if (isteqi(hdr->n, ist("Location"))) {
@ -2042,6 +1901,11 @@ int acme_res_neworder(struct task *task, struct acme_ctx *ctx, char **errmsg)
goto error;
}
/* if the challenge is not dns-01, consider that the challenge
* is ready because computed by HAProxy */
if (strcasecmp(ctx->cfg->challenge, "dns-01") != 0)
auth->ready = 1;
auth->next = ctx->auths;
ctx->auths = auth;
ctx->next_auth = auth;
@ -2145,7 +2009,7 @@ int acme_res_account(struct task *task, struct acme_ctx *ctx, int newaccount, ch
}
/* get the next retry timing */
if (isteqi(hdr->n, ist("Retry-After"))) {
ctx->retryafter = __strl2uic(hdr->v.ptr, hdr->v.len);
ctx->retryafter = atol(hdr->v.ptr);
}
if (isteqi(hdr->n, ist("Replay-Nonce"))) {
istfree(&ctx->nonce);
@ -2390,196 +2254,18 @@ re:
goto retry;
}
if ((ctx->next_auth = ctx->next_auth->next) == NULL) {
if (strcasecmp(ctx->cfg->challenge, "dns-01") == 0 && ctx->cfg->cond_ready)
st = ACME_CLI_WAIT;
else
st = ACME_CHALLENGE;
st = ACME_CHALLENGE;
ctx->next_auth = ctx->auths;
}
/* call with next auth or do the challenge step */
goto nextreq;
}
break;
case ACME_CLI_WAIT: {
struct acme_auth *auth;
int all_cond_ready = ctx->cfg->cond_ready;
for (auth = ctx->auths; auth != NULL; auth = auth->next) {
all_cond_ready &= auth->ready;
}
/* if everything is ready, let's do the challenge request */
if ((all_cond_ready & ctx->cfg->cond_ready) == ctx->cfg->cond_ready) {
st = ACME_CHALLENGE;
ctx->http_state = ACME_HTTP_REQ;
ctx->state = st;
goto nextreq;
}
/* if we need to wait for the CLI, let's wait */
if ((ctx->cfg->cond_ready & ACME_RDY_CLI) && !(all_cond_ready & ACME_RDY_CLI))
goto wait;
/* next step */
st = ACME_INITIAL_DELAY;
goto nextreq;
}
break;
case ACME_INITIAL_DELAY: {
struct acme_auth *auth;
int all_cond_ready = ctx->cfg->cond_ready;
for (auth = ctx->auths; auth != NULL; auth = auth->next) {
all_cond_ready &= auth->ready;
}
/* if everything is ready, let's do the challenge request */
if ((all_cond_ready & ctx->cfg->cond_ready) == ctx->cfg->cond_ready) {
st = ACME_CHALLENGE;
goto nextreq;
}
/* if we don't have an initial delay, let's trigger */
if (!(ctx->cfg->cond_ready & ACME_RDY_DELAY)) {
st = ACME_RSLV_TRIGGER;
goto nextreq;
}
for (auth = ctx->auths; auth != NULL; auth = auth->next) {
auth->ready |= ACME_RDY_DELAY;
}
/* either trigger the resolution of the challenge */
if (ctx->cfg->cond_ready & ACME_RDY_DNS)
st = ACME_RSLV_TRIGGER;
else
st = ACME_CHALLENGE;
ctx->http_state = ACME_HTTP_REQ;
ctx->state = st;
send_log(NULL, LOG_NOTICE, "acme: %s: dns-01: waiting %ds\n",
ctx->store->path, ctx->cfg->dns_delay);
task->expire = tick_add(now_ms, ctx->cfg->dns_delay * 1000);
return task;
}
break;
case ACME_RSLV_RETRY_DELAY: {
struct acme_auth *auth;
int all_cond_ready = ctx->cfg->cond_ready;
for (auth = ctx->auths; auth != NULL; auth = auth->next) {
all_cond_ready &= auth->ready;
}
/* if everything is ready, let's do the challenge request */
if ((all_cond_ready & ctx->cfg->cond_ready) == ctx->cfg->cond_ready) {
st = ACME_CHALLENGE;
goto nextreq;
}
/* Check if the next resolution would be triggered too
* late according to the dns_timeout and abort is
* necessary. */
if (ctx->dnsstarttime && ns_to_sec(now_ns) + ctx->cfg->dns_delay > ctx->dnsstarttime + ctx->cfg->dns_timeout) {
memprintf(&errmsg, "dns-01: Couldn't resolve the TXT records in %ds.", ctx->cfg->dns_timeout);
goto abort;
}
/* we don't need to wait, we can trigger the resolution
* after the delay */
st = ACME_RSLV_TRIGGER;
ctx->http_state = ACME_HTTP_REQ;
ctx->state = st;
send_log(NULL, LOG_NOTICE, "acme: %s: dns-01: retrying the resolution in %ds\n",
ctx->store->path, ctx->cfg->dns_delay);
task->expire = tick_add(now_ms, ctx->cfg->dns_delay * 1000);
return task;
}
break;
case ACME_RSLV_TRIGGER: {
struct acme_auth *auth;
/* set the start time of the DNS checks so we can apply
* the timeout */
if (ctx->dnsstarttime == 0)
ctx->dnsstarttime = ns_to_sec(now_ns);
/* on timer expiry, re-trigger resolution for non-ready auths */
for (auth = ctx->auths; auth != NULL; auth = auth->next) {
if (auth->ready == ctx->cfg->cond_ready)
continue;
HA_ATOMIC_INC(&ctx->dnstasks);
auth->rslv = acme_rslv_start(auth, &ctx->dnstasks, &errmsg);
if (!auth->rslv)
goto abort;
auth->rslv->acme_task = task;
}
st = ACME_RSLV_READY;
goto wait;
}
break;
case ACME_RSLV_READY: {
struct acme_auth *auth;
int all_ready = 1;
/* if triggered by the CLI, wait for the DNS tasks to
* finish
*/
if (HA_ATOMIC_LOAD(&ctx->dnstasks) != 0)
goto wait;
/* triggered by the latest DNS task */
for (auth = ctx->auths; auth != NULL; auth = auth->next) {
if (auth->ready == ctx->cfg->cond_ready)
continue;
if (auth->rslv->result != RSLV_STATUS_VALID) {
send_log(NULL, LOG_NOTICE, "acme: %s: dns-01: Couldn't get the TXT record for \"_acme-challenge.%.*s\", expected \"%.*s\" (status=%d)\n",
ctx->store->path, (int)auth->dns.len, auth->dns.ptr,
(int)auth->token.len, auth->token.ptr,
auth->rslv->result);
all_ready = 0;
} else {
if (isteq(auth->rslv->txt, auth->token)) {
auth->ready |= ACME_RDY_DNS;
} else {
send_log(NULL, LOG_NOTICE, "acme: %s: dns-01: TXT record mismatch for \"_acme-challenge.%.*s\": expected \"%.*s\", got \"%.*s\"\n",
ctx->store->path, (int)auth->dns.len, auth->dns.ptr,
(int)auth->token.len, auth->token.ptr,
(int)auth->rslv->txt.len, auth->rslv->txt.ptr);
all_ready = 0;
}
}
acme_rslv_free(auth->rslv);
auth->rslv = NULL;
}
if (all_ready) {
st = ACME_CHALLENGE;
ctx->next_auth = ctx->auths;
goto nextreq;
}
/* not all ready yet, retry after dns-delay */
st = ACME_RSLV_RETRY_DELAY;
ctx->http_state = ACME_HTTP_REQ;
ctx->state = st;
goto nextreq;
}
break;
case ACME_CHALLENGE:
if (http_st == ACME_HTTP_REQ) {
/* if challenge is already validated we skip this stage */
if (ctx->next_auth->validated) {
if ((ctx->next_auth = ctx->next_auth->next) == NULL) {
st = ACME_CHKCHALLENGE;
ctx->next_auth = ctx->auths;
}
goto nextreq;
}
/* if the challenge is not ready, wait to be wakeup */
if (ctx->next_auth->ready != ctx->cfg->cond_ready)
if (!ctx->next_auth->ready)
goto wait;
if (acme_req_challenge(task, ctx, ctx->next_auth, &errmsg) != 0)
@ -2606,14 +2292,6 @@ re:
break;
case ACME_CHKCHALLENGE:
if (http_st == ACME_HTTP_REQ) {
/* if challenge is already validated we skip this stage */
if (ctx->next_auth->validated) {
if ((ctx->next_auth = ctx->next_auth->next) == NULL)
st = ACME_FINALIZE;
goto nextreq;
}
if (acme_post_as_get(task, ctx, ctx->next_auth->chall, &errmsg) != 0)
goto retry;
}
@ -2848,9 +2526,9 @@ X509_REQ *acme_x509_req(EVP_PKEY *pkey, char **san)
{
struct buffer *san_trash = NULL;
X509_REQ *x = NULL;
X509_NAME *nm = NULL;
X509_NAME *nm;
STACK_OF(X509_EXTENSION) *exts = NULL;
X509_EXTENSION *ext_san = NULL;
X509_EXTENSION *ext_san;
char *str_san = NULL;
int i = 0;
@ -2881,36 +2559,26 @@ X509_REQ *acme_x509_req(EVP_PKEY *pkey, char **san)
for (i = 0; san[i]; i++) {
chunk_appendf(san_trash, "%sDNS:%s", i ? "," : "", san[i]);
}
if ((str_san = my_strndup(san_trash->area, san_trash->data)) == NULL)
goto error;
str_san = my_strndup(san_trash->area, san_trash->data);
if ((ext_san = X509V3_EXT_conf_nid(NULL, NULL, NID_subject_alt_name, str_san)) == NULL)
goto error;
if (!sk_X509_EXTENSION_push(exts, ext_san))
goto error;
ext_san = NULL; /* handle double-free upon error */
if (!X509_REQ_add_extensions(x, exts))
goto error;
sk_X509_EXTENSION_pop_free(exts, X509_EXTENSION_free);
if (!X509_REQ_sign(x, pkey, EVP_sha256()))
goto error;
sk_X509_EXTENSION_pop_free(exts, X509_EXTENSION_free);
X509_NAME_free(nm);
free(str_san);
free_trash_chunk(san_trash);
return x;
error:
X509_EXTENSION_free(ext_san);
sk_X509_EXTENSION_pop_free(exts, X509_EXTENSION_free);
X509_REQ_free(x);
X509_NAME_free(nm);
free(str_san);
free_trash_chunk(san_trash);
return NULL;
@ -2959,7 +2627,7 @@ EVP_PKEY *acme_gen_tmp_pkey()
/* start an ACME task */
static int acme_start_task(struct ckch_store *store, char **errmsg)
{
struct task *task = NULL;
struct task *task;
struct acme_ctx *ctx = NULL;
struct acme_cfg *cfg;
struct ckch_store *newstore = NULL;
@ -3044,8 +2712,6 @@ err:
HA_RWLOCK_WRUNLOCK(OTHER_LOCK, &acme_lock);
acme_ctx_destroy(ctx);
}
if (task)
task_destroy(task);
memprintf(errmsg, "%sCan't start the ACME client.", *errmsg ? *errmsg : "");
return 1;
}
@ -3055,10 +2721,7 @@ static int cli_acme_renew_parse(char **args, char *payload, struct appctx *appct
struct ckch_store *store = NULL;
char *errmsg = NULL;
if (!cli_has_level(appctx, ACCESS_LVL_ADMIN))
return 1;
if (!*args[2]) {
if (!*args[1]) {
memprintf(&errmsg, ": not enough parameters\n");
goto err;
}
@ -3092,16 +2755,13 @@ static int cli_acme_chall_ready_parse(char **args, char *payload, struct appctx
const char *crt;
const char *dns;
struct acme_ctx *ctx = NULL;
struct acme_auth *auth = NULL;
struct acme_auth *auth;
int found = 0;
int remain = 0;
struct ebmb_node *node = NULL;
if (!cli_has_level(appctx, ACCESS_LVL_ADMIN))
return 1;
if (!*args[2] || !*args[3] || !*args[4]) {
memprintf(&msg, "Not enough parameters: \"acme challenge_ready <certfile> domain <domain>\"\n");
if (!*args[2] && !*args[3] && !*args[4]) {
memprintf(&msg, ": not enough parameters\n");
goto err;
}
@ -3112,18 +2772,17 @@ static int cli_acme_chall_ready_parse(char **args, char *payload, struct appctx
node = ebst_lookup(&acme_tasks, crt);
if (node) {
ctx = ebmb_entry(node, struct acme_ctx, node);
if (ctx->cfg->cond_ready & ACME_RDY_CLI)
auth = ctx->auths;
auth = ctx->auths;
while (auth) {
if (strncmp(dns, auth->dns.ptr, auth->dns.len) == 0) {
if (!(auth->ready & ACME_RDY_CLI)) {
auth->ready |= ACME_RDY_CLI;
if (!auth->ready) {
auth->ready = 1;
found++;
} else {
memprintf(&msg, "ACME challenge for crt \"%s\" and dns \"%s\" was already READY !\n", crt, dns);
}
}
if ((auth->ready & ACME_RDY_CLI) == 0)
if (auth->ready == 0)
remain++;
auth = auth->next;
}
@ -3131,7 +2790,7 @@ static int cli_acme_chall_ready_parse(char **args, char *payload, struct appctx
HA_RWLOCK_WRUNLOCK(OTHER_LOCK, &acme_lock);
if (!found) {
if (!msg)
memprintf(&msg, "Couldn't find an ACME task using crt \"%s\" and dns \"%s\" to set as ready!\n", crt, dns);
memprintf(&msg, "Couldn't find the ACME task using crt \"%s\" and dns \"%s\" !\n", crt, dns);
goto err;
} else {
if (!remain) {
@ -3223,12 +2882,8 @@ end:
return 1;
}
static int cli_acme_parse_status(char **args, char *payload, struct appctx *appctx, void *private)
static int cli_acme_ps(char **args, char *payload, struct appctx *appctx, void *private)
{
if (!cli_has_level(appctx, ACCESS_LVL_ADMIN))
return 1;
return 0;
}
@ -3236,7 +2891,7 @@ static int cli_acme_parse_status(char **args, char *payload, struct appctx *appc
static struct cli_kw_list cli_kws = {{ },{
{ { "acme", "renew", NULL }, "acme renew <certfile> : renew a certificate using the ACME protocol", cli_acme_renew_parse, NULL, NULL, NULL, 0 },
{ { "acme", "status", NULL }, "acme status : show status of certificates configured with ACME", cli_acme_parse_status, cli_acme_status_io_handler, NULL, NULL, 0 },
{ { "acme", "status", NULL }, "acme status : show status of certificates configured with ACME", cli_acme_ps, cli_acme_status_io_handler, NULL, NULL, 0 },
{ { "acme", "challenge_ready", NULL }, "acme challenge_ready <certfile> domain <domain> : notify HAProxy that the ACME challenge is ready", cli_acme_chall_ready_parse, NULL, NULL, NULL, 0 },
{ { NULL }, NULL, NULL, NULL }
}};

View File

@ -1,166 +0,0 @@
/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
* Implements the DNS resolution pre-check for dns-01
*/
#include <haproxy/openssl-compat.h>
#if defined(HAVE_ACME)
#include <stdlib.h>
#include <string.h>
#include <haproxy/acme_resolvers.h>
#include <haproxy/applet.h>
#include <haproxy/obj_type.h>
#include <haproxy/resolvers.h>
#include <haproxy/tools.h>
/* DNS success callback for the dns-01 pre-check resolution.
 *
 * Invoked by the resolvers layer when the resolution attached to <req>
 * completes. Copies the first TXT record found in the response tree into
 * rslv->txt and sets rslv->result to RSLV_STATUS_VALID, or leaves it as
 * RSLV_STATUS_INVALID when no resolution is attached to the requester.
 * In every case the shared pending-task counter (*rslv->dnstasks) is
 * decremented, and the owning ACME task is woken up once it reaches zero.
 *
 * Always returns 1 (resolvers-layer callback convention — TODO confirm
 * the meaning of the return value against the resolvers API).
 */
static int acme_rslv_success_cb(struct resolv_requester *req, struct dns_counters *counters)
{
	struct acme_rslv *rslv = objt_acme_rslv(req->owner);
	struct resolv_resolution *res;
	struct eb32_node *eb32;
	struct resolv_answer_item *item;

	/* the requester's owner may not (or no longer) be an acme_rslv */
	if (!rslv)
		return 1;

	/* assume failure until a response is attached and parsed */
	rslv->result = RSLV_STATUS_INVALID;

	res = req->resolution;
	if (!res)
		goto done;

	/* XXX: must fail on multiple TXT entries for the same dn */

	/* copy the data from the response tree: keep only the first TXT
	 * record and stop scanning */
	for (eb32 = eb32_first(&res->response.answer_tree); eb32 != NULL; eb32 = eb32_next(eb32)) {
		item = eb32_entry(eb32, typeof(*item), link);
		if (item->type == DNS_RTYPE_TXT) {
			int len = item->data_len;

			/* clamp oversized TXT payloads to the DNS name limit */
			if (len > DNS_MAX_NAME_SIZE)
				len = DNS_MAX_NAME_SIZE;
			rslv->txt = istdup(ist2(item->data.target, len));
			break;
		}
	}
	/* NOTE(review): result is marked VALID even when the loop found no
	 * TXT item (rslv->txt stays empty); the consumer is expected to
	 * compare rslv->txt against the expected token itself — confirm */
	rslv->result = RSLV_STATUS_VALID;

done:
	/* if there's no other DNS task for this acme task, wake up acme_task */
	if (HA_ATOMIC_SUB_FETCH(rslv->dnstasks, 1) == 0) {
		if (rslv->acme_task)
			task_wakeup(rslv->acme_task, TASK_WOKEN_MSG);
	}
	return 1;
}
/* error callback, set the error code to rslv->result */
static int acme_rslv_error_cb(struct resolv_requester *req, int error_code)
{
struct acme_rslv *rslv = objt_acme_rslv(req->owner);
if (!rslv)
return 0;
rslv->result = error_code;
if (HA_ATOMIC_SUB_FETCH(rslv->dnstasks, 1) == 0) {
if (rslv->acme_task)
task_wakeup(rslv->acme_task, TASK_WOKEN_MSG);
}
return 0;
}
/* Release an acme_rslv: detach it from the resolvers layer, then free every
 * resource it owns. Safe to call with a NULL pointer.
 */
void acme_rslv_free(struct acme_rslv *rslv)
{
	if (rslv == NULL)
		return;

	/* unlink first so the resolvers layer stops referencing us */
	if (rslv->requester != NULL)
		resolv_unlink_resolution(rslv->requester);

	istfree(&rslv->txt);
	free(rslv->hostname_dn);
	free(rslv);
}
/* Start a dns-01 pre-check resolution for authorization <auth>.
 *
 * Builds the "_acme-challenge.<domain>" hostname from auth->dns, converts
 * it to DNS label form, allocates an acme_rslv bound to the "default"
 * resolvers section, links it to a resolution and triggers it. <dnstasks>
 * is the caller's shared counter of in-flight resolutions, decremented by
 * the success/error callbacks.
 *
 * Returns the new acme_rslv on success, or NULL on error with *errmsg
 * filled (via memprintf) with the reason.
 */
struct acme_rslv *acme_rslv_start(struct acme_auth *auth, unsigned int *dnstasks, char **errmsg)
{
	struct acme_rslv *rslv = NULL;
	struct resolvers *resolvers;
	char hostname[DNS_MAX_NAME_SIZE + 1];
	char dn[DNS_MAX_NAME_SIZE + 1];
	int hostname_len;
	int dn_len;

	/* XXX: allow to change the resolvers section to use */
	resolvers = find_resolvers_by_id("default");
	if (!resolvers) {
		memprintf(errmsg, "couldn't find the \"default\" resolvers section!\n");
		goto error;
	}

	/* dns-01 TXT record lives at _acme-challenge.<domain> */
	hostname_len = snprintf(hostname, sizeof(hostname), "_acme-challenge.%.*s",
	                        (int)auth->dns.len, auth->dns.ptr);
	/* reject a truncated (or failed) snprintf: the name must fit */
	if (hostname_len < 0 || hostname_len >= (int)sizeof(hostname)) {
		memprintf(errmsg, "hostname \"_acme-challenge.%.*s\" too long!\n", (int)auth->dns.len, auth->dns.ptr);
		goto error;
	}

	/* convert to the DNS label form expected by the resolvers layer */
	dn_len = resolv_str_to_dn_label(hostname, hostname_len, dn, sizeof(dn));
	if (dn_len <= 0) {
		memprintf(errmsg, "couldn't convert hostname \"_acme-challenge.%.*s\" into dn label\n", (int)auth->dns.len, auth->dns.ptr);
		goto error;
	}

	rslv = calloc(1, sizeof(*rslv));
	if (!rslv) {
		memprintf(errmsg, "Could not allocate memory\n");
		goto error;
	}
	rslv->obj_type = OBJ_TYPE_ACME_RSLV;
	rslv->resolvers = resolvers;
	rslv->hostname_dn = strdup(dn);
	rslv->hostname_dn_len = dn_len;
	rslv->result = RSLV_STATUS_NONE;
	rslv->success_cb = acme_rslv_success_cb;
	rslv->error_cb = acme_rslv_error_cb;
	rslv->dnstasks = dnstasks;
	/* strdup() failure checked after the bulk init to keep it compact */
	if (!rslv->hostname_dn) {
		memprintf(errmsg, "Could not allocate memory\n");
		goto error;
	}

	if (resolv_link_resolution(rslv, OBJ_TYPE_ACME_RSLV, 0) < 0) {
		memprintf(errmsg, "Could not create resolution task for \"%.*s\"\n", hostname_len, hostname);
		goto error;
	}
	/* kick off the resolution; completion lands in the callbacks above */
	resolv_trigger_resolution(rslv->requester);

	return rslv;

error:
	/* rslv->txt is never allocated on these paths, only the dn string */
	if (rslv)
		free(rslv->hostname_dn);
	free(rslv);
	return NULL;
}
#endif /* HAVE_ACME */
/*
* Local variables:
* c-indent-level: 8
* c-basic-offset: 8
* End:
*/

View File

@ -511,7 +511,7 @@ size_t appctx_htx_rcv_buf(struct appctx *appctx, struct buffer *buf, size_t coun
goto out;
}
htx_xfer(buf_htx, appctx_htx, count, HTX_XFER_DEFAULT);
htx_xfer_blks(buf_htx, appctx_htx, count, HTX_BLK_UNUSED);
buf_htx->flags |= (appctx_htx->flags & (HTX_FL_PARSING_ERROR|HTX_FL_PROCESSING_ERROR));
if (htx_is_empty(appctx_htx)) {
buf_htx->flags |= (appctx_htx->flags & HTX_FL_EOM);
@ -608,7 +608,7 @@ size_t appctx_htx_snd_buf(struct appctx *appctx, struct buffer *buf, size_t coun
goto end;
}
htx_xfer(appctx_htx, buf_htx, count, HTX_XFER_DEFAULT);
htx_xfer_blks(appctx_htx, buf_htx, count, HTX_BLK_UNUSED);
if (htx_is_empty(buf_htx)) {
appctx_htx->flags |= (buf_htx->flags & HTX_FL_EOM);
}

View File

@ -2067,7 +2067,7 @@ int connect_server(struct stream *s)
* available.
*
* This check must be performed before conn_prepare()
* to ensure consistency across the whole stack, in
* to ensure consistency accross the whole stack, in
* particular for QUIC between quic-conn and mux layer.
*/
if (IS_HTX_STRM(s) && srv->use_ssl &&
@ -2124,11 +2124,6 @@ int connect_server(struct stream *s)
srv_conn->flags |= CO_FL_SOCKS4;
}
if (srv && srv->mux_proto && isteq(srv->mux_proto->token, ist("qmux"))) {
srv_conn->flags |= (CO_FL_QSTRM_RECV|CO_FL_QSTRM_SEND);
may_start_mux_now = 0;
}
#if defined(USE_OPENSSL) && defined(TLSEXT_TYPE_application_layer_protocol_negotiation)
/* if websocket stream, try to update connection ALPN. */
if (unlikely(s->flags & SF_WEBSOCKET) &&
@ -3068,7 +3063,7 @@ int be_downtime(struct proxy *px) {
/* Checks if <px> backend supports the addition of servers at runtime. Either a
* backend or a defaults proxy are supported. If proxy is incompatible, <msg>
* will be allocated to contain a textual explanation.
* will be allocated to contain a textual explaination.
*/
int be_supports_dynamic_srv(struct proxy *px, char **msg)
{

View File

@ -232,7 +232,7 @@ int cfg_eval_cond_term(const struct cfg_cond_term *term, char **err)
const char *p;
ret = 0; // assume feature not found
for (p = build_features; *p && (p = strstr(p, term->args[0].data.str.area)); p++) {
for (p = build_features; (p = strstr(p, term->args[0].data.str.area)); p++) {
if (p > build_features &&
(p[term->args[0].data.str.data] == ' ' ||
p[term->args[0].data.str.data] == 0)) {
@ -272,10 +272,8 @@ int cfg_eval_cond_term(const struct cfg_cond_term *term, char **err)
case CFG_PRED_OSSL_VERSION_ATLEAST: { // checks if the current openssl version is at least this one
int opensslret = openssl_compare_current_version(term->args[0].data.str.area);
if (opensslret < -1) { /* can't parse the string or no openssl available */
memprintf(err, "invalid argument to conditional expression predicate '%s': '%s'", term->pred->word, term->args[0].data.str.area);
if (opensslret < -1) /* can't parse the string or no openssl available */
ret = -1;
}
else
ret = opensslret <= 0;
break;
@ -283,10 +281,8 @@ int cfg_eval_cond_term(const struct cfg_cond_term *term, char **err)
case CFG_PRED_OSSL_VERSION_BEFORE: { // checks if the current openssl version is older than this one
int opensslret = openssl_compare_current_version(term->args[0].data.str.area);
if (opensslret < -1) { /* can't parse the string or no openssl available */
memprintf(err, "invalid argument to conditional expression predicate '%s': '%s'", term->pred->word, term->args[0].data.str.area);
if (opensslret < -1) /* can't parse the string or no openssl available */
ret = -1;
}
else
ret = opensslret > 0;
break;
@ -294,10 +290,8 @@ int cfg_eval_cond_term(const struct cfg_cond_term *term, char **err)
case CFG_PRED_AWSLC_API_ATLEAST: { // checks if the current AWSLC API is at least this one
int awslcret = awslc_compare_current_api(term->args[0].data.str.area);
if (awslcret < -1) { /* can't parse the string or no AWS-LC available */
memprintf(err, "invalid argument to conditional expression predicate '%s': '%s'", term->pred->word, term->args[0].data.str.area);
if (awslcret < -1) /* can't parse the string or no AWS-LC available */
ret = -1;
}
else
ret = awslcret <= 0;
break;
@ -305,10 +299,8 @@ int cfg_eval_cond_term(const struct cfg_cond_term *term, char **err)
case CFG_PRED_AWSLC_API_BEFORE: { // checks if the current AWSLC API is older than this one
int awslcret = awslc_compare_current_api(term->args[0].data.str.area);
if (awslcret < -1) { /* can't parse the string or no AWS-LC available */
memprintf(err, "invalid argument to conditional expression predicate '%s': '%s'", term->pred->word, term->args[0].data.str.area);
if (awslcret < -1) /* can't parse the string or no AWS-LC available */
ret = -1;
}
else
ret = awslcret > 0;
break;
@ -572,8 +564,6 @@ int cfg_eval_condition(char **args, char **err, const char **errptr)
}
ret = cfg_eval_cond_expr(expr, err);
if (ret < 0)
goto fail;
goto done;
}

View File

@ -71,7 +71,7 @@ static void srv_diag_cookies(int *ret, struct server *srv, struct eb_root *cooki
static void srv_diag_check_reuse(int *ret, struct server *srv, struct proxy *px)
{
if (srv->do_check && srv->check.reuse_pool) {
if (px->tcpcheck.rs && (px->tcpcheck.rs->flags & TCPCHK_RULES_PROTO_CHK) != TCPCHK_RULES_HTTP_CHK) {
if ((px->tcpcheck_rules.flags & TCPCHK_RULES_PROTO_CHK) != TCPCHK_RULES_HTTP_CHK) {
diag_warning(ret, "parsing [%s:%d] : 'server %s': check-reuse-pool is ineffective for non http-check rulesets.\n",
srv->conf.file, srv->conf.line, srv->id);
}

View File

@ -1358,15 +1358,14 @@ int cfg_parse_listen(const char *file, int linenum, char **args, int kwm)
goto out;
}
if (warnif_misplaced_http_req(curproxy, file, linenum, args[0], NULL))
err_code |= ERR_WARN;
err_code |= warnif_misplaced_http_req(curproxy, file, linenum, args[0], NULL);
if (curproxy->cap & PR_CAP_FE)
where |= SMP_VAL_FE_HRQ_HDR;
if (curproxy->cap & PR_CAP_BE)
where |= SMP_VAL_BE_HRQ_HDR;
err_code |= warnif_cond_conflicts(rule->cond, where, &errmsg);
if (errmsg && *errmsg)
if (err_code)
ha_warning("parsing [%s:%d] : '%s.\n'", file, linenum, errmsg);
LIST_APPEND(&curproxy->http_req_rules, &rule->list);
@ -1401,7 +1400,7 @@ int cfg_parse_listen(const char *file, int linenum, char **args, int kwm)
if (curproxy->cap & PR_CAP_BE)
where |= SMP_VAL_BE_HRS_HDR;
err_code |= warnif_cond_conflicts(rule->cond, where, &errmsg);
if (errmsg && *errmsg)
if (err_code)
ha_warning("parsing [%s:%d] : '%s.\n'", file, linenum, errmsg);
LIST_APPEND(&curproxy->http_res_rules, &rule->list);
@ -1435,7 +1434,7 @@ int cfg_parse_listen(const char *file, int linenum, char **args, int kwm)
if (curproxy->cap & PR_CAP_BE)
where |= SMP_VAL_BE_HRS_HDR;
err_code |= warnif_cond_conflicts(rule->cond, where, &errmsg);
if (errmsg && *errmsg)
if (err_code)
ha_warning("parsing [%s:%d] : '%s.\n'", file, linenum, errmsg);
LIST_APPEND(&curproxy->http_after_res_rules, &rule->list);
@ -1492,15 +1491,14 @@ int cfg_parse_listen(const char *file, int linenum, char **args, int kwm)
}
LIST_APPEND(&curproxy->redirect_rules, &rule->list);
if (warnif_misplaced_redirect(curproxy, file, linenum, args[0], NULL))
err_code |= ERR_WARN;
err_code |= warnif_misplaced_redirect(curproxy, file, linenum, args[0], NULL);
if (curproxy->cap & PR_CAP_FE)
where |= SMP_VAL_FE_HRQ_HDR;
if (curproxy->cap & PR_CAP_BE)
where |= SMP_VAL_BE_HRQ_HDR;
err_code |= warnif_cond_conflicts(rule->cond, where, &errmsg);
if (errmsg && *errmsg)
if (err_code)
ha_warning("parsing [%s:%d] : '%s.\n'", file, linenum, errmsg);
}
else if (strcmp(args[0], "use_backend") == 0) {
@ -1530,7 +1528,7 @@ int cfg_parse_listen(const char *file, int linenum, char **args, int kwm)
}
err_code |= warnif_cond_conflicts(cond, SMP_VAL_FE_SET_BCK, &errmsg);
if (errmsg && *errmsg)
if (err_code)
ha_warning("parsing [%s:%d] : '%s.\n'", file, linenum, errmsg);
}
else if (*args[2]) {
@ -1593,7 +1591,7 @@ int cfg_parse_listen(const char *file, int linenum, char **args, int kwm)
}
err_code |= warnif_cond_conflicts(cond, SMP_VAL_BE_SET_SRV, &errmsg);
if (errmsg && *errmsg)
if (err_code)
ha_warning("parsing [%s:%d] : '%s.\n'", file, linenum, errmsg);
rule = calloc(1, sizeof(*rule));
@ -1648,7 +1646,7 @@ int cfg_parse_listen(const char *file, int linenum, char **args, int kwm)
* where force-persist is applied.
*/
err_code |= warnif_cond_conflicts(cond, SMP_VAL_BE_REQ_CNT, &errmsg);
if (errmsg && *errmsg)
if (err_code)
ha_warning("parsing [%s:%d] : '%s.\n'", file, linenum, errmsg);
rule = calloc(1, sizeof(*rule));
@ -1816,7 +1814,7 @@ int cfg_parse_listen(const char *file, int linenum, char **args, int kwm)
err_code |= warnif_cond_conflicts(cond, SMP_VAL_BE_STO_RUL, &errmsg);
else
err_code |= warnif_cond_conflicts(cond, SMP_VAL_BE_SET_SRV, &errmsg);
if (errmsg && *errmsg)
if (err_code)
ha_warning("parsing [%s:%d] : '%s.\n'", file, linenum, errmsg);
rule = calloc(1, sizeof(*rule));
@ -1874,7 +1872,7 @@ int cfg_parse_listen(const char *file, int linenum, char **args, int kwm)
if (curproxy->cap & PR_CAP_BE)
where |= SMP_VAL_BE_HRQ_HDR;
err_code |= warnif_cond_conflicts(cond, where, &errmsg);
if (errmsg && *errmsg)
if (err_code)
ha_warning("parsing [%s:%d] : '%s.\n'", file, linenum, errmsg);
rule = calloc(1, sizeof(*rule));
@ -1954,7 +1952,7 @@ int cfg_parse_listen(const char *file, int linenum, char **args, int kwm)
if (curproxy->cap & PR_CAP_BE)
where |= SMP_VAL_BE_HRQ_HDR;
err_code |= warnif_cond_conflicts(rule->cond, where, &errmsg);
if (errmsg && *errmsg)
if (err_code)
ha_warning("parsing [%s:%d] : '%s.\n'", file, linenum, errmsg);
LIST_APPEND(&curproxy->uri_auth->http_req_rules, &rule->list);
@ -1976,10 +1974,7 @@ int cfg_parse_listen(const char *file, int linenum, char **args, int kwm)
if (!stats_check_init_uri_auth(&curproxy->uri_auth))
goto alloc_error;
} else if (strcmp(args[1], "hide-version") == 0) {
if (curproxy->uri_auth)
curproxy->uri_auth->flags &= ~STAT_F_SHOWVER;
} else if (strcmp(args[1], "show-version") == 0) {
if (!stats_set_flag(&curproxy->uri_auth, STAT_F_SHOWVER))
if (!stats_set_flag(&curproxy->uri_auth, STAT_F_HIDEVER))
goto alloc_error;
} else if (strcmp(args[1], "show-legends") == 0) {
if (!stats_set_flag(&curproxy->uri_auth, STAT_F_SHLGNDS))
@ -2046,7 +2041,7 @@ int cfg_parse_listen(const char *file, int linenum, char **args, int kwm)
}
} else {
stats_error_parsing:
ha_alert("parsing [%s:%d]: %s '%s', expects 'admin', 'uri', 'realm', 'auth', 'scope', 'enable', 'hide-version', 'show-node', 'show-desc' , 'show-legends' or 'show-version'.\n",
ha_alert("parsing [%s:%d]: %s '%s', expects 'admin', 'uri', 'realm', 'auth', 'scope', 'enable', 'hide-version', 'show-node', 'show-desc' or 'show-legends'.\n",
file, linenum, *args[1]?"unknown stats parameter":"missing keyword in", args[*args[1]?1:0]);
err_code |= ERR_ALERT | ERR_FATAL;
goto out;
@ -2205,42 +2200,6 @@ stats_error_parsing:
err_code |= ERR_ALERT | ERR_FATAL;
goto out;
}
else if (strcmp(args[1], "use-small-buffers") == 0) {
unsigned int flags = PR_O2_USE_SBUF_ALL;
if (warnifnotcap(curproxy, PR_CAP_BE, file, linenum, args[1], NULL)) {
err_code |= ERR_WARN;
goto out;
}
if (*(args[2])) {
int cur_arg;
flags = 0;
for (cur_arg = 2; *(args[cur_arg]); cur_arg++) {
if (strcmp(args[cur_arg], "queue") == 0)
flags |= PR_O2_USE_SBUF_QUEUE;
else if (strcmp(args[cur_arg], "l7-retries") == 0)
flags |= PR_O2_USE_SBUF_L7_RETRY;
else if (strcmp(args[cur_arg], "check") == 0)
flags |= PR_O2_USE_SBUF_CHECK;
else {
ha_alert("parsing [%s:%d] : invalid parameter '%s'. option '%s' expects 'queue', 'l7-retries' or 'check' value.\n",
file, linenum, args[cur_arg], args[1]);
err_code |= ERR_ALERT | ERR_FATAL;
goto out;
}
}
}
if (kwm == KWM_STD) {
curproxy->options2 &= ~PR_O2_USE_SBUF_ALL;
curproxy->options2 |= flags;
}
else if (kwm == KWM_NO) {
curproxy->options2 &= ~flags;
}
goto out;
}
if (kwm != KWM_STD) {
ha_alert("parsing [%s:%d]: negation/default is not supported for option '%s'.\n",
@ -2598,8 +2557,7 @@ stats_error_parsing:
goto out;
}
if (warnif_misplaced_monitor(curproxy, file, linenum, args[0], args[1]))
err_code |= ERR_WARN;
err_code |= warnif_misplaced_monitor(curproxy, file, linenum, args[0], args[1]);
if ((cond = build_acl_cond(file, linenum, &curproxy->acl, curproxy, (const char **)args + 2, &errmsg)) == NULL) {
ha_alert("parsing [%s:%d] : error detected while parsing a '%s %s' condition : %s.\n",
file, linenum, args[0], args[1], errmsg);

View File

@ -521,7 +521,7 @@ static int ssl_parse_global_keylog(char **args, int section_type, struct proxy *
}
#endif
/* Allow to explicitly disable certificate compression when set to "off" */
/* Allow to explicitely disable certificate compression when set to "off" */
#ifdef SSL_OP_NO_RX_CERTIFICATE_COMPRESSION
static int ssl_parse_certificate_compression(char **args, int section_type, struct proxy *curpx,
const struct proxy *defpx, const char *file, int line,

View File

@ -63,7 +63,6 @@
#include <haproxy/global.h>
#include <haproxy/http_ana.h>
#include <haproxy/http_rules.h>
#include <haproxy/http_htx.h>
#include <haproxy/lb_chash.h>
#include <haproxy/lb_fas.h>
#include <haproxy/lb_fwlc.h>
@ -2319,18 +2318,6 @@ int check_config_validity()
"Please fix either value to remove this warning.\n",
global.tune.bufsize_large, global.tune.bufsize);
global.tune.bufsize_large = 0;
err_code |= ERR_WARN;
}
}
if (global.tune.bufsize_small > 0) {
if (global.tune.bufsize_small == global.tune.bufsize)
global.tune.bufsize_small = 0;
else if (global.tune.bufsize_small > global.tune.bufsize) {
ha_warning("invalid small buffer size %d bytes which is greater to default bufsize %d bytes.\n",
global.tune.bufsize_small, global.tune.bufsize);
global.tune.bufsize_small = 0;
err_code |= ERR_WARN;
}
}
@ -2405,8 +2392,6 @@ int check_config_validity()
else {
cfgerr += acl_find_targets(defpx);
}
err_code |= proxy_check_http_errors(defpx);
}
/* starting to initialize the main proxies list */

View File

@ -235,7 +235,19 @@ static void check_trace(enum trace_level level, uint64_t mask,
if (mask & CHK_EV_TCPCHK) {
const char *type;
type = tcpcheck_ruleset_type_to_str(check->tcpcheck->rs);
switch (check->tcpcheck_rules->flags & TCPCHK_RULES_PROTO_CHK) {
case TCPCHK_RULES_PGSQL_CHK: type = "PGSQL"; break;
case TCPCHK_RULES_REDIS_CHK: type = "REDIS"; break;
case TCPCHK_RULES_SMTP_CHK: type = "SMTP"; break;
case TCPCHK_RULES_HTTP_CHK: type = "HTTP"; break;
case TCPCHK_RULES_MYSQL_CHK: type = "MYSQL"; break;
case TCPCHK_RULES_LDAP_CHK: type = "LDAP"; break;
case TCPCHK_RULES_SSL3_CHK: type = "SSL3"; break;
case TCPCHK_RULES_AGENT_CHK: type = "AGENT"; break;
case TCPCHK_RULES_SPOP_CHK: type = "SPOP"; break;
case TCPCHK_RULES_TCP_CHK: type = "TCP"; break;
default: type = "???"; break;
}
if (check->current_step)
chunk_appendf(&trace_buf, " - tcp-check=(%s,%d)", type, tcpcheck_get_step_id(check, NULL));
else
@ -259,7 +271,7 @@ static void check_trace(enum trace_level level, uint64_t mask,
buf = (b_is_null(&check->bo) ? NULL : &check->bo);
if (buf) {
if ((check->tcpcheck->rs->flags & TCPCHK_RULES_PROTO_CHK) == TCPCHK_RULES_HTTP_CHK) {
if ((check->tcpcheck_rules->flags & TCPCHK_RULES_PROTO_CHK) == TCPCHK_RULES_HTTP_CHK) {
int full = (src->verbosity == CHK_VERB_COMPLETE);
chunk_memcat(&trace_buf, "\n\t", 2);
@ -820,7 +832,7 @@ void chk_report_conn_err(struct check *check, int errno_bck, int expired)
chk = get_trash_chunk();
if (check->type == PR_O2_TCPCHK_CHK &&
(check->tcpcheck->rs->flags & TCPCHK_RULES_PROTO_CHK) == TCPCHK_RULES_TCP_CHK) {
(check->tcpcheck_rules->flags & TCPCHK_RULES_PROTO_CHK) == TCPCHK_RULES_TCP_CHK) {
step = tcpcheck_get_step_id(check, NULL);
if (!step) {
TRACE_DEVEL("initial connection failure", CHK_EV_HCHK_END|CHK_EV_HCHK_ERR, check);
@ -1305,7 +1317,7 @@ struct task *process_chk_conn(struct task *t, void *context, unsigned int state)
check->current_step = NULL;
check->sc = sc_new_from_check(check);
check->sc = sc_new_from_check(check, SC_FL_NONE);
if (!check->sc) {
set_server_check_status(check, HCHK_STATUS_SOCKERR, NULL);
goto end;
@ -1503,15 +1515,13 @@ int check_buf_available(void *target)
/*
* Allocate a buffer. If it fails, it adds the check in buffer wait queue.
*/
struct buffer *check_get_buf(struct check *check, struct buffer *bptr, unsigned int small_buffer)
struct buffer *check_get_buf(struct check *check, struct buffer *bptr)
{
struct buffer *buf = NULL;
if (small_buffer == 0 || (buf = b_alloc_small(bptr)) == NULL) {
if (likely(!LIST_INLIST(&check->buf_wait.list)) &&
unlikely((buf = b_alloc(bptr, DB_CHANNEL)) == NULL)) {
b_queue(DB_CHANNEL, &check->buf_wait, check, check_buf_available);
}
if (likely(!LIST_INLIST(&check->buf_wait.list)) &&
unlikely((buf = b_alloc(bptr, DB_CHANNEL)) == NULL)) {
b_queue(DB_CHANNEL, &check->buf_wait, check, check_buf_available);
}
return buf;
}
@ -1523,11 +1533,8 @@ struct buffer *check_get_buf(struct check *check, struct buffer *bptr, unsigned
void check_release_buf(struct check *check, struct buffer *bptr)
{
if (bptr->size) {
int defbuf = b_is_default(bptr);
b_free(bptr);
if (defbuf)
offer_buffers(check->buf_wait.target, 1);
offer_buffers(check->buf_wait.target, 1);
}
}
@ -1552,10 +1559,9 @@ void free_check(struct check *check)
* done for health-check : the proxy is the owner of the rules / vars
* in this case.
*/
if (check->state & CHK_ST_AGENT || check->tcpcheck->healthcheck) {
free_tcpcheck_vars(&check->tcpcheck->preset_vars);
ha_free(&check->tcpcheck->healthcheck);
ha_free(&check->tcpcheck);
if (check->state & CHK_ST_AGENT) {
free_tcpcheck_vars(&check->tcpcheck_rules->preset_vars);
ha_free(&check->tcpcheck_rules);
}
ha_free(&check->pool_conn_name);
@ -1648,6 +1654,7 @@ int start_check_task(struct check *check, int mininter,
*/
static int start_checks()
{
struct proxy *px;
struct server *s;
char *errmsg = NULL;
@ -1674,10 +1681,6 @@ static int start_checks()
*/
for (px = proxies_list; px; px = px->next) {
for (s = px->srv; s; s = s->next) {
if ((px->options2 & PR_O2_USE_SBUF_CHECK) &&
(s->check.tcpcheck->rs->flags & TCPCHK_RULES_MAY_USE_SBUF))
s->check.state |= CHK_ST_USE_SMALL_BUFF;
if (s->check.state & CHK_ST_CONFIGURED) {
nbcheck++;
if ((srv_getinter(&s->check) >= SRV_CHK_INTER_THRES) &&
@ -1792,7 +1795,7 @@ int init_srv_check(struct server *srv)
if (!srv->do_check || !(srv->proxy->cap & PR_CAP_BE))
goto out;
check_type = srv->check.tcpcheck->rs->flags & TCPCHK_RULES_PROTO_CHK;
check_type = srv->check.tcpcheck_rules->flags & TCPCHK_RULES_PROTO_CHK;
if (!(srv->flags & SRV_F_DYNAMIC)) {
/* If neither a port nor an addr was specified and no check
@ -1802,15 +1805,7 @@ int init_srv_check(struct server *srv)
* specified.
*/
if (!srv->check.port && !is_addr(&srv->check.addr)) {
/*
* If any setting is set for the check, then we can't
* assume we'll use the same XPRT as the server, the
* server may be QUIC, but we want a TCP check.
*/
if (!srv->check.use_ssl && srv->use_ssl != -1 &&
!srv->check.via_socks4 && !srv->check.send_proxy &&
(!srv->check.alpn_len || (srv->check.alpn_len == srv->ssl_ctx.alpn_len && !strncmp(srv->check.alpn_str, srv->ssl_ctx.alpn_str, srv->check.alpn_len))) &&
(!srv->check.mux_proto || srv->check.mux_proto != srv->mux_proto))
if (!srv->check.use_ssl && srv->use_ssl != -1)
srv->check.xprt = srv->xprt;
else if (srv->check.use_ssl == 1)
srv->check.xprt = xprt_get(XPRT_SSL);
@ -1886,7 +1881,7 @@ int init_srv_check(struct server *srv)
(!is_inet_addr(&srv->check.addr) && (is_addr(&srv->check.addr) || !is_inet_addr(&srv->addr))))
goto init;
if (LIST_ISEMPTY(&srv->check.tcpcheck->rs->rules)) {
if (!srv->proxy->tcpcheck_rules.list || LIST_ISEMPTY(srv->proxy->tcpcheck_rules.list)) {
ha_alert("config: %s '%s': server '%s' has neither service port nor check port.\n",
proxy_type_str(srv->proxy), srv->proxy->id, srv->id);
ret |= ERR_ALERT | ERR_ABORT;
@ -1894,7 +1889,7 @@ int init_srv_check(struct server *srv)
}
/* search the first action (connect / send / expect) in the list */
r = get_first_tcpcheck_rule(srv->check.tcpcheck->rs);
r = get_first_tcpcheck_rule(&srv->proxy->tcpcheck_rules);
if (!r || (r->action != TCPCHK_ACT_CONNECT) || (!r->connect.port && !get_host_port(&r->connect.addr))) {
ha_alert("config: %s '%s': server '%s' has neither service port nor check port "
"nor tcp_check rule 'connect' with port information.\n",
@ -1904,7 +1899,7 @@ int init_srv_check(struct server *srv)
}
/* scan the tcp-check ruleset to ensure a port has been configured */
list_for_each_entry(r, &srv->check.tcpcheck->rs->rules, list) {
list_for_each_entry(r, srv->proxy->tcpcheck_rules.list, list) {
if ((r->action == TCPCHK_ACT_CONNECT) && (!r->connect.port && !get_host_port(&r->connect.addr))) {
ha_alert("config: %s '%s': server '%s' has neither service port nor check port, "
"and a tcp_check rule 'connect' with no port information.\n",
@ -1951,7 +1946,7 @@ int init_srv_agent_check(struct server *srv)
/* If there is no connect rule preceding all send / expect rules, an
* implicit one is inserted before all others.
*/
chk = get_first_tcpcheck_rule(srv->agent.tcpcheck->rs);
chk = get_first_tcpcheck_rule(srv->agent.tcpcheck_rules);
if (!chk || chk->action != TCPCHK_ACT_CONNECT) {
chk = calloc(1, sizeof(*chk));
if (!chk) {
@ -1963,14 +1958,14 @@ int init_srv_agent_check(struct server *srv)
}
chk->action = TCPCHK_ACT_CONNECT;
chk->connect.options = (TCPCHK_OPT_DEFAULT_CONNECT|TCPCHK_OPT_IMPLICIT);
LIST_INSERT(&srv->agent.tcpcheck->rs->rules, &chk->list);
LIST_INSERT(srv->agent.tcpcheck_rules->list, &chk->list);
}
/* <chk> is always defined here and it is a CONNECT action. If there is
* a preset variable, it means there is an agent string defined and data
* will be sent after the connect.
*/
if (!LIST_ISEMPTY(&srv->agent.tcpcheck->preset_vars))
if (!LIST_ISEMPTY(&srv->agent.tcpcheck_rules->preset_vars))
chk->connect.options |= TCPCHK_OPT_HAS_DATA;
@ -2061,7 +2056,6 @@ static int srv_parse_addr(char **args, int *cur_arg, struct proxy *curpx, struct
char **errmsg)
{
struct sockaddr_storage *sk;
struct protocol *proto;
int port1, port2, err_code = 0;
@ -2070,7 +2064,7 @@ static int srv_parse_addr(char **args, int *cur_arg, struct proxy *curpx, struct
goto error;
}
sk = str2sa_range(args[*cur_arg+1], NULL, &port1, &port2, NULL, &proto, NULL, errmsg, NULL, NULL, NULL,
sk = str2sa_range(args[*cur_arg+1], NULL, &port1, &port2, NULL, NULL, NULL, errmsg, NULL, NULL, NULL,
PA_O_RESOLVE | PA_O_PORT_OK | PA_O_STREAM | PA_O_CONNECT);
if (!sk) {
memprintf(errmsg, "'%s' : %s", args[*cur_arg], *errmsg);
@ -2078,7 +2072,6 @@ static int srv_parse_addr(char **args, int *cur_arg, struct proxy *curpx, struct
}
srv->check.addr = *sk;
srv->check.proto = proto;
/* if agentaddr was never set, we can use addr */
if (!(srv->flags & SRV_F_AGENTADDR))
srv->agent.addr = *sk;
@ -2108,11 +2101,7 @@ static int srv_parse_agent_addr(char **args, int *cur_arg, struct proxy *curpx,
goto error;
}
set_srv_agent_addr(srv, &sk);
/* Agent currently only uses TCP */
if (sk.ss_family == AF_INET)
srv->agent.proto = &proto_tcpv4;
else
srv->agent.proto = &proto_tcpv6;
out:
return err_code;
@ -2126,7 +2115,7 @@ static int srv_parse_agent_check(char **args, int *cur_arg, struct proxy *curpx,
char **errmsg)
{
struct tcpcheck_ruleset *rs = NULL;
struct tcpcheck *tc = srv->agent.tcpcheck;
struct tcpcheck_rules *rules = srv->agent.tcpcheck_rules;
struct tcpcheck_rule *chk;
int err_code = 0;
@ -2139,15 +2128,17 @@ static int srv_parse_agent_check(char **args, int *cur_arg, struct proxy *curpx,
return ERR_WARN;
}
if (!tc) {
tc = calloc(1, sizeof(*tc));
if (!tc) {
if (!rules) {
rules = calloc(1, sizeof(*rules));
if (!rules) {
memprintf(errmsg, "out of memory.");
goto error;
}
LIST_INIT(&tc->preset_vars);
srv->agent.tcpcheck = tc;
LIST_INIT(&rules->preset_vars);
srv->agent.tcpcheck_rules = rules;
}
rules->list = NULL;
rules->flags = 0;
rs = find_tcpcheck_ruleset("*agent-check");
if (rs)
@ -2180,9 +2171,9 @@ static int srv_parse_agent_check(char **args, int *cur_arg, struct proxy *curpx,
LIST_APPEND(&rs->rules, &chk->list);
ruleset_found:
tc->rs = rs;
tc->flags &= ~TCPCHK_FL_UNUSED_RS;
rs->flags |= TCPCHK_RULES_AGENT_CHK;
rules->list = &rs->rules;
rules->flags &= ~(TCPCHK_RULES_PROTO_CHK|TCPCHK_RULES_UNUSED_RS);
rules->flags |= TCPCHK_RULES_AGENT_CHK;
srv->do_agent = 1;
out:
@ -2275,7 +2266,7 @@ static int srv_parse_agent_port(char **args, int *cur_arg, struct proxy *curpx,
int set_srv_agent_send(struct server *srv, const char *send)
{
struct tcpcheck *tc = srv->agent.tcpcheck;
struct tcpcheck_rules *rules = srv->agent.tcpcheck_rules;
struct tcpcheck_var *var = NULL;
char *str;
@ -2284,13 +2275,13 @@ int set_srv_agent_send(struct server *srv, const char *send)
if (str == NULL || var == NULL)
goto error;
free_tcpcheck_vars(&tc->preset_vars);
free_tcpcheck_vars(&rules->preset_vars);
var->data.type = SMP_T_STR;
var->data.u.str.area = str;
var->data.u.str.data = strlen(str);
LIST_INIT(&var->list);
LIST_APPEND(&tc->preset_vars, &var->list);
LIST_APPEND(&rules->preset_vars, &var->list);
return 1;
@ -2304,7 +2295,7 @@ int set_srv_agent_send(struct server *srv, const char *send)
static int srv_parse_agent_send(char **args, int *cur_arg, struct proxy *curpx, struct server *srv,
char **errmsg)
{
struct tcpcheck *tc = srv->agent.tcpcheck;
struct tcpcheck_rules *rules = srv->agent.tcpcheck_rules;
int err_code = 0;
if (!*(args[*cur_arg+1])) {
@ -2312,14 +2303,14 @@ static int srv_parse_agent_send(char **args, int *cur_arg, struct proxy *curpx,
goto error;
}
if (!tc) {
tc = calloc(1, sizeof(*tc));
if (!tc) {
if (!rules) {
rules = calloc(1, sizeof(*rules));
if (!rules) {
memprintf(errmsg, "out of memory.");
goto error;
}
LIST_INIT(&tc->preset_vars);
srv->agent.tcpcheck = tc;
LIST_INIT(&rules->preset_vars);
srv->agent.tcpcheck_rules = rules;
}
if (!set_srv_agent_send(srv, args[*cur_arg+1])) {

View File

@ -53,22 +53,6 @@ struct pool_head *pool_head_large_trash __read_mostly = NULL;
/* this is used to drain data, and as a temporary large buffer */
THREAD_LOCAL struct buffer trash_large = { };
/* small trash chunks used for various conversions */
static THREAD_LOCAL struct buffer *small_trash_chunk;
static THREAD_LOCAL struct buffer small_trash_chunk1;
static THREAD_LOCAL struct buffer small_trash_chunk2;
/* small trash buffers used for various conversions */
static int small_trash_size __read_mostly = 0;
static THREAD_LOCAL char *small_trash_buf1 = NULL;
static THREAD_LOCAL char *small_trash_buf2 = NULL;
/* the trash pool for reentrant allocations */
struct pool_head *pool_head_small_trash __read_mostly = NULL;
/* this is used to drain data, and as a temporary small buffer */
THREAD_LOCAL struct buffer trash_small = { };
/*
* Returns a pre-allocated and initialized trash chunk that can be used for any
* type of conversion. Two chunks and their respective buffers are alternatively
@ -96,7 +80,7 @@ struct buffer *get_trash_chunk(void)
}
/* Similar to get_trash_chunk() but return a pre-allocated large chunk
* instead. Because large buffers are not enabled by default, this function may
* instead. Because large buffers are not enabled by default, this function may
* return NULL.
*/
struct buffer *get_large_trash_chunk(void)
@ -119,40 +103,14 @@ struct buffer *get_large_trash_chunk(void)
return large_trash_chunk;
}
/* Similar to get_trash_chunk() but return a pre-allocated small chunk
* instead. Because small buffers are not enabled by default, this function may
* return NULL.
*/
/* Returns a pre-allocated and reset small trash chunk, or NULL when small
 * trash buffers are not configured (small_trash_size == 0). Two thread-local
 * chunks and their buffers are used in alternation, following the same
 * pattern as get_trash_chunk(), so two successive calls return distinct
 * chunks.
 */
struct buffer *get_small_trash_chunk(void)
{
	char *small_trash_buf;

	/* small buffers are optional; a zero size means "not allocated" */
	if (!small_trash_size)
		return NULL;

	/* alternate between the two per-thread chunk/buffer pairs */
	if (small_trash_chunk == &small_trash_chunk1) {
		small_trash_chunk = &small_trash_chunk2;
		small_trash_buf = small_trash_buf2;
	}
	else {
		small_trash_chunk = &small_trash_chunk1;
		small_trash_buf = small_trash_buf1;
	}
	/* reset the buffer and (re)initialize the chunk on it */
	*small_trash_buf = 0;
	chunk_init(small_trash_chunk, small_trash_buf, small_trash_size);
	return small_trash_chunk;
}
/* Returns a trash chunk accordingly to the requested size. This function may
* fail if the requested size is too big or if the large chunks are not
* configured.
*/
struct buffer *get_trash_chunk_sz(size_t size)
{
if (likely(size > small_trash_size && size <= trash_size))
return get_trash_chunk();
else if (small_trash_size && size <= small_trash_size)
return get_small_trash_chunk();
if (likely(size <= trash_size))
return get_trash_chunk();
else if (large_trash_size && size <= large_trash_size)
return get_large_trash_chunk();
else
@ -164,20 +122,17 @@ struct buffer *get_trash_chunk_sz(size_t size)
*/
struct buffer *get_larger_trash_chunk(struct buffer *chk)
{
struct buffer *chunk = NULL;
struct buffer *chunk;
if (!chk || chk->size == small_trash_size) {
/* no chunk or a small one, use a regular buffer */
chunk = get_trash_chunk();
}
else if (large_trash_size && chk->size <= large_trash_size) {
/* a regular buffer, use a large buffer if possible */
chunk = get_large_trash_chunk();
}
if (!chk)
return get_trash_chunk();
if (chk && chunk)
b_xfer(chunk, chk, b_data(chk));
/* No large buffers or current chunk is already a large trash chunk */
if (!large_trash_size || chk->size == large_trash_size)
return NULL;
chunk = get_large_trash_chunk();
b_xfer(chunk, chk, b_data(chk));
return chunk;
}
@ -211,29 +166,9 @@ static int alloc_large_trash_buffers(int bufsize)
return trash_large.area && large_trash_buf1 && large_trash_buf2;
}
/* allocates the trash small buffers if necessary. Returns 0 in case of
* failure. Unlike alloc_trash_buffers(), It is unexpected to call this function
* multiple times. Small buffers are not used during configuration parsing.
*/
/* Allocates the thread-local small trash buffers of <bufsize> bytes each.
 * A <bufsize> of 0 means small buffers are disabled and is reported as
 * success. Returns 0 in case of allocation failure, non-zero otherwise.
 * Unlike alloc_trash_buffers(), this function is not expected to be called
 * multiple times (enforced by the BUG_ON below).
 */
static int alloc_small_trash_buffers(int bufsize)
{
	small_trash_size = bufsize;
	if (!small_trash_size)
		return 1;
	/* must not already be allocated: this function is single-shot */
	BUG_ON(trash_small.area && small_trash_buf1 && small_trash_buf2);
	chunk_init(&trash_small, my_realloc2(trash_small.area, bufsize), bufsize);
	small_trash_buf1 = (char *)my_realloc2(small_trash_buf1, bufsize);
	small_trash_buf2 = (char *)my_realloc2(small_trash_buf2, bufsize);
	/* success only if all three allocations succeeded */
	return trash_small.area && small_trash_buf1 && small_trash_buf2;
}
static int alloc_trash_buffers_per_thread()
{
return (alloc_trash_buffers(global.tune.bufsize) &&
alloc_large_trash_buffers(global.tune.bufsize_large) &&
alloc_small_trash_buffers(global.tune.bufsize_large));
return alloc_trash_buffers(global.tune.bufsize) && alloc_large_trash_buffers(global.tune.bufsize_large);
}
static void free_trash_buffers_per_thread()
@ -245,10 +180,6 @@ static void free_trash_buffers_per_thread()
chunk_destroy(&trash_large);
ha_free(&large_trash_buf2);
ha_free(&large_trash_buf1);
chunk_destroy(&trash_small);
ha_free(&small_trash_buf2);
ha_free(&small_trash_buf1);
}
/* Initialize the trash buffers. It returns 0 if an error occurred. */
@ -276,14 +207,6 @@ int init_trash_buffers(int first)
if (!pool_head_large_trash)
return 0;
}
if (!first && global.tune.bufsize_small) {
pool_head_small_trash = create_pool("small_trash",
sizeof(struct buffer) + global.tune.bufsize_small,
MEM_F_EXACT);
if (!pool_head_small_trash)
return 0;
}
return 1;
}

View File

@ -2670,7 +2670,7 @@ static int _send_status(char **args, char *payload, struct appctx *appctx, void
}
/* the sockpair between the master and the worker is
* used temporarily as a listener to receive
* used temporarily as a listener to receive
* _send_status. Once it is received we don't want to
* use this FD as a listener anymore, but only as a
* server, to allow only connections from the master to

View File

@ -182,7 +182,7 @@ int conn_notify_mux(struct connection *conn, int old_flags, int forced_wake)
* information to create one, typically from the ALPN. If we're
* done with the handshake, attempt to create one.
*/
if (unlikely(!conn->mux) && !(conn->flags & (CO_FL_WAIT_XPRT|CO_FL_QSTRM_RECV|CO_FL_QSTRM_SEND))) {
if (unlikely(!conn->mux) && !(conn->flags & CO_FL_WAIT_XPRT)) {
ret = conn_create_mux(conn, NULL);
if (ret < 0)
goto done;
@ -412,7 +412,7 @@ int conn_install_mux_chk(struct connection *conn, void *ctx, struct session *ses
struct ist mux_proto;
const char *alpn_str = NULL;
int alpn_len = 0;
int mode = tcpchk_rules_type_to_proto_mode(check->tcpcheck->flags);
int mode = tcpchk_rules_type_to_proto_mode(check->tcpcheck_rules->flags);
conn_get_alpn(conn, &alpn_str, &alpn_len);
mux_proto = ist2(alpn_str, alpn_len);

View File

@ -109,7 +109,7 @@ static int _counters_shared_prepare(struct counters_shared *shared,
shm_obj = shm_stats_file_add_object(errmsg);
if (shm_obj) {
snprintf(shm_obj->guid, sizeof(shm_obj->guid), "%s", guid_get(guid));
snprintf(shm_obj->guid, sizeof(shm_obj->guid)- 1, "%s", guid_get(guid));
if (is_be) {
shm_obj->type = SHM_STATS_FILE_OBJECT_TYPE_BE;
be_shared = (struct be_counters_shared *)shared;

View File

@ -24,7 +24,6 @@
struct pool_head *pool_head_buffer __read_mostly;
struct pool_head *pool_head_large_buffer __read_mostly = NULL;
struct pool_head *pool_head_small_buffer __read_mostly;
/* perform minimal initializations, report 0 in case of error, 1 if OK. */
int init_buffer()
@ -44,12 +43,6 @@ int init_buffer()
return 0;
}
if (global.tune.bufsize_small) {
pool_head_small_buffer = create_aligned_pool("small_buffer", global.tune.bufsize_small, 64, MEM_F_SHARED|MEM_F_EXACT);
if (!pool_head_small_buffer)
return 0;
}
/* make sure any change to the queues assignment isn't overlooked */
BUG_ON(DB_PERMANENT - DB_UNLIKELY - 1 != DYNBUF_NBQ);
BUG_ON(DB_MUX_RX_Q < DB_SE_RX_Q || DB_MUX_RX_Q >= DYNBUF_NBQ);

View File

@ -136,10 +136,6 @@ static int cli_parse_show_ech(char **args, char *payload,
{
struct show_ech_ctx *ctx = applet_reserve_svcctx(appctx, sizeof(*ctx));
if (!cli_has_level(appctx, ACCESS_LVL_ADMIN))
return 1;
/* no parameter, shows only file list */
if (*args[3]) {
SSL_CTX *sctx = NULL;
@ -301,9 +297,6 @@ static int cli_parse_add_ech(char **args, char *payload, struct appctx *appctx,
OSSL_ECHSTORE *es = NULL;
BIO *es_in = NULL;
if (!cli_has_level(appctx, ACCESS_LVL_ADMIN))
return 1;
if (!*args[3] || !payload)
return cli_err(appctx, "syntax: add ssl ech <name> <PEM file content>");
if (cli_find_ech_specific_ctx(args[3], &sctx) != 1)
@ -331,9 +324,6 @@ static int cli_parse_set_ech(char **args, char *payload, struct appctx *appctx,
OSSL_ECHSTORE *es = NULL;
BIO *es_in = NULL;
if (!cli_has_level(appctx, ACCESS_LVL_ADMIN))
return 1;
if (!*args[3] || !payload)
return cli_err(appctx, "syntax: set ssl ech <name> <PEM file content>");
if (cli_find_ech_specific_ctx(args[3], &sctx) != 1)
@ -361,9 +351,6 @@ static int cli_parse_del_ech(char **args, char *payload, struct appctx *appctx,
char success_message[ECH_SUCCESS_MSG_MAX];
OSSL_ECHSTORE *es = NULL;
if (!cli_has_level(appctx, ACCESS_LVL_ADMIN))
return 1;
if (!*args[3])
return cli_err(appctx, "syntax: del ssl ech <name>");
if (*args[4])

View File

@ -249,8 +249,6 @@ parse_filter(char **args, int section_type, struct proxy *curpx,
cur_arg = 1;
kw = flt_find_kw(args[cur_arg]);
if (kw) {
/* default name is keyword name, unless overriden by parse func */
fconf->name = kw->kw;
if (!kw->parse) {
memprintf(err, "parsing [%s:%d] : '%s' : "
"'%s' option is not implemented in this version (check build options).",
@ -297,136 +295,6 @@ parse_filter(char **args, int section_type, struct proxy *curpx,
}
/*
* Parses the "filter-sequence" keyword
*/
/* Parses the "filter-sequence" proxy keyword:
 *   filter-sequence { request | response } <name>[,<name>...]
 * The comma-separated name list in args[2] is split and one
 * filter_sequence_elt per name is appended to the proxy's request or
 * response sequence list. Names are only resolved to flt_conf entries
 * later, by postcheck_filter_sequence(). The keyword is rejected in a
 * 'defaults' section. Returns 0 on success, -1 on error with a message
 * allocated into <err>.
 */
static int
parse_filter_sequence(char **args, int section_type, struct proxy *curpx,
                      const struct proxy *defpx, const char *file, int line, char **err)
{
	/* filter-sequence cannot be defined on a default proxy */
	if (curpx == defpx) {
		memprintf(err, "parsing [%s:%d] : %s is not allowed in a 'default' section.",
		          file, line, args[0]);
		return -1;
	}

	if (strcmp(args[0], "filter-sequence") == 0) {
		struct list *list;
		char *str;
		size_t cur_sep;

		/* args[1] selects the direction (request/response) */
		if (!*args[1]) {
			memprintf(err,
			          "parsing [%s:%d] : missing argument for '%s' in %s '%s'.",
			          file, line, args[0], proxy_type_str(curpx), curpx->id);
			goto error;
		}
		if (!strcmp(args[1], "request"))
			list = &curpx->filter_sequence.req;
		else if (!strcmp(args[1], "response"))
			list = &curpx->filter_sequence.res;
		else {
			memprintf(err,
			          "parsing [%s:%d] : expected either 'request' or 'response' for '%s' in %s '%s'.",
			          file, line, args[0], proxy_type_str(curpx), curpx->id);
			goto error;
		}

		/* args[2] is the comma-separated filter name list */
		if (!*args[2]) {
			memprintf(err,
			          "parsing [%s:%d] : missing filter list for '%s' in %s '%s'.",
			          file, line, args[0], proxy_type_str(curpx), curpx->id);
			goto error;
		}

		str = args[2];
		while (str[0]) {
			struct filter_sequence_elt *elt;

			elt = calloc(1, sizeof(*elt));
			if (!elt) {
				memprintf(err, "'%s %s' : out of memory", args[0], args[1]);
				goto error;
			}
			/* take the substring up to the next ',' as the name */
			cur_sep = strcspn(str, ",");
			elt->flt_name = my_strndup(str, cur_sep);
			if (!elt->flt_name) {
				ha_free(&elt);
				goto error;
			}
			LIST_APPEND(list, &elt->list);
			/* skip the separator unless we reached the end of string */
			if (str[cur_sep])
				str += cur_sep + 1;
			else
				str += cur_sep;
		}
	}
	return 0;

  error:
	return -1;
}
static int compile_filter_sequence_elt(struct proxy *px, struct filter_sequence_elt *elt, char **errmsg)
{
struct flt_conf *fconf;
int ret = ERR_NONE;
list_for_each_entry(fconf, &px->filter_configs, list) {
if (!strcmp(elt->flt_name, fconf->name)) {
elt->flt_conf = fconf;
break;
}
}
if (!elt->flt_conf) {
memprintf(errmsg, "invalid filter name: '%s' is not defined on the proxy", elt->flt_name);
ret = ERR_FATAL;
}
return ret;
}
/* after config is checked, time to resolve filter-sequence (both request and response)
* used on the proxy in order to associate filter names with valid flt_conf entries
* this will help decrease filter lookup time during runtime (filter ids are compared
* using their address, not string content)
*/
/* Post-proxy-check callback run once the configuration is fully parsed:
 * resolves each filter name listed in the proxy's request and response
 * filter-sequence into its flt_conf entry, so that runtime lookups can
 * compare pointers instead of string contents. Returns ERR_NONE on success
 * or the fatal ERR_* code from compile_filter_sequence_elt() after emitting
 * an alert.
 */
static int postcheck_filter_sequence(struct proxy *px)
{
	struct filter_sequence_elt *elt;
	char *errmsg = NULL;
	int ret = ERR_NONE;

	/* resolve the request-side sequence */
	list_for_each_entry(elt, &px->filter_sequence.req, list) {
		ret = compile_filter_sequence_elt(px, elt, &errmsg);
		if (ret & ERR_CODE) {
			memprintf(&errmsg, "error while postparsing request filter-sequence '%s' : %s", elt->flt_name, errmsg);
			goto error;
		}
	}
	/* resolve the response-side sequence */
	list_for_each_entry(elt, &px->filter_sequence.res, list) {
		ret = compile_filter_sequence_elt(px, elt, &errmsg);
		if (ret & ERR_CODE) {
			memprintf(&errmsg, "error while postparsing response filter-sequence '%s' : %s", elt->flt_name, errmsg);
			goto error;
		}
	}
	return ret;

  error:
	ha_alert("%s: %s\n", px->id, errmsg);
	ha_free(&errmsg);
	return ret;
}
REGISTER_POST_PROXY_CHECK(postcheck_filter_sequence);
/*
* Calls 'init' callback for all filters attached to a proxy. This happens after
* the configuration parsing. Filters can finish to fill their config. Returns
@ -531,7 +399,6 @@ void
flt_deinit(struct proxy *proxy)
{
struct flt_conf *fconf, *back;
struct filter_sequence_elt *fsequence, *fsequenceb;
list_for_each_entry_safe(fconf, back, &proxy->filter_configs, list) {
if (fconf->ops->deinit)
@ -539,16 +406,6 @@ flt_deinit(struct proxy *proxy)
LIST_DELETE(&fconf->list);
free(fconf);
}
list_for_each_entry_safe(fsequence, fsequenceb, &proxy->filter_sequence.req, list) {
LIST_DEL_INIT(&fsequence->list);
ha_free(&fsequence->flt_name);
ha_free(&fsequence);
}
list_for_each_entry_safe(fsequence, fsequenceb, &proxy->filter_sequence.res, list) {
LIST_DEL_INIT(&fsequence->list);
ha_free(&fsequence->flt_name);
ha_free(&fsequence);
}
}
/*
@ -579,7 +436,7 @@ flt_deinit_all_per_thread()
/* Attaches a filter to a stream. Returns -1 if an error occurs, 0 otherwise. */
static int
flt_stream_add_filter(struct stream *s, struct proxy *px, struct flt_conf *fconf, unsigned int flags)
flt_stream_add_filter(struct stream *s, struct flt_conf *fconf, unsigned int flags)
{
struct filter *f;
@ -602,38 +459,18 @@ flt_stream_add_filter(struct stream *s, struct proxy *px, struct flt_conf *fconf
}
LIST_APPEND(&strm_flt(s)->filters, &f->list);
LIST_INIT(&f->req_list);
LIST_INIT(&f->res_list);
/* use filter config ordering unless filter-sequence says otherwise */
if (LIST_ISEMPTY(&px->filter_sequence.req))
LIST_APPEND(&s->req.flt.filters, &f->req_list);
if (LIST_ISEMPTY(&px->filter_sequence.res))
LIST_APPEND(&s->res.flt.filters, &f->res_list);
/* for now f->req_list == f->res_list to preserve
* historical behavior, but the ordering will change
* in the future
*/
LIST_APPEND(&s->req.flt.filters, &f->req_list);
LIST_APPEND(&s->res.flt.filters, &f->res_list);
strm_flt(s)->flags |= STRM_FLT_FL_HAS_FILTERS;
return 0;
}
/* Orders the stream's per-channel filter lists according to the proxy's
 * filter-sequence. For each sequence element (in configured order), every
 * attached filter whose config matches and which is not yet linked is
 * appended to the corresponding channel list. Filters not named in a
 * sequence were already linked by flt_stream_add_filter().
 */
static void flt_stream_organize_filters(struct stream *s, struct proxy *px)
{
	struct filter_sequence_elt *seq_elt;
	struct filter *flt;

	/* request side: append matching, not-yet-linked filters in sequence order */
	list_for_each_entry(seq_elt, &px->filter_sequence.req, list) {
		list_for_each_entry(flt, &strm_flt(s)->filters, list) {
			if (flt->config != seq_elt->flt_conf || LIST_INLIST(&flt->req_list))
				continue;
			LIST_APPEND(&s->req.flt.filters, &flt->req_list);
		}
	}
	/* response side: same logic on the response filter list */
	list_for_each_entry(seq_elt, &px->filter_sequence.res, list) {
		list_for_each_entry(flt, &strm_flt(s)->filters, list) {
			if (flt->config != seq_elt->flt_conf || LIST_INLIST(&flt->res_list))
				continue;
			LIST_APPEND(&s->res.flt.filters, &flt->res_list);
		}
	}
}
/*
* Called when a stream is created. It attaches all frontend filters to the
* stream. Returns -1 if an error occurs, 0 otherwise.
@ -650,10 +487,9 @@ flt_stream_init(struct stream *s)
memset(&s->res.flt, 0, sizeof(s->res.flt));
LIST_INIT(&s->res.flt.filters);
list_for_each_entry(fconf, &strm_fe(s)->filter_configs, list) {
if (flt_stream_add_filter(s, strm_fe(s), fconf, 0) < 0)
if (flt_stream_add_filter(s, fconf, 0) < 0)
return -1;
}
flt_stream_organize_filters(s, strm_fe(s));
return 0;
}
@ -767,12 +603,10 @@ flt_set_stream_backend(struct stream *s, struct proxy *be)
goto end;
list_for_each_entry(fconf, &be->filter_configs, list) {
if (flt_stream_add_filter(s, be, fconf, FLT_FL_IS_BACKEND_FILTER) < 0)
if (flt_stream_add_filter(s, fconf, FLT_FL_IS_BACKEND_FILTER) < 0)
return -1;
}
flt_stream_organize_filters(s, be);
end:
list_for_each_entry(filter, &strm_flt(s)->filters, list) {
if (FLT_OPS(filter)->stream_set_backend) {
@ -1395,7 +1229,6 @@ handle_analyzer_result(struct stream *s, struct channel *chn,
* not enabled. */
static struct cfg_kw_list cfg_kws = {ILH, {
{ CFG_LISTEN, "filter", parse_filter },
{ CFG_LISTEN, "filter-sequence", parse_filter_sequence },
{ 0, NULL, NULL },
}
};

View File

@ -1033,7 +1033,6 @@ parse_http_comp_flt(char **args, int *cur_arg, struct proxy *px,
}
fconf->id = http_comp_req_flt_id;
fconf->name = "comp-req";
fconf->conf = NULL;
fconf->ops = &comp_req_ops;
@ -1049,7 +1048,6 @@ parse_http_comp_flt(char **args, int *cur_arg, struct proxy *px,
return -1;
}
fconf_res->id = http_comp_res_flt_id;
fconf_res->name = "comp-res";
fconf_res->conf = NULL;
fconf_res->ops = &comp_res_ops;

View File

@ -1114,7 +1114,7 @@ static int spoe_process_event(struct stream *s, struct spoe_context *ctx,
}
else if (ret == 0) {
if ((s->scf->flags & SC_FL_ERROR) ||
((s->scf->flags & SC_FL_EOS) && proxy_abrt_close_def(s->be, 1))) {
((s->scf->flags & (SC_FL_EOS|SC_FL_ABRT_DONE)) && proxy_abrt_close_def(s->be, 1))) {
ctx->status_code = SPOE_CTX_ERR_INTERRUPT;
spoe_stop_processing(agent, ctx);
spoe_handle_processing_error(s, agent, ctx, dir);

View File

@ -1121,8 +1121,6 @@ static int read_cfg()
setenv("HAPROXY_HTTPS_LOG_FMT", default_https_log_format, 1);
setenv("HAPROXY_TCP_LOG_FMT", default_tcp_log_format, 1);
setenv("HAPROXY_TCP_CLF_LOG_FMT", clf_tcp_log_format, 1);
setenv("HAPROXY_KEYLOG_FC_LOG_FMT", keylog_format_fc, 1);
setenv("HAPROXY_KEYLOG_BC_LOG_FMT", keylog_format_bc, 1);
setenv("HAPROXY_BRANCH", PRODUCT_BRANCH, 1);
list_for_each_entry(cfg, &cfg_cfgfiles, list) {
int ret;
@ -2824,7 +2822,7 @@ void deinit(void)
* they are respectively cleaned up in sink_deinit() and deinit_log_forward()
*/
/* If named defaults were preserved, ensure <def_ref> count is reset. */
/* If named defaults were preserved, ensure <def_ref> count is reset. */
if (!(global.tune.options & GTUNE_PURGE_DEFAULTS))
defaults_px_unref_all();

View File

@ -1,6 +1,3 @@
#define _GNU_SOURCE
#include <fcntl.h>
#include <haproxy/buf.h>
#include <haproxy/cfgparse.h>
#include <haproxy/chunk.h>
@ -8,8 +5,6 @@
#include <haproxy/hstream-t.h>
#include <haproxy/http_htx.h>
#include <haproxy/http.h>
#include <haproxy/istbuf.h>
#include <haproxy/pipe.h>
#include <haproxy/pool.h>
#include <haproxy/proxy-t.h>
#include <haproxy/sc_strm.h>
@ -29,11 +24,6 @@ DECLARE_TYPED_POOL(pool_head_hstream, "hstream", struct hstream);
#define HS_ST_HTTP_HELP 0x0010
#define HS_ST_HTTP_EXPECT 0x0020
#define HS_ST_HTTP_RESP_SL_SENT 0x0040
#define HS_ST_OPT_CHUNK_RES 0x0080 /* chunk-encoded response (?k=1) */
#define HS_ST_OPT_REQ_AFTER_RES 0x0100 /* drain the request payload after the response (?A=1) */
#define HS_ST_OPT_RANDOM_RES 0x0200 /* random response (?R=1) */
#define HS_ST_OPT_NO_CACHE 0x0400 /* non-cacheable resposne (?c=0) */
#define HS_ST_OPT_NO_SPLICING 0x0800 /* no splicing (?S=1) */
const char *HTTP_HELP =
"HAProxy's dummy HTTP server for benchmarks - version " HAPROXY_VERSION ".\n"
@ -53,28 +43,23 @@ const char *HTTP_HELP =
" E.g. /?K=1\n"
" - /?t=<time> wait <time> milliseconds before responding.\n"
" E.g. /?t=500\n"
" - /?k=<enable> Enable transfer encoding chunked with only one chunk if >0 (disable fast-forward and splicing).\n"
" - /?S=<enable> Disable use of splice() to send data if <1.\n"
" - /?k=<enable> Enable transfer encoding chunked with only one chunk if >0.\n"
" - /?R=<enable> Enable sending random data if >0.\n"
"\n"
"Note that those arguments may be cumulated on one line separated by a set of\n"
"delimiters among [&?,;/] :\n"
"delimitors among [&?,;/] :\n"
" - GET /?s=20k&c=1&t=700&K=30r HTTP/1.0\n"
" - GET /?r=500?s=0?c=0?t=1000 HTTP/1.0\n"
"\n";
char common_response[RESPSIZE];
char common_chunk_resp[RESPSIZE];
char *random_resp;
int random_resp_len = RESPSIZE;
#if defined(USE_LINUX_SPLICE)
struct pipe *master_pipe = NULL;
size_t master_pipesize = 0;
#endif
static size_t hstream_add_ff_data(struct hstream *hs, struct sedesc *sd, unsigned long long len);
static size_t hstream_add_htx_data(struct hstream *hs, struct htx *htx, unsigned long long len);
/* Size in bytes of the prebuilts response buffers */
#define RESPSIZE 16384
/* Number of bytes by body response line */
#define HS_COMMON_RESPONSE_LINE_SZ 50
static char common_response[RESPSIZE];
static char common_chunk_resp[RESPSIZE];
static char *random_resp;
static int random_resp_len = RESPSIZE;
#define TRACE_SOURCE &trace_haterm
struct trace_source trace_haterm;
@ -214,7 +199,7 @@ struct task *sc_hstream_io_cb(struct task *t, void *ctx, unsigned int state)
task_wakeup(hs->task, TASK_WOKEN_IO);
}
if (((!(hs->flags & HS_ST_OPT_REQ_AFTER_RES) || !hs->to_write) && hs->req_body) ||
if (((!hs->req_after_res || !hs->to_write) && hs->req_body) ||
!htx_is_empty(htxbuf(&hs->req))) {
TRACE_STATE("waking up task", HS_EV_HSTRM_IO_CB, hs);
task_wakeup(hs->task, TASK_WOKEN_IO);
@ -311,76 +296,6 @@ static int hstream_htx_buf_rcv(struct connection *conn, struct hstream *hs)
goto leave;
}
/* Tries to send <hs>'s remaining response payload (hs->to_write bytes) over
 * <conn> using the endpoint fast-forward interface, going through
 * splice()/tee() from the master pipe when USE_LINUX_SPLICE is enabled and
 * permitted. Returns >= 0 on success (presumably the amount resumed/teed —
 * confirm against callers) or -1 when the endpoint does not support
 * fast-forwarding, in which case the caller must fall back to the HTX path.
 */
static int hstream_ff_snd(struct connection *conn, struct hstream *hs)
{
	size_t len;
	unsigned int nego_flags = NEGO_FF_FL_NONE;
	struct sedesc *sd = hs->sc->sedesc;
	int ret = 0;

	/* First try to resume FF*/
	if (se_have_ff_data(sd)) {
		ret = CALL_MUX_WITH_RET(conn->mux, resume_fastfwd(hs->sc, 0));
		if (ret > 0)
			sd->iobuf.flags &= ~IOBUF_FL_FF_BLOCKED;
	}

	nego_flags |= NEGO_FF_FL_EXACT_SIZE;
#if defined(USE_LINUX_SPLICE)
	/* advertise splicing only when globally enabled and not refused by
	 * the endpoint nor by the request options (?S=1)
	 */
	if ((global.tune.options & GTUNE_USE_SPLICE) &&
	    !(sd->iobuf.flags & IOBUF_FL_NO_SPLICING) &&
	    !(hs->flags & HS_ST_OPT_NO_SPLICING))
		nego_flags |= NEGO_FF_FL_MAY_SPLICE;
#endif
	/* negotiate how much can be fast-forwarded right now */
	len = se_nego_ff(sd, &BUF_NULL, hs->to_write, nego_flags);
	if (sd->iobuf.flags & IOBUF_FL_NO_FF) {
		TRACE_DEVEL("Fast-forwarding not supported by endpoint, disable it", HS_EV_HSTRM_RESP, hs);
		goto abort;
	}
	if (sd->iobuf.flags & IOBUF_FL_FF_BLOCKED) {
		TRACE_DEVEL("Fast-forwarding blocked", HS_EV_HSTRM_RESP, hs);
		goto out;
	}

#if defined(USE_LINUX_SPLICE)
	if (sd->iobuf.pipe) {
		/* duplicate payload from the master pipe into the endpoint's
		 * pipe without consuming it, capped to the master pipe size
		 */
		if (len > master_pipesize)
			len = master_pipesize;
		ret = tee(master_pipe->cons, sd->iobuf.pipe->prod, len, SPLICE_F_NONBLOCK);
		if (ret > 0) {
			sd->iobuf.pipe->data += ret;
			hs->to_write -= ret;
		}
		if (!hs->to_write)
			sd->iobuf.flags |= IOBUF_FL_EOI;
		goto done;
	}
#endif
	/* buffer-based fast-forward of pre-built payload */
	hs->to_write -= hstream_add_ff_data(hs, sd, len);
	if (!hs->to_write)
		sd->iobuf.flags |= IOBUF_FL_EOI;

  done:
	if (se_done_ff(sd) != 0 || !(sd->iobuf.flags & (IOBUF_FL_FF_BLOCKED|IOBUF_FL_FF_WANT_ROOM))) {
		/* Something was forwarding or the consumer states it is not
		 * blocked anymore, don't reclaim more room */
	}

	if (se_have_ff_data(sd)) {
		/* data not fully consumed by the mux: wait for a send event */
		TRACE_DEVEL("data not fully sent, wait", HS_EV_HSTRM_SEND, hs);
		conn->mux->subscribe(hs->sc, SUB_RETRY_SEND, &hs->sc->wait_event);
	}
	else if (hs->to_write) {
		TRACE_STATE("waking up task", HS_EV_HSTRM_IO_CB, hs);
		task_wakeup(hs->task, TASK_WOKEN_IO);
	}

  out:
	return ret;

  abort:
	return -1;
}
/* Send HTX data prepared for <hs> haterm stream from <conn> connection */
static int hstream_htx_buf_snd(struct connection *conn, struct hstream *hs)
{
@ -392,7 +307,7 @@ static int hstream_htx_buf_snd(struct connection *conn, struct hstream *hs)
if (!htxbuf(&hs->res)->data) {
/* This is possible after having drained the body, so after
* having sent the response here when HS_ST_OPT_REQ_AFTER_RES flag is set.
* having sent the response here when req_after_res=1.
*/
ret = 1;
goto out;
@ -408,7 +323,7 @@ static int hstream_htx_buf_snd(struct connection *conn, struct hstream *hs)
}
/* The HTX data are not fully sent if the last HTX data
* were not fully transferred or if there are remaining data
* were not fully transferred or if there are remaining data
* to send (->to_write > 0).
*/
if (!htx_is_empty(htxbuf(&hs->res))) {
@ -532,49 +447,12 @@ err:
goto leave;
}
/* Copies up to <len> bytes of pre-built response payload into <sd>'s
 * negotiated fast-forward buffer. The source is the random response buffer
 * when HS_ST_OPT_RANDOM_RES is set, otherwise the common pattern buffer,
 * and the read offset keeps the output pattern aligned across calls.
 * Returns the number of bytes actually added (possibly 0 when the
 * destination buffer has no room).
 * NOTE(review): the offset is computed from (req_size - len) while the HTX
 * variant uses (req_size - to_write); when len < to_write the pattern
 * alignment differs — confirm this is intended.
 */
static size_t hstream_add_ff_data(struct hstream *hs, struct sedesc *sd, unsigned long long len)
{
	size_t ret;
	char *data_ptr;
	unsigned int offset;
	char *buffer;
	size_t buffer_len;
	int modulo;

	TRACE_ENTER(HS_EV_HSTRM_ADD_DATA, hs);

	/* temporarily account for the reserved offset while appending */
	b_add(sd->iobuf.buf, sd->iobuf.offset);
	if (hs->flags & HS_ST_OPT_RANDOM_RES) {
		buffer = random_resp;
		buffer_len = random_resp_len;
		modulo = random_resp_len;
	}
	else {
		buffer = common_response;
		buffer_len = sizeof(common_response);
		modulo = HS_COMMON_RESPONSE_LINE_SZ;
	}

	/* continue the pattern where the previous call stopped */
	offset = (hs->req_size - len) % modulo;
	data_ptr = buffer + offset;
	/* never read past the end of the source buffer */
	if (len > (unsigned long long)(buffer_len - offset))
		len = (unsigned long long)(buffer_len - offset);

	ret = b_putist(sd->iobuf.buf, ist2(data_ptr, len));
	if (!ret)
		TRACE_STATE("unable to fast-forward payload", HS_EV_HSTRM_ADD_DATA, hs);
	b_sub(sd->iobuf.buf, sd->iobuf.offset);
	sd->iobuf.data += ret;
	TRACE_LEAVE(HS_EV_HSTRM_ADD_DATA, hs);
	return ret;
}
/* Add data to HTX response buffer from pre-built responses */
static size_t hstream_add_htx_data(struct hstream *hs, struct htx *htx, unsigned long long len)
static void hstream_add_data(struct htx *htx, struct hstream *hs)
{
size_t ret;
int ret;
char *data_ptr;
unsigned long long max;
unsigned int offset;
char *buffer;
size_t buffer_len;
@ -582,12 +460,12 @@ static size_t hstream_add_htx_data(struct hstream *hs, struct htx *htx, unsigned
TRACE_ENTER(HS_EV_HSTRM_ADD_DATA, hs);
if (hs->flags & HS_ST_OPT_CHUNK_RES) {
if (hs->req_chunked) {
buffer = common_chunk_resp;
buffer_len = sizeof(common_chunk_resp);
modulo = sizeof(common_chunk_resp);
}
else if (hs->flags & HS_ST_OPT_RANDOM_RES) {
else if (hs->req_random) {
buffer = random_resp;
buffer_len = random_resp_len;
modulo = random_resp_len;
@ -598,16 +476,19 @@ static size_t hstream_add_htx_data(struct hstream *hs, struct htx *htx, unsigned
modulo = HS_COMMON_RESPONSE_LINE_SZ;
}
offset = (hs->req_size - len) % modulo;
offset = (hs->req_size - hs->to_write) % modulo;
data_ptr = buffer + offset;
if (len > (unsigned long long)(buffer_len - offset))
len = (unsigned long long)(buffer_len - offset);
max = hs->to_write;
if (max > (unsigned long long)(buffer_len - offset))
max = (unsigned long long)(buffer_len - offset);
ret = htx_add_data(htx, ist2(data_ptr, len));
ret = htx_add_data(htx, ist2(data_ptr, max));
if (!ret)
TRACE_STATE("unable to add payload to HTX message", HS_EV_HSTRM_ADD_DATA, hs);
hs->to_write -= ret;
TRACE_LEAVE(HS_EV_HSTRM_ADD_DATA, hs);
return ret;
return;
}
/* Build the HTTP response with eventually some BODY data depending on ->to_write
@ -618,7 +499,7 @@ static int hstream_build_http_resp(struct hstream *hs)
int ret = 0;
struct buffer *buf;
struct htx *htx;
unsigned int flags = HTX_SL_F_IS_RESP | HTX_SL_F_XFER_LEN | (!(hs->flags & HS_ST_OPT_CHUNK_RES) ? HTX_SL_F_CLEN : 0);
unsigned int flags = HTX_SL_F_IS_RESP | HTX_SL_F_XFER_LEN | (!hs->req_chunked ? HTX_SL_F_CLEN : 0);
struct htx_sl *sl;
char *end;
@ -658,7 +539,7 @@ static int hstream_build_http_resp(struct hstream *hs)
}
}
if (!(hs->flags & HS_ST_OPT_CHUNK_RES) && (hs->ka & 1)) {
if (!hs->req_chunked && (hs->ka & 1)) {
char *end = ultoa_o(hs->req_size, trash.area, trash.size);
if (!htx_add_header(htx, ist("Content-Length"), ist2(trash.area, end - trash.area))) {
TRACE_ERROR("could not add content-length HTX header", HS_EV_HSTRM_RESP, hs);
@ -666,7 +547,7 @@ static int hstream_build_http_resp(struct hstream *hs)
}
}
if ((hs->flags & HS_ST_OPT_NO_CACHE) && !htx_add_header(htx, ist("Cache-control"), ist("no-cache"))) {
if (!hs->req_cache && !htx_add_header(htx, ist("Cache-control"), ist("no-cache"))) {
TRACE_ERROR("could not add cache-control HTX header", HS_EV_HSTRM_RESP, hs);
goto err;
}
@ -689,11 +570,15 @@ static int hstream_build_http_resp(struct hstream *hs)
if (!end)
goto err;
trash.data = end - trash.area;
if (!chunk_strcat(&trash, ((hs->flags & HS_ST_OPT_NO_CACHE) ? ", cache=0" : ", cache=1"))) {
if (!chunk_strcat(&trash, ", cache=")) {
TRACE_ERROR("could not build x-rsp HTX header", HS_EV_HSTRM_RESP, hs);
goto err;
}
if ((hs->flags & HS_ST_OPT_CHUNK_RES) && !chunk_strcat(&trash, ", chunked,")) {
end = ultoa_o(hs->req_cache, trash.area + trash.data, trash.size - trash.data);
if (!end)
goto err;
trash.data = end - trash.area;
if (hs->req_chunked && !chunk_strcat(&trash, ", chunked,")) {
TRACE_ERROR("could not build x-rsp HTX header", HS_EV_HSTRM_RESP, hs);
goto err;
}
@ -718,7 +603,7 @@ static int hstream_build_http_resp(struct hstream *hs)
}
if (hs->to_write > 0)
hs->to_write -= hstream_add_htx_data(hs, htx, hs->to_write);
hstream_add_data(htx, hs);
if (hs->to_write <= 0)
htx->flags |= HTX_FL_EOM;
htx_to_buf(htx, buf);
@ -796,16 +681,10 @@ static void hstream_parse_uri(struct ist uri, struct hstream *hs)
hs->res_wait = MS_TO_TICKS(result << mult);
break;
case 'c':
if (result < 1)
hs->flags |= HS_ST_OPT_NO_CACHE;
else
hs->flags &= ~HS_ST_OPT_NO_CACHE;
hs->req_cache = result << mult;
break;
case 'A':
if (result > 0)
hs->flags |= HS_ST_OPT_REQ_AFTER_RES;
else
hs->flags &= ~HS_ST_OPT_REQ_AFTER_RES;
hs->req_after_res = result;
break;
case 'C':
hs->ka = (hs->ka & 4) | 2 | !result; // forced OFF
@ -814,22 +693,10 @@ static void hstream_parse_uri(struct ist uri, struct hstream *hs)
hs->ka = (hs->ka & 4) | 2 | !!result; // forced ON
break;
case 'k':
if (result > 0)
hs->flags |= HS_ST_OPT_CHUNK_RES;
else
hs->flags &= ~HS_ST_OPT_CHUNK_RES;
break;
case 'S':
if (result < 1)
hs->flags |= HS_ST_OPT_NO_SPLICING;
else
hs->flags &= ~HS_ST_OPT_NO_SPLICING;
hs->req_chunked = result;
break;
case 'R':
if (result > 0)
hs->flags |= HS_ST_OPT_RANDOM_RES;
else
hs->flags &= ~HS_ST_OPT_RANDOM_RES;
hs->req_random = result;
break;
}
arg = next;
@ -875,11 +742,11 @@ static inline int hstream_sl_hdrs_htx_buf_snd(struct hstream *hs,
/* Must be called before sending to determine if the body request must be
* drained asap before sending. Return 1 if this is the case, 0 if not.
* This is the case by default before sending the response except if
* the contrary has been asked with flag HS_ST_OPT_REQ_AFTER_RES.
* the contrary has been asked with ->req_after_res=0.
* Return true if the body request has not been fully drained (->hs->req_body>0)
* and if the response has been sent (hs->to_write=0 &&
* htx_is_empty(htxbuf(&hs->res) or if it must not be drained after having
* sent the response (HS_ST_OPT_REQ_AFTER_RES not set) or
* sent the response (->req_after_res=0) or
*/
static inline int hstream_must_drain(struct hstream *hs)
{
@ -887,20 +754,12 @@ static inline int hstream_must_drain(struct hstream *hs)
TRACE_ENTER(HS_EV_PROCESS_HSTRM, hs);
ret = !(hs->flags & HS_ST_CONN_ERROR) && hs->req_body > 0 &&
((!hs->to_write && htx_is_empty(htxbuf(&hs->res))) || !(hs->flags & HS_ST_OPT_REQ_AFTER_RES));
((!hs->to_write && htx_is_empty(htxbuf(&hs->res))) || !hs->req_after_res);
TRACE_LEAVE(HS_EV_PROCESS_HSTRM, hs);
return ret;
}
/* Report whether zero-copy fast-forwarding may be used to send this stream's
 * payload: it must not be globally disabled, the consumer endpoint must
 * advertise SE_FL_MAY_FASTFWD_CONS, the iobuf must not refuse it, the
 * response must not be chunked and there must still be payload to write.
 * Returns a boolean (0/1).
 */
static inline int hstream_is_fastfwd_supported(struct hstream *hs)
{
	if (global.tune.no_zero_copy_fwd & NO_ZERO_COPY_FWD)
		return 0;
	if (!sc_ep_test(hs->sc, SE_FL_MAY_FASTFWD_CONS))
		return 0;
	if (hs->sc->sedesc->iobuf.flags & IOBUF_FL_NO_FF)
		return 0;
	if (hs->flags & HS_ST_OPT_CHUNK_RES)
		return 0;
	return !!hs->to_write;
}
/* haterm stream processing task */
static struct task *process_hstream(struct task *t, void *context, unsigned int state)
{
@ -928,10 +787,6 @@ static struct task *process_hstream(struct task *t, void *context, unsigned int
struct htx_sl *sl = http_get_stline(htx);
struct http_hdr_ctx expect, clength;
/* we're starting to work with this endpoint, let's flag it */
if (unlikely(!sc_ep_test(hs->sc, SE_FL_APP_STARTED)))
sc_ep_set(hs->sc, SE_FL_APP_STARTED);
if (sl->flags & HTX_SL_F_VER_11)
hs->ka = 5;
@ -966,7 +821,7 @@ static struct task *process_hstream(struct task *t, void *context, unsigned int
}
if (hstream_must_drain(hs)) {
/* The request must be drained before sending the response (HS_ST_OPT_REQ_AFTER_RES not set).
/* The request must be drained before sending the response (hs->req_after_res=0).
* The body will be drained upon next wakeup.
*/
TRACE_STATE("waking up task", HS_EV_HSTRM_IO_CB, hs);
@ -986,7 +841,7 @@ static struct task *process_hstream(struct task *t, void *context, unsigned int
goto err;
if (hstream_must_drain(hs)) {
/* The request must be drained before sending the response (HS_ST_OPT_REQ_AFTER_RES not set).
/* The request must be drained before sending the response (hs->req_after_res=0).
* The body will be drained upon next wakeup.
*/
TRACE_STATE("waking up task", HS_EV_HSTRM_IO_CB, hs);
@ -997,7 +852,6 @@ static struct task *process_hstream(struct task *t, void *context, unsigned int
else {
struct buffer *buf;
struct htx *htx;
int ret = 0;
/* HTX RX part */
if (hstream_must_drain(hs)) {
@ -1019,19 +873,7 @@ static struct task *process_hstream(struct task *t, void *context, unsigned int
if (!hstream_sl_hdrs_htx_buf_snd(hs, conn))
goto err;
/* TX part */
if (hstream_is_fastfwd_supported(hs)) {
if (!htx_is_empty(htxbuf(&hs->res)))
goto flush_res_buf;
if (!hs->to_write && !se_have_ff_data(hs->sc->sedesc))
goto out;
ret = hstream_ff_snd(conn, hs);
if (ret >= 0)
goto send_done;
/* fallback to regular send */
}
/* HTX TX part */
if (!hs->to_write && htx_is_empty(htxbuf(&hs->res)))
goto out;
@ -1043,16 +885,13 @@ static struct task *process_hstream(struct task *t, void *context, unsigned int
htx = htx_from_buf(buf);
if (hs->to_write > 0)
hs->to_write -= hstream_add_htx_data(hs, htx, hs->to_write);
hstream_add_data(htx, hs);
if (hs->to_write <= 0)
htx->flags |= HTX_FL_EOM;
htx_to_buf(htx, &hs->res);
flush_res_buf:
hstream_htx_buf_snd(conn, hs);
send_done:
if (hs->req_body && (hs->flags & HS_ST_OPT_REQ_AFTER_RES) && !hs->to_write) {
if (hs->req_body && hs->req_after_res && !hs->to_write) {
/* Response sending has just complete. The body will be drained upon
* next wakeup.
*/
@ -1063,7 +902,7 @@ static struct task *process_hstream(struct task *t, void *context, unsigned int
}
out:
if (!hs->to_write && !hs->req_body && htx_is_empty(htxbuf(&hs->res)) && !se_have_ff_data(hs->sc->sedesc)) {
if (!hs->to_write && !hs->req_body && htx_is_empty(htxbuf(&hs->res))) {
TRACE_DEVEL("shutting down stream", HS_EV_HSTRM_SEND, hs);
CALL_MUX_NO_RET(conn->mux, shut(hs->sc, SE_SHW_SILENT|SE_SHW_NORMAL, NULL));
}
@ -1118,11 +957,15 @@ void *hstream_new(struct session *sess, struct stconn *sc, struct buffer *input)
hs->flags = 0;
hs->ka = 0;
hs->req_cache = 1;
hs->req_size = 0;
hs->req_body = 0;
hs->req_code = 200;
hs->res_wait = TICK_ETERNITY;
hs->res_time = TICK_ETERNITY;
hs->req_chunked = 0;
hs->req_random = 0;
hs->req_after_res = 0;
hs->req_meth = HTTP_METH_OTHER;
if (sc_conn(sc)) {
@ -1163,3 +1006,41 @@ void *hstream_new(struct session *sess, struct stconn *sc, struct buffer *input)
TRACE_DEVEL("leaving on error", HS_EV_HSTRM_NEW);
return NULL;
}
/* Build the pre-computed response payload buffers (common, chunked and
 * random) used when emitting responses.
 * Return 1 if succeeded, -1 if failed.
 */
static int hstream_build_responses(void)
{
	int i;

	/* Fill the plain response template: every HS_COMMON_RESPONSE_LINE_SZ-th
	 * byte ends a line, every 10th byte is a '.' marker, the remaining
	 * bytes cycle through the digits 0-9.
	 */
	for (i = 0; i < sizeof(common_response); i++) {
		if (i % HS_COMMON_RESPONSE_LINE_SZ == HS_COMMON_RESPONSE_LINE_SZ - 1)
			common_response[i] = '\n';
		else if (i % 10 == 0)
			common_response[i] = '.';
		else
			common_response[i] = '0' + i % 10;
	}

	/* original haterm chunk mode responses are made of 1-byte chunks
	 * but the haproxy muxes do not support this. At this time
	 * these responses are handled the same way as for common
	 * responses with a pre-built buffer.
	 */
	for (i = 0; i < sizeof(common_chunk_resp); i++)
		common_chunk_resp[i] = '1';

	/* random payload, selected by the 'R' URI option */
	random_resp = malloc(random_resp_len);
	if (!random_resp) {
		ha_alert("not enough memory...\n");
		return -1;
	}
	for (i = 0; i < random_resp_len; i++)
		random_resp[i] = ha_random32() >> 16;

	return 1;
}
REGISTER_POST_CHECK(hstream_build_responses);

View File

@ -1,14 +1,8 @@
#define _GNU_SOURCE
#include <fcntl.h>
#include <haproxy/api.h>
#include <haproxy/buf.h>
#include <haproxy/chunk.h>
#include <haproxy/errors.h>
#include <haproxy/global.h>
#include <haproxy/hstream.h>
#include <haproxy/pipe.h>
#include <haproxy/tools.h>
#include <haproxy/version.h>
static int haterm_debug;
@ -32,8 +26,6 @@ static void haterm_usage(char *name)
" -c <curves> : ECSDA curves (ex: \"P-256\", \"P-384\"...)\n"
" -v : shows version\n"
" -d : enable the traces for all http protocols\n"
" -dS : disables splice() usage even when available\n"
" -dZ : disable zero-copy forwarding\n"
" --" QUIC_BIND_LONG_OPT " <opts> : append options to QUIC \"bind\" lines\n"
" --" TCP_BIND_LONG_OPT " <opts> : append options to TCP \"bind\" lines\n"
, name);
@ -47,9 +39,6 @@ static void haterm_usage(char *name)
static const char *haterm_cfg_dflt_str =
"defaults\n"
"\tmode haterm\n"
#if defined(USE_LINUX_SPLICE)
"\toption splice-response\n"
#endif
"\ttimeout client 25s\n";
#define HATERM_CFG_CRT_STORE_STR_FMT \
@ -117,7 +106,7 @@ static inline size_t hbuf_is_null(const struct hbuf *h)
/* Simple function, to append <line> to <b> without without
* trailing '\0' character.
* Take into account the '\t' and '\n' escaped sequences.
* Take into account the '\t' and '\n' escaped sequences.
*/
static void hstream_str_buf_append(struct hbuf *h, const char *line)
{
@ -196,10 +185,6 @@ void haproxy_init_args(int argc, char **argv)
/* save the arguments */
sargc = argc; sargv = argv;
#if defined(USE_LINUX_SPLICE)
global.tune.options |= GTUNE_USE_SPLICE;
#endif
/* THIS PART MUST NOT MODIFY THE ARGUMENTS */
/* Parse the arguments which must be reused to build the conf. */
while (argc > 0) {
@ -230,7 +215,7 @@ void haproxy_init_args(int argc, char **argv)
argc--; argv++;
}
/* Restore the arguments */
/* Restore the arguments */
argc = sargc; argv = sargv;
while (argc > 0) {
char *opt;
@ -257,14 +242,6 @@ void haproxy_init_args(int argc, char **argv)
else
haterm_usage(progname);
}
#if defined(USE_LINUX_SPLICE)
else if (*opt == 'd' && *(opt+1) == 'S') {
global.tune.options &= ~GTUNE_USE_SPLICE;
}
#endif
else if (*opt == 'd' && *(opt+1) == 'Z') {
global.tune.no_zero_copy_fwd |= NO_ZERO_COPY_FWD;
}
else if (*opt == 'd') {
/* empty option */
if (*(opt + 1))
@ -503,103 +480,3 @@ char **copy_argv(int argc, char **argv)
return ret;
}
#if defined(USE_LINUX_SPLICE)
extern struct pipe *master_pipe;
extern size_t master_pipesize;
extern char common_response[RESPSIZE];
extern char common_chunk_resp[RESPSIZE];
extern char *random_resp;
extern int random_resp_len;
/* Preload the master pipe with the pre-built common response so its content
 * can later be spliced out without copying (presumably consumed by the
 * splice send path — confirm against the callers). Disables splicing
 * (clears GTUNE_USE_SPLICE) when no pipe can be allocated or when the kernel
 * cannot hold enough data in a pipe.
 */
static void hstream_init_splicing(void)
{
	if (!(global.tune.options & GTUNE_USE_SPLICE))
		return;

	/* default pipe size: 80 kB (65536 * 5 / 4) */
	if (!global.tune.pipesize)
		global.tune.pipesize = 65536 * 5 / 4;

	master_pipe = get_pipe();
	if (master_pipe) {
		struct iovec v = { .iov_base = common_response,
		                   .iov_len = sizeof(common_response) };
		int total, ret;

		/* feed the response buffer into the pipe until vmsplice()
		 * stops accepting data or the configured size is reached
		 */
		total = ret = 0;
		do {
			ret = vmsplice(master_pipe->prod, &v, 1, SPLICE_F_NONBLOCK);
			if (ret > 0)
				total += ret;
		} while (ret > 0 && total < global.tune.pipesize);

		master_pipesize = total;
		if (master_pipesize < global.tune.pipesize) {
			if (master_pipesize < 60*1024) {
				/* Older kernels were limited to around 60-61 kB */
				ha_warning("Failed to vmsplice response buffer after %lu bytes, splicing disabled\n", master_pipesize);
				global.tune.options &= ~GTUNE_USE_SPLICE;
				put_pipe(master_pipe);
				master_pipe = NULL;
			}
			else
				ha_warning("Splicing is limited to %lu bytes (too old kernel)\n", master_pipesize);
		}
	}
	else {
		ha_warning("Unable to allocate master pipe for splicing, splicing disabled\n");
		global.tune.options &= ~GTUNE_USE_SPLICE;
	}
}
/* Post-deinit hook: release the master pipe if one was allocated. */
static void hstream_deinit(void)
{
	if (!master_pipe)
		return;
	put_pipe(master_pipe);
}
REGISTER_POST_DEINIT(hstream_deinit);
INITCALL0(STG_INIT_2, hstream_init_splicing);
/* Pre-compute the response payload buffers used by the haterm streams.
 * Return 1 on success, -1 on allocation failure.
 */
static int hstream_build_responses(void)
{
	int pos;

	/* common template: line terminator every HS_COMMON_RESPONSE_LINE_SZ
	 * bytes, a '.' marker every 10 bytes, digits 0-9 elsewhere
	 */
	for (pos = 0; pos < sizeof(common_response); pos++) {
		if (pos % HS_COMMON_RESPONSE_LINE_SZ == HS_COMMON_RESPONSE_LINE_SZ - 1)
			common_response[pos] = '\n';
		else if (pos % 10 == 0)
			common_response[pos] = '.';
		else
			common_response[pos] = '0' + pos % 10;
	}

	/* original haterm chunk mode responses are made of 1-byte chunks
	 * but the haproxy muxes do not support this. At this time
	 * these responses are handled the same way as for common
	 * responses with a pre-built buffer.
	 */
	for (pos = 0; pos < sizeof(common_chunk_resp); pos++)
		common_chunk_resp[pos] = '1';

	/* random payload buffer, filled from the PRNG */
	random_resp = malloc(random_resp_len);
	if (!random_resp) {
		ha_alert("not enough memory...\n");
		return -1;
	}
	for (pos = 0; pos < random_resp_len; pos++)
		random_resp[pos] = ha_random32() >> 16;

	return 1;
}
REGISTER_POST_CHECK(hstream_build_responses);
#endif

View File

@ -20,7 +20,6 @@
#include <haproxy/action.h>
#include <haproxy/api.h>
#include <haproxy/arg.h>
#include <haproxy/base64.h>
#include <haproxy/capture-t.h>
#include <haproxy/cfgparse.h>
#include <haproxy/chunk.h>
@ -51,7 +50,6 @@ static void release_http_action(struct act_rule *rule)
if (rule->arg.http.re)
regex_free(rule->arg.http.re);
lf_expr_deinit(&rule->arg.http.fmt);
release_sample_expr(rule->arg.http.expr);
}
/* Release memory allocated by HTTP actions relying on an http reply. Concretely,
@ -1481,86 +1479,6 @@ static enum act_return http_action_set_header(struct act_rule *rule, struct prox
goto leave;
}
/* This function executes a set-headers-bin or add-headers-bin action.
 * The rule's sample expression is fetched as binary and decoded as a
 * sequence of (name-length, name, value-length, value) records, each length
 * being a varint. A record starting with a zero name length must be followed
 * by a zero value length and terminates the stream; anything else is invalid
 * encoding. rule->action selects the mode: 0 = set-header (existing
 * occurrences are removed first), otherwise add-header.
 * Returns ACT_RET_CONT on success (or when the sample cannot be fetched),
 * ACT_RET_ERR on invalid encoding or on a hard rewrite failure.
 */
static enum act_return http_action_set_headers_bin(struct act_rule *rule, struct proxy *px,
                                                   struct session *sess, struct stream *s, int flags)
{
	/* operate on the request or response message depending on rule origin */
	struct http_msg *msg = ((rule->from == ACT_F_HTTP_REQ) ? &s->txn->req : &s->txn->rsp);
	struct htx *htx = htxbuf(&msg->chn->buf);
	struct sample *hdrs_bin;
	char *p, *end;
	enum act_return ret = ACT_RET_CONT;
	struct http_hdr_ctx ctx;
	struct ist n, v;
	uint64_t sz = 0;

	hdrs_bin = sample_fetch_as_type(px, sess, s, SMP_OPT_FINAL, rule->arg.http.expr, SMP_T_BIN);
	if (!hdrs_bin)
		return ACT_RET_CONT;

	p = b_orig(&hdrs_bin->data.u.str);
	end = b_tail(&hdrs_bin->data.u.str);
	while (p < end) {
		if (decode_varint(&p, end, &sz) == -1)
			goto fail_rewrite;
		if (!sz) {
			/* zero name length: the value length must also be
			 * zero, marking the end of the encoded stream
			 */
			if (decode_varint(&p, end, &sz) == -1 || sz > 0)
				goto fail_rewrite;
			goto leave;
		}
		n = ist2(p, sz);
		p += sz;
		if (decode_varint(&p, end, &sz) == -1)
			goto fail_rewrite;
		v = ist2(p, sz);
		p += sz;

		/* optional "prefix" filter: skip names that do not match */
		if (istlen(rule->arg.http.str) && !istmatch(n, rule->arg.http.str))
			continue;

		/* never touch headers that must not be rewritten */
		if (is_immutable_header(n))
			continue;

		if (rule->action == 0) { // set-header
			/* remove all occurrences of the header */
			ctx.blk = NULL;
			while (http_find_header(htx, n, &ctx, 1))
				http_remove_header(htx, &ctx);
		}

		/* Now add header */
		if (!http_add_header(htx, n, v))
			goto fail_rewrite;
	}

	/* falling out of the loop means no terminator record was seen:
	 * invalid encoding
	 */
	ret = ACT_RET_ERR;

  leave:
	return ret;

  fail_rewrite:
	/* account the failed rewrite on every relevant counter set */
	if (sess->fe_tgcounters)
		_HA_ATOMIC_INC(&sess->fe_tgcounters->failed_rewrites);
	if ((s->flags & SF_BE_ASSIGNED) && s->be_tgcounters)
		_HA_ATOMIC_INC(&s->be_tgcounters->failed_rewrites);
	if (sess->li_tgcounters)
		_HA_ATOMIC_INC(&sess->li_tgcounters->failed_rewrites);
	if (s->sv_tgcounters)
		_HA_ATOMIC_INC(&s->sv_tgcounters->failed_rewrites);

	if (!(msg->flags & HTTP_MSGF_SOFT_RW)) {
		/* hard rewrite mode: propagate the error to the stream */
		ret = ACT_RET_ERR;
		if (!(s->flags & SF_ERR_MASK))
			s->flags |= SF_ERR_PRXCOND;
	}
	goto leave;
}
/* Parse a "set-header", "add-header" or "early-hint" actions. It takes an
* header name and a log-format string as arguments. It returns ACT_RET_PRS_OK
* on success, ACT_RET_PRS_ERR on error.
@ -1639,64 +1557,6 @@ static enum act_parse_ret parse_http_set_header(const char **args, int *orig_arg
return ACT_RET_PRS_OK;
}
/* Parse a "set-headers-bin" or "add-headers-bin" action. It takes a sample
 * expression as required argument, optionally followed by "prefix <pfx>" to
 * restrict the operation to header names starting with <pfx>.
 * It returns ACT_RET_PRS_OK on success, ACT_RET_PRS_ERR on error.
 */
static enum act_parse_ret parse_http_set_headers_bin(const char **args, int *orig_arg, struct proxy *px,
                                                     struct act_rule *rule, char **err)
{
	struct sample_expr *expr;
	unsigned int where;
	int cur_arg;

	/* the keyword's first letter distinguishes "set-…" from "add-…" */
	if (args[*orig_arg-1][0] == 's')
		rule->action = 0; // set-header
	else
		rule->action = 1; // add-header
	rule->action_ptr = http_action_set_headers_bin;
	rule->release_ptr = release_http_action;
	lf_expr_init(&rule->arg.http.fmt);

	cur_arg = *orig_arg;
	if (!*args[cur_arg]) {
		memprintf(err, "expects exactly one argument or three arguments <headers> prefix <pfx>");
		return ACT_RET_PRS_ERR;
	}

	expr = sample_parse_expr((char **)args, &cur_arg, px->conf.args.file, px->conf.args.line,
	                         err, &px->conf.args, NULL);
	if (!expr)
		return ACT_RET_PRS_ERR;

	/* check that the sample is fetchable at this rule's evaluation point */
	where = 0;
	if (px->cap & PR_CAP_FE)
		where |= (rule->from == ACT_F_HTTP_REQ ? SMP_VAL_FE_HRQ_HDR : SMP_VAL_FE_HRS_HDR);
	if (px->cap & PR_CAP_BE)
		where |= (rule->from == ACT_F_HTTP_REQ ? SMP_VAL_BE_HRQ_HDR : SMP_VAL_BE_HRS_HDR);

	if (!(expr->fetch->val & where)) {
		memprintf(err, "fetch method '%s' extracts information from '%s', none of which is available here",
		          args[cur_arg-1], sample_src_names(expr->fetch->use));
		release_sample_expr(expr);
		return ACT_RET_PRS_ERR;
	}

	/* Check if an argument is available */
	if (strcmp(args[cur_arg], "prefix") == 0 ) {
		cur_arg++;
		if(!*args[cur_arg]) {
			memprintf(err, "expects 1 argument: <headers>; or 3 arguments: <headers> prefix <pfx>");
			release_sample_expr(expr);
			return ACT_RET_PRS_ERR;
		}
		/* store the name prefix used to filter decoded headers */
		rule->arg.http.str = ist(strdup(args[cur_arg]));
		cur_arg++;
	}

	rule->arg.http.expr = expr;
	*orig_arg = cur_arg;
	return ACT_RET_PRS_OK;
}
/* This function executes a replace-header or replace-value actions. It
* builds a string in the trash from the specified format string. It finds
* the action to be performed in <.action>, previously filled by function
@ -1907,166 +1767,6 @@ static enum act_parse_ret parse_http_del_header(const char **args, int *orig_arg
return ACT_RET_PRS_OK;
}
/* This function executes a del-headers-bin action with the selected matching
 * mode for header names. The rule's sample expression is fetched as binary
 * and decoded as a sequence of varint-length-prefixed names; a zero length
 * terminates the stream. The matching method to apply is found in <.action>,
 * previously filled by parse_http_del_headers_bin(). On success, it returns
 * ACT_RET_CONT. Otherwise ACT_RET_ERR is returned.
 */
static enum act_return http_action_del_headers_bin(struct act_rule *rule, struct proxy *px,
                                                   struct session *sess, struct stream *s, int flags)
{
	struct http_hdr_ctx ctx;
	/* operate on the request or response message depending on rule origin */
	struct http_msg *msg = ((rule->from == ACT_F_HTTP_REQ) ? &s->txn->req : &s->txn->rsp);
	struct htx *htx = htxbuf(&msg->chn->buf);
	struct sample *hdrs_bin;
	char *p, *end;
	enum act_return ret = ACT_RET_CONT;
	struct ist n;
	uint64_t sz = 0;

	hdrs_bin = sample_fetch_as_type(px, sess, s, SMP_OPT_FINAL, rule->arg.http.expr, SMP_T_BIN);
	if (!hdrs_bin)
		return ACT_RET_CONT;

	p = b_orig(&hdrs_bin->data.u.str);
	end = b_tail(&hdrs_bin->data.u.str);
	while (p < end) {
		if (decode_varint(&p, end, &sz) == -1)
			goto fail_rewrite;
		/* a zero-length name marks the end of the encoded stream */
		if (!sz)
			goto leave;
		n = ist2(p, sz);
		p += sz;

		/* never touch headers that must not be rewritten */
		if (is_immutable_header(n))
			continue;

		/* remove all occurrences of the header */
		ctx.blk = NULL;
		switch (rule->action) {
		case PAT_MATCH_STR:
			while (http_find_header(htx, n, &ctx, 1))
				http_remove_header(htx, &ctx);
			break;
		case PAT_MATCH_BEG:
			while (http_find_pfx_header(htx, n, &ctx, 1))
				http_remove_header(htx, &ctx);
			break;
		case PAT_MATCH_END:
			while (http_find_sfx_header(htx, n, &ctx, 1))
				http_remove_header(htx, &ctx);
			break;
		case PAT_MATCH_SUB:
			while (http_find_sub_header(htx, n, &ctx, 1))
				http_remove_header(htx, &ctx);
			break;
		default:
			/* unknown matching method: treated as a rewrite failure */
			goto fail_rewrite;
		}
	}

	/* falling out of the loop means no terminator was seen:
	 * invalid encoding
	 */
	ret = ACT_RET_ERR;

  leave:
	return ret;

  fail_rewrite:
	/* account the failed rewrite on every relevant counter set */
	if (sess->fe_tgcounters)
		_HA_ATOMIC_INC(&sess->fe_tgcounters->failed_rewrites);
	if ((s->flags & SF_BE_ASSIGNED) && s->be_tgcounters)
		_HA_ATOMIC_INC(&s->be_tgcounters->failed_rewrites);
	if (sess->li_tgcounters)
		_HA_ATOMIC_INC(&sess->li_tgcounters->failed_rewrites);
	if (s->sv_tgcounters)
		_HA_ATOMIC_INC(&s->sv_tgcounters->failed_rewrites);

	if (!(msg->flags & HTTP_MSGF_SOFT_RW)) {
		/* hard rewrite mode: propagate the error to the stream */
		ret = ACT_RET_ERR;
		if (!(s->flags & SF_ERR_MASK))
			s->flags |= SF_ERR_PRXCOND;
	}
	goto leave;
}
/* Parse a "del-headers-bin" action. It takes a sample expression as required
 * argument, an optional "-m" flag followed by the matching method to apply to
 * the encoded header names (str/beg/end/sub; "reg" is rejected for
 * performance reasons). The default matching method is exact match (-m str).
 * It returns ACT_RET_PRS_OK on success, ACT_RET_PRS_ERR on error.
 */
static enum act_parse_ret parse_http_del_headers_bin(const char **args, int *orig_arg, struct proxy *px,
                                                     struct act_rule *rule, char **err)
{
	struct sample_expr *expr;
	unsigned int where;
	int cur_arg;
	int pat_idx;

	/* set exact matching (-m str) as default */
	rule->action = PAT_MATCH_STR;
	rule->action_ptr = http_action_del_headers_bin;
	rule->release_ptr = release_http_action;
	lf_expr_init(&rule->arg.http.fmt);

	cur_arg = *orig_arg;
	if (!*args[cur_arg]) {
		memprintf(err, "expects at least 1 argument");
		return ACT_RET_PRS_ERR;
	}

	expr = sample_parse_expr((char **)args, &cur_arg, px->conf.args.file, px->conf.args.line,
	                         err, &px->conf.args, NULL);
	if (!expr)
		return ACT_RET_PRS_ERR;

	/* check that the sample is fetchable at this rule's evaluation point */
	where = 0;
	if (px->cap & PR_CAP_FE)
		where |= (rule->from == ACT_F_HTTP_REQ ? SMP_VAL_FE_HRQ_HDR : SMP_VAL_FE_HRS_HDR);
	if (px->cap & PR_CAP_BE)
		where |= (rule->from == ACT_F_HTTP_REQ ? SMP_VAL_BE_HRQ_HDR : SMP_VAL_BE_HRS_HDR);

	if (!(expr->fetch->val & where)) {
		memprintf(err, "fetch method '%s' extracts information from '%s', none of which is available here",
		          args[cur_arg-1], sample_src_names(expr->fetch->use));
		release_sample_expr(expr);
		return ACT_RET_PRS_ERR;
	}

	if (strcmp(args[cur_arg], "-m") == 0) {
		cur_arg++;
		if (!*args[cur_arg]) {
			memprintf(err, "-m flag expects exactly 1 argument");
			release_sample_expr(expr);
			return ACT_RET_PRS_ERR;
		}

		pat_idx = pat_find_match_name(args[cur_arg]);
		switch (pat_idx) {
		case PAT_MATCH_REG:
			/* regex matching on every encoded name would be far too
			 * expensive; the message previously read "-m reg with is
			 * unsupported with del-header-bin" (garbled grammar and
			 * wrong directive name)
			 */
			memprintf(err, "-m reg is unsupported with del-headers-bin due to performance reasons");
			release_sample_expr(expr);
			return ACT_RET_PRS_ERR;
		case PAT_MATCH_STR:
		case PAT_MATCH_BEG:
		case PAT_MATCH_END:
		case PAT_MATCH_SUB:
			rule->action = pat_idx;
			break;
		default:
			memprintf(err, "-m with unsupported matching method '%s'", args[cur_arg]);
			release_sample_expr(expr);
			return ACT_RET_PRS_ERR;
		}
		cur_arg++;
	}

	rule->arg.http.expr = expr;
	*orig_arg = cur_arg;
	return ACT_RET_PRS_OK;
}
/* This function executes a pause action.
*/
static enum act_return http_action_pause(struct act_rule *rule, struct proxy *px,
@ -2128,7 +1828,7 @@ static enum act_parse_ret parse_http_pause(const char **args, int *orig_arg, str
rule->arg.timeout.expr = sample_parse_expr((char **)args, &cur_arg, px->conf.args.file,
px->conf.args.line, err, &px->conf.args, NULL);
if (!rule->arg.timeout.expr) {
memprintf(err, "unexpected character '%c' in rule 'pause'", *res);
memprintf(err, "unexpected character '%c' in rule 'mause'", *res);
return ACT_RET_PRS_ERR;
}
}
@ -2830,13 +2530,11 @@ static struct action_kw_list http_req_actions = {
.kw = {
{ "add-acl", parse_http_set_map, KWF_MATCH_PREFIX },
{ "add-header", parse_http_set_header, 0 },
{ "add-headers-bin", parse_http_set_headers_bin, 0 },
{ "allow", parse_http_allow, 0 },
{ "auth", parse_http_auth, 0 },
{ "capture", parse_http_req_capture, 0 },
{ "del-acl", parse_http_set_map, KWF_MATCH_PREFIX },
{ "del-header", parse_http_del_header, 0 },
{ "del-headers-bin", parse_http_del_headers_bin, 0 },
{ "del-map", parse_http_set_map, KWF_MATCH_PREFIX },
{ "deny", parse_http_deny, 0 },
{ "disable-l7-retry", parse_http_req_disable_l7_retry, 0 },
@ -2853,7 +2551,6 @@ static struct action_kw_list http_req_actions = {
{ "replace-value", parse_http_replace_header, 0 },
{ "return", parse_http_return, 0 },
{ "set-header", parse_http_set_header, 0 },
{ "set-headers-bin", parse_http_set_headers_bin, 0 },
{ "set-map", parse_http_set_map, KWF_MATCH_PREFIX },
{ "set-method", parse_set_req_line, 0 },
{ "set-path", parse_set_req_line, 0 },
@ -2875,12 +2572,10 @@ static struct action_kw_list http_res_actions = {
.kw = {
{ "add-acl", parse_http_set_map, KWF_MATCH_PREFIX },
{ "add-header", parse_http_set_header, 0 },
{ "add-headers-bin", parse_http_set_headers_bin,0 },
{ "allow", parse_http_allow, 0 },
{ "capture", parse_http_res_capture, 0 },
{ "del-acl", parse_http_set_map, KWF_MATCH_PREFIX },
{ "del-header", parse_http_del_header, 0 },
{ "del-headers-bin", parse_http_del_headers_bin,0 },
{ "del-map", parse_http_set_map, KWF_MATCH_PREFIX },
{ "deny", parse_http_deny, 0 },
{ "do-log", parse_http_res_do_log, 0 },
@ -2890,7 +2585,6 @@ static struct action_kw_list http_res_actions = {
{ "replace-value", parse_http_replace_header, 0 },
{ "return", parse_http_return, 0 },
{ "set-header", parse_http_set_header, 0 },
{ "set-headers-bin", parse_http_set_headers_bin,0 },
{ "set-map", parse_http_set_map, KWF_MATCH_PREFIX },
{ "set-status", parse_http_set_status, 0 },
{ "strict-mode", parse_http_strict_mode, 0 },
@ -2906,18 +2600,15 @@ INITCALL1(STG_REGISTER, http_res_keywords_register, &http_res_actions);
static struct action_kw_list http_after_res_actions = {
.kw = {
{ "add-header", parse_http_set_header, 0 },
{ "add-headers-bin", parse_http_set_headers_bin,0 },
{ "allow", parse_http_allow, 0 },
{ "capture", parse_http_res_capture, 0 },
{ "del-acl", parse_http_set_map, KWF_MATCH_PREFIX },
{ "del-header", parse_http_del_header, 0 },
{ "del-headers-bin", parse_http_del_headers_bin,0 },
{ "del-map", parse_http_set_map, KWF_MATCH_PREFIX },
{ "do-log", parse_http_after_res_do_log, 0 },
{ "replace-header", parse_http_replace_header, 0 },
{ "replace-value", parse_http_replace_header, 0 },
{ "set-header", parse_http_set_header, 0 },
{ "set-headers-bin", parse_http_set_headers_bin,0 },
{ "set-map", parse_http_set_map, KWF_MATCH_PREFIX },
{ "set-status", parse_http_set_status, 0 },
{ "strict-mode", parse_http_strict_mode, 0 },

View File

@ -2828,7 +2828,8 @@ static enum rule_result http_req_get_intercept_rule(struct proxy *px, struct lis
int act_opts = 0;
if ((s->scf->flags & SC_FL_ERROR) ||
((s->scf->flags & SC_FL_EOS) && proxy_abrt_close_def(px, 1)))
((s->scf->flags & (SC_FL_EOS|SC_FL_ABRT_DONE)) &&
proxy_abrt_close_def(px, 1)))
act_opts |= ACT_OPT_FINAL | ACT_OPT_FINAL_EARLY;
/* If "the current_rule_list" match the executed rule list, we are in
@ -3019,7 +3020,8 @@ static enum rule_result http_res_get_intercept_rule(struct proxy *px, struct lis
if (final)
act_opts |= ACT_OPT_FINAL;
if ((s->scf->flags & SC_FL_ERROR) ||
((s->scf->flags & SC_FL_EOS) && proxy_abrt_close_def(px, 1)))
((s->scf->flags & (SC_FL_EOS|SC_FL_ABRT_DONE)) &&
proxy_abrt_close_def(px, 1)))
act_opts |= ACT_OPT_FINAL | ACT_OPT_FINAL_EARLY;
/* If "the current_rule_list" match the executed rule list, we are in
@ -4335,10 +4337,20 @@ enum rule_result http_wait_for_msg_body(struct stream *s, struct channel *chn,
}
if (channel_htx_full(chn, htx, global.tune.maxrewrite) || sc_waiting_room(chn_prod(chn))) {
struct buffer lbuf = BUF_NULL;
struct buffer lbuf;
char *area;
if (large_buffer == 0 || b_is_large(&chn->buf) || !htx_move_to_large_buffer(&lbuf, &chn->buf))
goto end; /* don't use large buffer or already a large buffer */
if (large_buffer == 0 || b_is_large(&chn->buf))
goto end; /* don't use large buffer or large buffer is full */
/* normal buffer is full, allocate a large one
*/
area = pool_alloc(pool_head_large_buffer);
if (!area)
goto end; /* Allocation failure: TODO must be improved to use buffer_wait */
lbuf = b_make(area, global.tune.bufsize_large, 0, 0);
htx_xfer_blks(htx_from_buf(&lbuf), htx, htx_used_space(htx), HTX_BLK_UNUSED);
htx_to_buf(htx, &chn->buf);
b_free(&chn->buf);
offer_buffers(s, 1);
chn->buf = lbuf;
@ -4354,7 +4366,8 @@ enum rule_result http_wait_for_msg_body(struct stream *s, struct channel *chn,
/* we get here if we need to wait for more data */
if ((s->scf->flags & SC_FL_ERROR) ||
((s->scf->flags & SC_FL_EOS) && proxy_abrt_close_def(s->be, 1)))
((s->scf->flags & (SC_FL_EOS|SC_FL_ABRT_DONE)) &&
proxy_abrt_close_def(s->be, 1)))
ret = HTTP_RULE_RES_CONT;
else if (!(chn_prod(chn)->flags & (SC_FL_ERROR|SC_FL_EOS|SC_FL_ABRT_DONE))) {
if (!tick_isset(chn->analyse_exp))
@ -4734,7 +4747,7 @@ int http_forward_proxy_resp(struct stream *s, int final)
if (s->txn->meth == HTTP_METH_HEAD)
htx_skip_msg_payload(htx);
/* Response from haproxy, override HTTP response version using the request one */
/* Response from haproxy, override HTTP response version using the request one */
s->txn->rsp.vsn = s->txn->req.vsn;
channel_auto_read(req);

View File

@ -604,7 +604,10 @@ void httpclient_applet_io_handler(struct appctx *appctx)
htx_to_buf(htx, outbuf);
b_xfer(outbuf, &hc->req.buf, b_data(&hc->req.buf));
} else {
if (!htx_xfer(htx, hc_htx, htx_used_space(hc_htx), HTX_XFER_DEFAULT)) {
struct htx_ret ret;
ret = htx_xfer_blks(htx, hc_htx, htx_used_space(hc_htx), HTX_BLK_UNUSED);
if (!ret.ret) {
applet_have_more_data(appctx);
goto out;
}
@ -708,6 +711,7 @@ void httpclient_applet_io_handler(struct appctx *appctx)
if (hc->options & HTTPCLIENT_O_RES_HTX) {
/* HTX mode transfers the header to the hc buffer */
struct htx *hc_htx;
struct htx_ret ret;
if (!b_alloc(&hc->res.buf, DB_MUX_TX)) {
applet_wont_consume(appctx);
@ -716,7 +720,8 @@ void httpclient_applet_io_handler(struct appctx *appctx)
hc_htx = htxbuf(&hc->res.buf);
/* xfer the headers */
if (!htx_xfer(hc_htx, htx, htx_used_space(htx), HTX_XFER_HDRS_ONLY)) {
ret = htx_xfer_blks(hc_htx, htx, htx_used_space(htx), HTX_BLK_EOH);
if (!ret.ret) {
applet_need_more_data(appctx);
goto out;
}
@ -806,10 +811,12 @@ void httpclient_applet_io_handler(struct appctx *appctx)
if (hc->options & HTTPCLIENT_O_RES_HTX) {
/* HTX mode transfers the header to the hc buffer */
struct htx *hc_htx;
struct htx_ret ret;
hc_htx = htxbuf(&hc->res.buf);
if (!htx_xfer(hc_htx, htx, htx_used_space(htx), HTX_XFER_DEFAULT))
ret = htx_xfer_blks(hc_htx, htx, htx_used_space(htx), HTX_BLK_UNUSED);
if (!ret.ret)
applet_wont_consume(appctx);
else
applet_fl_clr(appctx, APPCTX_FL_INBLK_FULL);

View File

@ -44,7 +44,7 @@
/* this struct is used between calls to smp_fetch_hdr() or smp_fetch_cookie() */
static THREAD_LOCAL struct http_hdr_ctx static_http_hdr_ctx;
/* this is used to convert raw connection buffers to htx */
/* NOTE: For now, raw buffers cannot exceeds the standard size */
/* NOTE: For now, raw buffers cannot exceed the standard size */
static THREAD_LOCAL struct buffer static_raw_htx_chunk;
static THREAD_LOCAL char *static_raw_htx_buf;

View File

@ -41,18 +41,17 @@ struct list http_replies_list = LIST_HEAD_INIT(http_replies_list);
/* The declaration of an errorfiles/errorfile directives. Used during config
* parsing only. */
struct conf_errors {
enum http_err_directive directive; /* directive type: inline (errorfile <code> <file>) / section (errorfiles <section>) */
char type; /* directive type (0: errorfiles, 1: errorfile) */
union {
struct {
int status; /* the status code associated to this error */
struct http_reply *reply; /* the http reply for the errorfile */
} inl; /* for HTTP_ERR_DIRECTIVE_INLINE only */
} errorfile; /* describe an "errorfile" directive */
struct {
char *name; /* the http-errors section name */
struct http_errors *resolved; /* resolved section pointer set via proxy_check_http_errors() */
enum http_err_import status[HTTP_ERR_SIZE]; /* list of status to import */
} section; /* for HTTP_ERR_DIRECTIVE_SECTION only */
} type;
char status[HTTP_ERR_SIZE]; /* list of status to import (0: ignore, 1: implicit import, 2: explicit import) */
} errorfiles; /* describe an "errorfiles" directive */
} info;
char *file; /* file where the directive appears */
int line; /* line where the directive appears */
@ -2035,9 +2034,9 @@ static int proxy_parse_errorloc(char **args, int section, struct proxy *curpx,
ret = -1;
goto out;
}
conf_err->directive = HTTP_ERR_DIRECTIVE_INLINE;
conf_err->type.inl.status = status;
conf_err->type.inl.reply = reply;
conf_err->type = 1;
conf_err->info.errorfile.status = status;
conf_err->info.errorfile.reply = reply;
conf_err->file = strdup(file);
conf_err->line = line;
@ -2106,9 +2105,9 @@ static int proxy_parse_errorfile(char **args, int section, struct proxy *curpx,
ret = -1;
goto out;
}
conf_err->directive = HTTP_ERR_DIRECTIVE_INLINE;
conf_err->type.inl.status = status;
conf_err->type.inl.reply = reply;
conf_err->type = 1;
conf_err->info.errorfile.status = status;
conf_err->info.errorfile.reply = reply;
conf_err->file = strdup(file);
conf_err->line = line;
LIST_APPEND(&curpx->conf.errors, &conf_err->list);
@ -2147,12 +2146,12 @@ static int proxy_parse_errorfiles(char **args, int section, struct proxy *curpx,
memprintf(err, "%s : out of memory.", args[0]);
goto error;
}
conf_err->type = 0;
conf_err->directive = HTTP_ERR_DIRECTIVE_SECTION;
conf_err->type.section.name = name;
conf_err->info.errorfiles.name = name;
if (!*(args[2])) {
for (rc = 0; rc < HTTP_ERR_SIZE; rc++)
conf_err->type.section.status[rc] = HTTP_ERR_IMPORT_IMPLICIT;
conf_err->info.errorfiles.status[rc] = 1;
}
else {
int cur_arg, status;
@ -2161,7 +2160,7 @@ static int proxy_parse_errorfiles(char **args, int section, struct proxy *curpx,
for (rc = 0; rc < HTTP_ERR_SIZE; rc++) {
if (http_err_codes[rc] == status) {
conf_err->type.section.status[rc] = HTTP_ERR_IMPORT_EXPLICIT;
conf_err->info.errorfiles.status[rc] = 2;
break;
}
}
@ -2232,16 +2231,16 @@ static int proxy_parse_http_error(char **args, int section, struct proxy *curpx,
if (reply->type == HTTP_REPLY_ERRFILES) {
int rc = http_get_status_idx(reply->status);
conf_err->directive = HTTP_ERR_DIRECTIVE_SECTION;
conf_err->type.section.name = reply->body.http_errors;
conf_err->type.section.status[rc] = HTTP_ERR_IMPORT_EXPLICIT;
conf_err->type = 2;
conf_err->info.errorfiles.name = reply->body.http_errors;
conf_err->info.errorfiles.status[rc] = 2;
reply->body.http_errors = NULL;
release_http_reply(reply);
}
else {
conf_err->directive = HTTP_ERR_DIRECTIVE_INLINE;
conf_err->type.inl.status = reply->status;
conf_err->type.inl.reply = reply;
conf_err->type = 1;
conf_err->info.errorfile.status = reply->status;
conf_err->info.errorfile.reply = reply;
LIST_APPEND(&http_replies_list, &reply->list);
}
conf_err->file = strdup(file);
@ -2261,46 +2260,60 @@ static int proxy_parse_http_error(char **args, int section, struct proxy *curpx,
}
/* Converts <conf_errors> initialized during config parsing for <px> proxy.
* Each one of them is transformed in a http_reply type, stored in proxy
* replies array member. The original <conf_errors> becomes unneeded and is
* thus removed and freed.
*/
static int proxy_finalize_http_errors(struct proxy *px)
/* Check "errorfiles" proxy keyword */
static int proxy_check_errors(struct proxy *px)
{
struct conf_errors *conf_err, *conf_err_back;
struct http_errors *http_errs;
int rc;
int rc, err = ERR_NONE;
list_for_each_entry_safe(conf_err, conf_err_back, &px->conf.errors, list) {
switch (conf_err->directive) {
case HTTP_ERR_DIRECTIVE_INLINE:
rc = http_get_status_idx(conf_err->type.inl.status);
px->replies[rc] = conf_err->type.inl.reply;
if (conf_err->type == 1) {
/* errorfile */
rc = http_get_status_idx(conf_err->info.errorfile.status);
px->replies[rc] = conf_err->info.errorfile.reply;
/* For proxy, to rely on default replies, just don't reference a reply */
if (px->replies[rc]->type == HTTP_REPLY_ERRMSG && !px->replies[rc]->body.errmsg)
px->replies[rc] = NULL;
break;
}
else {
/* errorfiles */
list_for_each_entry(http_errs, &http_errors_list, list) {
if (strcmp(http_errs->id, conf_err->info.errorfiles.name) == 0)
break;
}
case HTTP_ERR_DIRECTIVE_SECTION:
http_errs = conf_err->type.section.resolved;
if (http_errs) {
for (rc = 0; rc < HTTP_ERR_SIZE; rc++) {
if (conf_err->type.section.status[rc] == HTTP_ERR_IMPORT_NO)
continue;
/* unknown http-errors section */
if (&http_errs->list == &http_errors_list) {
ha_alert("proxy '%s': unknown http-errors section '%s' (at %s:%d).\n",
px->id, conf_err->info.errorfiles.name, conf_err->file, conf_err->line);
err |= ERR_ALERT | ERR_FATAL;
free(conf_err->info.errorfiles.name);
goto next;
}
free(conf_err->info.errorfiles.name);
for (rc = 0; rc < HTTP_ERR_SIZE; rc++) {
if (conf_err->info.errorfiles.status[rc] > 0) {
if (http_errs->replies[rc])
px->replies[rc] = http_errs->replies[rc];
else if (conf_err->info.errorfiles.status[rc] == 2)
ha_warning("config: proxy '%s' : status '%d' not declared in"
" http-errors section '%s' (at %s:%d).\n",
px->id, http_err_codes[rc], http_errs->id,
conf_err->file, conf_err->line);
}
}
}
next:
LIST_DELETE(&conf_err->list);
free(conf_err->file);
free(conf_err);
}
return ERR_NONE;
out:
return err;
}
static int post_check_errors()
@ -2330,55 +2343,6 @@ static int post_check_errors()
return err_code;
}
/* Post-parsing validation of the conf_errors entries accumulated in <px>.
 * Every entry referencing an http-errors section is resolved against the
 * global http_errors_list: an unknown section name is reported as a fatal
 * error, and a status explicitly requested but not declared in the resolved
 * section is reported as a warning. Inline entries are left untouched.
 *
 * Returns ERR_NONE on success and a combination of ERR_CODE on failure.
 */
int proxy_check_http_errors(struct proxy *px)
{
	struct conf_errors *conf_err;
	struct http_errors *errs_section;
	int idx, err = ERR_NONE;

	list_for_each_entry(conf_err, &px->conf.errors, list) {
		int found = 0;

		/* only "errorfiles"-like directives reference a section */
		if (conf_err->directive != HTTP_ERR_DIRECTIVE_SECTION)
			continue;

		/* resolve the http-errors section by name */
		list_for_each_entry(errs_section, &http_errors_list, list) {
			if (strcmp(errs_section->id, conf_err->type.section.name) == 0) {
				found = 1;
				break;
			}
		}
		if (!found) {
			ha_alert("proxy '%s': unknown http-errors section '%s' (at %s:%d).\n",
				 px->id, conf_err->type.section.name, conf_err->file, conf_err->line);
			ha_free(&conf_err->type.section.name);
			err |= ERR_ALERT | ERR_FATAL;
			continue;
		}

		conf_err->type.section.resolved = errs_section;
		ha_free(&conf_err->type.section.name);

		/* warn about statuses explicitly imported but missing from the section */
		for (idx = 0; idx < HTTP_ERR_SIZE; idx++) {
			if (conf_err->type.section.status[idx] == HTTP_ERR_IMPORT_EXPLICIT &&
			    !errs_section->replies[idx]) {
				ha_warning("config: proxy '%s' : status '%d' not declared in"
					   " http-errors section '%s' (at %s:%d).\n",
					   px->id, http_err_codes[idx], errs_section->id,
					   conf_err->file, conf_err->line);
				err |= ERR_WARN;
			}
		}
	}
	return err;
}
int proxy_dup_default_conf_errors(struct proxy *curpx, const struct proxy *defpx, char **errmsg)
{
struct conf_errors *conf_err, *new_conf_err = NULL;
@ -2390,22 +2354,19 @@ int proxy_dup_default_conf_errors(struct proxy *curpx, const struct proxy *defpx
memprintf(errmsg, "unable to duplicate default errors (out of memory).");
goto out;
}
new_conf_err->directive = conf_err->directive;
switch (conf_err->directive) {
case HTTP_ERR_DIRECTIVE_INLINE:
new_conf_err->type.inl.status = conf_err->type.inl.status;
new_conf_err->type.inl.reply = conf_err->type.inl.reply;
break;
case HTTP_ERR_DIRECTIVE_SECTION:
new_conf_err->type.section.name = strdup(conf_err->type.section.name);
if (!new_conf_err->type.section.name) {
new_conf_err->type = conf_err->type;
if (conf_err->type == 1) {
new_conf_err->info.errorfile.status = conf_err->info.errorfile.status;
new_conf_err->info.errorfile.reply = conf_err->info.errorfile.reply;
}
else {
new_conf_err->info.errorfiles.name = strdup(conf_err->info.errorfiles.name);
if (!new_conf_err->info.errorfiles.name) {
memprintf(errmsg, "unable to duplicate default errors (out of memory).");
goto out;
}
memcpy(&new_conf_err->type.section.status, &conf_err->type.section.status,
sizeof(conf_err->type.section.status));
break;
memcpy(&new_conf_err->info.errorfiles.status, &conf_err->info.errorfiles.status,
sizeof(conf_err->info.errorfiles.status));
}
new_conf_err->file = strdup(conf_err->file);
new_conf_err->line = conf_err->line;
@ -2424,8 +2385,8 @@ void proxy_release_conf_errors(struct proxy *px)
struct conf_errors *conf_err, *conf_err_back;
list_for_each_entry_safe(conf_err, conf_err_back, &px->conf.errors, list) {
if (conf_err->directive == HTTP_ERR_DIRECTIVE_SECTION)
free(conf_err->type.section.name);
if (conf_err->type == 0)
free(conf_err->info.errorfiles.name);
LIST_DELETE(&conf_err->list);
free(conf_err->file);
free(conf_err);
@ -2544,7 +2505,7 @@ static struct cfg_kw_list cfg_kws = {ILH, {
}};
INITCALL1(STG_REGISTER, cfg_register_keywords, &cfg_kws);
REGISTER_POST_PROXY_CHECK(proxy_finalize_http_errors);
REGISTER_POST_PROXY_CHECK(proxy_check_errors);
REGISTER_POST_CHECK(post_check_errors);
REGISTER_CONFIG_SECTION("http-errors", cfg_parse_http_errors, NULL);

219
src/htx.c
View File

@ -11,7 +11,6 @@
*/
#include <haproxy/chunk.h>
#include <haproxy/dynbuf.h>
#include <haproxy/global.h>
#include <haproxy/htx.h>
#include <haproxy/net_helper.h>
@ -720,163 +719,10 @@ struct htx_blk *htx_replace_blk_value(struct htx *htx, struct htx_blk *blk,
return blk;
}
/* Transfer HTX blocks from <src> to <dst>, stopping if <count> bytes were
 * transferred (including payload and meta-data). It returns the number of bytes
 * copied. By default, copied blocks are removed from <src> and only full
 * headers and trailers part can be moved. <flags> can be set to change the
 * default behavior:
 *  - HTX_XFER_KEEP_SRC_BLKS: source blocks are not removed
 *  - HTX_XFER_PARTIAL_HDRS_COPY: partial headers and trailers part can be xferred
 *  - HTX_XFER_HDRS_ONLY: Only the headers part is xferred
 *
 * Note that, unless HTX_XFER_PARTIAL_HDRS_COPY is set, a headers/trailers part
 * that does not fit into an initially empty <dst> is flagged as a parsing
 * error on <src> (HTX_FL_PARSING_ERROR), since retrying later cannot succeed
 * either.
 */
size_t htx_xfer(struct htx *dst, struct htx *src, size_t count, unsigned int flags)
{
	struct htx_blk *blk, *last_dstblk;
	size_t ret = 0;
	uint32_t max, last_dstblk_sz;
	int dst_full = 0;

	/* <last_dstblk>/<last_dstblk_sz> track the last block copied into <dst>
	 * while it may still be partial; they are reset once the block is known
	 * to be fully copied.
	 */
	last_dstblk = NULL;
	last_dstblk_sz = 0;
	for (blk = htx_get_head_blk(src); blk && count; blk = htx_get_next_blk(src, blk)) {
		struct ist v;
		enum htx_blk_type type;
		uint32_t sz;

		/* Ignore unused block */
		type = htx_get_blk_type(blk);
		if (type == HTX_BLK_UNUSED)
			continue;

		/* in HDRS_ONLY mode, stop on the first block past the headers part */
		if ((flags & HTX_XFER_HDRS_ONLY) &&
		    type != HTX_BLK_REQ_SL && type != HTX_BLK_RES_SL &&
		    type != HTX_BLK_HDR && type != HTX_BLK_EOH)
			break;

		/* largest block size <dst> can still accept, bounded by <count> */
		max = htx_get_max_blksz(dst, count);
		if (!max)
			break;

		sz = htx_get_blksz(blk);
		switch (type) {
			case HTX_BLK_DATA:
				/* DATA blocks may be copied partially: clamp the
				 * value to what fits, htx_add_data() returns the
				 * amount really appended.
				 */
				v = htx_get_blk_value(src, blk);
				if (v.len > max)
					v.len = max;
				v.len = htx_add_data(dst, v);
				if (!v.len) {
					dst_full = 1;
					goto stop;
				}
				last_dstblk = htx_get_tail_blk(dst);
				last_dstblk_sz = v.len;
				/* account for the block meta-data plus its payload */
				count -= sizeof(*blk) + v.len;
				ret += sizeof(*blk) + v.len;
				if (v.len != sz) {
					/* partial copy: <dst> is full */
					dst_full = 1;
					goto stop;
				}
				break;

			default:
				/* all other block types are copied whole or not at all */
				if (sz > max) {
					dst_full = 1;
					goto stop;
				}
				last_dstblk = htx_add_blk(dst, type, sz);
				if (!last_dstblk) {
					dst_full = 1;
					goto stop;
				}
				last_dstblk->info = blk->info;
				htx_memcpy(htx_get_blk_ptr(dst, last_dstblk), htx_get_blk_ptr(src, blk), sz);
				last_dstblk_sz = sz;
				count -= sizeof(*blk) + sz;
				ret += sizeof(*blk) + sz;
				break;
		}

		last_dstblk = NULL; /* Reset last_dstblk because it was fully copied */
		last_dstblk_sz = 0;
	}

  stop:
	/* Here, if not NULL, <blk> points on the first not fully copied block in
	 * <src>. And <last_dstblk>, if defined, is the last not fully copied
	 * block in <dst>. So we have:
	 *
	 *  - <blk> == NULL: everything was copied. <last_dstblk> must be NULL
	 *  - <blk> != NULL && <last_dstblk> == NULL: partial copy but the last block was fully copied
	 *  - <blk> != NULL && <last_dstblk> != NULL: partial copy and the last block was partially copied (DATA block only)
	 */
	if (!(flags & HTX_XFER_PARTIAL_HDRS_COPY)) {
		/* Partial headers/trailers copy is not supported */
		struct htx_blk *dstblk;
		enum htx_blk_type type = HTX_BLK_UNUSED;

		dstblk = htx_get_tail_blk(dst);
		if (dstblk)
			type = htx_get_blk_type(dstblk);

		/* the last copied block is a start-line, a header or a trailer */
		if (type == HTX_BLK_REQ_SL || type == HTX_BLK_RES_SL || type == HTX_BLK_HDR || type == HTX_BLK_TLR) {
			/* <src> cannot have partial headers or trailers part */
			BUG_ON(blk == NULL);

			/* Remove partial headers/trailers from <dst> and rollback on <src> to not remove them later.
			 * <blk> and <dstblk> are walked backwards in lockstep, they must designate blocks of the
			 * same type at each step.
			 */
			while (type == HTX_BLK_REQ_SL || type == HTX_BLK_RES_SL || type == HTX_BLK_HDR || type == HTX_BLK_TLR) {
				BUG_ON(type != htx_get_blk_type(blk));
				ret -= sizeof(*blk) + htx_get_blksz(blk);
				htx_remove_blk(dst, dstblk);
				dstblk = htx_get_tail_blk(dst);
				blk = htx_get_prev_blk(src, blk);
				if (!dstblk)
					break;
				type = htx_get_blk_type(dstblk);
			}

			/* Report if the xfer was interrupted because <dst> was
			 * full but it was originally empty
			 */
			if (dst_full && htx_is_empty(dst))
				src->flags |= HTX_FL_PARSING_ERROR;
		}
	}

	if (!(flags & HTX_XFER_KEEP_SRC_BLKS)) {
		/* True xfer performed, remove copied blocks from <src> */
		struct htx_blk *blk2;

		/* Remove all fully copied blocks */
		if (!blk)
			htx_drain(src, src->data);
		else {
			for (blk2 = htx_get_head_blk(src); blk2 && blk2 != blk; blk2 = htx_remove_blk(src, blk2));

			/* If copy was stopped on a DATA block and the last destination
			 * block is not NULL, it means a partial copy was performed. So
			 * cut the source block accordingly
			 */
			if (last_dstblk && blk2 && htx_get_blk_type(blk2) == HTX_BLK_DATA) {
				htx_cut_data_blk(src, blk2, last_dstblk_sz);
			}
		}
	}

	/* Everything was copied, transfer terminal HTX flags too */
	if (!blk) {
		dst->flags |= (src->flags & (HTX_FL_EOM|HTX_FL_PARSING_ERROR|HTX_FL_PROCESSING_ERROR));
		src->flags = 0;
	}
	return ret;
}
/* Transfer HTX blocks from <src> to <dst>, stopping once the first block of the
* type <mark> is transferred (typically EOH or EOT) or when <count> bytes were
* moved (including payload and meta-data). It returns the number of bytes moved
* and the last HTX block inserted in <dst>.
*
* DEPRECATED
*/
struct htx_ret htx_xfer_blks(struct htx *dst, struct htx *src, uint32_t count,
enum htx_blk_type mark)
@ -1335,68 +1181,3 @@ int htx_append_msg(struct htx *dst, const struct htx *src)
htx_truncate(dst, offset);
return 0;
}
/* Transfer the HTX blocks of <src> into a freshly allocated small buffer and
 * make <dst> point on it. <flags> is passed as-is to htx_xfer() to control how
 * the transfer is performed. Returns <dst> on success. NULL is returned if
 * <dst> is already allocated, if <src> carries too much data to fit into a
 * small buffer, or if the allocation fails.
 */
struct buffer *__htx_xfer_to_small_buffer(struct buffer *dst, struct buffer *src, unsigned int flags)
{
	struct htx *src_htx = htxbuf(src);
	struct htx *dst_htx;
	size_t needed = (sizeof(struct htx) + htx_used_space(src_htx));

	/* <dst> must not be allocated yet */
	if (dst->size)
		return NULL;
	/* the whole HTX content, overhead included, must fit a small buffer */
	if (needed > global.tune.bufsize_small)
		return NULL;
	if (!b_alloc_small(dst))
		return NULL;

	dst_htx = htx_from_buf(dst);
	htx_xfer(dst_htx, src_htx, src_htx->size, flags);
	htx_to_buf(dst_htx, dst);
	return dst;
}
/* Transfer the HTX blocks of <src> into a freshly allocated large buffer and
 * make <dst> point on it. <flags> is passed as-is to htx_xfer() to control how
 * the transfer is performed. Returns <dst> on success. NULL is returned if
 * <dst> is already allocated, if <src> carries too much data to fit into a
 * large buffer, or if the allocation fails.
 */
struct buffer *__htx_xfer_to_large_buffer(struct buffer *dst, struct buffer *src, unsigned int flags)
{
	struct htx *src_htx = htxbuf(src);
	struct htx *dst_htx;
	size_t needed = (sizeof(struct htx) + htx_used_space(src_htx));

	/* <dst> must not be allocated yet */
	if (dst->size)
		return NULL;
	/* the whole HTX content, overhead included, must fit a large buffer */
	if (needed > global.tune.bufsize_large)
		return NULL;
	if (!b_alloc_large(dst))
		return NULL;

	dst_htx = htx_from_buf(dst);
	htx_xfer(dst_htx, src_htx, src_htx->size, flags);
	htx_to_buf(dst_htx, dst);
	return dst;
}
/* Move HTX blocks from <src> to <dst>. Relies on __htx_xfer_to_small_buffer()
 * with HTX_XFER_DEFAULT, so copied blocks are removed from <src>. Returns
 * <dst> on success, NULL otherwise (see __htx_xfer_to_small_buffer()).
 */
struct buffer *htx_move_to_small_buffer(struct buffer *dst, struct buffer *src)
{
	return __htx_xfer_to_small_buffer(dst, src, HTX_XFER_DEFAULT);
}
/* Move HTX blocks from <src> to <dst>. Relies on __htx_xfer_to_large_buffer()
 * with HTX_XFER_DEFAULT, so copied blocks are removed from <src>. Returns
 * <dst> on success, NULL otherwise (see __htx_xfer_to_large_buffer()).
 */
struct buffer *htx_move_to_large_buffer(struct buffer *dst, struct buffer *src)
{
	return __htx_xfer_to_large_buffer(dst, src, HTX_XFER_DEFAULT);
}
/* Copy HTX blocks from <src> to <dst>. Relies on __htx_xfer_to_small_buffer()
 * with HTX_XFER_KEEP_SRC_BLKS, so <src> blocks are left in place. Returns
 * <dst> on success, NULL otherwise (see __htx_xfer_to_small_buffer()).
 */
struct buffer *htx_copy_to_small_buffer(struct buffer *dst, struct buffer *src)
{
	return __htx_xfer_to_small_buffer(dst, src, HTX_XFER_KEEP_SRC_BLKS);
}
/* Copy HTX blocks from <src> to <dst>. Relies on __htx_xfer_to_large_buffer()
 * with HTX_XFER_KEEP_SRC_BLKS, so <src> blocks are left in place. Returns
 * <dst> on success, NULL otherwise (see __htx_xfer_to_large_buffer()).
 */
struct buffer *htx_copy_to_large_buffer(struct buffer *dst, struct buffer *src)
{
	return __htx_xfer_to_large_buffer(dst, src, HTX_XFER_KEEP_SRC_BLKS);
}

View File

@ -424,7 +424,7 @@ end:
* the one found in the JWE token.
* The tag is built out of a HMAC of some concatenated data taken from the JWE
* token (see https://datatracker.ietf.org/doc/html/rfc7518#section-5.2). The
* first half of the previously decrypted cek is used as HMAC key.
first half of the previously decrypted cek is used as HMAC key.
* Returns 0 in case of success, 1 otherwise.
*/
static int build_and_check_tag(jwe_enc enc, struct jwt_item items[JWE_ELT_MAX],
@ -602,7 +602,7 @@ static inline void clear_decoded_items(struct buffer *decoded_items[JWE_ELT_MAX]
/*
* Decrypt the contents of a JWE token thanks to the user-provided base64
* encoded secret. This converter can only be used for tokens that have a
* symmetric algorithm (AESKW, AESGCMKW or "dir" special case).
symmetric algorithm (AESKW, AESGCMKW or "dir" special case).
* Returns the decrypted contents, or nothing if any error happened.
*/
static int sample_conv_jwt_decrypt_secret(const struct arg *args, struct sample *smp, void *private)
@ -1096,7 +1096,7 @@ end:
/*
* Decrypt the contents of a JWE token thanks to the user-provided certificate
* and private key. This converter can only be used for tokens that have an
* asymmetric algorithm (RSA only for now).
asymmetric algorithm (RSA only for now).
* Returns the decrypted contents, or nothing if any error happened.
*/
static int sample_conv_jwt_decrypt_cert(const struct arg *args, struct sample *smp, void *private)
@ -1173,7 +1173,7 @@ static int sample_conv_jwt_decrypt_cert(const struct arg *args, struct sample *s
/* With ECDH-ES no CEK will be provided. */
if (!ec || alg != JWE_ALG_ECDH_ES) {
/* With asymmetric crypto algorithms we should always have a CEK */
/* With asymmetric crypto algorithms we should always have a CEK */
if (!items[JWE_ELT_CEK].length)
goto end;

View File

@ -2631,16 +2631,6 @@ static int bind_parse_proto(char **args, int cur_arg, struct proxy *px, struct b
memprintf(err, "'%s' : unknown MUX protocol '%s'", args[cur_arg], args[cur_arg+1]);
return ERR_ALERT | ERR_FATAL;
}
if (conf->mux_proto->mux->flags & MX_FL_EXPERIMENTAL) {
if (!experimental_directives_allowed) {
memprintf(err, "'%s' : '%s' protocol is experimental, must be allowed via a global 'expose-experimental-directives'.",
args[cur_arg], args[cur_arg + 1]);
return ERR_ALERT | ERR_FATAL;
}
mark_tainted(TAINTED_CONFIG_EXP_KW_DECLARED);
}
return 0;
}

137
src/log.c
View File

@ -334,23 +334,6 @@ char default_tcp_log_format[] = "%ci:%cp [%t] %ft %b/%s %Tw/%Tc/%Tt %B %ts %ac/%
char clf_tcp_log_format[] = "%{+Q}o %{-Q}ci - - [%T] \"TCP \" 000 %B \"\" \"\" %cp %ms %ft %b %s %Th %Tw %Tc %Tt %U %ts-- %ac %fc %bc %sc %rc %sq %bq \"\" \"\" ";
char *log_format = NULL;
char keylog_format_bc[] = "CLIENT_EARLY_TRAFFIC_SECRET %[ssl_bc_client_random,hex] %[ssl_bc_client_early_traffic_secret]\n"
"CLIENT_HANDSHAKE_TRAFFIC_SECRET %[ssl_bc_client_random,hex] %[ssl_bc_client_handshake_traffic_secret]\n"
"SERVER_HANDSHAKE_TRAFFIC_SECRET %[ssl_bc_client_random,hex] %[ssl_bc_server_handshake_traffic_secret]\n"
"CLIENT_TRAFFIC_SECRET_0 %[ssl_bc_client_random,hex] %[ssl_bc_client_traffic_secret_0]\n"
"SERVER_TRAFFIC_SECRET_0 %[ssl_bc_client_random,hex] %[ssl_bc_server_traffic_secret_0]\n"
"EXPORTER_SECRET %[ssl_bc_client_random,hex] %[ssl_bc_exporter_secret]\n"
"EARLY_EXPORTER_SECRET %[ssl_bc_client_random,hex] %[ssl_bc_early_exporter_secret]";
char keylog_format_fc[] = "CLIENT_EARLY_TRAFFIC_SECRET %[ssl_fc_client_random,hex] %[ssl_fc_client_early_traffic_secret]\n"
"CLIENT_HANDSHAKE_TRAFFIC_SECRET %[ssl_fc_client_random,hex] %[ssl_fc_client_handshake_traffic_secret]\n"
"SERVER_HANDSHAKE_TRAFFIC_SECRET %[ssl_fc_client_random,hex] %[ssl_fc_server_handshake_traffic_secret]\n"
"CLIENT_TRAFFIC_SECRET_0 %[ssl_fc_client_random,hex] %[ssl_fc_client_traffic_secret_0]\n"
"SERVER_TRAFFIC_SECRET_0 %[ssl_fc_client_random,hex] %[ssl_fc_server_traffic_secret_0]\n"
"EXPORTER_SECRET %[ssl_fc_client_random,hex] %[ssl_fc_exporter_secret]\n"
"EARLY_EXPORTER_SECRET %[ssl_fc_client_random,hex] %[ssl_fc_early_exporter_secret]";
/* Default string used for structured-data part in RFC5424 formatted
* syslog messages.
*/
@ -368,9 +351,7 @@ static inline int logformat_str_isdefault(const char *str)
str == clf_http_log_format ||
str == default_tcp_log_format ||
str == clf_tcp_log_format ||
str == default_rfc5424_sd_log_format ||
str == keylog_format_bc ||
str == keylog_format_fc;
str == default_rfc5424_sd_log_format;
}
/* free logformat str if it is not a default (static) one */
@ -2932,7 +2913,6 @@ static inline void __send_log_set_metadata_sd(struct ist *metadata, char *sd, si
struct process_send_log_ctx {
struct session *sess;
struct stream *stream;
struct log_profile *profile;
struct log_orig origin;
};
@ -2962,10 +2942,6 @@ static inline void _process_send_log_override(struct process_send_log_ctx *ctx,
enum log_orig_id orig = (ctx) ? ctx->origin.id : LOG_ORIG_UNSPEC;
uint16_t orig_fl = (ctx) ? ctx->origin.flags : LOG_ORIG_FL_NONE;
/* ctx->profile gets priority over logger profile */
if (ctx && ctx->profile)
prof = ctx->profile;
BUG_ON(!prof);
if (!b_is_null(&prof->log_tag))
@ -3119,8 +3095,8 @@ static void process_send_log(struct process_send_log_ctx *ctx,
nblogger += 1;
/* caller or default logger may use a profile to override a few things */
if (unlikely(logger->prof || (ctx && ctx->profile)))
/* logger may use a profile to override a few things */
if (unlikely(logger->prof))
_process_send_log_override(ctx, logger, hdr, message, size, nblogger);
else
_process_send_log_final(logger, hdr, message, size, nblogger);
@ -5224,11 +5200,17 @@ out:
}
static void do_log_ctx(struct process_send_log_ctx *ctx)
/*
* opportunistic log when at least the session is known to exist
* <s> may be NULL
*
* Will not log if the frontend has no log defined. By default it will
* try to emit the log as INFO, unless the stream already exists and
* set-log-level was used.
*/
void do_log(struct session *sess, struct stream *s, struct log_orig origin)
{
struct stream *s = ctx->stream;
struct session *sess = ctx->sess;
struct log_orig origin = ctx->origin;
struct process_send_log_ctx ctx;
int size;
int sd_size = 0;
int level = -1;
@ -5260,27 +5242,11 @@ static void do_log_ctx(struct process_send_log_ctx *ctx)
size = sess_build_logline_orig(sess, s, logline, global.max_syslog_len, &sess->fe->logformat, origin);
__send_log(ctx, &sess->fe->loggers, &sess->fe->log_tag, level,
logline, size, logline_rfc5424, sd_size);
}
/*
* opportunistic log when at least the session is known to exist
* <s> may be NULL
*
* Will not log if the frontend has no log defined. By default it will
* try to emit the log as INFO, unless the stream already exists and
* set-log-level was used.
*/
void do_log(struct session *sess, struct stream *s, struct log_orig origin)
{
struct process_send_log_ctx ctx;
ctx.origin = origin;
ctx.sess = sess;
ctx.stream = s;
ctx.profile = NULL;
do_log_ctx(&ctx);
__send_log(&ctx, &sess->fe->loggers, &sess->fe->log_tag, level,
logline, size, logline_rfc5424, sd_size);
}
/*
@ -5331,7 +5297,6 @@ void strm_log(struct stream *s, struct log_orig origin)
ctx.origin = origin;
ctx.sess = sess;
ctx.stream = s;
ctx.profile = NULL;
__send_log(&ctx, &sess->fe->loggers, &sess->fe->log_tag, level,
logline, size, logline_rfc5424, sd_size);
s->logs.logwait = 0;
@ -5399,7 +5364,6 @@ void _sess_log(struct session *sess, int embryonic)
ctx.origin = orig;
ctx.sess = sess;
ctx.stream = NULL;
ctx.profile = NULL;
__send_log(&ctx, &sess->fe->loggers,
&sess->fe->log_tag, level,
logline, size, logline_rfc5424, sd_size);
@ -6946,87 +6910,24 @@ static int px_parse_log_steps(char **args, int section_type, struct proxy *curpx
static enum act_return do_log_action(struct act_rule *rule, struct proxy *px,
struct session *sess, struct stream *s, int flags)
{
struct process_send_log_ctx ctx;
/* do_log() expects valid session pointer */
BUG_ON(sess == NULL);
ctx.origin = log_orig(rule->arg.do_log.orig, LOG_ORIG_FL_NONE);
ctx.sess = sess;
ctx.stream = s;
ctx.profile = rule->arg.do_log.profile;
do_log_ctx(&ctx);
do_log(sess, s, log_orig(rule->arg.expr_int.value, LOG_ORIG_FL_NONE));
return ACT_RET_CONT;
}
static int do_log_action_check(struct act_rule *rule, struct proxy *px, char **err)
{
if (rule->arg.do_log.profile_name) {
struct log_profile *prof;
prof = log_profile_find_by_name(rule->arg.do_log.profile_name);
if (!prof) {
memprintf(err, "do-log action: profile '%s' is invalid", rule->arg.do_log.profile_name);
ha_free(&rule->arg.do_log.profile_name);
return 0;
}
ha_free(&rule->arg.do_log.profile_name);
if (!log_profile_postcheck(px, prof, err)) {
memprintf(err, "do-log action on %s %s uses incompatible log-profile '%s': %s", proxy_type_str(px), px->id, prof->id, *err);
return 0;
}
rule->arg.do_log.profile = prof;
}
return 1; // success
}
static void do_log_action_release(struct act_rule *rule)
{
ha_free(&rule->arg.do_log.profile_name);
}
/* Parse a "do_log" action. It takes optional "log-profile" argument to
* specifically use a given log-profile when generating the log message
*
/* Parse a "do_log" action. It doesn't take any argument
* May be used from places where per-context actions are usually registered
*/
enum act_parse_ret do_log_parse_act(enum log_orig_id id,
const char **args, int *orig_arg, struct proxy *px,
struct act_rule *rule, char **err)
{
int cur_arg = *orig_arg;
rule->action_ptr = do_log_action;
rule->action = ACT_CUSTOM;
rule->check_ptr = do_log_action_check;
rule->release_ptr = do_log_action_release;
rule->arg.do_log.orig = id;
while (*args[*orig_arg]) {
if (!strcmp(args[*orig_arg], "profile")) {
if (!*args[*orig_arg + 1]) {
memprintf(err,
"action '%s': 'profile' expects argument.",
args[cur_arg-1]);
return ACT_RET_PRS_ERR;
}
rule->arg.do_log.profile_name = strdup(args[*orig_arg + 1]);
if (!rule->arg.do_log.profile_name) {
memprintf(err,
"action '%s': memory error when setting 'profile'",
args[cur_arg-1]);
return ACT_RET_PRS_ERR;
}
*orig_arg += 2;
}
else
break;
}
rule->release_ptr = NULL;
rule->arg.expr_int.value = id;
return ACT_RET_PRS_OK;
}

View File

@ -621,9 +621,6 @@ static int cli_parse_get_map(char **args, char *payload, struct appctx *appctx,
{
struct show_map_ctx *ctx = applet_reserve_svcctx(appctx, sizeof(*ctx));
if (!cli_has_level(appctx, ACCESS_LVL_ADMIN))
return 1;
if (strcmp(args[1], "map") == 0 || strcmp(args[1], "acl") == 0) {
/* Set flags. */
if (args[1][0] == 'm')
@ -667,9 +664,6 @@ static int cli_parse_prepare_map(char **args, char *payload, struct appctx *appc
{
struct show_map_ctx *ctx = applet_reserve_svcctx(appctx, sizeof(*ctx));
if (!cli_has_level(appctx, ACCESS_LVL_ADMIN))
return 1;
if (strcmp(args[1], "map") == 0 ||
strcmp(args[1], "acl") == 0) {
uint next_gen;
@ -712,9 +706,6 @@ static int cli_parse_show_map(char **args, char *payload, struct appctx *appctx,
{
struct show_map_ctx *ctx = applet_reserve_svcctx(appctx, sizeof(*ctx));
if (!cli_has_level(appctx, ACCESS_LVL_ADMIN))
return 1;
if (strcmp(args[1], "map") == 0 ||
strcmp(args[1], "acl") == 0) {
const char *gen = NULL;
@ -769,9 +760,6 @@ static int cli_parse_set_map(char **args, char *payload, struct appctx *appctx,
{
struct show_map_ctx *ctx = applet_reserve_svcctx(appctx, sizeof(*ctx));
if (!cli_has_level(appctx, ACCESS_LVL_ADMIN))
return 1;
if (strcmp(args[1], "map") == 0) {
char *err;
@ -844,9 +832,6 @@ static int cli_parse_add_map(char **args, char *payload, struct appctx *appctx,
{
struct show_map_ctx *ctx = applet_reserve_svcctx(appctx, sizeof(*ctx));
if (!cli_has_level(appctx, ACCESS_LVL_ADMIN))
return 1;
if (strcmp(args[1], "map") == 0 ||
strcmp(args[1], "acl") == 0) {
const char *gen = NULL;
@ -977,9 +962,6 @@ static int cli_parse_del_map(char **args, char *payload, struct appctx *appctx,
{
struct show_map_ctx *ctx = applet_reserve_svcctx(appctx, sizeof(*ctx));
if (!cli_has_level(appctx, ACCESS_LVL_ADMIN))
return 1;
if (args[1][0] == 'm')
ctx->display_flags = PAT_REF_MAP;
else
@ -1075,9 +1057,6 @@ static int cli_parse_clear_map(char **args, char *payload, struct appctx *appctx
{
struct show_map_ctx *ctx = applet_reserve_svcctx(appctx, sizeof(*ctx));
if (!cli_has_level(appctx, ACCESS_LVL_ADMIN))
return 1;
if (strcmp(args[1], "map") == 0 || strcmp(args[1], "acl") == 0) {
const char *gen = NULL;
@ -1134,9 +1113,6 @@ static int cli_parse_commit_map(char **args, char *payload, struct appctx *appct
{
struct show_map_ctx *ctx = applet_reserve_svcctx(appctx, sizeof(*ctx));
if (!cli_has_level(appctx, ACCESS_LVL_ADMIN))
return 1;
if (strcmp(args[1], "map") == 0 || strcmp(args[1], "acl") == 0) {
const char *gen = NULL;
uint genid;

View File

@ -4672,7 +4672,6 @@ static void h1_detach(struct sedesc *sd)
if (h1c->state == H1_CS_RUNNING && !(h1c->flags & H1C_F_IS_BACK) && h1s->req.state != H1_MSG_DONE) {
h1c->state = H1_CS_DRAINING;
h1c->flags &= ~H1C_F_WANT_FASTFWD;
h1c_report_term_evt(h1c, muxc_tevt_type_graceful_shut);
COUNT_IF(1, "Deferring H1S destroy to drain message");
TRACE_DEVEL("Deferring H1S destroy to drain message", H1_EV_STRM_END, h1s->h1c->conn, h1s);
@ -4886,14 +4885,8 @@ static size_t h1_snd_buf(struct stconn *sc, struct buffer *buf, size_t count, in
/* Inherit some flags from the upper layer */
h1c->flags &= ~(H1C_F_CO_MSG_MORE|H1C_F_CO_STREAMER);
if (flags & CO_SFL_MSG_MORE) {
/* Don't set H1C_F_CO_MSG_MORE when sending a bodyless response to client.
* We must do that if the response is not finished, regardless it a bodyless
* response, to be sure to send it ASAP.
*/
if ((h1c->flags & H1C_F_IS_BACK) || !(h1s->flags & H1S_F_BODYLESS_RESP))
h1c->flags |= H1C_F_CO_MSG_MORE;
}
if (flags & CO_SFL_MSG_MORE)
h1c->flags |= H1C_F_CO_MSG_MORE;
if (flags & CO_SFL_STREAMER)
h1c->flags |= H1C_F_CO_STREAMER;

View File

@ -489,9 +489,6 @@ static int h2_be_glitches_threshold = 0; /* backend's max glitches
static int h2_fe_glitches_threshold = 0; /* frontend's max glitches: unlimited */
static uint h2_be_rxbuf = 0; /* backend's default total rxbuf (bytes) */
static uint h2_fe_rxbuf = 0; /* frontend's default total rxbuf (bytes) */
static unsigned int h2_be_max_frames_at_once = 0; /* backend value: 0=no limit */
static unsigned int h2_fe_max_frames_at_once = 0; /* frontend value: 0=no limit */
static unsigned int h2_fe_max_rst_at_once = 0; /* frontend value: 0=no limit */
static unsigned int h2_settings_max_concurrent_streams = 100; /* default value */
static unsigned int h2_be_settings_max_concurrent_streams = 0; /* backend value */
static unsigned int h2_fe_settings_max_concurrent_streams = 0; /* frontend value */
@ -3453,7 +3450,6 @@ static int h2c_handle_priority(struct h2c *h2c)
static int h2c_handle_rst_stream(struct h2c *h2c, struct h2s *h2s)
{
int rst_code;
int ret = 1;
TRACE_ENTER(H2_EV_RX_FRAME|H2_EV_RX_RST|H2_EV_RX_EOI, h2c->conn, h2s);
@ -3482,16 +3478,6 @@ static int h2c_handle_rst_stream(struct h2c *h2c, struct h2s *h2s)
if (h2s_sc(h2s)) {
se_fl_set_error(h2s->sd);
if (unlikely(!se_fl_test(h2s->sd, SE_FL_APP_STARTED))) {
/* the application layer has not yet started to read! */
TRACE_STATE("received early RST_STREAM", H2_EV_RX_FRAME|H2_EV_RX_RST, h2c->conn);
if (h2c_report_glitch(h2c, 1, "received early RST_STREAM, attack suspected")) {
TRACE_DEVEL("too many glitches, leaving on error", H2_EV_RX_FRAME|H2_EV_RX_RST, h2c->conn, h2s);
ret = 0; // report the error
}
}
se_report_term_evt(h2s->sd, se_tevt_type_rst_rcvd);
if (!h2s->sd->abort_info.info) {
h2s->sd->abort_info.info = (SE_ABRT_SRC_MUX_H2 << SE_ABRT_SRC_SHIFT);
@ -3502,7 +3488,7 @@ static int h2c_handle_rst_stream(struct h2c *h2c, struct h2s *h2s)
h2s->flags |= H2_SF_RST_RCVD;
TRACE_LEAVE(H2_EV_RX_FRAME|H2_EV_RX_RST|H2_EV_RX_EOI, h2c->conn, h2s);
return ret;
return 1;
}
/* processes a HEADERS frame. Returns h2s on success or NULL on missing data.
@ -4253,8 +4239,6 @@ static void h2_process_demux(struct h2c *h2c)
struct h2_fh hdr;
unsigned int padlen = 0;
int32_t old_iw = h2c->miw;
uint frames_budget = 0;
uint rst_budget = 0;
TRACE_ENTER(H2_EV_H2C_WAKE, h2c->conn);
@ -4343,14 +4327,6 @@ static void h2_process_demux(struct h2c *h2c)
}
}
if (h2c->flags & H2_CF_IS_BACK) {
frames_budget = h2_be_max_frames_at_once;
}
else {
frames_budget = h2_fe_max_frames_at_once;
rst_budget = h2_fe_max_rst_at_once;
}
/* process as many incoming frames as possible below */
while (1) {
int ret = 0;
@ -4653,29 +4629,6 @@ static void h2_process_demux(struct h2c *h2c)
h2c->st0 = H2_CS_FRAME_H;
}
}
/* If more frames remain in the buffer, let's first check if we've
* depleted the frames processing budget. Consuming the RST budget
* makes the tasklet go to TL_BULK to make it less priority than
* other processing since it's often used by attacks, while other
* frame types just yield normally.
*/
if (b_data(&h2c->dbuf)) {
if (h2c->dft == H2_FT_RST_STREAM && (rst_budget && !--rst_budget)) {
/* we've consumed all RST frames permitted by
* the budget, we have to yield now.
*/
tasklet_wakeup(h2c->wait_event.tasklet, 0);
break;
}
else if ((frames_budget && !--frames_budget)) {
/* we've consumed all frames permitted by the
* budget, we have to yield now.
*/
tasklet_wakeup(h2c->wait_event.tasklet);
break;
}
}
}
if (h2c_update_strm_rx_win(h2c) &&
@ -7877,6 +7830,7 @@ static size_t h2_rcv_buf(struct stconn *sc, struct buffer *buf, size_t count, in
struct htx *h2s_htx = NULL;
struct htx *buf_htx = NULL;
struct buffer *rxbuf = NULL;
struct htx_ret htxret;
size_t ret = 0;
uint prev_h2c_flags = h2c->flags;
unsigned long long prev_body_len = h2s->body_len;
@ -7911,7 +7865,17 @@ static size_t h2_rcv_buf(struct stconn *sc, struct buffer *buf, size_t count, in
goto end;
}
count -= htx_xfer(buf_htx, h2s_htx, count, HTX_XFER_DEFAULT);
htxret = htx_xfer_blks(buf_htx, h2s_htx, count, HTX_BLK_UNUSED);
count -= htxret.ret;
if (h2s_htx->flags & HTX_FL_PARSING_ERROR) {
buf_htx->flags |= HTX_FL_PARSING_ERROR;
if (htx_is_empty(buf_htx))
se_fl_set(h2s->sd, SE_FL_EOI);
}
else if (htx_is_empty(h2s_htx)) {
buf_htx->flags |= (h2s_htx->flags & HTX_FL_EOM);
}
htx_to_buf(buf_htx, buf);
htx_to_buf(h2s_htx, rxbuf);
@ -7940,7 +7904,13 @@ static size_t h2_rcv_buf(struct stconn *sc, struct buffer *buf, size_t count, in
/* tell the stream layer whether there are data left or not */
if (h2s_rxbuf_cnt(h2s)) {
/* Note that parsing errors can also arrive here, we may need
* to propagate errors upstream otherwise no new activity will
* unblock them.
*/
se_fl_set(h2s->sd, SE_FL_RCV_MORE | SE_FL_WANT_ROOM);
if (h2s_htx && h2s_htx->flags & HTX_FL_PARSING_ERROR)
h2s_propagate_term_flags(h2c, h2s);
BUG_ON_HOT(!buf->data);
}
else {
@ -8830,30 +8800,6 @@ static int h2_parse_max_total_streams(char **args, int section_type, struct prox
return 0;
}
/* config parser for global "tune.h2.{be.,fe.,}max-{frames,rst}-at-once" */
static int h2_parse_max_frames_at_once(char **args, int section_type, struct proxy *curpx,
const struct proxy *defpx, const char *file, int line,
char **err)
{
uint *vptr;
/* backend/frontend/default */
if (strcmp(args[0], "tune.h2.be.max-frames-at-once") == 0)
vptr = &h2_be_max_frames_at_once;
else if (strcmp(args[0], "tune.h2.fe.max-frames-at-once") == 0)
vptr = &h2_fe_max_frames_at_once;
else if (strcmp(args[0], "tune.h2.fe.max-rst-at-once") == 0)
vptr = &h2_fe_max_rst_at_once;
else
BUG_ON(1, "unhandled keyword");
if (too_many_args(1, args, err, NULL))
return -1;
*vptr = atoi(args[1]);
return 0;
}
/* config parser for global "tune.h2.max-frame-size" */
static int h2_parse_max_frame_size(char **args, int section_type, struct proxy *curpx,
const struct proxy *defpx, const char *file, int line,
@ -8952,13 +8898,10 @@ static struct cfg_kw_list cfg_kws = {ILH, {
{ CFG_GLOBAL, "tune.h2.be.glitches-threshold", h2_parse_glitches_threshold },
{ CFG_GLOBAL, "tune.h2.be.initial-window-size", h2_parse_initial_window_size },
{ CFG_GLOBAL, "tune.h2.be.max-concurrent-streams", h2_parse_max_concurrent_streams },
{ CFG_GLOBAL, "tune.h2.be.max-frames-at-once", h2_parse_max_frames_at_once },
{ CFG_GLOBAL, "tune.h2.be.rxbuf", h2_parse_rxbuf },
{ CFG_GLOBAL, "tune.h2.fe.glitches-threshold", h2_parse_glitches_threshold },
{ CFG_GLOBAL, "tune.h2.fe.initial-window-size", h2_parse_initial_window_size },
{ CFG_GLOBAL, "tune.h2.fe.max-concurrent-streams", h2_parse_max_concurrent_streams },
{ CFG_GLOBAL, "tune.h2.fe.max-frames-at-once", h2_parse_max_frames_at_once },
{ CFG_GLOBAL, "tune.h2.fe.max-rst-at-once", h2_parse_max_frames_at_once },
{ CFG_GLOBAL, "tune.h2.fe.max-total-streams", h2_parse_max_total_streams },
{ CFG_GLOBAL, "tune.h2.fe.rxbuf", h2_parse_rxbuf },
{ CFG_GLOBAL, "tune.h2.header-table-size", h2_parse_header_table_size },

View File

@ -1,5 +1,4 @@
#include <haproxy/mux_quic.h>
#include <haproxy/mux_quic_priv.h>
#include <import/eb64tree.h>
@ -11,7 +10,6 @@
#include <haproxy/global-t.h>
#include <haproxy/h3.h>
#include <haproxy/list.h>
#include <haproxy/mux_quic_qstrm.h>
#include <haproxy/ncbuf.h>
#include <haproxy/pool.h>
#include <haproxy/proxy.h>
@ -34,7 +32,6 @@
#include <haproxy/stconn.h>
#include <haproxy/time.h>
#include <haproxy/trace.h>
#include <haproxy/xprt_qstrm.h>
#include <haproxy/xref.h>
DECLARE_TYPED_POOL(pool_head_qcc, "qcc", struct qcc);
@ -47,7 +44,7 @@ static void qmux_ctrl_room(struct qc_stream_desc *, uint64_t room);
/* Returns true if pacing should be used for <conn> connection. */
static int qcc_is_pacing_active(const struct connection *conn)
{
return conn_is_quic(conn) && quic_tune_conn_test(QUIC_TUNE_FB_TX_PACING, conn);
return quic_tune_conn_test(QUIC_TUNE_FB_TX_PACING, conn);
}
/* Free <rxbuf> instance and its inner data storage attached to <qcs> stream. */
@ -103,9 +100,9 @@ static void qcs_free(struct qcs *qcs)
qcc->app_ops->detach(qcs);
/* Release qc_stream_desc buffer from quic-conn layer. */
if (conn_is_quic(qcc->conn) && qcs->tx.stream) {
qc_stream_desc_sub_send(qcs->tx.stream, NULL);
qc_stream_desc_release(qcs->tx.stream, qcs->tx.fc.off_real, qcc);
if (qcs->stream) {
qc_stream_desc_sub_send(qcs->stream, NULL);
qc_stream_desc_release(qcs->stream, qcs->tx.fc.off_real, qcc);
}
/* Free Rx buffer. */
@ -136,7 +133,7 @@ static struct qcs *qcs_new(struct qcc *qcc, uint64_t id, enum qcs_type type)
return NULL;
}
qcs->tx.stream = NULL;
qcs->stream = NULL;
qcs->qcc = qcc;
qcs->sess = NULL;
qcs->sd = NULL;
@ -200,25 +197,15 @@ static struct qcs *qcs_new(struct qcc *qcc, uint64_t id, enum qcs_type type)
/* Allocate transport layer stream descriptor. Only needed for TX. */
if (!quic_stream_is_uni(id) || !quic_stream_is_remote(qcc, id)) {
if (conn_is_quic(qcc->conn)) {
struct quic_conn *qc = qcc->conn->handle.qc;
qcs->tx.stream = qc_stream_desc_new(id, type, qcs, qc);
if (!qcs->tx.stream) {
TRACE_ERROR("qc_stream_desc alloc failure", QMUX_EV_QCS_NEW, qcc->conn, qcs);
goto err;
}
struct quic_conn *qc = qcc->conn->handle.qc;
qcs->stream = qc_stream_desc_new(id, type, qcs, qc);
if (!qcs->stream) {
TRACE_ERROR("qc_stream_desc alloc failure", QMUX_EV_QCS_NEW, qcc->conn, qcs);
goto err;
}
qc_stream_desc_sub_send(qcs->tx.stream, qmux_ctrl_send);
qc_stream_desc_sub_room(qcs->tx.stream, qmux_ctrl_room);
}
else {
qcs->tx.qstrm_buf = BUF_NULL;
b_alloc(&qcs->tx.qstrm_buf, DB_MUX_TX);
if (!b_size(&qcs->tx.qstrm_buf)) {
TRACE_ERROR("tx buf alloc failure", QMUX_EV_QCS_NEW, qcc->conn, qcs);
goto err;
}
}
qc_stream_desc_sub_send(qcs->stream, qmux_ctrl_send);
qc_stream_desc_sub_room(qcs->stream, qmux_ctrl_room);
}
if (qcc->app_ops->attach && qcc->app_ops->attach(qcs, qcc->ctx)) {
@ -411,7 +398,7 @@ static void qcc_refresh_timeout(struct qcc *qcc)
/* Mark a stream as open if it was idle. This can be used on every
* successful emission/reception operation to update the stream state.
*/
void qcs_idle_open(struct qcs *qcs)
static void qcs_idle_open(struct qcs *qcs)
{
/* This operation must not be used if the stream is already closed. */
BUG_ON_HOT(qcs->st == QC_SS_CLO);
@ -423,7 +410,7 @@ void qcs_idle_open(struct qcs *qcs)
}
/* Close the local channel of <qcs> instance. */
void qcs_close_local(struct qcs *qcs)
static void qcs_close_local(struct qcs *qcs)
{
TRACE_STATE("closing stream locally", QMUX_EV_QCS_SEND, qcs->qcc->conn, qcs);
@ -447,7 +434,7 @@ void qcs_close_local(struct qcs *qcs)
}
/* Returns true if <qcs> can be purged. */
int qcs_is_completed(struct qcs *qcs)
static int qcs_is_completed(struct qcs *qcs)
{
/* A stream is completed if fully closed and stconn released, or simply
* detached and everything already sent.
@ -583,36 +570,19 @@ void qcs_notify_send(struct qcs *qcs)
}
}
const struct buffer *qcs_tx_buf_const(const struct qcs *qcs)
{
return conn_is_quic(qcs->qcc->conn) ?
qc_stream_buf_get(qcs->tx.stream) : &qcs->tx.qstrm_buf;
}
struct buffer *qcs_tx_buf(struct qcs *qcs)
{
return conn_is_quic(qcs->qcc->conn) ?
qc_stream_buf_get(qcs->tx.stream) : &qcs->tx.qstrm_buf;
}
/* Returns total number of bytes not already sent to quic-conn layer. */
uint64_t qcs_prep_bytes(const struct qcs *qcs)
static uint64_t qcs_prep_bytes(const struct qcs *qcs)
{
const struct buffer *out = qcs_tx_buf_const(qcs);
struct buffer *out = qc_stream_buf_get(qcs->stream);
uint64_t diff, base_off;
if (!out)
return 0;
if (conn_is_quic(qcs->qcc->conn)) {
/* if ack_offset < buf_offset, it points to an older buffer. */
base_off = MAX(qcs->tx.stream->buf_offset, qcs->tx.stream->ack_offset);
diff = qcs->tx.fc.off_real - base_off;
return b_data(out) - diff;
}
else {
return b_data(out);
}
/* if ack_offset < buf_offset, it points to an older buffer. */
base_off = MAX(qcs->stream->buf_offset, qcs->stream->ack_offset);
diff = qcs->tx.fc.off_real - base_off;
return b_data(out) - diff;
}
/* Used as a callback for qc_stream_desc layer to notify about emission of a
@ -666,8 +636,8 @@ static void qmux_ctrl_send(struct qc_stream_desc *stream, uint64_t data, uint64_
}
/* Release buffer if everything sent and buf is full or stream is waiting for room. */
if (!qcs_prep_bytes(qcs) &&
(b_full(&qcs->tx.stream->buf->buf) || qcs->flags & QC_SF_BLK_MROOM)) {
qc_stream_buf_release(qcs->tx.stream);
(b_full(&qcs->stream->buf->buf) || qcs->flags & QC_SF_BLK_MROOM)) {
qc_stream_buf_release(qcs->stream);
qcs->flags &= ~QC_SF_BLK_MROOM;
qcs_notify_send(qcs);
}
@ -678,7 +648,7 @@ static void qmux_ctrl_send(struct qc_stream_desc *stream, uint64_t data, uint64_
increment_send_rate(diff, 0);
}
if (!qc_stream_buf_get(qcs->tx.stream) || !qcs_prep_bytes(qcs)) {
if (!qc_stream_buf_get(qcs->stream) || !qcs_prep_bytes(qcs)) {
/* Remove stream from send_list if all was sent. */
LIST_DEL_INIT(&qcs->el_send);
TRACE_STATE("stream sent done", QMUX_EV_QCS_SEND, qcc->conn, qcs);
@ -688,13 +658,13 @@ static void qmux_ctrl_send(struct qc_stream_desc *stream, uint64_t data, uint64_
qcs_close_local(qcs);
if (qcs->flags & QC_SF_FIN_STREAM) {
qcs->tx.stream->flags |= QC_SD_FL_WAIT_FOR_FIN;
qcs->stream->flags |= QC_SD_FL_WAIT_FOR_FIN;
/* Reset flag to not emit multiple FIN STREAM frames. */
qcs->flags &= ~QC_SF_FIN_STREAM;
}
/* Unsubscribe from streamdesc when everything sent. */
qc_stream_desc_sub_send(qcs->tx.stream, NULL);
qc_stream_desc_sub_send(qcs->stream, NULL);
if (qcs_is_completed(qcs)) {
TRACE_STATE("add stream in purg_list", QMUX_EV_QCS_SEND, qcc->conn, qcs);
@ -940,16 +910,13 @@ static struct qcs *qcc_init_stream_remote(struct qcc *qcc, uint64_t id)
*/
void qcs_send_metadata(struct qcs *qcs)
{
if (conn_is_quic(qcs->qcc->conn)) {
/* Reserved for stream with Tx capability. */
BUG_ON(!qcs->tx.stream);
/* Cannot use if some data already transferred for this stream. */
BUG_ON(qcs->tx.stream->ack_offset || !eb_is_empty(&qcs->tx.stream->buf_tree));
qc_stream_desc_sub_room(qcs->tx.stream, NULL);
}
/* Reserved for stream with Tx capability. */
BUG_ON(!qcs->stream);
/* Cannot use if some data already transferred for this stream. */
BUG_ON(qcs->stream->ack_offset || !eb_is_empty(&qcs->stream->buf_tree));
qcs->flags |= QC_SF_TXBUB_OOB;
qc_stream_desc_sub_room(qcs->stream, NULL);
}
/* Instantiate a streamdesc instance for <qcs> stream. This is necessary to
@ -1471,7 +1438,7 @@ struct buffer *qcc_get_stream_rxbuf(struct qcs *qcs)
struct buffer *qcc_get_stream_txbuf(struct qcs *qcs, int *err, int small)
{
struct qcc *qcc = qcs->qcc;
struct buffer *out = qcs_tx_buf(qcs);
struct buffer *out = qc_stream_buf_get(qcs->stream);
/* Stream must not try to reallocate a buffer if currently waiting for one. */
BUG_ON(LIST_INLIST(&qcs->el_buf));
@ -1495,7 +1462,7 @@ struct buffer *qcc_get_stream_txbuf(struct qcs *qcs, int *err, int small)
}
}
out = qc_stream_buf_alloc(qcs->tx.stream, qcs->tx.fc.off_real, small);
out = qc_stream_buf_alloc(qcs->stream, qcs->tx.fc.off_real, small);
if (!out) {
TRACE_ERROR("stream desc alloc failure", QMUX_EV_QCS_SEND, qcc->conn, qcs);
*err = 1;
@ -1520,7 +1487,7 @@ struct buffer *qcc_get_stream_txbuf(struct qcs *qcs, int *err, int small)
struct buffer *qcc_realloc_stream_txbuf(struct qcs *qcs)
{
struct qcc *qcc = qcs->qcc;
struct buffer *out = qc_stream_buf_get(qcs->tx.stream);
struct buffer *out = qc_stream_buf_get(qcs->stream);
/* Stream must not try to reallocate a buffer if currently waiting for one. */
BUG_ON(LIST_INLIST(&qcs->el_buf));
@ -1533,7 +1500,7 @@ struct buffer *qcc_realloc_stream_txbuf(struct qcs *qcs)
qcc->tx.buf_in_flight -= b_size(out);
}
out = qc_stream_buf_realloc(qcs->tx.stream);
out = qc_stream_buf_realloc(qcs->stream);
if (!out) {
TRACE_ERROR("buffer alloc failure", QMUX_EV_QCS_SEND, qcc->conn, qcs);
goto out;
@ -1580,7 +1547,7 @@ int qcc_release_stream_txbuf(struct qcs *qcs)
return 1;
}
qc_stream_buf_release(qcs->tx.stream);
qc_stream_buf_release(qcs->stream);
return 0;
}
@ -1611,7 +1578,7 @@ static void qcc_clear_frms(struct qcc *qcc)
TRACE_STATE("resetting STREAM frames list", QMUX_EV_QCC_SEND, qcc->conn);
while (!LIST_ISEMPTY(&qcc->tx.frms)) {
struct quic_frame *frm = LIST_ELEM(qcc->tx.frms.n, struct quic_frame *, list);
qc_frm_free(conn_is_quic(qcc->conn) ? qcc->conn->handle.qc : NULL, &frm);
qc_frm_free(qcc->conn->handle.qc, &frm);
}
}
@ -1699,8 +1666,7 @@ void qcc_send_stream(struct qcs *qcs, int urg, int count)
if (count) {
qfctl_sinc(&qcc->tx.fc, count);
qfctl_sinc(&qcs->tx.fc, count);
if (conn_is_quic(qcc->conn))
bdata_ctr_add(&qcs->tx.stream->data, count);
bdata_ctr_add(&qcs->stream->data, count);
}
TRACE_LEAVE(QMUX_EV_QCS_SEND, qcc->conn, qcs);
@ -2502,8 +2468,7 @@ static int qcs_build_stream_frm(struct qcs *qcs, struct buffer *out, char fin,
goto err;
}
frm->stream.stream =
conn_is_quic(qcc->conn) ? (void *)qcs->tx.stream : (void *)qcs;
frm->stream.stream = qcs->stream;
frm->stream.id = qcs->id;
frm->stream.offset = 0;
frm->stream.dup = 0;
@ -2569,13 +2534,24 @@ static int qcc_subscribe_send(struct qcc *qcc)
return 1;
}
static int qcc_quic_send_frames(struct qcc *qcc, struct list *frms, int stream)
/* Wrapper for send on transport layer. Send a list of frames <frms> for the
* connection <qcc>.
*
* Returns 0 if all data sent with success. On fatal error, a negative error
* code is returned. A positive 1 is used if emission should be paced.
*/
static int qcc_send_frames(struct qcc *qcc, struct list *frms, int stream)
{
enum quic_tx_err ret;
struct quic_pacer *pacer = NULL;
TRACE_ENTER(QMUX_EV_QCC_SEND, qcc->conn);
if (LIST_ISEMPTY(frms)) {
TRACE_DEVEL("leaving on no frame to send", QMUX_EV_QCC_SEND, qcc->conn);
return -1;
}
if (stream && qcc_is_pacing_active(qcc->conn))
pacer = &qcc->tx.pacer;
@ -2603,23 +2579,6 @@ static int qcc_quic_send_frames(struct qcc *qcc, struct list *frms, int stream)
return -1;
}
/* Wrapper for send on transport layer. Send a list of frames <frms> for the
* connection <qcc>.
*
* Returns 0 if all data sent with success. On fatal error, a negative error
* code is returned. A positive 1 is used if emission should be paced.
*/
static int qcc_send_frames(struct qcc *qcc, struct list *frms, int stream)
{
if (LIST_ISEMPTY(frms)) {
TRACE_DEVEL("leaving on no frame to send", QMUX_EV_QCC_SEND, qcc->conn);
return -1;
}
return conn_is_quic(qcc->conn) ? qcc_quic_send_frames(qcc, frms, stream) :
qcc_qstrm_send_frames(qcc, frms);
}
/* Emit a RESET_STREAM on <qcs>.
*
* Returns 0 if the frame has been successfully sent else non-zero.
@ -2718,7 +2677,7 @@ static int qcs_send_stop_sending(struct qcs *qcs)
static int qcs_send(struct qcs *qcs, struct list *frms, uint64_t window_conn)
{
struct qcc *qcc = qcs->qcc;
struct buffer *out = qcs_tx_buf(qcs);
struct buffer *out = qc_stream_buf_get(qcs->stream);
int flen = 0;
const char fin = qcs->flags & QC_SF_FIN_STREAM;
@ -2820,7 +2779,7 @@ static int qcc_emit_rs_ss(struct qcc *qcc)
list_for_each_entry_safe(qcs, qcs_tmp, &qcc->send_list, el_send) {
/* Stream must not be present in send_list if it has nothing to send. */
BUG_ON(!(qcs->flags & (QC_SF_FIN_STREAM|QC_SF_TO_STOP_SENDING|QC_SF_TO_RESET)) &&
((conn_is_quic(qcc->conn) && !qcs->tx.stream) || !qcs_prep_bytes(qcs)));
(!qcs->stream || !qcs_prep_bytes(qcs)));
/* Interrupt looping for the first stream where no RS nor SS is
* necessary and is not use for "metadata" transfer. These
@ -2846,7 +2805,7 @@ static int qcc_emit_rs_ss(struct qcc *qcc)
/* Remove stream from send_list if only SS was necessary. */
if (!(qcs->flags & (QC_SF_FIN_STREAM|QC_SF_TO_RESET)) &&
((conn_is_quic(qcc->conn) && !qcs->tx.stream) || !qcs_prep_bytes(qcs))) {
(!qcs->stream || !qcs_prep_bytes(qcs))) {
LIST_DEL_INIT(&qcs->el_send);
continue;
}
@ -2915,7 +2874,7 @@ static int qcc_build_frms(struct qcc *qcc, struct list *qcs_failed)
/* Streams with RS/SS must be handled via qcc_emit_rs_ss(). */
BUG_ON(qcs->flags & (QC_SF_TO_STOP_SENDING|QC_SF_TO_RESET));
/* Stream must not be present in send_list if it has nothing to send. */
BUG_ON(!(qcs->flags & QC_SF_FIN_STREAM) && ((conn_is_quic(qcc->conn) && !qcs->tx.stream) || !qcs_prep_bytes(qcs)));
BUG_ON(!(qcs->flags & QC_SF_FIN_STREAM) && (!qcs->stream || !qcs_prep_bytes(qcs)));
/* Total sent bytes must not exceed connection window. */
BUG_ON(total > window_conn);
@ -3080,12 +3039,6 @@ static int qcc_io_send(struct qcc *qcc)
* flow-control limit reached.
*/
while ((ret = qcc_send_frames(qcc, frms, 1)) == 0 && !qfctl_rblocked(&qcc->tx.fc)) {
/* TODO should this check also be performed for QUIC ? */
if (!conn_is_quic(qcc->conn) && (qcc->conn->flags & CO_FL_ERROR)) {
TRACE_DEVEL("connection on error", QMUX_EV_QCC_SEND, qcc->conn);
goto out;
}
window_conn = qfctl_rcap(&qcc->tx.fc);
resent = 0;
@ -3097,8 +3050,7 @@ static int qcc_io_send(struct qcc *qcc)
* new qc_stream_desc should be present in send_list as
* long as transport layer can handle all data.
*/
BUG_ON((!conn_is_quic(qcc->conn) || qcs->tx.stream->buf) &&
!qfctl_rblocked(&qcs->tx.fc));
BUG_ON(qcs->stream->buf && !qfctl_rblocked(&qcs->tx.fc));
/* Total sent bytes must not exceed connection window. */
BUG_ON(resent > window_conn);
@ -3198,11 +3150,6 @@ static int qcc_io_recv(struct qcc *qcc)
if ((qcc->flags & QC_CF_WAIT_HS) && !(qcc->wait_event.events & SUB_RETRY_RECV))
qcc_wait_for_hs(qcc);
if (!conn_is_quic(qcc->conn)) {
if (!(qcc->wait_event.events & SUB_RETRY_RECV))
qcc_qstrm_recv(qcc);
}
while (!LIST_ISEMPTY(&qcc->recv_list)) {
qcs = LIST_ELEM(qcc->recv_list.n, struct qcs *, el_recv);
/* No need to add an uni local stream in recv_list. */
@ -3298,17 +3245,15 @@ static void qcc_shutdown(struct qcc *qcc)
qcc->err = quic_err_transport(QC_ERR_NO_ERROR);
}
if (conn_is_quic(qcc->conn)) {
/* Register "no error" code at transport layer. Do not use
* quic_set_connection_close() as retransmission may be performed to
* finalized transfers. Do not overwrite quic-conn existing code if
* already set.
*
* TODO implement a wrapper function for this in quic-conn module
*/
if (!(qcc->conn->handle.qc->flags & QUIC_FL_CONN_IMMEDIATE_CLOSE))
qcc->conn->handle.qc->err = qcc->err;
}
/* Register "no error" code at transport layer. Do not use
* quic_set_connection_close() as retransmission may be performed to
* finalized transfers. Do not overwrite quic-conn existing code if
* already set.
*
* TODO implement a wrapper function for this in quic-conn module
*/
if (!(qcc->conn->handle.qc->flags & QUIC_FL_CONN_IMMEDIATE_CLOSE))
qcc->conn->handle.qc->err = qcc->err;
/* A connection is not reusable if app layer is closed. */
if (qcc->flags & QC_CF_IS_BACK)
@ -3366,7 +3311,7 @@ static int qcc_io_process(struct qcc *qcc)
/* If using listener socket, soft-stop is not supported. The
* connection must be closed immediately.
*/
if (conn_is_quic(qcc->conn) && !qc_test_fd(qcc->conn->handle.qc)) {
if (!qc_test_fd(qcc->conn->handle.qc)) {
TRACE_DEVEL("proxy disabled with listener socket, closing connection", QMUX_EV_QCC_WAKE, qcc->conn);
qcc->conn->flags |= (CO_FL_SOCK_RD_SH|CO_FL_SOCK_WR_SH);
qcc_io_send(qcc);
@ -3430,7 +3375,7 @@ static void qcc_release(struct qcc *qcc)
qcs_free(qcs);
}
if (conn && conn_is_quic(conn)) {
if (conn) {
qc = conn->handle.qc;
/* unsubscribe from all remaining qc_stream_desc */
@ -3463,7 +3408,7 @@ static void qcc_release(struct qcc *qcc)
if (qcc->app_ops) {
if (qcc->app_ops->release)
qcc->app_ops->release(qcc->ctx);
if (conn_is_quic(conn) && conn->handle.qc)
if (conn->handle.qc)
conn->handle.qc->strm_reject = qcc->app_ops->strm_reject;
}
TRACE_PROTO("application layer released", QMUX_EV_QCC_END, conn);
@ -3702,7 +3647,7 @@ static int qmux_init(struct connection *conn, struct proxy *prx,
struct session *sess, struct buffer *input)
{
struct qcc *qcc;
const struct quic_transport_params *lparams, *rparams;
struct quic_transport_params *lparams, *rparams;
void *conn_ctx = conn->ctx;
TRACE_ENTER(QMUX_EV_QCC_NEW);
@ -3721,49 +3666,26 @@ static int qmux_init(struct connection *conn, struct proxy *prx,
qcc->glitches = 0;
qcc->err = quic_err_transport(QC_ERR_NO_ERROR);
if (conn_is_quic(conn)) {
/* Server parameters, params used for RX flow control. */
lparams = &conn->handle.qc->rx.params;
/* Server parameters, params used for RX flow control. */
lparams = &conn->handle.qc->rx.params;
qcc->lfctl.ms_bidi = qcc->lfctl.ms_bidi_init = lparams->initial_max_streams_bidi;
qcc->lfctl.ms_uni = lparams->initial_max_streams_uni;
qcc->lfctl.msd_bidi_l = lparams->initial_max_stream_data_bidi_local;
qcc->lfctl.msd_bidi_r = lparams->initial_max_stream_data_bidi_remote;
qcc->lfctl.msd_uni_r = lparams->initial_max_stream_data_uni;
qcc->lfctl.cl_bidi_r = 0;
qcc->lfctl.ms_bidi = qcc->lfctl.ms_bidi_init = lparams->initial_max_streams_bidi;
qcc->lfctl.ms_uni = lparams->initial_max_streams_uni;
qcc->lfctl.msd_bidi_l = lparams->initial_max_stream_data_bidi_local;
qcc->lfctl.msd_bidi_r = lparams->initial_max_stream_data_bidi_remote;
qcc->lfctl.msd_uni_r = lparams->initial_max_stream_data_uni;
qcc->lfctl.cl_bidi_r = 0;
qcc->lfctl.md = qcc->lfctl.md_init = lparams->initial_max_data;
qcc->lfctl.offsets_recv = qcc->lfctl.offsets_consume = 0;
qcc->lfctl.md = qcc->lfctl.md_init = lparams->initial_max_data;
qcc->lfctl.offsets_recv = qcc->lfctl.offsets_consume = 0;
rparams = &conn->handle.qc->tx.params;
qfctl_init(&qcc->tx.fc, rparams->initial_max_data);
qcc->rfctl.ms_uni = rparams->initial_max_streams_uni;
qcc->rfctl.ms_bidi = rparams->initial_max_streams_bidi;
qcc->rfctl.msd_bidi_l = rparams->initial_max_stream_data_bidi_local;
qcc->rfctl.msd_bidi_r = rparams->initial_max_stream_data_bidi_remote;
qcc->rfctl.msd_uni_l = rparams->initial_max_stream_data_uni;
}
else {
rparams = xprt_qstrm_rparams(conn->xprt_ctx);
qfctl_init(&qcc->tx.fc, rparams->initial_max_data);
qcc->rfctl.ms_uni = rparams->initial_max_streams_uni;
qcc->rfctl.ms_bidi = rparams->initial_max_streams_bidi;
qcc->rfctl.msd_bidi_l = rparams->initial_max_stream_data_bidi_local;
qcc->rfctl.msd_bidi_r = rparams->initial_max_stream_data_bidi_remote;
qcc->rfctl.msd_uni_l = rparams->initial_max_stream_data_uni;
/* TODO */
qcc->lfctl.ms_bidi = qcc->lfctl.ms_bidi_init = 16384;
qcc->lfctl.ms_uni = 3;
qcc->lfctl.msd_bidi_l = 16384;
qcc->lfctl.msd_bidi_r = 16384;
qcc->lfctl.msd_uni_r = 16384;
qcc->lfctl.cl_bidi_r = 0;
qcc->lfctl.md = qcc->lfctl.md_init = 16384;
qcc->lfctl.offsets_recv = qcc->lfctl.offsets_consume = 0;
}
rparams = &conn->handle.qc->tx.params;
qfctl_init(&qcc->tx.fc, rparams->initial_max_data);
qcc->rfctl.ms_uni = rparams->initial_max_streams_uni;
qcc->rfctl.ms_bidi = rparams->initial_max_streams_bidi;
qcc->rfctl.msd_bidi_l = rparams->initial_max_stream_data_bidi_local;
qcc->rfctl.msd_bidi_r = rparams->initial_max_stream_data_bidi_remote;
qcc->rfctl.msd_uni_l = rparams->initial_max_stream_data_uni;
qcc->tx.buf_in_flight = 0;
@ -3783,22 +3705,6 @@ static int qmux_init(struct connection *conn, struct proxy *prx,
qcc->pacing_task->state |= TASK_F_WANTS_TIME;
}
if (!conn_is_quic(conn)) {
qcc->tx.qstrm_buf = BUF_NULL;
b_alloc(&qcc->tx.qstrm_buf, DB_MUX_TX);
if (!b_size(&qcc->tx.qstrm_buf)) {
TRACE_ERROR("tx qstrm buf alloc failure", QMUX_EV_QCC_NEW);
goto err;
}
qcc->rx.qstrm_buf = BUF_NULL;
b_alloc(&qcc->rx.qstrm_buf, DB_MUX_RX);
if (!b_size(&qcc->rx.qstrm_buf)) {
TRACE_ERROR("rx qstrm buf alloc failure", QMUX_EV_QCC_NEW);
goto err;
}
}
if (conn_is_back(conn)) {
qcc->next_bidi_l = 0x00;
qcc->largest_bidi_r = 0x01;
@ -3857,8 +3763,7 @@ static int qmux_init(struct connection *conn, struct proxy *prx,
qcc_reset_idle_start(qcc);
LIST_INIT(&qcc->opening_list);
if (conn_is_quic(conn))
HA_ATOMIC_STORE(&conn->handle.qc->qcc, qcc);
HA_ATOMIC_STORE(&conn->handle.qc->qcc, qcc);
/* Register conn as app_ops may use it. */
qcc->conn = conn;
@ -3878,20 +3783,18 @@ static int qmux_init(struct connection *conn, struct proxy *prx,
/* init read cycle */
tasklet_wakeup(qcc->wait_event.tasklet);
if (conn_is_quic(conn)) {
/* MUX is initialized before QUIC handshake completion if early data
* received. Flag connection to delay stream processing if
* wait-for-handshake is active.
*/
if (conn->handle.qc->state < QUIC_HS_ST_COMPLETE) {
if (!(conn->flags & CO_FL_EARLY_SSL_HS)) {
TRACE_STATE("flag connection with early data", QMUX_EV_QCC_WAKE, conn);
conn->flags |= CO_FL_EARLY_SSL_HS;
/* subscribe for handshake completion */
conn->xprt->subscribe(conn, conn->xprt_ctx, SUB_RETRY_RECV,
&qcc->wait_event);
qcc->flags |= QC_CF_WAIT_HS;
}
/* MUX is initialized before QUIC handshake completion if early data
* received. Flag connection to delay stream processing if
* wait-for-handshake is active.
*/
if (conn->handle.qc->state < QUIC_HS_ST_COMPLETE) {
if (!(conn->flags & CO_FL_EARLY_SSL_HS)) {
TRACE_STATE("flag connection with early data", QMUX_EV_QCC_WAKE, conn);
conn->flags |= CO_FL_EARLY_SSL_HS;
/* subscribe for handshake completion */
conn->xprt->subscribe(conn, conn->xprt_ctx, SUB_RETRY_RECV,
&qcc->wait_event);
qcc->flags |= QC_CF_WAIT_HS;
}
}
}
@ -3935,13 +3838,11 @@ static int qmux_init(struct connection *conn, struct proxy *prx,
return 0;
err:
if (conn_is_quic(conn)) {
/* Prepare CONNECTION_CLOSE, using INTERNAL_ERROR as fallback code if unset. */
if (!(conn->handle.qc->flags & QUIC_FL_CONN_IMMEDIATE_CLOSE)) {
struct quic_err err = qcc && qcc->err.code ?
qcc->err : quic_err_transport(QC_ERR_INTERNAL_ERROR);
quic_set_connection_close(conn->handle.qc, err);
}
/* Prepare CONNECTION_CLOSE, using INTERNAL_ERROR as fallback code if unset. */
if (!(conn->handle.qc->flags & QUIC_FL_CONN_IMMEDIATE_CLOSE)) {
struct quic_err err = qcc && qcc->err.code ?
qcc->err : quic_err_transport(QC_ERR_INTERNAL_ERROR);
quic_set_connection_close(conn->handle.qc, err);
}
if (qcc) {
@ -4655,8 +4556,8 @@ void qcc_show_quic(struct qcc *qcc)
}
if (!quic_stream_is_uni(qcs->id) || !quic_stream_is_remote(qcc, qcs->id)) {
if (qcs->tx.stream)
bdata_ctr_print(&trash, &qcs->tx.stream->data, "txb=");
if (qcs->stream)
bdata_ctr_print(&trash, &qcs->stream->data, "txb=");
chunk_appendf(&trash, " txoff=%llu(%llu) msd=%llu",
(ullong)qcs->tx.fc.off_real,
(ullong)qcs->tx.fc.off_soft - (ullong)qcs->tx.fc.off_real,
@ -4671,32 +4572,3 @@ static struct mux_proto_list mux_proto_quic =
{ .token = IST("quic"), .mode = PROTO_MODE_HTTP, .side = PROTO_SIDE_BOTH, .mux = &qmux_ops };
INITCALL1(STG_REGISTER, register_mux_proto, &mux_proto_quic);
static const struct mux_ops qstrm_ops = {
.init = qmux_init,
.destroy = qmux_destroy,
.detach = qmux_strm_detach,
.rcv_buf = qmux_strm_rcv_buf,
.snd_buf = qmux_strm_snd_buf,
.nego_fastfwd = qmux_strm_nego_ff,
.done_fastfwd = qmux_strm_done_ff,
.resume_fastfwd = qmux_strm_resume_ff,
.subscribe = qmux_strm_subscribe,
.unsubscribe = qmux_strm_unsubscribe,
.wake = qmux_wake,
.avail_streams = qmux_avail_streams,
.used_streams = qmux_used_streams,
.takeover = NULL, /* QUIC takeover support not implemented yet */
.attach = qmux_strm_attach,
.shut = qmux_strm_shut,
.ctl = qmux_ctl,
.sctl = qmux_sctl,
.show_sd = qmux_strm_show_sd,
.flags = MX_FL_HTX|MX_FL_NO_UPG|MX_FL_EXPERIMENTAL,
.name = "QMUX",
};
static struct mux_proto_list mux_proto_qstrm =
{ .token = IST("qmux"), .mode = PROTO_MODE_HTTP, .side = PROTO_SIDE_BOTH, .mux = &qstrm_ops };
INITCALL1(STG_REGISTER, register_mux_proto, &mux_proto_qstrm);

View File

@ -1,317 +0,0 @@
#include <haproxy/mux_quic_qstrm.h>
#include <haproxy/api.h>
#include <haproxy/buf.h>
#include <haproxy/chunk.h>
#include <haproxy/connection.h>
#include <haproxy/mux_quic.h>
#include <haproxy/mux_quic_priv.h>
#include <haproxy/proxy.h>
#include <haproxy/qmux_trace.h>
#include <haproxy/quic_fctl.h>
#include <haproxy/quic_frame.h>
#include <haproxy/trace.h>
/* Check whether the type of frame <frm> is allowed on a QMux connection.
 * Returns non-zero when the type is acceptable, zero otherwise.
 */
static int qstrm_is_frm_valid(const struct quic_frame *frm)
{
	/* STREAM frames occupy a contiguous range of type values. */
	if (frm->type >= QUIC_FT_STREAM_8 && frm->type <= QUIC_FT_STREAM_F)
		return 1;

	switch (frm->type) {
	case QUIC_FT_PADDING:
	case QUIC_FT_RESET_STREAM:
	case QUIC_FT_STOP_SENDING:
	case QUIC_FT_MAX_DATA:
	case QUIC_FT_MAX_STREAM_DATA:
	case QUIC_FT_MAX_STREAMS_BIDI:
	case QUIC_FT_MAX_STREAMS_UNI:
	case QUIC_FT_DATA_BLOCKED:
	case QUIC_FT_STREAM_DATA_BLOCKED:
	case QUIC_FT_STREAMS_BLOCKED_BIDI:
	case QUIC_FT_STREAMS_BLOCKED_UNI:
	case QUIC_FT_CONNECTION_CLOSE:
	case QUIC_FT_CONNECTION_CLOSE_APP:
		return 1;
	default:
		return 0;
	}
}
/* Parse the next frame in <buf> and handle it by the MUX layer.
 *
 * Returns the frame length on success. If frame is truncated, 0 is returned.
 * A negative error code is used for fatal failures (invalid frame type; the
 * buffer content is discarded in that case).
 */
static int qstrm_parse_frm(struct qcc *qcc, struct buffer *buf)
{
	struct quic_frame frm;
	const unsigned char *pos, *old, *end;
	int ret;

	/* Decode from the buffer head; <old> keeps the starting position so
	 * the consumed length can be computed on return.
	 */
	old = pos = (unsigned char *)b_head(buf);
	end = (unsigned char *)b_head(buf) + b_data(buf);

	/* Decode the frame type first. A zero return means not enough data
	 * was available: report a truncated frame to the caller.
	 */
	ret = qc_parse_frm_type(&frm, &pos, end, NULL);
	if (!ret)
		return 0;

	if (!qstrm_is_frm_valid(&frm)) {
		/* TODO close connection with FRAME_ENCODING_ERROR */
		b_reset(buf);
		return -1;
	}

	/* Decode the frame payload; zero also means truncated input. */
	ret = qc_parse_frm_payload(&frm, &pos, end, NULL);
	if (!ret)
		return 0;

	if (frm.type >= QUIC_FT_STREAM_8 &&
	    frm.type <= QUIC_FT_STREAM_F) {
		/* STREAM frame: forward payload to the stream layer. */
		struct qf_stream *strm_frm = &frm.stream;
		qcc_recv(qcc, strm_frm->id, strm_frm->len, strm_frm->offset,
		         (frm.type & QUIC_STREAM_FRAME_TYPE_FIN_BIT), (char *)strm_frm->data);
	}
	else if (frm.type == QUIC_FT_RESET_STREAM) {
		struct qf_reset_stream *rst_frm = &frm.reset_stream;
		qcc_recv_reset_stream(qcc, rst_frm->id, rst_frm->app_error_code, rst_frm->final_size);
	}
	else if (frm.type == QUIC_FT_MAX_DATA) {
		struct qf_max_data *md_frm = &frm.max_data;
		qcc_recv_max_data(qcc, md_frm->max_data);
	}
	else if (frm.type == QUIC_FT_MAX_STREAM_DATA) {
		struct qf_max_stream_data *msd_frm = &frm.max_stream_data;
		qcc_recv_max_stream_data(qcc, msd_frm->id, msd_frm->max_stream_data);
	}
	else {
		/* Type passed qstrm_is_frm_valid() but has no handler yet —
		 * presumably the remaining valid types are still TODO.
		 */
		ABORT_NOW();
	}

	return pos - old;
}
/* Perform data reception for <qcc> connection. Content is parsed as QMux
 * frames. These operations are performed in loop until read returns no data.
 *
 * Returns the total amount of read data. NOTE(review): fatal parsing or
 * transport errors are currently trapped via BUG_ON() (see TODOs below),
 * so no negative value is ever returned yet.
 */
int qcc_qstrm_recv(struct qcc *qcc)
{
	struct connection *conn = qcc->conn;
	struct buffer *buf = &qcc->rx.qstrm_buf;
	int total = 0, frm_ret;
	size_t ret;

	TRACE_ENTER(QMUX_EV_QCC_RECV, qcc->conn);

	do {
 recv:
		/* Wrapping is not supported for QMux reception. */
		BUG_ON(b_data(buf) != b_contig_data(buf, 0));

		/* Checks if there is no more room before wrapping position. */
		if (b_head(buf) + b_contig_data(buf, 0) == b_wrap(buf)) {
			if (!b_room(buf)) {
				/* TODO frame bigger than buffer, connection must be closed */
				ABORT_NOW();
			}

			/* Realign data in the buffer to have more room. */
			memmove(b_orig(buf), b_head(buf), b_data(buf));
			buf->head = 0;
		}
		else {
			/* Ensure maximum room is always available. */
			b_realign_if_empty(buf);
		}

		ret = conn->xprt->rcv_buf(conn, conn->xprt_ctx, buf, b_contig_space(buf), NULL, 0, 0);
		BUG_ON(conn->flags & CO_FL_ERROR);
		total += ret;

		/* Consume every complete frame currently in the buffer. */
		while (b_data(buf)) {
			frm_ret = qstrm_parse_frm(qcc, buf);
			BUG_ON(frm_ret < 0); /* TODO handle fatal errors */

			if (!frm_ret) {
				/* Checks if wrapping position is reached, requires realign. */
				if (b_head(buf) + b_contig_data(buf, 0) == b_wrap(buf))
					goto recv;

				/* Truncated frame read but room still left, subscribe to retry later. */
				break;
			}

			b_del(buf, frm_ret);
		}
	} while (ret > 0);

	/* No read0 seen yet: subscribe to be woken up on the next readable event. */
	if (!conn_xprt_read0_pending(qcc->conn)) {
		conn->xprt->subscribe(conn, conn->xprt_ctx, SUB_RETRY_RECV,
		                      &qcc->wait_event);
	}

	TRACE_LEAVE(QMUX_EV_QCC_RECV, qcc->conn);
	return total;

	/* Removed dead "err:" label: no goto in this function targeted it, so
	 * the "return -1" path was unreachable and only triggered
	 * -Wunused-label warnings.
	 */
}
/* Updates a <qcs> stream after a successful emission of data of length <data>. */
static void qstrm_ctrl_send(struct qcs *qcs, uint64_t data)
{
	struct qcc *qcc = qcs->qcc;
	struct quic_fctl *cfc = &qcc->tx.fc;
	struct quic_fctl *sfc = &qcs->tx.fc;

	TRACE_ENTER(QMUX_EV_QCS_SEND, qcc->conn, qcs);

	qcs_idle_open(qcs);

	/* Real offset must never overtake the soft offset, neither on the
	 * connection nor on the stream flow-control context.
	 */
	BUG_ON(cfc->off_real + data > cfc->off_soft);
	BUG_ON(sfc->off_real + data > sfc->off_soft);

	/* Account emitted bytes on the connection flow-control. */
	if (qfctl_rinc(cfc, data))
		TRACE_STATE("connection flow-control reached",
		            QMUX_EV_QCS_SEND, qcc->conn);

	/* Account emitted bytes on the stream flow-control. */
	if (qfctl_rinc(sfc, data))
		TRACE_STATE("stream flow-control reached",
		            QMUX_EV_QCS_SEND, qcc->conn, qcs);

	b_del(&qcs->tx.qstrm_buf, data);

	/* Wake up a stream blocked on buffer room once everything was sent. */
	if (!qcs_prep_bytes(qcs)) {
		if (qcs->flags & QC_SF_BLK_MROOM) {
			qcs->flags &= ~QC_SF_BLK_MROOM;
			qcs_notify_send(qcs);
		}
	}

	/* Add measurement for send rate. This is done at the MUX layer
	 * to account only for STREAM frames without retransmission.
	 */
	increment_send_rate(data, 0);

	if (!qcs_prep_bytes(qcs)) {
		/* Nothing left to send : detach the stream from send_list. */
		LIST_DEL_INIT(&qcs->el_send);
		TRACE_STATE("stream sent done", QMUX_EV_QCS_SEND, qcc->conn, qcs);

		if (qcs->flags & (QC_SF_FIN_STREAM|QC_SF_DETACH)) {
			/* Emission finished : close the stream on our side. */
			qcs_close_local(qcs);

			if (qcs->flags & QC_SF_FIN_STREAM) {
				/* Clear FIN so it cannot be emitted twice. */
				qcs->flags &= ~QC_SF_FIN_STREAM;
			}

			if (qcs_is_completed(qcs)) {
				TRACE_STATE("add stream in purg_list", QMUX_EV_QCS_SEND, qcc->conn, qcs);
				LIST_APPEND(&qcc->purg_list, &qcs->el_send);
			}
		}
	}

	TRACE_LEAVE(QMUX_EV_QCS_SEND, qcc->conn, qcs);
}
/* Sends <frms> list of frames for <qcc> connection.
 *
 * Returns 0 if all data are emitted or a positive value if sending should be
 * retry later. A negative error code is used for a fatal failure.
 */
int qcc_qstrm_send_frames(struct qcc *qcc, struct list *frms)
{
	struct connection *conn = qcc->conn;
	struct quic_frame *frm, *frm_old;
	struct quic_frame *split_frm, *next_frm;
	struct buffer *buf = &qcc->tx.qstrm_buf;
	unsigned char *pos, *old, *end;
	size_t ret;

	TRACE_ENTER(QMUX_EV_QCC_SEND, qcc->conn);

	/* First flush data left over from a previously interrupted emission. */
	if (b_data(buf)) {
		ret = conn->xprt->snd_buf(conn, conn->xprt_ctx, buf, b_data(buf), NULL, 0, 0);
		if (!ret) {
			TRACE_DEVEL("snd_buf interrupted", QMUX_EV_QCC_SEND, qcc->conn);
			goto out;
		}

		if (ret != b_data(buf)) {
			/* TODO */
			ABORT_NOW();
		}
	}
	b_reset(buf);

	list_for_each_entry_safe(frm, frm_old, frms, list) {
 loop:
		split_frm = next_frm = NULL;

		/* Encode a single frame per iteration, from the buffer origin. */
		b_reset(buf);
		old = pos = (unsigned char *)b_orig(buf);
		end = (unsigned char *)b_wrap(buf);

		BUG_ON(!frm);
		TRACE_PRINTF(TRACE_LEVEL_DEVELOPER, QMUX_EV_QCC_SEND, qcc->conn, 0, 0, 0,
		             "frm type %02llx", (ullong)frm->type);

		if (frm->type >= QUIC_FT_STREAM_8 && frm->type <= QUIC_FT_STREAM_F) {
			size_t flen, split_size;

			/* Compute how much STREAM payload fits in the buffer. */
			flen = quic_strm_frm_fillbuf(end - pos, frm, &split_size);
			if (!flen)
				continue;

			if (split_size) {
				/* Frame larger than buffer room : emit a first
				 * chunk now and keep the remainder as <next_frm>
				 * for the following iteration.
				 */
				split_frm = quic_strm_frm_split(frm, split_size);
				if (!split_frm) {
					ABORT_NOW();
					continue;
				}
				next_frm = frm;
				frm = split_frm;
			}
		}

		qc_build_frm(frm, &pos, end, NULL);
		BUG_ON(pos - old > global.tune.bufsize);
		BUG_ON(pos == old);
		b_add(buf, pos - old);

		ret = conn->xprt->snd_buf(conn, conn->xprt_ctx, buf, b_data(buf), NULL, 0, 0);
		if (!ret) {
			TRACE_DEVEL("snd_buf interrupted", QMUX_EV_QCC_SEND, qcc->conn);
			/* Requeue the split chunk so it is retried later. */
			if (split_frm)
				LIST_INSERT(frms, &split_frm->list);
			break;
		}

		if (ret != b_data(buf)) {
			/* TODO */
			ABORT_NOW();
		}

		/* Update flow-control accounting for emitted STREAM data. */
		if (frm->type >= QUIC_FT_STREAM_8 && frm->type <= QUIC_FT_STREAM_F)
			qstrm_ctrl_send(frm->stream.stream, frm->stream.len);

		LIST_DEL_INIT(&frm->list);
		if (split_frm) {
			/* Loop again on the remainder of the split frame. */
			frm = next_frm;
			goto loop;
		}
	}

 out:
	if (conn->flags & CO_FL_ERROR) {
		/* TODO */
		//ABORT_NOW();
	}
	else if (!LIST_ISEMPTY(frms) && !(qcc->wait_event.events & SUB_RETRY_SEND)) {
		/* Frames remain : subscribe to retry once sending is possible. */
		conn->xprt->subscribe(conn, conn->xprt_ctx, SUB_RETRY_SEND, &qcc->wait_event);
		return 1;
	}

	TRACE_LEAVE(QMUX_EV_QCC_SEND, qcc->conn);
	return 0;
}

View File

@ -776,7 +776,7 @@ static int sample_conv_ip_fp(const struct arg *arg_p, struct sample *smp, void *
/* kind1 = NOP and is a single byte, others have a length field */
if (smp->data.u.str.area[ofs] == 1)
next = ofs + 1;
else if (ofs + 1 < tcplen)
else if (ofs + 1 <= tcplen)
next = ofs + smp->data.u.str.area[ofs + 1];
else
break;
@ -790,10 +790,10 @@ static int sample_conv_ip_fp(const struct arg *arg_p, struct sample *smp, void *
if (mode & 2) // mode & 2: append tcp.options_list
trash->area[trash->data++] = opt;
if (opt == 2 && (ofs + 3 < tcplen) /* MSS value starts at ofs + 2 and is 2 Bytes long */) {
if (opt == 2 /* MSS */) {
tcpmss = read_n16(smp->data.u.str.area + ofs + 2);
}
else if (opt == 3 && (ofs + 2 < tcplen) /* WS value 1 Byte is at ofs + 2 */) {
else if (opt == 3 /* WS */) {
tcpws = (uchar)smp->data.u.str.area[ofs + 2];
/* output from 1 to 15, thus 0=not found */
tcpws = tcpws > 14 ? 15 : tcpws + 1;
@ -813,7 +813,7 @@ static int sample_conv_ip_fp(const struct arg *arg_p, struct sample *smp, void *
write_n16(trash->area + 3, tcpwin);
write_n16(trash->area + 5, tcpmss);
/* then the bit mask of present options */
/* the the bit mask of present options */
trash->area[7] = opts;
/* mode 4: append source IP address */

View File

@ -1577,9 +1577,7 @@ void init_new_proxy(struct proxy *p)
LIST_INIT(&p->conf.args.list);
LIST_INIT(&p->conf.lf_checks);
LIST_INIT(&p->filter_configs);
LIST_INIT(&p->tcpcheck.preset_vars);
LIST_INIT(&p->filter_sequence.req);
LIST_INIT(&p->filter_sequence.res);
LIST_INIT(&p->tcpcheck_rules.preset_vars);
MT_LIST_INIT(&p->lbprm.lb_free_list);
@ -1678,17 +1676,11 @@ int proxy_finalize(struct proxy *px, int *err_code)
}
if (bind_conf->mux_proto) {
int is_quic;
if ((bind_conf->options & (BC_O_USE_SOCK_DGRAM | BC_O_USE_XPRT_STREAM)) == (BC_O_USE_SOCK_DGRAM | BC_O_USE_XPRT_STREAM))
is_quic = 1;
else
is_quic = 0;
/* it is possible that an incorrect mux was referenced
* due to the proxy's mode not being taken into account
* on first pass. Let's adjust it now.
*/
mux_ent = conn_get_best_mux_entry(bind_conf->mux_proto->token, PROTO_SIDE_FE, is_quic, mode);
mux_ent = conn_get_best_mux_entry(bind_conf->mux_proto->token, PROTO_SIDE_FE, mode);
if (!mux_ent || !isteq(mux_ent->token, bind_conf->mux_proto->token)) {
ha_alert("%s '%s' : MUX protocol '%.*s' is not usable for 'bind %s' at [%s:%d].\n",
@ -1878,12 +1870,28 @@ int proxy_finalize(struct proxy *px, int *err_code)
else if (px->options & PR_O_TRANSP)
px->options &= ~PR_O_DISPATCH;
if ((px->tcpcheck.flags & TCPCHK_FL_UNUSED_HTTP_RS)) {
if ((px->tcpcheck_rules.flags & TCPCHK_RULES_UNUSED_HTTP_RS)) {
ha_warning("%s '%s' uses http-check rules without 'option httpchk', so the rules are ignored.\n",
proxy_type_str(px), px->id);
*err_code |= ERR_WARN;
}
if ((px->options2 & PR_O2_CHK_ANY) == PR_O2_TCPCHK_CHK &&
(px->tcpcheck_rules.flags & TCPCHK_RULES_PROTO_CHK) != TCPCHK_RULES_HTTP_CHK) {
if (px->options & PR_O_DISABLE404) {
ha_warning("'%s' will be ignored for %s '%s' (requires 'option httpchk').\n",
"disable-on-404", proxy_type_str(px), px->id);
*err_code |= ERR_WARN;
px->options &= ~PR_O_DISABLE404;
}
if (px->options2 & PR_O2_CHK_SNDST) {
ha_warning("'%s' will be ignored for %s '%s' (requires 'option httpchk').\n",
"send-state", proxy_type_str(px), px->id);
*err_code |= ERR_WARN;
px->options2 &= ~PR_O2_CHK_SNDST;
}
}
if ((px->options2 & PR_O2_CHK_ANY) == PR_O2_EXT_CHK) {
if (!global.external_check) {
ha_alert("Proxy '%s' : '%s' unable to find required 'global.external-check'.\n",
@ -2429,7 +2437,7 @@ int proxy_finalize(struct proxy *px, int *err_code)
if ((px->cap & PR_CAP_BE) && !px->timeout.queue)
px->timeout.queue = px->timeout.connect;
if (px->tcpcheck.flags & TCPCHK_FL_UNUSED_TCP_RS) {
if ((px->tcpcheck_rules.flags & TCPCHK_RULES_UNUSED_TCP_RS)) {
ha_warning("%s '%s' uses tcp-check rules without 'option tcp-check', so the rules are ignored.\n",
proxy_type_str(px), px->id);
*err_code |= ERR_WARN;
@ -2539,16 +2547,12 @@ int proxy_finalize(struct proxy *px, int *err_code)
srv_minmax_conn_apply(newsrv);
*err_code |= check_server_tcpcheck(newsrv);
if (*err_code & (ERR_ABORT|ERR_FATAL))
goto out;
/* this will also properly set the transport layer for
* prod and checks
* if default-server have use_ssl, prerare ssl init
* without activating it */
if (newsrv->use_ssl == 1 || newsrv->check.use_ssl == 1 ||
(newsrv->check.tcpcheck->flags & TCPCHK_FL_USE_SSL) ||
(newsrv->proxy->options & PR_O_TCPCHK_SSL) ||
((newsrv->flags & SRV_F_DEFSRV_USE_SSL) && newsrv->use_ssl != 1)) {
if (xprt_get(XPRT_SSL) && xprt_get(XPRT_SSL)->prepare_srv)
cfgerr += xprt_get(XPRT_SSL)->prepare_srv(newsrv);
@ -2668,8 +2672,6 @@ int proxy_finalize(struct proxy *px, int *err_code)
*err_code |= ERR_WARN;
}
*err_code |= proxy_check_http_errors(px);
if (px->mode != PR_MODE_HTTP && !(px->options & PR_O_HTTP_UPG)) {
int optnum;
@ -2875,7 +2877,7 @@ int proxy_finalize(struct proxy *px, int *err_code)
* due to the proxy's mode not being taken into account
* on first pass. Let's adjust it now.
*/
mux_ent = conn_get_best_mux_entry(newsrv->mux_proto->token, PROTO_SIDE_BE, srv_is_quic(newsrv), mode);
mux_ent = conn_get_best_mux_entry(newsrv->mux_proto->token, PROTO_SIDE_BE, mode);
if (!mux_ent || !isteq(mux_ent->token, newsrv->mux_proto->token)) {
ha_alert("%s '%s' : MUX protocol '%.*s' is not usable for server '%s' at [%s:%d].\n",
@ -2914,6 +2916,7 @@ int proxy_finalize(struct proxy *px, int *err_code)
if (px->cap & PR_CAP_BE) {
if (!(px->options2 & PR_O2_CHK_ANY)) {
struct tcpcheck_ruleset *rs = NULL;
struct tcpcheck_rules *rules = &px->tcpcheck_rules;
px->options2 |= PR_O2_TCPCHK_CHK;
@ -2926,8 +2929,10 @@ int proxy_finalize(struct proxy *px, int *err_code)
cfgerr++;
}
}
px->tcpcheck.rs = rs;
free_tcpcheck_vars(&px->tcpcheck.preset_vars);
free_tcpcheck_vars(&rules->preset_vars);
rules->list = &rs->rules;
rules->flags = 0;
}
}
@ -3160,7 +3165,7 @@ int proxy_ref_defaults(struct proxy *px, struct proxy *defpx, char **errmsg)
defaults_px_ref(defpx, px);
}
if (defpx->tcpcheck.rs && (defpx->tcpcheck.rs->flags & TCPCHK_RULES_PROTO_CHK) &&
if ((defpx->tcpcheck_rules.flags & TCPCHK_RULES_PROTO_CHK) &&
(px->cap & PR_CAP_LISTEN) == PR_CAP_BE) {
/* If the current default proxy defines tcpcheck rules, the
* current proxy will keep a reference on it. but only if the
@ -3398,12 +3403,14 @@ static int proxy_defproxy_cpy(struct proxy *curproxy, const struct proxy *defpro
curproxy->redispatch_after = defproxy->redispatch_after;
curproxy->max_ka_queue = defproxy->max_ka_queue;
curproxy->tcpcheck.flags = (defproxy->tcpcheck.flags & ~TCPCHK_FL_UNUSED_RS);
curproxy->tcpcheck.rs = defproxy->tcpcheck.rs;
if (!dup_tcpcheck_vars(&curproxy->tcpcheck.preset_vars,
&defproxy->tcpcheck.preset_vars)) {
memprintf(errmsg, "proxy '%s': failed to duplicate tcpcheck preset-vars", curproxy->id);
return 1;
curproxy->tcpcheck_rules.flags = (defproxy->tcpcheck_rules.flags & ~TCPCHK_RULES_UNUSED_RS);
curproxy->tcpcheck_rules.list = defproxy->tcpcheck_rules.list;
if (!LIST_ISEMPTY(&defproxy->tcpcheck_rules.preset_vars)) {
if (!dup_tcpcheck_vars(&curproxy->tcpcheck_rules.preset_vars,
&defproxy->tcpcheck_rules.preset_vars)) {
memprintf(errmsg, "proxy '%s': failed to duplicate tcpcheck preset-vars", curproxy->id);
return 1;
}
}
curproxy->ck_opts = defproxy->ck_opts;
@ -4901,7 +4908,7 @@ static int cli_parse_add_backend(char **args, char *payload, struct appctx *appc
return 1;
}
if (!(defpx->flags & PR_FL_DEF_EXPLICIT_MODE) && !mode) {
cli_dynerr(appctx, memprintf(&msg, "Mode is required as '%s' default proxy does not explicitly defines it.\n", def_name));
cli_dynerr(appctx, memprintf(&msg, "Mode is required as '%s' default proxy does not explicitely defines it.\n", def_name));
return 1;
}
if (defpx->mode != PR_MODE_TCP && defpx->mode != PR_MODE_HTTP) {
@ -4909,6 +4916,10 @@ static int cli_parse_add_backend(char **args, char *payload, struct appctx *appc
def_name, proxy_mode_str(defpx->mode)));
return 1;
}
if (!LIST_ISEMPTY(&defpx->conf.errors)) {
cli_dynerr(appctx, memprintf(&msg, "Dynamic backends cannot inherit from default proxy '%s' because it references HTTP errors.\n", def_name));
return 1;
}
thread_isolate();

View File

@ -44,7 +44,7 @@ size_t qcs_http_rcv_buf(struct qcs *qcs, struct buffer *buf, size_t count,
goto end;
}
htx_xfer(cs_htx, qcs_htx, count, HTX_XFER_DEFAULT);
htx_xfer_blks(cs_htx, qcs_htx, count, HTX_BLK_UNUSED);
BUG_ON(qcs_htx->flags & HTX_FL_PARSING_ERROR);
/* Copy EOM from src to dst buffer if all data copied. */

View File

@ -143,19 +143,18 @@ static char *qcc_app_st_to_str(const enum qcc_app_st st)
void qmux_dump_qcc_info(struct buffer *msg, const struct qcc *qcc)
{
const struct quic_conn *qc = conn_is_quic(qcc->conn) ? qcc->conn->handle.qc : NULL;
const struct quic_conn *qc = qcc->conn->handle.qc;
chunk_appendf(msg, " qcc=%p(%c)", qcc, (qcc->flags & QC_CF_IS_BACK) ? 'B' : 'F');
if (qc)
if (qcc->conn->handle.qc)
chunk_appendf(msg, " qc=%p", qcc->conn->handle.qc);
chunk_appendf(msg, " .st=%s .sc=%llu .hreq=%llu .flg=0x%04x",
qcc_app_st_to_str(qcc->app_st), (ullong)qcc->nb_sc,
(ullong)qcc->nb_hreq, qcc->flags);
chunk_appendf(msg, " .tx=%llu %llu/%llu",
(ullong)qcc->tx.fc.off_soft, (ullong)qcc->tx.fc.off_real, (ullong)qcc->tx.fc.limit);
if (qc)
chunk_appendf(msg, " bwnd=%llu/%llu", (ullong)qcc->tx.buf_in_flight, (ullong)qc->path->cwnd);
chunk_appendf(msg, " .tx=%llu %llu/%llu bwnd=%llu/%llu",
(ullong)qcc->tx.fc.off_soft, (ullong)qcc->tx.fc.off_real, (ullong)qcc->tx.fc.limit,
(ullong)qcc->tx.buf_in_flight, (ullong)qc->path->cwnd);
}
void qmux_dump_qcs_info(struct buffer *msg, const struct qcs *qcs)
@ -170,8 +169,8 @@ void qmux_dump_qcs_info(struct buffer *msg, const struct qcs *qcs)
(ullong)qcs->tx.fc.off_real,
(ullong)qcs->tx.fc.limit);
if (conn_is_quic(qcs->qcc->conn) && qcs->tx.stream)
bdata_ctr_print(msg, &qcs->tx.stream->data, " buf=");
if (qcs->stream)
bdata_ctr_print(msg, &qcs->stream->data, " buf=");
chunk_appendf(msg, " .ti=%u/%u/%u",
tot_time_read(&qcs->timer.base),

View File

@ -61,10 +61,10 @@
static uint64_t qpack_get_varint(const unsigned char **buf, uint64_t *len_in, int b)
{
uint64_t ret = 0;
uint64_t len = *len_in;
int len = *len_in;
const uint8_t *raw = *buf;
uint64_t v, limit = (1ULL << 62) - 1;
int shift = 0;
uint64_t v, max = ~0;
uint8_t shift = 0;
if (len == 0)
goto too_short;
@ -77,26 +77,24 @@ static uint64_t qpack_get_varint(const unsigned char **buf, uint64_t *len_in, in
do {
if (!len)
goto too_short;
v = *raw++;
len--;
/* This check is sufficient to prevent any overflow
* and implicitly limits shift to 63.
*/
if ((v & 127) > (limit - ret) >> shift)
goto too_large;
ret += (v & 127) << shift;
if (v & 127) { // make UBSan happy
if ((v & 127) > max)
goto too_large;
ret += (v & 127) << shift;
}
max >>= 7;
shift += 7;
} while (v & 128);
end:
end:
*buf = raw;
*len_in = len;
return ret;
too_large:
too_short:
too_large:
too_short:
*len_in = (uint64_t)-1;
return 0;
}
@ -404,10 +402,7 @@ int qpack_decode_fs(const unsigned char *raw, uint64_t len, struct buffer *tmp,
n = efl_type & 0x20;
static_tbl = efl_type & 0x10;
index = qpack_get_varint(&raw, &len, 4);
/* There must be at least one byte available for <h> value after this
* decoding before the next call to qpack_get_varint().
*/
if ((int64_t)len <= 0) {
if (len == (uint64_t)-1) {
qpack_debug_printf(stderr, "##ERR@%d\n", __LINE__);
ret = -QPACK_RET_TRUNCATED;
goto out;
@ -479,10 +474,7 @@ int qpack_decode_fs(const unsigned char *raw, uint64_t len, struct buffer *tmp,
n = *raw & 0x10;
hname = *raw & 0x08;
name_len = qpack_get_varint(&raw, &len, 3);
/* There must be at least one byte available for <hvalue> after this
* decoding before the next call to qpack_get_varint().
*/
if ((int64_t)len < (int64_t)name_len + 1) {
if (len == (uint64_t)-1 || len < name_len) {
qpack_debug_printf(stderr, "##ERR@%d\n", __LINE__);
ret = -QPACK_RET_TRUNCATED;
goto out;

Some files were not shown because too many files have changed in this diff Show More