Compare commits


No commits in common. "master" and "v3.3-dev14" have entirely different histories.

348 changed files with 4653 additions and 10558 deletions


@ -19,7 +19,7 @@ defaults
frontend h2
mode http
bind 127.0.0.1:8443 ssl crt reg-tests/ssl/certs/common.pem alpn h2,http/1.1
bind 127.0.0.1:8443 ssl crt reg-tests/ssl/common.pem alpn h2,http/1.1
default_backend h2b
backend h2b


@ -28,7 +28,7 @@ jobs:
run: env SSL_LIB=${HOME}/opt/ scripts/build-curl.sh
- name: Compile HAProxy
run: |
make -j$(nproc) CC=gcc TARGET=linux-glibc \
make -j$(nproc) ERR=1 CC=gcc TARGET=linux-glibc \
USE_QUIC=1 USE_OPENSSL=1 USE_ECH=1 \
SSL_LIB=${HOME}/opt/lib SSL_INC=${HOME}/opt/include \
DEBUG="-DDEBUG_POOL_INTEGRITY -DDEBUG_UNIT" \


@ -1,77 +0,0 @@
name: openssl master
on:
schedule:
- cron: "0 3 * * *"
workflow_dispatch:
permissions:
contents: read
jobs:
test:
runs-on: ubuntu-latest
if: ${{ github.repository_owner == 'haproxy' || github.event_name == 'workflow_dispatch' }}
steps:
- uses: actions/checkout@v5
- name: Install apt dependencies
run: |
sudo apt-get update -o Acquire::Languages=none -o Acquire::Translation=none
sudo apt-get --no-install-recommends -y install socat gdb
sudo apt-get --no-install-recommends -y install libpsl-dev
- uses: ./.github/actions/setup-vtest
- name: Install OpenSSL master
run: env OPENSSL_VERSION="git-master" GIT_TYPE="branch" scripts/build-ssl.sh
- name: Compile HAProxy
run: |
make -j$(nproc) ERR=1 CC=gcc TARGET=linux-glibc \
USE_QUIC=1 USE_OPENSSL=1 \
SSL_LIB=${HOME}/opt/lib SSL_INC=${HOME}/opt/include \
DEBUG="-DDEBUG_POOL_INTEGRITY -DDEBUG_UNIT" \
ADDLIB="-Wl,-rpath,/usr/local/lib/ -Wl,-rpath,$HOME/opt/lib/"
sudo make install
- name: Show HAProxy version
id: show-version
run: |
ldd $(which haproxy)
haproxy -vv
echo "version=$(haproxy -v |awk 'NR==1{print $3}')" >> $GITHUB_OUTPUT
- name: Install problem matcher for VTest
run: echo "::add-matcher::.github/vtest.json"
- name: Run VTest for HAProxy
id: vtest
run: |
# This is required for macOS which does not actually allow to increase
# the '-n' soft limit to the hard limit, thus failing to run.
ulimit -n 65536
# allow to catch coredumps
ulimit -c unlimited
make reg-tests VTEST_PROGRAM=../vtest/vtest REGTESTS_TYPES=default,bug,devel
- name: Show VTest results
if: ${{ failure() && steps.vtest.outcome == 'failure' }}
run: |
for folder in ${TMPDIR:-/tmp}/haregtests-*/vtc.*; do
printf "::group::"
cat $folder/INFO
cat $folder/LOG
echo "::endgroup::"
done
exit 1
- name: Run Unit tests
id: unittests
run: |
make unit-tests
- name: Show coredumps
if: ${{ failure() && steps.vtest.outcome == 'failure' }}
run: |
failed=false
shopt -s nullglob
for file in /tmp/core.*; do
failed=true
printf "::group::"
gdb -ex 'thread apply all bt full' ./haproxy $file
echo "::endgroup::"
done
if [ "$failed" = true ]; then
exit 1;
fi


@ -0,0 +1,32 @@
#
# special purpose CI: test against OpenSSL built in "no-deprecated" mode
# let us run those builds weekly
#
# for example, OpenWRT uses such OpenSSL builds (those builds are smaller)
#
#
# some details might be found at ML: https://www.mail-archive.com/haproxy@formilux.org/msg35759.html
# GH: https://github.com/haproxy/haproxy/issues/367
name: openssl no-deprecated
on:
schedule:
- cron: "0 0 * * 4"
workflow_dispatch:
permissions:
contents: read
jobs:
test:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v5
- uses: ./.github/actions/setup-vtest
- name: Compile HAProxy
run: |
make DEFINE="-DOPENSSL_API_COMPAT=0x10100000L -DOPENSSL_NO_DEPRECATED" -j3 CC=gcc ERR=1 TARGET=linux-glibc USE_OPENSSL=1
- name: Run VTest
run: |
make reg-tests VTEST_PROGRAM=../vtest/vtest REGTESTS_TYPES=default,bug,devel

CHANGELOG

@ -1,181 +1,6 @@
ChangeLog :
===========
2026/01/07 : 3.4-dev2
- BUG/MEDIUM: mworker/listener: ambiguous use of RX_F_INHERITED with shards
- BUG/MEDIUM: http-ana: Properly detect client abort when forwarding response (v2)
- BUG/MEDIUM: stconn: Don't report abort from SC if read0 was already received
- BUG/MEDIUM: quic: Don't try to use hystart if not implemented
- CLEANUP: backend: Remove useless test on server's xprt
- CLEANUP: tcpcheck: Remove useless test on the xprt used for healthchecks
- CLEANUP: ssl-sock: Remove useless tests on connection when resuming TLS session
- REGTESTS: quic: fix a TLS stack usage
- REGTESTS: list all skipped tests including 'feature cmd' ones
- CI: github: remove openssl no-deprecated job
- CI: github: add a job to test the master branch of OpenSSL
- CI: github: openssl-master.yml misses actions/checkout
- BUG/MEDIUM: backend: Do not remove CO_FL_SESS_IDLE in assign_server()
- CI: github: use git prefix for openssl-master.yml
- BUG/MEDIUM: mux-h2: synchronize all conditions to create a new backend stream
- REGTESTS: fix error when no test are skipped
- MINOR: cpu-topo: Turn the cpu policy configuration into a struct
- MEDIUM: cpu-topo: Add a "threads-per-core" keyword to cpu-policy
- MEDIUM: cpu-topo: Add a "cpu-affinity" option
- MEDIUM: cpu-topo: Add a new "max-threads-per-group" global keyword
- MEDIUM: cpu-topo: Add the "per-thread" cpu_affinity
- MEDIUM: cpu-topo: Add the "per-ccx" cpu_affinity
- BUG/MINOR: cpu-topo: fix -Wlogical-not-parentheses build with clang
- DOC: config: fix number of values for "cpu-affinity"
- MINOR: tools: add a secure implementation of memset
- MINOR: mux-h2: add missing glitch count for non-decodable H2 headers
- MINOR: mux-h2: perform a graceful close at 75% glitches threshold
- MEDIUM: mux-h1: implement basic glitches support
- MINOR: mux-h1: perform a graceful close at 75% glitches threshold
- MEDIUM: cfgparse: acknowledge that proxy ID auto numbering starts at 2
- MINOR: cfgparse: remove useless checks on no server in backend
- OPTIM/MINOR: proxy: do not init proxy management task if unused
- MINOR: patterns: preliminary changes for reorganization
- MEDIUM: patterns: reorganize pattern reference elements
- CLEANUP: patterns: remove dead code
- OPTIM: patterns: cache the current generation
- MINOR: tcp: add new bind option "tcp-ss" to instruct the kernel to save the SYN
- MINOR: protocol: support a generic way to call getsockopt() on a connection
- MINOR: tcp: implement the get_opt() function
- MINOR: tcp_sample: implement the fc_saved_syn sample fetch function
- CLEANUP: assorted typo fixes in the code, commits and doc
- BUG/MEDIUM: cpu-topo: Don't forget to reset visited_ccx.
- BUG/MAJOR: set the correct generation ID in pat_ref_append().
- BUG/MINOR: backend: fix the conn_retries check for TFO
- BUG/MINOR: backend: inspect request not response buffer to check for TFO
- MINOR: net_helper: add sample converters to decode ethernet frames
- MINOR: net_helper: add sample converters to decode IP packet headers
- MINOR: net_helper: add sample converters to decode TCP headers
- MINOR: net_helper: add ip.fp() to build a simplified fingerprint of a SYN
- MINOR: net_helper: prepare the ip.fp() converter to support more options
- MINOR: net_helper: add an option to ip.fp() to append the TTL to the fingerprint
- MINOR: net_helper: add an option to ip.fp() to append the source address
- DOC: config: fix the length attribute name for stick tables of type binary / string
- MINOR: mworker/cli: only keep positive PIDs in proc_list
- CLEANUP: mworker: remove duplicate list.h include
- BUG/MINOR: mworker/cli: fix show proc pagination using reload counter
- MINOR: mworker/cli: extract worker "show proc" row printer
- MINOR: cpu-topo: Factorize code
- MINOR: cpu-topo: Rename variables to better fit their usage
- BUG/MEDIUM: peers: Properly handle shutdown when trying to get a line
- BUG/MEDIUM: mux-h1: Take care to update <kop> value during zero-copy forwarding
- MINOR: threads: Avoid using a thread group mask when stopping.
- MINOR: hlua: Add support for lua 5.5
- MEDIUM: cpu-topo: Add an optional directive for per-group affinity
- BUG/MEDIUM: mworker: can't use signals after a failed reload
- BUG/MEDIUM: stconn: Move data from <kip> to <kop> during zero-copy forwarding
- DOC: config: fix a few typos and refine cpu-affinity
- MINOR: receiver: Remove tgroup_mask from struct shard_info
- BUG/MINOR: quic: fix deprecated warning for window size keyword
2025/12/10 : 3.4-dev1
- BUG/MINOR: jwt: Missing "case" in switch statement
- DOC: configuration: ECH support details
- Revert "MINOR: quic: use dynamic cc_algo on bind_conf"
- MINOR: quic: define quic_cc_algo as const
- MINOR: quic: extract cc-algo parsing in a dedicated function
- MINOR: quic: implement cc-algo server keyword
- BUG/MINOR: quic-be: Missing keywords array NULL termination
- REGTESTS: ssl enable tls12_reuse.vtc for AWS-LC
- REGTESTS: ssl: split tls*_reuse in stateless and stateful resume tests
- BUG/MEDIUM: connection: fix "bc_settings_streams_limit" typo
- BUG/MEDIUM: config: ignore empty args in skipped blocks
- DOC: config: mention clearer that the cache's total-max-size is mandatory
- DOC: config: reorder the cache section's keywords
- BUG/MINOR: quic/ssl: crash in ClientHello callback ssl traces
- BUG/MINOR: quic-be: handshake errors without connection stream closure
- MINOR: quic: Add useful debugging traces in qc_idle_timer_do_rearm()
- REGTESTS: ssl: Move all the SSL certificates, keys, crt-lists inside "certs" directory
- REGTESTS: quic/ssl: ssl/del_ssl_crt-list.vtc supported by QUIC
- REGTESTS: quic: dynamic_server_ssl.vtc supported by QUIC
- REGTESTS: quic: issuers_chain_path.vtc supported by QUIC
- REGTESTS: quic: new_del_ssl_cafile.vtc supported by QUIC
- REGTESTS: quic: ocsp_auto_update.vtc supported by QUIC
- REGTESTS: quic: set_ssl_bug_2265.vtc supported by QUIC
- MINOR: quic: avoid code duplication in TLS alert callback
- BUG/MINOR: quic-be: missing connection stream closure upon TLS alert to send
- REGTESTS: quic: set_ssl_cafile.vtc supported by QUIC
- REGTESTS: quic: set_ssl_cert_noext.vtc supported by QUIC
- REGTESTS: quic: set_ssl_cert.vtc supported by QUIC
- REGTESTS: quic: set_ssl_crlfile.vtc supported by QUIC
- REGTESTS: quic: set_ssl_server_cert.vtc supported by QUIC
- REGTESTS: quic: show_ssl_ocspresponse.vtc supported by QUIC
- REGTESTS: quic: ssl_client_auth.vtc supported by QUIC
- REGTESTS: quic: ssl_client_samples.vtc supported by QUIC
- REGTESTS: quic: ssl_default_server.vtc supported by QUIC
- REGTESTS: quic: new_del_ssl_crlfile.vtc supported by QUIC
- REGTESTS: quic: ssl_frontend_samples.vtc supported by QUIC
- REGTESTS: quic: ssl_server_samples.vtc supported by QUIC
- REGTESTS: quic: ssl_simple_crt-list.vtc supported by QUIC
- REGTESTS: quic: ssl_sni_auto.vtc code provision for QUIC
- REGTESTS: quic: ssl_curve_name.vtc supported by QUIC
- REGTESTS: quic: add_ssl_crt-list.vtc supported by QUIC
- REGTESTS: add ssl_ciphersuites.vtc (TCP & QUIC)
- BUG/MINOR: quic: do not set first the default QUIC curves
- REGTESTS: quic/ssl: Add ssl_curves_selection.vtc
- BUG/MINOR: ssl: Don't allow to set NULL sni
- MEDIUM: quic: Add connection as argument when qc_new_conn() is called
- MINOR: ssl: Add a function to hash SNIs
- MINOR: ssl: Store hash of the SNI for cached TLS sessions
- MINOR: ssl: Compare hashes instead of SNIs when a session is cached
- MINOR: connection/ssl: Store the SNI hash value in the connection itself
- MEDIUM: tcpcheck/backend: Get the connection SNI before initializing SSL ctx
- BUG/MEDIUM: ssl: Don't reuse TLS session if the connection's SNI differs
- MEDIUM: ssl/server: No longer store the SNI of cached TLS sessions
- BUG/MINOR: log: Dump good %B and %U values in logs
- BUG/MEDIUM: http-ana: Don't close server connection on read0 in TUNNEL mode
- DOC: config: Fix description of the spop mode
- DOC: config: Improve spop mode documentation
- MINOR: ssl: Split ssl_crt-list_filters.vtc in two files by TLS version
- REGTESTS: quic: tls13_ssl_crt-list_filters.vtc supported by QUIC
- BUG/MEDIUM: h3: do not access QCS <sd> if not allocated
- CLEANUP: mworker/cli: remove useless variable
- BUG/MINOR: mworker/cli: 'show proc' is limited by buffer size
- BUG/MEDIUM: ssl: Always check the ALPN after handshake
- MINOR: connections: Add a new CO_FL_SSL_NO_CACHED_INFO flag
- BUG/MEDIUM: ssl: Don't store the ALPN for check connections
- BUG/MEDIUM: ssl: Don't resume session for check connections
- CLEANUP: improvements to the alignment macros
- CLEANUP: use the automatic alignment feature
- CLEANUP: more conversions and cleanups for alignment
- BUG/MEDIUM: h3: fix access to QCS <sd> definitely
- MINOR: h2/trace: emit a trace of the received RST_STREAM type
2025/11/26 : 3.4-dev0
- MINOR: version: mention that it's development again
2025/11/26 : 3.3.0
- BUG/MINOR: acme: better challenge_ready processing
- BUG/MINOR: acme: warning ctx may be used uninitialized
- MINOR: httpclient: complete the https log
- BUG/MEDIUM: server: do not use default SNI if manually set
- BUG/MINOR: freq_ctr: Prevent possible signed overflow in freq_ctr_overshoot_period
- DOC: ssl: Document the restrictions on 0RTT.
- DOC: ssl: Note that 0rtt works fork QUIC with QuicTLS too.
- BUG/MEDIUM: quic: do not prevent sending if no BE token
- BUG/MINOR: quic/server: free quic_retry_token on srv drop
- MINOR: quic: split global CID tree between FE and BE sides
- MINOR: quic: use separate global quic_conns FE/BE lists
- MINOR: quic: add "clo" filter on show quic
- MINOR: quic: dump backend connections on show quic
- MINOR: quic: mark backend conns on show quic
- BUG/MINOR: quic: fix uninit list on show quic handler
- BUG/MINOR: quic: release BE quic_conn on connect failure
- BUG/MINOR: server: fix srv_drop() crash on partially init srv
- BUG/MINOR: h3: do no crash on forwarding multiple chained response
- BUG/MINOR: h3: handle properly buf alloc failure on response forwarding
- BUG/MEDIUM: server/ssl: Unset the SNI for new server connections if none is set
- BUG/MINOR: acme: fix ha_alert() call
- Revert "BUG/MEDIUM: server/ssl: Unset the SNI for new server connections if none is set"
- BUG/MINOR: sock-inet: ignore conntrack for transparent sockets on Linux
- DEV: patchbot: prepare for new version 3.4-dev
- DOC: update INSTALL with the range of gcc compilers and openssl versions
- MINOR: version: mention that 3.3 is stable now
2025/11/21 : 3.3-dev14
- MINOR: stick-tables: Rename stksess shards to use buckets
- MINOR: quic: do not use quic_newcid_from_hash64 on BE side

INSTALL

@ -111,7 +111,7 @@ HAProxy requires a working GCC or Clang toolchain and GNU make :
may want to retry with "gmake" which is the name commonly used for GNU make
on BSD systems.
- GCC >= 4.7 (up to 15 tested). Older versions are no longer supported due to
- GCC >= 4.7 (up to 14 tested). Older versions are no longer supported due to
the latest mt_list update which only uses c11-like atomics. Newer versions
may sometimes break due to compiler regressions or behaviour changes. The
version shipped with your operating system is very likely to work with no
@ -237,7 +237,7 @@ to forcefully enable it using "USE_LIBCRYPT=1".
-----------------
For SSL/TLS, it is necessary to use a cryptography library. HAProxy currently
supports the OpenSSL library, and is known to build and work with branches
1.0.0, 1.0.1, 1.0.2, 1.1.0, 1.1.1, and 3.0 to 3.6. It is recommended to use
1.0.0, 1.0.1, 1.0.2, 1.1.0, 1.1.1, and 3.0 to 3.5. It is recommended to use
at least OpenSSL 1.1.1 to have support for all SSL keywords and configuration
in HAProxy. OpenSSL follows a long-term support cycle similar to HAProxy's,
and each of the branches above receives its own fixes, without forcing you to
@ -259,15 +259,11 @@ reported to work as well. While there are some efforts from the community to
ensure they work well, OpenSSL remains the primary target and this means that
in case of conflicting choices, OpenSSL support will be favored over other
options. Note that QUIC is not fully supported when haproxy is built with
OpenSSL < 3.5.2 version. In this case, QUICTLS or AWS-LC are the preferred
alternatives. As of writing this, the QuicTLS project follows OpenSSL very
closely and provides update simultaneously, but being a volunteer-driven
project, its long-term future does not look certain enough to convince
operating systems to package it, so it needs to be build locally. Recent
versions of AWS-LC (>= 1.22 and the FIPS branches) are pretty complete and
generally more performant than other OpenSSL derivatives, but may behave
slightly differently, particularly when dealing with outdated setups. See
the section about QUIC in this document.
OpenSSL < 3.5 version. In this case, QUICTLS is the preferred alternative.
As of writing this, the QuicTLS project follows OpenSSL very closely and provides
update simultaneously, but being a volunteer-driven project, its long-term future
does not look certain enough to convince operating systems to package it, so it
needs to be build locally. See the section about QUIC in this document.
A fifth option is wolfSSL (https://github.com/wolfSSL/wolfssl). It is the only
supported alternative stack not based on OpenSSL, yet which implements almost


@ -643,7 +643,7 @@ ifneq ($(USE_OPENSSL:0=),)
OPTIONS_OBJS += src/ssl_sock.o src/ssl_ckch.o src/ssl_ocsp.o src/ssl_crtlist.o \
src/ssl_sample.o src/cfgparse-ssl.o src/ssl_gencert.o \
src/ssl_utils.o src/jwt.o src/ssl_clienthello.o src/jws.o src/acme.o \
src/ssl_trace.o src/jwe.o
src/ssl_trace.o
endif
ifneq ($(USE_ENGINE:0=),)
@ -992,7 +992,7 @@ OBJS += src/mux_h2.o src/mux_h1.o src/mux_fcgi.o src/log.o \
src/cfgcond.o src/proto_udp.o src/lb_fwlc.o src/ebmbtree.o \
src/proto_uxdg.o src/cfgdiag.o src/sock_unix.o src/sha1.o \
src/lb_fas.o src/clock.o src/sock_inet.o src/ev_select.o \
src/lb_map.o src/shctx.o src/hpack-dec.o src/net_helper.o \
src/lb_map.o src/shctx.o src/hpack-dec.o \
src/arg.o src/signal.o src/fix.o src/dynbuf.o src/guid.o \
src/cfgparse-tcp.o src/lb_ss.o src/chunk.o src/counters.o \
src/cfgparse-unix.o src/regex.o src/fcgi.o src/uri_auth.o \


@ -1,2 +1,2 @@
$Format:%ci$
2026/01/07
2025/11/21


@ -1 +1 @@
3.4-dev2
3.3-dev14


@ -55,7 +55,7 @@ usage() {
echo " -S, --master-socket <path> Use the master socket at <path> (default: ${MASTER_SOCKET})"
echo " -d, --debug Debug mode, set -x"
echo " -t, --timeout Timeout (socat -t) (default: ${TIMEOUT})"
echo " -s, --silent Silent mode (no output)"
echo " -s, --silent Slient mode (no output)"
echo " -v, --verbose Verbose output (output from haproxy on failure)"
echo " -vv Even more verbose output (output from haproxy on success and failure)"
echo " -h, --help This help"


@ -6,9 +6,9 @@ Wants=network-online.target
[Service]
EnvironmentFile=-/etc/default/haproxy
EnvironmentFile=-/etc/sysconfig/haproxy
Environment="CONFIG=/etc/haproxy/haproxy.cfg" "PIDFILE=/run/haproxy.pid" "CFGDIR=/etc/haproxy/conf.d" "EXTRAOPTS=-S /run/haproxy-master.sock"
ExecStart=@SBINDIR@/haproxy -Ws -f $CONFIG -f $CFGDIR -p $PIDFILE $EXTRAOPTS
ExecReload=@SBINDIR@/haproxy -Ws -f $CONFIG -f $CFGDIR -c $EXTRAOPTS
Environment="CONFIG=/etc/haproxy/haproxy.cfg" "PIDFILE=/run/haproxy.pid" "EXTRAOPTS=-S /run/haproxy-master.sock"
ExecStart=@SBINDIR@/haproxy -Ws -f $CONFIG -p $PIDFILE $EXTRAOPTS
ExecReload=@SBINDIR@/haproxy -Ws -f $CONFIG -c $EXTRAOPTS
ExecReload=/bin/kill -USR2 $MAINPID
KillMode=mixed
Restart=always


@ -59,9 +59,9 @@ struct ring_v2 {
struct ring_v2a {
size_t size; // storage size
size_t rsvd; // header length (used for file-backed maps)
size_t tail ALIGNED(64); // storage tail
size_t head ALIGNED(64); // storage head
char area[0] ALIGNED(64); // storage area begins immediately here
size_t tail __attribute__((aligned(64))); // storage tail
size_t head __attribute__((aligned(64))); // storage head
char area[0] __attribute__((aligned(64))); // storage area begins immediately here
};
/* display the message and exit with the code */


@ -1,70 +0,0 @@
BEGININPUT
BEGINCONTEXT
HAProxy's development cycle consists in one development branch, and multiple
maintenance branches.
All the development is made into the development branch exclusively. This
includes mostly new features, doc updates, cleanups and of course, fixes.
The maintenance branches, also called stable branches, never see any
development, and only receive ultra-safe fixes for bugs that affect them,
that are picked from the development branch.
Branches are numbered in 0.1 increments. Every 6 months, upon a new major
release, the development branch enters maintenance and a new development branch
is created with a new, higher version. The current development branch is
3.4-dev, and maintenance branches are 3.3 and below.
Fixes created in the development branch for issues that were introduced in an
earlier branch are applied in descending order to each and every version till
that branch that introduced the issue: 3.3 first, then 3.2, then 3.1, then 3.0
and so on. This operation is called "backporting". A fix for an issue is never
backported beyond the branch that introduced the issue. An important point is
that the project maintainers really aim at zero regression in maintenance
branches, so they're never willing to take any risk backporting patches that
are not deemed strictly necessary.
Fixes consist of patches managed using the Git version control tool and are
identified by a Git commit ID and a commit message. For this reason we
indistinctly talk about backporting fixes, commits, or patches; all mean the
same thing. When mentioning commit IDs, developers always use a short form
made of the first 8 characters only, and expect the AI assistant to do the
same.
It seldom happens that some fixes depend on changes that were brought by other
patches that were not in some branches and that will need to be backported as
well for the fix to work. In this case, such information is explicitly provided
in the commit message by the patch's author in natural language.
Developers are serious and always indicate if a patch needs to be backported.
Sometimes they omit the exact target branch, or they will say that the patch is
"needed" in some older branch, but it means the same. If a commit message
doesn't mention any backport instructions, it means that the commit does not
have to be backported. And patches that are not strictly bug fixes nor doc
improvements are normally not backported. For example, fixes for design
limitations, architectural improvements and performance optimizations are
considered too risky for a backport. Finally, all bug fixes are tagged as
"BUG" at the beginning of their subject line. Patches that are not tagged as
such are not bugs, and must never be backported unless their commit message
explicitly requests so.
ENDCONTEXT
A developer is reviewing the development branch, trying to spot which commits
need to be backported to maintenance branches. This person is already expert
on HAProxy and everything related to Git, patch management, and the risks
associated with backports, so he doesn't want to be told how to proceed nor to
review the contents of the patch.
The goal for this developer is to get some help from the AI assistant to save
some precious time on this tedious review work. In order to do a better job, he
needs an accurate summary of the information and instructions found in each
commit message. Specifically he needs to figure if the patch fixes a problem
affecting an older branch or not, if it needs to be backported, if so to which
branches, and if other patches need to be backported along with it.
The indented text block below after an "id" line and starting with a Subject line
is a commit message from the HAProxy development branch that describes a patch
applied to that branch, starting with its subject line, please read it carefully.


@ -1,29 +0,0 @@
ENDINPUT
BEGININSTRUCTION
You are an AI assistant that follows instruction extremely well. Help as much
as you can, responding to a single question using a single response.
The developer wants to know if he needs to backport the patch above to fix
maintenance branches, for which branches, and what possible dependencies might
be mentioned in the commit message. Carefully study the commit message and its
backporting instructions if any (otherwise it should probably not be backported),
then provide a very concise and short summary that will help the developer decide
to backport it, or simply to skip it.
Start by explaining in one or two sentences what you recommend for this one and why.
Finally, based on your analysis, give your general conclusion as "Conclusion: X"
where X is a single word among:
- "yes", if you recommend to backport the patch right now either because
it explicitly states this or because it's a fix for a bug that affects
a maintenance branch (3.3 or lower);
- "wait", if this patch explicitly mentions that it must be backported, but
only after waiting some time.
- "no", if nothing clearly indicates a necessity to backport this patch (e.g.
lack of explicit backport instructions, or it's just an improvement);
- "uncertain" otherwise for cases not covered above
ENDINSTRUCTION
Explanation:


@ -2,8 +2,8 @@
HAProxy
Configuration Manual
----------------------
version 3.4
2026/01/07
version 3.3
2025/11/21
This document covers the configuration language as implemented in the version
@ -647,8 +647,8 @@ which must be placed before other sections, but it may be repeated if needed.
In addition, some automatic identifiers may automatically be assigned to some
of the created objects (e.g. proxies), and by reordering sections, their
identifiers will change. These ones appear in the statistics for example. As
such, the configuration below will assign "foo" an ID number smaller than its
"bar" counterpart. This will be swapped if the two sections are reversed:
such, the configuration below will assign "foo" ID number 1 and "bar" ID number
2, which will be swapped if the two sections are reversed:
listen foo
bind :80
@ -1747,7 +1747,6 @@ The following keywords are supported in the "global" section :
- ca-base
- chroot
- cluster-secret
- cpu-affinity
- cpu-map
- cpu-policy
- cpu-set
@ -1787,7 +1786,6 @@ The following keywords are supported in the "global" section :
- lua-load
- lua-load-per-thread
- lua-prepend-path
- max-thread-per-group
- mworker-max-reloads
- nbthread
- node
@ -1877,8 +1875,6 @@ The following keywords are supported in the "global" section :
- tune.events.max-events-at-once
- tune.fail-alloc
- tune.fd.edge-triggered
- tune.h1.be.glitches-threshold
- tune.h1.fe.glitches-threshold
- tune.h1.zero-copy-fwd-recv
- tune.h1.zero-copy-fwd-send
- tune.h2.be.glitches-threshold
@ -2227,30 +2223,7 @@ cpu-map [auto:]<thread-group>[/<thread-set>] <cpu-set>[,...] [...]
cpu-map 4/1-40 40-79,120-159
cpu-affinity <affinity>
Defines how you want threads to be bound to cpus.
It currently accepts the following values :
- per-core: each thread will be bound to all the hardware threads of one core.
- per-group: each thread will be bound to all the hardware threads of the
group. This is the default unless "threads-per-core 1" is used in
"cpu-policy". "per-group" accepts an optional argument, to specify how CPUs
should be allocated. When a list of CPUs is larger than the maximum allowed
number of CPUs per group and has to be split between multiple groups, an
extra option allows to choose how the groups will be bound to those CPUs:
- auto: each thread group will only be assigned a fair share of contiguous
CPU cores that are dedicated to it and not shared with other groups. This
is the default as it generally is more optimal.
- loose: each group will still be allowed to use any CPU in the list. This
generally causes more contention, but may sometimes help deal better with
parasitic loads running on the same CPUs.
- auto: "per-group" will be used, unless "threads-per-core 1" is used in
"cpu-policy", in which case "per-core" will be used. This is the default.
- per-thread: that will bind one thread to one hardware thread only. If
"threads-per-core 1" is used in "cpu-policy", then each thread will be
bound to one hardware thread of a different core.
- per-ccx: each thread will be bound to all the hardware threads of a CCX.
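For illustration only, a minimal global-section sketch using this keyword
together with "cpu-policy"; the policy name and the "per-core" choice are
assumptions picked for the example, not recommendations:
    global
        # assumed policy name; see "cpu-policy" below for the accepted values
        cpu-policy performance
        # bind each thread to all hardware threads of a single core
        cpu-affinity per-core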
cpu-policy <policy> [threads-per-core 1 | auto]
cpu-policy <policy>
Selects the CPU allocation policy to be used.
On multi-CPU systems, there can be plenty of reasons for not using all
@ -2402,13 +2375,6 @@ cpu-policy <policy> [threads-per-core 1 | auto]
easily. Note that if a single cluster is present, it
will still be fully used.
An optional keyword can be added, "threads-per-core". It can accept two
values, "1" and "auto". If set to 1, then only one thread per core will be
created, irrespective of how many hardware threads the core has. If set
to auto, then one thread per hardware thread will be created.
If no affinity is specified, and threads-per-core 1 is used, then by
default the affinity will be per-core.
See also: "cpu-map", "cpu-set", "nbthread"
cpu-set <directive>...
@ -2879,7 +2845,7 @@ limited-quic
layer supports most of the necessary TLS operations, albeit without QUIC
0-RTT capability.
This feature is primarily targeted for OpenSSL prior to version 3.5.2, where
This feature is primarily targetted for OpenSSL prior to version 3.5.2, where
QUIC API was not implemented or only partially. The compatibility layer can
still be activated for version 3.5.2 and above, but this is probably
unnecessary.
@ -3014,14 +2980,6 @@ master-worker no-exit-on-failure
it is only meant for debugging and could put the master process in an
abnormal state.
max-threads-per-group <number>
Defines the maximum number of threads in a thread group. Unless the number
of thread groups is fixed with the thread-groups directive, haproxy will
create more thread groups if needed. The default and maximum value is 64.
Having a lower value means more groups will potentially be created, which
can help improve performances, as a number of data structures are per
thread group, and that will mean less contention
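A minimal sketch; the thread count below is an assumption chosen only to force
the creation of several groups:
    global
        # 256 threads with at most 32 per group leads haproxy to create at
        # least 8 thread groups (unless "thread-groups" pins the count)
        nbthread 256
        max-threads-per-group 32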
mworker-max-reloads <number>
In master-worker mode, this option limits the number of time a worker can
survive to a reload. If the worker did not leave after a reload, once its
@ -4205,49 +4163,9 @@ tune.glitches.kill.cpu-usage <number>
will automatically get killed. A rule of thumb would be to set this value to
twice the usually observed CPU usage, or the commonly observed CPU usage plus
half the idle one (i.e. if CPU commonly reaches 60%, setting 80 here can make
sense). This parameter has no effect without tune.h2.fe.glitches-threshold,
tune.quic.fe.sec.glitches-threshold or tune.h1.fe.glitches-threshold. See
also the global parameters "tune.h2.fe.glitches-threshold",
"tune.h1.fe.glitches-threshold" and "tune.quic.fe.sec.glitches-threshold".
tune.h1.be.glitches-threshold <number>
Sets the threshold for the number of glitches on a HTTP/1 backend connection,
after which that connection will automatically be killed. This allows to
automatically kill misbehaving connections without having to write explicit
rules for them. The default value is zero, indicating that no threshold is
set so that no event will cause a connection to be closed. Typical events
include improperly formatted headers that had been nevertheless accepted by
"accept-unsafe-violations-in-http-response". Any non-zero value here should
probably be in the hundreds or thousands to be effective without affecting
slightly bogus servers. It is also possible to only kill connections when the
CPU usage crosses a certain level, by using "tune.glitches.kill.cpu-usage".
Note that a graceful close is attempted at 75% of the configured threshold by
advertising a GOAWAY for a future stream. This ensures that a slightly faulty
connection will stop being used after some time without risking to interrupt
ongoing transfers.
See also: tune.h1.fe.glitches-threshold, bc_glitches, and
tune.glitches.kill.cpu-usage
tune.h1.fe.glitches-threshold <number>
Sets the threshold for the number of glitches on a HTTP/1 frontend connection
after which that connection will automatically be killed. This allows to
automatically kill misbehaving connections without having to write explicit
rules for them. The default value is zero, indicating that no threshold is
set so that no event will cause a connection to be closed. Typical events
include improperly formatted headers that had been nevertheless accepted by
"accept-unsafe-violations-in-http-request". Any non-zero value here should
probably be in the hundreds or thousands to be effective without affecting
slightly bogus clients. It is also possible to only kill connections when the
CPU usage crosses a certain level, by using "tune.glitches.kill.cpu-usage".
Note that a graceful close is attempted at 75% of the configured threshold by
advertising a GOAWAY for a future stream. This ensures that a slightly non-
compliant client will have the opportunity to create a new connection and
continue to work unaffected without ever triggering the hard close thus
risking to interrupt ongoing transfers.
See also: tune.h1.be.glitches-threshold, fc_glitches, and
tune.glitches.kill.cpu-usage
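An illustrative sketch combining this threshold with the CPU-usage gate
described above; the numeric values are assumptions, not recommendations:
    global
        # close a misbehaving frontend H1 connection after ~1000 glitches...
        tune.h1.fe.glitches-threshold 1000
        # ...but only once the process CPU usage crosses 80%
        tune.glitches.kill.cpu-usage 80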
sense). This parameter has no effect without tune.h2.fe.glitches-threshold or
tune.quic.fe.sec.glitches-threshold. See also the global parameters
"tune.h2.fe.glitches-threshold" and "tune.quic.fe.sec.glitches-threshold".
tune.h1.zero-copy-fwd-recv { on | off }
Enables ('on') of disabled ('off') the zero-copy receives of data for the H1
@ -4271,10 +4189,7 @@ tune.h2.be.glitches-threshold <number>
zero value here should probably be in the hundreds or thousands to be
effective without affecting slightly bogus servers. It is also possible to
only kill connections when the CPU usage crosses a certain level, by using
"tune.glitches.kill.cpu-usage". Note that a graceful close is attempted at
75% of the configured threshold by advertising a GOAWAY for a future stream.
This ensures that a slightly faulty connection will stop being used after
some time without risking to interrupt ongoing transfers.
"tune.glitches.kill.cpu-usage".
See also: tune.h2.fe.glitches-threshold, bc_glitches, and
tune.glitches.kill.cpu-usage
@ -4331,11 +4246,7 @@ tune.h2.fe.glitches-threshold <number>
zero value here should probably be in the hundreds or thousands to be
effective without affecting slightly bogus clients. It is also possible to
only kill connections when the CPU usage crosses a certain level, by using
"tune.glitches.kill.cpu-usage". Note that a graceful close is attempted at
75% of the configured threshold by advertising a GOAWAY for a future stream.
This ensures that a slightly non-compliant client will have the opportunity
to create a new connection and continue to work unaffected without ever
triggering the hard close thus risking to interrupt ongoing transfers.
"tune.glitches.kill.cpu-usage".
See also: tune.h2.be.glitches-threshold, fc_glitches, and
tune.glitches.kill.cpu-usage
@ -4923,7 +4834,7 @@ tune.quic.fe.cc.max-win-size <size>
The default value is 480k.
See also the "quic-cc-algo" bind and server options.
See also the "quic-cc-algo" bind option.
tune.quic.frontend.default-max-window-size <size> (deprecated)
This keyword has been deprecated in 3.3 and will be removed in 3.5. It is
@ -5111,7 +5022,7 @@ tune.quic.fe.tx.pacing { on | off }
deactivate it for networks with very high bandwidth/low latency
characteristics to prevent unwanted delay and reduce CPU consumption.
See also the "quic-cc-algo" bind and server options.
See also the "quic-cc-algo" bind option.
tune.quic.disable-tx-pacing (deprecated)
This keyword has been deprecated in 3.3 and will be removed in 3.5. It is
@ -5820,7 +5731,6 @@ errorloc302 X X X X
errorloc303 X X X X
error-log-format X X X -
force-persist - - X X
force-be-switch - X X -
filter - X X X
fullconn X - X X
guid - X X X
@ -7104,9 +7014,6 @@ default_backend <backend>
used when no rule has matched. It generally is the dynamic backend which
will catch all undetermined requests.
If a backend is disabled or unpublished, default_backend rules targetting it
will be ignored and stream processing will remain on the original proxy.
Example :
use_backend dynamic if url_dyn
@ -7150,11 +7057,7 @@ disabled
is possible to disable many instances at once by adding the "disabled"
keyword in a "defaults" section.
By default, a disabled backend cannot be selected for content-switching.
However, a portion of the traffic can ignore this when "force-be-switch" is
used.
See also : "enabled", "force-be-switch"
See also : "enabled"
dispatch <address>:<port> (deprecated)
@ -7564,19 +7467,6 @@ force-persist { if | unless } <condition>
and section 7 about ACL usage.
force-be-switch { if | unless } <condition>
Allow content switching to select a backend instance even if it is disabled
or unpublished. This rule can be used by admins to test traffic to services
prior to expose them to the outside world.
May be used in the following contexts: tcp, http
May be used in sections: defaults | frontend | listen | backend
no | yes | yes | no
See also : "disabled"
filter <name> [param*]
Add the filter <name> in the filter list attached to the proxy.
@ -8723,11 +8613,9 @@ id <value>
Arguments : none
Set a persistent ID for the proxy. This ID must be unique and positive. An
unused ID will automatically be assigned if unset. Due to an historical
behavior, value 1 is not used unless explicitly set. Thus, the lowest value
automatically assigned will be 2. This ID is currently only returned in
statistics.
Set a persistent ID for the proxy. This ID must be unique and positive.
An unused ID will automatically be assigned if unset. The first assigned
value will be 1. This ID is currently only returned in statistics.
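A short sketch with explicit and automatic IDs (values are arbitrary):
    listen foo
        id 1                 # explicitly claim ID 1
        bind :80
        server s1 198.51.100.1:80
    listen bar               # no "id": an unused ID is assigned automatically
        bind :81
        server s1 198.51.100.2:80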
ignore-persist { if | unless } <condition>
@ -9250,10 +9138,8 @@ mode { tcp|http|log|spop }
server features are supported, but not TCP or HTTP specific ones.
spop When used in a backend section, it will turn the backend into a
spop backend. This mode is mandatory if the backend contains
SPOA servers, but when mode is tcp, it will automatically be
converted to mode spop if such servers are detected.
log backend. This mode is mandatory and automatically set, if
necessary, for backends referenced by SPOE engines.
When doing content switching, it is mandatory that the frontend and the
backend are in the same mode (generally HTTP), otherwise the configuration
@ -14773,17 +14659,14 @@ use_backend <backend> [{if | unless} <condition>]
There may be as many "use_backend" rules as desired. All of these rules are
evaluated in their declaration order, and the first one which matches will
assign the backend. This is even the case if the backend is considered as
down. However, if a matching rule targets a disabled or unpublished backend,
it is ignored instead and rules evaluation continue.
assign the backend.
In the first form, the backend will be used if the condition is met. In the
second form, the backend will be used if the condition is not met. If no
condition is valid, the backend defined with "default_backend" will be used
unless it is disabled or unpublished. If no default backend is available,
either the servers in the same section are used (in case of a "listen"
section) or, in case of a frontend, no server is used and a 503 service
unavailable response is returned.
condition is valid, the backend defined with "default_backend" will be used.
If no default backend is defined, either the servers in the same section are
used (in case of a "listen" section) or, in case of a frontend, no server is
used and a 503 service unavailable response is returned.
Note that it is possible to switch from a TCP frontend to an HTTP backend. In
this case, either the frontend has already checked that the protocol is HTTP,
@ -16630,10 +16513,6 @@ allow-0rtt
you should only allow if for requests that are safe to replay, i.e. requests
that are idempotent. You can use the "wait-for-handshake" action for any
request that wouldn't be safe with early data.
With QUIC, 0rtt is supported with QuicTLS, OpenSSL >= 3.5.2 and AWS-LC.
With TCP/TLS, 0rtt is only supported with OpenSSL, and requires that the
client sends an ALPN, otherwise the early data won't be considered before
the handshake happens.
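A hedged sketch of a frontend accepting early data only for idempotent
requests; the certificate path is an assumption:
    frontend https
        mode http
        bind :443 ssl crt /etc/haproxy/site.pem alpn h2,http/1.1 allow-0rtt
        # do not act on replayable early data for non-idempotent methods
        http-request wait-for-handshake if !METH_GET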
alpn <protocols>
This enables the TLS ALPN extension and advertises the specified protocol
@ -17058,10 +16937,9 @@ ech <dir> [ EXPERIMENTAL ]
See https://datatracker.ietf.org/doc/draft-ietf-tls-esni/
This is an experimental feature, which requires the
"expose-experimental-directives" option in the global section.
It also necessitates an OpenSSL version that supports ECH
( https://github.com/openssl/openssl/tree/feature/ech), and HAProxy must be
compiled with USE_ECH=1. The ECH API of AWS-LC is not supported.
"expose-experimental-directives" option in the global section. It also
necessitates an OpenSSL version that supports ECH, and HAProxy must be
compiled with USE_ECH=1.
Example:
$ openssl ech -public_name foobar.com -out /etc/haproxy/echkeydir/foobar.com.ech
@ -17546,19 +17424,6 @@ tcp-md5sig <password>
introduction of spoofed TCP segments into the connection stream. But it can
be useful for any very long-lived TCP connections.
tcp-ss <mode>
Sets the TCP Save SYN option for all incoming connections instantiated from
this listening socket. This option is available on Linux since version 4.3.
It instructs the kernel to try to keep a copy of the incoming IP packet
containing the TCP SYN flag, for later inspection via the "fc_saved_syn"
sample fetch function. The option knows 3 modes:
- 0 SYN packet saving is disabled, this is the default
- 1 SYN packet saving is enabled, and contains IP and TCP headers
- 2 SYN packet saving is enabled, and contains ETH, IP and TCP headers
This only works for regular TCP connections, and is ignored for other
protocols (e.g. UNIX sockets). See also "fc_saved_syn".
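For illustration, a frontend saving the SYN for later inspection (the port is
an assumption); the saved headers can then be examined with the "ip." and
"tcp." converters or "ip.fp()" described later in this document:
    frontend syn_capture
        bind :8443 tcp-ss 1          # keep the IP and TCP headers of the SYN
        tcp-request connection set-var(sess.syn) fc_saved_syn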
tcp-ut <delay>
Sets the TCP User Timeout for all incoming connections instantiated from this
listening socket. This option is available on Linux since version 2.6.37. It
@ -17876,8 +17741,6 @@ allow-0rtt
Allow sending early data to the server when using TLS 1.3.
Note that early data will be sent only if the client used early data, or
if the backend uses "retry-on" with the "0rtt-rejected" keyword.
With QUIC, 0rtt is supported with QuicTLS, OpenSSL >= 3.5.2 and AWS-LC.
With TCP/TLS, 0rtt is only supported with OpenSSL.
alpn <protocols>
May be used in the following contexts: tcp, http
@ -18952,16 +18815,6 @@ proto <name>
See also "ws" to use an alternative protocol for websocket streams.
quic-cc-algo { cubic | newreno | bbr | nocc }[(<args,...>)]
This is a QUIC specific setting to select the congestion control algorithm
for any connection targeting this server. They are similar to those used by
TCP. See the bind option with a similar name for a complete description of
all customization options.
Default value: cubic
See also: "tune.quic.be.tx.pacing" and "tune.quic.be.cc.max-win-size"
redir <prefix>
May be used in the following contexts: http
@ -19801,7 +19654,16 @@ the corresponding http-request and http-response actions.
cache <name>
Declare a cache section, allocate a shared cache memory named <name>, the
size of cache is mandatory (see keyword "total-max-size" below).
size of cache is mandatory.
total-max-size <megabytes>
Define the size in RAM of the cache in megabytes. This size is split in
blocks of 1kB which are used by the cache entries. Its maximum value is 4095.
max-object-size <bytes>
Define the maximum size of the objects to be cached. Must not be greater than
an half of "total-max-size". If not set, it equals to a 256th of the cache size.
All objects with sizes larger than "max-object-size" will not be cached.
max-age <seconds>
Define the maximum expiration duration. The expiration is set as the lowest
@ -19810,16 +19672,6 @@ max-age <seconds>
seconds, which means that you can't cache an object more than 60 seconds by
default.
max-object-size <bytes>
Define the maximum size of the objects to be cached. Must not be greater than
an half of "total-max-size". If not set, it equals to a 256th of the cache size.
All objects with sizes larger than "max-object-size" will not be cached.
max-secondary-entries <number>
Define the maximum number of simultaneous secondary entries with the same primary
key in the cache. This needs the vary support to be enabled. Its default value is 10
and should be passed a strictly positive integer.
process-vary <on/off>
Enable or disable the processing of the Vary header. When disabled, a response
containing such a header will never be cached. When enabled, we need to calculate
@ -19829,9 +19681,10 @@ process-vary <on/off>
the contents of the 'accept-encoding', 'referer' and 'origin' headers for
now. The default value is off (disabled).
total-max-size <megabytes>
Define the size in RAM of the cache in megabytes. This size is split in
blocks of 1kB which are used by the cache entries. Its maximum value is 4095.
max-secondary-entries <number>
Define the maximum number of simultaneous secondary entries with the same primary
key in the cache. This needs the vary support to be enabled. Its default value is 10
and should be passed a strictly positive integer.
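To illustrate the section as a whole, a minimal sketch of a cache and its use
from a proxy; names, sizes and addresses are assumptions:
    cache mycache
        total-max-size 64          # 64 MB of shared memory (mandatory)
        max-object-size 131072     # skip objects larger than 128 kB
        max-age 240                # cap freshness at 4 minutes
    frontend web
        mode http
        bind :80
        http-request cache-use mycache
        http-response cache-store mycache
        default_backend app
    backend app
        server s1 198.51.100.20:8080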
6.2.2. Proxy section
@ -20480,8 +20333,6 @@ The following keywords are supported:
51d.single(prop[,prop*]) string string
add(value) integer integer
add_item(delim[,var[,suff]]) string string
aes_cbc_dec(bits,nonce,key[,<aad>]) binary binary
aes_cbc_enc(bits,nonce,key[,<aad>]) binary binary
aes_gcm_dec(bits,nonce,key,aead_tag[,aad]) binary binary
aes_gcm_enc(bits,nonce,key,aead_tag[,aad]) binary binary
and(value) integer integer
@ -20507,12 +20358,6 @@ debug([prefix][,destination]) any same
digest(algorithm) binary binary
div(value) integer integer
djb2([avalanche]) binary integer
eth.data binary binary
eth.dst binary binary
eth.hdr binary binary
eth.proto binary integer
eth.src binary binary
eth.vlan binary integer
even integer boolean
field(index,delimiters[,count]) string string
fix_is_valid binary boolean
@ -20525,21 +20370,9 @@ htonl integer integer
http_date([offset[,unit]]) integer string
iif(true,false) boolean string
in_table([table]) any boolean
ip.data binary binary
ip.df binary integer
ip.dst binary address
ip.fp binary binary
ip.hdr binary binary
ip.proto binary integer
ip.src binary address
ip.tos binary integer
ip.ttl binary integer
ip.ver binary integer
ipmask(mask4[,mask6]) address address
json([input-code]) string string
json_query(json_path[,output_type]) string _outtype_
jwt_decrypt_cert(<cert>) string binary
jwt_decrypt_secret(<secret>) string binary
jwt_header_query([json_path[,output_type]]) string string
jwt_payload_query([json_path[,output_type]]) string string
-- keyword -------------------------------------+- input type + output type -
@ -20622,18 +20455,6 @@ table_server_id([table]) any integer
table_sess_cnt([table]) any integer
table_sess_rate([table]) any integer
table_trackers([table]) any integer
tcp.dst binary integer
tcp.flags binary integer
tcp.options.mss binary integer
tcp.options.sack binary integer
tcp.options.tsopt binary integer
tcp.options.tsval binary integer
tcp.options.wscale binary integer
tcp.options.wsopt binary integer
tcp.options_list binary binary
tcp.seq binary integer
tcp.src binary integer
tcp.win binary integer
ub64dec string string
ub64enc string string
ungrpc(field_number[,field_type]) binary binary / int
@ -20708,31 +20529,6 @@ add_item(<delim>[,<var>[,<suff>]])
http-request set-var(req.tagged) 'var(req.tagged),add_item(",",req.score1),add_item(",",req.score2)'
http-request set-var(req.tagged) 'var(req.tagged),add_item(",",,(site1))' if src,in_table(site1)
aes_cbc_dec(<bits>,<nonce>,<key>[,<aad>])
Decrypts the raw byte input using the AES128-CBC, AES192-CBC or AES256-CBC
algorithm, depending on the <bits> parameter. All other parameters need to be
base64 encoded and the returned result is in raw byte format. The <aad>
parameter is optional. If the <aad> validation fails, the converter doesn't
return any data.
The <nonce>, <key> and <aad> can either be strings or variables. This
converter requires at least OpenSSL 1.0.1.
Example:
http-response set-header X-Decrypted-Text %[var(txn.enc),\
aes_cbc_dec(128,txn.nonce,Zm9vb2Zvb29mb29wZm9vbw==)]
aes_cbc_enc(<bits>,<nonce>,<key>[,<aad>])
Encrypts the raw byte input using the AES128-CBC, AES192-CBC or AES256-CBC
algorithm, depending on the <bits> parameter. <nonce>, <key> and <aad>
parameters must be base64 encoded.
The <aad> parameter is optional. The returned result is in raw byte format.
The <nonce>, <key> and <aad> can either be strings or variables. This
converter requires at least OpenSSL 1.0.1.
Example:
http-response set-header X-Encrypted-Text %[var(txn.plain),\
aes_cbc_enc(128,txn.nonce,Zm9vb2Zvb29mb29wZm9vbw==)]
aes_gcm_dec(<bits>,<nonce>,<key>,<aead_tag>[,<aad>])
Decrypts the raw byte input using the AES128-GCM, AES192-GCM or AES256-GCM
algorithm, depending on the <bits> parameter. All other parameters need to be
@ -20981,48 +20777,6 @@ djb2([<avalanche>])
32-bit hash is trivial to break. See also "crc32", "sdbm", "wt6", "crc32c",
and the "hash-type" directive.
eth.data
This is used with an input sample representing a binary Ethernet frame, as
returned by "fc_saved_syn" combined with the "tcp-ss" bind option set to "2".
It skips all the Ethernet header including possible VLANs and returns a block
of binary data starting at the layer 3 protocol (usually IPv4 or IPv6). See
also "fc_saved_syn" and "tcp-ss".
eth.dst
This is used with an input sample representing a binary Ethernet frame, as
returned by "fc_saved_syn" combined with the "tcp-ss" bind option set to "2".
It returns the 6 bytes of the Ethernet header corresponding to the
destination address of the frame, as a binary block. See also "fc_saved_syn"
and "tcp-ss".
eth.hdr
This is used with an input sample representing a binary Ethernet frame, as
returned by "fc_saved_syn" combined with the "tcp-ss" bind option set to "2".
It trims anything past the Ethernet header but keeps possible VLANs, and
returns this header as a block of binary data. See also "fc_saved_syn" and
"tcp-ss".
eth.proto
This is used with an input sample representing a binary Ethernet frame, as
returned by "fc_saved_syn" combined with the "tcp-ss" bind option set to "2".
It returns the protocol number (also known as EtherType) found in a Ethernet
header after any optional VLAN as an integer value. It should normally be
either 0x800 for IPv4 or 0x86DD for IPv6. See also "fc_saved_syn" and
"tcp-ss".
eth.src
This is used with an input sample representing a binary Ethernet frame, as
returned by "fc_saved_syn" combined with the "tcp-ss" bind option set to "2".
It returns the 6 bytes of the Ethernet header corresponding to the source
address of the frame, as a binary block. See also "fc_saved_syn" and
"tcp-ss".
eth.vlan
This is used with an input sample representing a binary Ethernet frame, as
returned by "fc_saved_syn" combined with the "tcp-ss" bind option set to "2".
It returns the last VLAN ID found in a Ethernet header as an integer value.
See also "fc_saved_syn" and "tcp-ss".
even
Returns a boolean TRUE if the input value of type signed integer is even
otherwise returns FALSE. It is functionally equivalent to "not,and(1),bool".
@ -21151,132 +20905,6 @@ in_table([<table>])
elements (e.g. whether or not a source IP address or an Authorization header
was already seen).
ip.data
This is used with an input sample representing a binary Ethernet frame, as
returned by "fc_saved_syn" combined with the "tcp-ss" bind option set to "1",
or with the output of "eth.data". It skips the IP header and any optional
options or extensions, and returns a block of binary data starting at the
transport protocol (usually TCP or UDP). See also "fc_saved_syn", "tcp-ss",
and "eth.data".
ip.df
This is used with an input sample representing a binary Ethernet frame, as
returned by "fc_saved_syn" combined with the "tcp-ss" bind option set to "1",
or with the output of "eth.data". It returns integer value 1 if the DF (don't
fragment) flag is set in the IP header, 0 otherwise. IPv6 does not have a DF
flag, and doesn't fragment by default so it always returns 1. See also
"fc_saved_syn", "tcp-ss", and "eth.data".
ip.dst
This is used with an input sample representing a binary Ethernet frame, as
returned by "fc_saved_syn" combined with the "tcp-ss" bind option set to "1",
or with the output of "eth.data". It returns the IPv4 or IPv6 destination
address from the IPv4/v6 header. See also "fc_saved_syn", "tcp-ss", and
"eth.data".
ip.fp([<mode>])
This is used with an input sample representing a binary Ethernet frame, as
returned by "fc_saved_syn" combined with the "tcp-ss" bind option set to "1",
or with the output of "eth.data". It inspects various parts of the IP header
and the TCP header to construct sort of a fingerprint of invariant parts that
can be used to distinguish between multiple apparently identical hosts. The
real-world use case is to refine the identification of misbehaving hosts
between a shared IP address to avoid blocking legitimate users when only one
is misbehaving and needs to be blocked. The converter builds a 7-byte binary
block based on the input. The bytes of the fingerprint are arranged like
this:
- byte 0: IP TOS field (see ip.tos)
- byte 1:
- bit 7: IPv6 (1) / IPv4 (0)
- bit 6: ip.df
- bit 5..4: 0:ip.ttl<=32; 1:ip.ttl<=64; 2:ip.ttl<=128; 3:ip.ttl<=255
- bit 3: IP options present (1) / absent (0)
- bit 2: TCP data present (1) / absent (0)
- bit 1: TCP.flags has CWR set (1) / cleared (0)
- bit 0: TCP.flags has ECE set (1) / cleared (0)
- byte 2:
- bits 7..4: TCP header length in 4-byte words
- bits 3..0: TCP window scaling + 1 (1..15) / 0 (no WS advertised)
- byte 3..4: tcp.win
- byte 5..6: tcp.options.mss, or zero if absent
The <mode> argument permits to append more information to the fingerprint. By
default, when the <mode> argument is not set or is zero, the fingerprint is
solely made of the 7 bytes described above. If <mode> is specified as another
value, it then corresponds to the sum of the following values, and the
respective components will be concatenated to the fingerprint, in the order
below:
- 1: the received TTL value is appended to the fingerprint (1 byte)
- 2: the list of TCP option kinds, as returned by "tcp.options_list",
made of 0 to 40 extra bytes, is appended to the fingerprint
- 4: the source IP address is appended to the fingerprint, which adds
4 bytes for IPv4 and 16 for IPv6.
Example: make a 12..24 bytes fingerprint using the base FP, the TTL and the
source address (1+4=5):
frontend test
mode http
bind :4445 tcp-ss 1
tcp-request connection set-var(sess.syn) fc_saved_syn
http-request return status 200 content-type text/plain lf-string \
"src=%[var(sess.syn),ip.src] fp=%[var(sess.syn),ip.fp(5),hex]\n"
See also "fc_saved_syn", "tcp-ss", "eth.data", "ip.df", "ip.ttl", "tcp.win",
"tcp.options.mss", and "tcp.options_list".
ip.hdr
This is used with an input sample representing a binary Ethernet frame, as
returned by "fc_saved_syn" combined with the "tcp-ss" bind option set to "1",
or with the output of "eth.data". It returns a block of binary data starting
with the IP header and stopping after the last option or extension, and
before the transport protocol header. See also "fc_saved_syn", "tcp-ss", and
"eth.data".
ip.proto
This is used with an input sample representing a binary Ethernet frame, as
returned by "fc_saved_syn" combined with the "tcp-ss" bind option set to "1",
or with the output of "eth.data". It returns the transport protocol number,
usually 6 for TCP or 17 for UDP. See also "fc_saved_syn", "tcp-ss", and
"eth.data".
ip.src
This is used with an input sample representing a binary Ethernet frame, as
returned by "fc_saved_syn" combined with the "tcp-ss" bind option set to "1",
or with the output of "eth.data". It returns the IPv4 or IPv6 source address
from the IPv4/v6 header. See also "fc_saved_syn", "tcp-ss", and "eth.data".
ip.tos
This is used with an input sample representing a binary Ethernet frame, as
returned by "fc_saved_syn" combined with the "tcp-ss" bind option set to "1",
or with the output of "eth.data". It returns an integer corresponding to the
value of the type-of-service (TOS) field in the IPv4 header or traffic class
(TC) field in the IPv6 header. Note that in the modern internet, this field
most often contains a DSCP (Differentiated Services Codepoint) value in the
6 upper bits and the two lower are either not used, or used by IP ECN. Please
refer to RFC2474 and RFC8436 for DSCP values, and RFC3168 for IP ECN fields.
See also "fc_saved_syn", "tcp-ss", and "eth.data".
ip.ttl
This is used with an input sample representing a binary Ethernet frame, as
returned by "fc_saved_syn" combined with the "tcp-ss" bind option set to "1",
or with the output of "eth.data". This returns an integer corresponding to
the TTL (Time To Live) or HL (Hop Limit) field in the IPv4/IPv6 header. This
value is usually preset to a fixed value and decremented by each router that
the packet crosses. It can help infer how many hops away a client is when the
initial value is known. Note that most modern operating systems start with an
initial value of 64. See also "fc_saved_syn", "tcp-ss", and "eth.data".
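Example (a hedged sketch under the same assumptions as the "ip.fp" example
above; the header name is arbitrary): forward the TTL observed on the SYN so
that the distance from a common initial value such as 64 may be estimated:
http-request set-header X-Syn-TTL %[var(sess.syn),ip.ttl]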
ip.ver
This is used with an input sample representing a binary IP packet, as
returned by "fc_saved_syn" combined with the "tcp-ss" bind option set to "1",
or with the output of "eth.data". This returns the IP version from the IP
header, normally either 4 or 6. Note that this doesn't check whether the
version matches the protocol number announced in the enclosing Ethernet frame,
but since this is expected to be used with valid packets, the operating
system will normally have already verified this. See also "fc_saved_syn", "tcp-ss", and
"eth.data".
ipmask(<mask4>[,<mask6>])
Apply a mask to an IP address, and use the result for lookups and storage.
This can be used to make all hosts within a certain mask share the same
@ -21364,72 +20992,22 @@ json_query(<json_path>[,<output_type>])
# get the value of the key 'iss' from a JWT Bearer token
http-request set-var(txn.token_payload) req.hdr(Authorization),word(2,.),ub64dec,json_query('$.iss')
jwt_decrypt_cert(<cert>)
Performs a signature validation of a JSON Web Token following the JSON Web
Encryption format (see RFC 7516) given in input and returns its content
decrypted using the provided certificate.
The <cert> parameter must be a path to an already loaded certificate (that
can be dumped via the "dump ssl cert" CLI command). The certificate must have
its "jwt" option explicitely set to "on" (see "jwt" crt-list option). It can
be provided directly or via a variable.
The only tokens managed yet are the ones using the Compact Serialization
format (five dot-separated base64-url encoded strings).
This converter can be used for tokens that have an algorithm ("alg" field of
the JOSE header) among the following: RSA1_5, RSA-OAEP or RSA-OAEP-256.
The JWE token must be provided base64url-encoded and the output will be
provided "raw". If an error happens during token parsing, signature
verification or content decryption, an empty string will be returned.
Example:
# Get a JWT from the authorization header, put its decrypted content in an
# HTTP header
http-request set-var(txn.bearer) http_auth_bearer
http-request set-header X-Decrypted %[var(txn.bearer),jwt_decrypt_cert("/foo/bar.pem")]
jwt_decrypt_secret(<secret>)
Performs a signature validation of a JSON Web Token following the JSON Web
Encryption format (see RFC 7516) given in input and returns its content
decrypted using the provided base64-encoded secret. The secret can be
given as a string or via a variable.
The only tokens supported so far are the ones using the Compact Serialization
format (five dot-separated base64-url encoded strings).
This converter can be used for tokens that have an algorithm ("alg" field of
the JOSE header) among the following: A128KW, A192KW, A256KW, A128GCMKW,
A192GCMKW, A256GCMKW, dir. Please note that the A128KW and A192KW algorithms
are not available on AWS-LC and decryption will not work.
The JWE token must be provided base64url-encoded and the output will be
provided "raw". If an error happens during token parsing, signature
verification or content decryption, an empty string will be returned.
Example:
# Get a JWT from the authorization header, put its decrypted content in an
# HTTP header
http-request set-var(txn.bearer) http_auth_bearer
http-request set-header X-Decrypted %[var(txn.bearer),jwt_decrypt_secret("GawgguFyGrWKav7AX4VKUg")]
jwt_header_query([<json_path>[,<output_type>]])
When given a JSON Web Token (JWT) in input, either returns the decoded header
part of the token (the first base64-url encoded part of the JWT) if no
parameter is given, or performs a json_query on the decoded header part of
the token. See "json_query" converter for details about the accepted
json_path and output_type parameters.
This converter can be used with tokens that are either JWS or JWE tokens as
long as they are in the Compact Serialization format.
Please note that this converter is only available when HAProxy has been
compiled with USE_OPENSSL.
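Example (a hedged sketch following the style of the other JWT examples; the
variable and header names are arbitrary): extract the algorithm announced in
the JOSE header:
http-request set-var(txn.bearer) http_auth_bearer
http-request set-header X-JWT-Alg %[var(txn.bearer),jwt_header_query('$.alg')]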
jwt_payload_query([<json_path>[,<output_type>]])
When given a JSON Web Token (JWT) of the JSON Web Signed (JWS) format in
input, either returns the decoded payload part of the token (the second
base64-url encoded part of the JWT) if no parameter is given, or performs a
json_query on the decoded payload part of the token. See "json_query"
converter for details about the accepted json_path and output_type
parameters.
When given a JSON Web Token (JWT) in input, either returns the decoded
payload part of the token (the second base64-url encoded part of the JWT) if
no parameter is given, or performs a json_query on the decoded payload part
of the token. See "json_query" converter for details about the accepted
json_path and output_type parameters.
Please note that this converter is only available when HAProxy has been
compiled with USE_OPENSSL.
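Example (a hedged sketch in the same spirit as the other JWT examples; the
variable and header names are arbitrary): extract the issuer claim from the
payload:
http-request set-var(txn.bearer) http_auth_bearer
http-request set-header X-JWT-Issuer %[var(txn.bearer),jwt_payload_query('$.iss')]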
@ -22536,88 +22114,6 @@ table_trackers([<table>])
concurrent connections there are from a given address for example. See also
the sc_trackers sample fetch keyword.
tcp.dst
This is used with an input sample representing a binary TCP header, as
returned by "ip.data". It returns an integer representing the destination
port present in the TCP header. See also "fc_saved_syn", "tcp-ss", and
"ip.data".
tcp.flags
This is used with an input sample representing a binary TCP header, as
returned by "ip.data". It returns an integer representing the TCP flags
from this TCP header. All 8 flags from FIN to CWR are retrieved. Each flag
may be tested using the "and()" converter. Please refer to RFC9293 for the
value of each flag. See also "fc_saved_syn", "tcp-ss", and "ip.data".
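Example (a hedged sketch, assuming the "sess.syn" variable was set from
"fc_saved_syn" on a "tcp-ss 1" bind as in the examples above, and that per
RFC 9293 the ECE flag corresponds to the value 64; the header name is
arbitrary): a non-zero value below means the client set ECE in its SYN:
http-request set-header X-Syn-ECE %[var(sess.syn),ip.data,tcp.flags,and(64)]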
tcp.options.mss
This is used with an input sample representing a binary TCP header, as
returned by "ip.data". It looks for a TCP option of kind "MSS", and if found,
it returns an integer value corresponding to the advertised value in that
option, otherwise zero. The MSS is the Maximum Segment Size and indicates the
largest segment the peer may receive, in bytes. See also "fc_saved_syn",
"tcp-ss", and "ip.data".
tcp.options.sack
This is used with an input sample representing a binary TCP header, as
returned by "ip.data". It looks for a TCP option of kind "Sack-Permitted",
and if found, returns 1, otherwise zero. See also "fc_saved_syn", "tcp-ss",
and "ip.data".
tcp.options.tsopt
This is used with an input sample representing a binary TCP header, as
returned by "ip.data". It looks for a TCP option of kind "Timestamp", and if
found, returns 1, otherwise zero. See also "fc_saved_syn", "tcp-ss", and
"ip.data".
tcp.options.tsval
This is used with an input sample representing a binary TCP header, as
returned by "ip.data". It looks for a TCP option of kind "Timestamp", and if
found, returns the timestamp value emitted by the peer, otherwise does not
return anything. Note that timestamps are 32-bit unsigned values whose unit is
chosen by the peer, and timestamps are expected to be independent between
different connections. See also "fc_saved_syn",
"tcp-ss", and "ip.data".
tcp.options.wscale
This is used with an input sample representing a binary TCP header, as
returned by "ip.data". It looks for a TCP option of kind "Window Scale", and
if found, returns the window scaling value emitted by the peer, otherwise
zero. Note that values are not expected to be beyond 14 though no technical
limitation prevents them from being sent. In order to detect if the window
scale option was used, please use "tcp.options.wsopt". See also "tcp-ss",
"fc_saved_syn", "ip.data", and "tcp.options.wsopt".
tcp.options.wsopt
This is used with an input sample representing a binary TCP header, as
returned by "ip.data". It looks for a TCP option of kind "Window Scale", and
if found, returns 1, otherwise 0. See also "fc_saved_syn", "tcp-ss",
"ip.data", and "tcp.options.wscale".
tcp.options_list
This is used with an input sample representing a binary TCP header, as
returned by "ip.data". It builds a binary sequence of all TCP option kinds in
the same order as they appear in the TCP header. It can produce from 0 to 40
bytes in the worst case, since TCP options occupy at most 40 bytes. The
End-of-options kind is not emitted. See also
"fc_saved_syn", "tcp-ss", and "ip.data".
tcp.seq
This is used with an input sample representing a binary TCP header, as
returned by "ip.data". It returns an integer representing the sequence number
used by the peer in the TCP header. Sequence numbers are 32-bit unsigned
values. See also "fc_saved_syn", "tcp-ss", and "ip.data".
tcp.src
This is used with an input sample representing a binary TCP header, as
returned by "ip.data". It returns an integer representing the source port
present in the TCP header. See also "fc_saved_syn", "tcp-ss", and "ip.data".
tcp.win
This is used with an input sample representing a binary TCP header, as
returned by "ip.data". It returns an integer representing the window size
advertised by the peer in the TCP header. The value is provided as-is, as a
16-bit unsigned quantity, without applying the window scaling factor. See
also "fc_saved_syn", "tcp-ss", and "ip.data".
ub64dec
This converter is the base64url variant of b64dec converter. base64url
encoding is the "URL and Filename Safe Alphabet" variant of base64 encoding.
@ -23669,7 +23165,6 @@ fc_retrans integer
fc_rtt(<unit>) integer
fc_rttvar(<unit>) integer
fc_sacked integer
fc_saved_syn binary
fc_settings_streams_limit integer
fc_src ip
fc_src_is_local boolean
@ -24268,80 +23763,6 @@ fc_sacked : integer
if the operating system does not support TCP_INFO, for example Linux kernels
before 2.4, the sample fetch fails.
fc_saved_syn : binary
Returns a copy of the saved SYN packet that was preserved by the system
during the incoming connection setup. This requires that the "tcp-ss" option
was present on the "bind" line, and a Linux kernel 4.3 minimum. When "tcp-ss"
is set to 1, only the IP and TCP headers are present. When "tcp-ss" is set to
2, then the Ethernet header is also present before the IP header, and may be
used to control or log source MAC address or VLANs for example. Note that
there is no guarantee that a SYN will be saved. For example, if SYN cookies
are used, the SYN packet is not preserved and the connection is established
on the matching ACK packet. In addition, the system doesn't guarantee to
preserve the copy beyond the first read. As such it is strongly recommended
to copy it into a variable in scope "sess" from a "tcp-request connection"
rule and only use that variable for further manipulations. It is worth noting
that on the loopback interface a dummy 14-byte Ethernet header is constructed
by the system where both the source and destination addresses are zero, and
only the protocol is set. It is convenient to convert such samples to
hexadecimal using the "hex" converter during debugging. Example (fields
manually separated and commented below):
frontend test
mode http
bind :::4445 tcp-ss 2
tcp-request connection set-var(sess.syn) fc_saved_syn
http-request return status 200 content-type text/plain \
lf-string "%[var(sess.syn),hex]\n"
$ curl '0:4445'
000000000000 000000000000 0800 \ # MAC_DST MAC_SRC PROTO=IPv4
4500003C0A65400040063255 \ # IPv4 header, proto=6 (TCP)
7F000001 7F000001 \ # IP_SRC=127.0.0.1 IP_DST=127.0.0.1
E1F2 115D 01AF4E3E 00000000 \ # TCP_SPORT=57842 TCP_DPORT=4445, SEQ
A0 02 FFD7 FE300000 \ # OPT_LEN=20 TCP_FLAGS=SYN WIN=65495
0204FFD70402080A01C2A71A0000000001030307 # MSS=65495, TS, SACK, WSCALE 7
$ curl '[::1]:4445'
000000000000 000000000000 86DD \ # MAC_DST MAC_SRC PROTO=IPv6
6008018F00280640 \ # IPv6 header, proto=6 (TCP)
00000000000000000000000000000001 \ # SRC=::1
00000000000000000000000000000001 \ # DST=::1
9758 115D B5511F5D 00000000 \ # TCP_SPORT=38744 TCP_DPORT=4445, SEQ
A0 02 FFC4 00300000 \ # OPT_LEN=20 TCP_FLAGS=SYN WIN=65476
0204FFC40402080A9C231D680000000001030307 # MSS=65476, TS, SACK, WSCALE 7
The "bytes()" converter helps extract specific fields from the packet. The
be2dec() converter also permits reading chunks and emitting them in integer form. For more
accurate extraction, please refer to the "eth.XXX" converters.
Example with IPv4 input:
frontend test
mode http
bind :4445 tcp-ss 2
tcp-request connection set-var(sess.syn) fc_saved_syn
http-request return status 200 content-type text/plain lf-string \
"mac_dst=%[var(sess.syn),eth.dst,hex] \
mac_src=%[var(sess.syn),eth.src,hex] \
proto=%[var(sess.syn),eth.proto,bytes(6),be2hex(,2)] \
ipv4h=%[var(sess.syn),eth.data,bytes(0,12),hex] \
ipv4_src=%[var(sess.syn),eth.data,ip.src] \
ipv4_dst=%[var(sess.syn),eth.data,ip.dst] \
tcp_spt=%[var(sess.syn),eth.data,ip.data,tcp.src] \
tcp_dpt=%[var(sess.syn),eth.data,ip.data,tcp.dst] \
tcp_win=%[var(sess.syn),eth.data,ip.data,tcp.win] \
tcp_opt=%[var(sess.syn),eth.data,ip.data,bytes(20),hex]\n"
$ curl '0:4445'
mac_dst=000000000000 mac_src=000000000000 proto=0800 \
ipv4h=4500003CC9B7400040067302 ipv4_src=127.0.0.1 ipv4_dst=127.0.0.1 \
tcp_spt=43970 tcp_dpt=4445 tcp_win=65495 \
tcp_opt=0204FFD70402080A01DC0D410000000001030307
See also the "set-var" action, the "be2dec", "bytes", "hex", "eth.XXX",
"ip.XXX", and "tcp.XXX" converters.
fc_settings_streams_limit : integer
Returns the maximum number of streams allowed on the frontend connection. For
TCP and HTTP/1.1 connections, it is always 1. For other protocols, it depends
@ -30320,7 +29741,7 @@ Arguments: (mandatory ones first, then alphabetically sorted):
which can represent a client identifier found in a request for
instance.
* string [len <len>]
* string [length <len>]
A table declared with "type string" will store substrings of
up to <len> characters. If the string provided by the pattern
extractor is larger than <len>, it will be truncated before
@ -30330,7 +29751,7 @@ Arguments: (mandatory ones first, then alphabetically sorted):
limited to 32 characters. Increasing the length can have a
non-negligible memory usage impact.
* binary [len <len>]
* binary [length <len>]
A table declared with "type binary" will store binary blocks
of <len> bytes. If the block provided by the pattern
extractor is larger than <len>, it will be truncated before
@ -31604,9 +31025,8 @@ ocsp-update [ off | on ]
failure" or "Error during insertion" errors.
jwt [ off | on ]
Allow for this certificate to be used for JWT validation or decryption via
the "jwt_verify_cert" or "jwt_decrypt_cert" converters when set to 'on'. Its
value defaults to 'off'.
Allow for this certificate to be used for JWT validation via the
"jwt_verify_cert" converter when set to 'on'. Its value default to 'off'.
When set to 'on' for a given certificate, the CLI command "del ssl cert" will
not work. In order to be deleted, a certificate must not be used, either for
View File
@ -1,7 +1,7 @@
-----------------------
HAProxy Starter Guide
-----------------------
version 3.4
version 3.3
This document is an introduction to HAProxy for all those who don't know it, as
View File
@ -1,7 +1,7 @@
------------------------
HAProxy Management Guide
------------------------
version 3.4
version 3.3
This document describes how to start, stop, manage, and troubleshoot HAProxy,
@ -2474,11 +2474,6 @@ prompt [help | n | i | p | timed]*
advanced scripts, and the non-interactive mode (default) to basic scripts.
Note that the non-interactive mode is not available for the master socket.
publish backend <backend>
Activates content switching to a backend instance. This is the reverse
operation of "unpublish backend" command. This command is restricted and can
only be issued on sockets configured for levels "operator" or "admin".
quit
Close the connection when in interactive mode.
@ -2847,13 +2842,6 @@ operator
increased. It also drops expert and experimental mode. See also "show cli
level".
unpublish backend <backend>
Marks the backend as unqualified for future traffic selection. In effect,
use_backend / default_backend rules which reference it are ignored and the
next content switching rules are evaluated. Contrary to disabled backends,
server health checks remain active. This command is restricted and can only
be issued on sockets configured for levels "operator" or "admin".
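For example (a hedged sketch; the backend name and socket path are arbitrary),
a backend may be taken out of content switching and later published again from
the CLI:
$ echo "unpublish backend app_be" | socat stdio /var/run/haproxy.sock
$ echo "publish backend app_be" | socat stdio /var/run/haproxy.sock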
user
Decrease the CLI level of the current CLI session to user. It can't be
increased. It also drops expert and experimental mode. See also "show cli
@ -3354,10 +3342,9 @@ show quic [<format>] [<filter>]
in the format will instead show a more detailed help message.
The final argument is used to restrict or extend the connection list. By
default, active frontend connections only are displayed. Use the extra
argument "clo" to list instead closing frontend connections, "be" for backend
connections or "all" for every categories. It's also possible to restrict to
a single connection by specifying its hexadecimal address.
default, connections on closing or draining state are not displayed. Use the
extra argument "all" to include them in the output. It's also possible to
restrict to a single connection by specifying its hexadecimal address.
show servers conn [<backend>]
Dump the current and idle connections state of the servers belonging to the
View File
@ -125,8 +125,8 @@ struct activity {
unsigned int ctr2; // general purpose debug counter
#endif
char __pad[0]; // unused except to check remaining room
char __end[0] THREAD_ALIGNED();
} THREAD_ALIGNED();
char __end[0] __attribute__((aligned(64))); // align size to 64.
};
/* 256 entries for callers * callees should be highly sufficient (~45 seen usually) */
#define SCHED_ACT_HASH_BITS 8
@ -146,7 +146,7 @@ struct sched_activity {
uint64_t lkw_time; /* lock waiting time */
uint64_t lkd_time; /* locked time */
uint64_t mem_time; /* memory ops wait time */
} THREAD_ALIGNED();
};
#endif /* _HAPROXY_ACTIVITY_T_H */
View File
@ -366,7 +366,7 @@ static inline size_t applet_output_data(const struct appctx *appctx)
* This is useful when data have been read directly from the buffer. It is
* illegal to call this function with <len> causing a wrapping at the end of the
* buffer. It's the caller's responsibility to ensure that <len> is never larger
* than available output data.
* than available ouput data.
*
* This function is not HTX aware.
*/
@ -392,7 +392,7 @@ static inline void applet_reset_input(struct appctx *appctx)
co_skip(sc_oc(appctx_sc(appctx)), co_data(sc_oc(appctx_sc(appctx))));
}
/* Returns the amount of space available at the HTX output buffer (see applet_get_outbuf).
/* Returns the amout of space available at the HTX output buffer (see applet_get_outbuf).
*/
static inline size_t applet_htx_output_room(const struct appctx *appctx)
{
@ -402,7 +402,7 @@ static inline size_t applet_htx_output_room(const struct appctx *appctx)
return channel_recv_max(sc_ic(appctx_sc(appctx)));
}
/* Returns the amount of space available at the output buffer (see applet_get_outbuf).
/* Returns the amout of space available at the output buffer (see applet_get_outbuf).
*/
static inline size_t applet_output_room(const struct appctx *appctx)
{
View File
@ -85,20 +85,10 @@ static inline int be_usable_srv(struct proxy *be)
return be->srv_bck;
}
/* Returns true if <be> backend can be used as target to a switching rules. */
static inline int be_is_eligible(const struct proxy *be)
{
/* A disabled or unpublished backend cannot be selected for traffic.
* Note that STOPPED state is ignored as there is a risk of breaking
* requests during soft-stop.
*/
return !(be->flags & (PR_FL_DISABLED|PR_FL_BE_UNPUBLISHED));
}
/* set the time of last session on the backend */
static inline void be_set_sess_last(struct proxy *be)
{
if (be->be_counters.shared.tg)
if (be->be_counters.shared.tg[tgid - 1])
HA_ATOMIC_STORE(&be->be_counters.shared.tg[tgid - 1]->last_sess, ns_to_sec(now_ns));
}
View File
@ -537,7 +537,7 @@ struct mem_stats {
size_t size;
struct ha_caller caller;
const void *extra; // extra info specific to this call (e.g. pool ptr)
} ALIGNED(sizeof(void*));
} __attribute__((aligned(sizeof(void*))));
#undef calloc
#define calloc(x,y) ({ \
View File
@ -140,7 +140,7 @@ int warnif_misplaced_tcp_req_sess(struct proxy *proxy, const char *file, int lin
int warnif_misplaced_tcp_req_cont(struct proxy *proxy, const char *file, int line, const char *arg, const char *arg2);
int warnif_misplaced_tcp_res_cont(struct proxy *proxy, const char *file, int line, const char *arg, const char *arg2);
int warnif_misplaced_quic_init(struct proxy *proxy, const char *file, int line, const char *arg, const char *arg2);
int warnif_cond_conflicts(const struct acl_cond *cond, unsigned int where, char **err);
int warnif_cond_conflicts(const struct acl_cond *cond, unsigned int where, const char *file, int line);
int warnif_tcp_http_cond(const struct proxy *px, const struct acl_cond *cond);
int too_many_args_idx(int maxarg, int index, char **args, char **msg, int *err_code);
int too_many_args(int maxarg, char **args, char **msg, int *err_code);
View File
@ -31,23 +31,6 @@
#include <stdlib.h>
#endif
/* DEFVAL() returns either the second argument as-is, or <def> if absent. This
* is for use in macros arguments.
*/
#define DEFVAL(_def,...) _FIRST_ARG(NULL, ##__VA_ARGS__, (_def))
/* DEFNULL() returns either the argument as-is, or NULL if absent. This is for
* use in macros arguments.
*/
#define DEFNULL(...) DEFVAL(NULL, ##__VA_ARGS__)
/* DEFZERO() returns either the argument as-is, or 0 if absent. This is for
* use in macros arguments.
*/
#define DEFZERO(...) DEFVAL(0, ##__VA_ARGS__)
#define _FIRST_ARG(a, b, ...) b
/*
* Gcc before 3.0 needs [0] to declare a variable-size array
*/
@ -432,13 +415,6 @@
* for multi_threading, see THREAD_PAD() below. *
\*****************************************************************************/
/* Cache line size for alignment purposes. This value is incorrect for some
* Apple CPUs which have 128 bytes cache lines.
*/
#ifndef CACHELINE_SIZE
#define CACHELINE_SIZE 64
#endif
/* sets alignment for current field or variable */
#ifndef ALIGNED
#define ALIGNED(x) __attribute__((aligned(x)))
@ -462,12 +438,12 @@
#endif
#endif
/* Sets alignment for current field or variable only when threads are enabled.
* When no parameters are provided, we align to the cache line size.
/* sets alignment for current field or variable only when threads are enabled.
* Typically used to respect cache line alignment to avoid false sharing.
*/
#ifndef THREAD_ALIGNED
#ifdef USE_THREAD
#define THREAD_ALIGNED(...) ALIGNED(DEFVAL(CACHELINE_SIZE, ##__VA_ARGS__))
#define THREAD_ALIGNED(x) __attribute__((aligned(x)))
#else
#define THREAD_ALIGNED(x)
#endif
@ -500,12 +476,13 @@
#endif
#endif
/* Add an optional alignment for next fields in a structure, only when threads
* are enabled. When no parameters are provided, we align to the cache line size.
/* add an optional alignment for next fields in a structure, only when threads
* are enabled. Typically used to respect cache line alignment to avoid false
* sharing.
*/
#ifndef THREAD_ALIGN
#ifdef USE_THREAD
#define THREAD_ALIGN(...) union { } ALIGNED(DEFVAL(CACHELINE_SIZE, ##__VA_ARGS__))
#define THREAD_ALIGN(x) union { } ALIGNED(x)
#else
#define THREAD_ALIGN(x)
#endif
@ -530,7 +507,7 @@
/* add mandatory padding of the specified size between fields in a structure,
* This is used to avoid false sharing of cache lines for dynamically allocated
* structures which cannot guarantee alignment, or to ensure that the size of
* the struct remains consistent on architectures with different alignment
* the struct remains consistent on architectures with different aligment
* constraints
*/
#ifndef ALWAYS_PAD
View File
@ -145,7 +145,7 @@ enum {
CO_FL_WAIT_ROOM = 0x00000800, /* data sink is full */
CO_FL_WANT_SPLICING = 0x00001000, /* we wish to use splicing on the connection when possible */
CO_FL_SSL_NO_CACHED_INFO = 0x00002000, /* Don't use any cached information when creating a new SSL connection */
/* unused: 0x00002000 */
CO_FL_EARLY_SSL_HS = 0x00004000, /* We have early data pending, don't start SSL handshake yet */
CO_FL_EARLY_DATA = 0x00008000, /* At least some of the data are early data */
@ -212,13 +212,13 @@ static forceinline char *conn_show_flags(char *buf, size_t len, const char *deli
/* flags */
_(CO_FL_SAFE_LIST, _(CO_FL_IDLE_LIST, _(CO_FL_CTRL_READY,
_(CO_FL_REVERSED, _(CO_FL_ACT_REVERSING, _(CO_FL_OPT_MARK, _(CO_FL_OPT_TOS,
_(CO_FL_XPRT_READY, _(CO_FL_WANT_DRAIN, _(CO_FL_WAIT_ROOM, _(CO_FL_SSL_NO_CACHED_INFO, _(CO_FL_EARLY_SSL_HS,
_(CO_FL_XPRT_READY, _(CO_FL_WANT_DRAIN, _(CO_FL_WAIT_ROOM, _(CO_FL_EARLY_SSL_HS,
_(CO_FL_EARLY_DATA, _(CO_FL_SOCKS4_SEND, _(CO_FL_SOCKS4_RECV, _(CO_FL_SOCK_RD_SH,
_(CO_FL_SOCK_WR_SH, _(CO_FL_ERROR, _(CO_FL_FDLESS, _(CO_FL_WAIT_L4_CONN,
_(CO_FL_WAIT_L6_CONN, _(CO_FL_SEND_PROXY, _(CO_FL_ACCEPT_PROXY, _(CO_FL_ACCEPT_CIP,
_(CO_FL_SSL_WAIT_HS, _(CO_FL_PRIVATE, _(CO_FL_RCVD_PROXY, _(CO_FL_SESS_IDLE,
_(CO_FL_XPRT_TRACKED
)))))))))))))))))))))))))))));
))))))))))))))))))))))))))));
/* epilogue */
_(~0U);
return buf;
@ -476,7 +476,7 @@ struct xprt_ops {
void (*dump_info)(struct buffer *, const struct connection *);
/*
* Returns the value for various capabilities.
* Returns 0 if the capability is known, with the actual value in arg,
* Returns 0 if the capability is known, iwth the actual value in arg,
* or -1 otherwise
*/
int (*get_capability)(struct connection *connection, void *xprt_ctx, enum xprt_capabilities, void *arg);
@ -660,7 +660,6 @@ struct connection {
struct buffer name; /* Only used for passive reverse. Used as SNI when connection added to server idle pool. */
} reverse;
uint64_t sni_hash; /* Hash of the SNI. Used to cache the TLS session and try to reuse it. set to 0 is there is no SNI */
uint32_t term_evts_log; /* Termination events log: first 4 events reported from fd, handshake or xprt */
uint32_t mark; /* set network mark, if CO_FL_OPT_MARK is set */
uint8_t tos; /* set ip tos, if CO_FL_OPT_TOS is set */
@ -795,7 +794,7 @@ struct idle_conns {
struct mt_list toremove_conns;
struct task *cleanup_task;
__decl_thread(HA_SPINLOCK_T idle_conns_lock);
} THREAD_ALIGNED();
} THREAD_ALIGNED(64);
/* Termination events logs:
View File
@ -66,7 +66,7 @@ struct counters_shared {
COUNTERS_SHARED;
struct {
COUNTERS_SHARED_TG;
} **tg;
} *tg[MAX_TGROUPS];
};
/*
@ -101,7 +101,7 @@ struct fe_counters_shared_tg {
struct fe_counters_shared {
COUNTERS_SHARED;
struct fe_counters_shared_tg **tg;
struct fe_counters_shared_tg *tg[MAX_TGROUPS];
};
/* counters used by listeners and frontends */
@ -160,7 +160,7 @@ struct be_counters_shared_tg {
struct be_counters_shared {
COUNTERS_SHARED;
struct be_counters_shared_tg **tg;
struct be_counters_shared_tg *tg[MAX_TGROUPS];
};
/* counters used by servers and backends */
View File
@ -43,13 +43,11 @@ void counters_be_shared_drop(struct be_counters_shared *counters);
*/
#define COUNTERS_SHARED_LAST_OFFSET(scounters, type, offset) \
({ \
unsigned long last = 0; \
unsigned long last = HA_ATOMIC_LOAD((type *)((char *)scounters[0] + offset));\
unsigned long now_seconds = ns_to_sec(now_ns); \
int it; \
\
if (scounters) \
last = HA_ATOMIC_LOAD((type *)((char *)scounters[0] + offset));\
for (it = 1; (it < global.nbtgroups && scounters); it++) { \
for (it = 1; (it < global.nbtgroups && scounters[it]); it++) { \
unsigned long cur = HA_ATOMIC_LOAD((type *)((char *)scounters[it] + offset));\
if ((now_seconds - cur) < (now_seconds - last)) \
last = cur; \
@ -76,7 +74,7 @@ void counters_be_shared_drop(struct be_counters_shared *counters);
uint64_t __ret = 0; \
int it; \
\
for (it = 0; (it < global.nbtgroups && scounters); it++) \
for (it = 0; (it < global.nbtgroups && scounters[it]); it++) \
__ret += rfunc((type *)((char *)scounters[it] + offset)); \
__ret; \
})
@ -96,7 +94,7 @@ void counters_be_shared_drop(struct be_counters_shared *counters);
uint64_t __ret = 0; \
int it; \
\
for (it = 0; (it < global.nbtgroups && scounters); it++) \
for (it = 0; (it < global.nbtgroups && scounters[it]); it++) \
__ret += rfunc(&scounters[it]->elem, arg1, arg2); \
__ret; \
})
View File
@ -202,7 +202,7 @@ struct fdtab {
#ifdef DEBUG_FD
unsigned int event_count; /* number of events reported */
#endif
} THREAD_ALIGNED();
} THREAD_ALIGNED(64);
/* polled mask, one bit per thread and per direction for each FD */
struct polled_mask {
View File
@ -31,7 +31,7 @@
ullong _freq_ctr_total_from_values(uint period, int pend, uint tick, ullong past, ullong curr);
ullong freq_ctr_total(const struct freq_ctr *ctr, uint period, int pend);
ullong freq_ctr_total_estimate(const struct freq_ctr *ctr, uint period, int pend);
uint freq_ctr_overshoot_period(const struct freq_ctr *ctr, uint period, uint freq);
int freq_ctr_overshoot_period(const struct freq_ctr *ctr, uint period, uint freq);
uint update_freq_ctr_period_slow(struct freq_ctr *ctr, uint period, uint inc);
/* Only usable during single threaded startup phase. */
View File
@ -261,7 +261,6 @@ struct global {
unsigned int req_count; /* request counter (HTTP or TCP session) for logs and unique_id */
int last_checks;
uint32_t anon_key;
int maxthrpertgroup; /* Maximum number of threads per thread group */
/* leave this at the end to make sure we don't share this cache line by accident */
ALWAYS_ALIGN(64);
View File
@ -255,7 +255,6 @@ struct hlua_patref_iterator_context {
struct hlua_patref *ref;
struct bref bref; /* back-reference from the pat_ref_elt being accessed
* during listing */
struct pat_ref_gen *gen; /* the generation we are iterating over */
};
#else /* USE_LUA */
View File
@ -184,7 +184,6 @@ enum {
PERSIST_TYPE_NONE = 0, /* no persistence */
PERSIST_TYPE_FORCE, /* force-persist */
PERSIST_TYPE_IGNORE, /* ignore-persist */
PERSIST_TYPE_BE_SWITCH, /* force-be-switch */
};
/* final results for http-request rules */
View File
@ -270,7 +270,7 @@ struct htx {
/* XXX 4 bytes unused */
/* Blocks representing the HTTP message itself */
char blocks[VAR_ARRAY] ALIGNED(8);
char blocks[VAR_ARRAY] __attribute__((aligned(8)));
};
#endif /* _HAPROXY_HTX_T_H */
View File
@ -186,7 +186,7 @@ struct bind_conf {
#endif
#ifdef USE_QUIC
struct quic_transport_params quic_params; /* QUIC transport parameters. */
const struct quic_cc_algo *quic_cc_algo; /* QUIC control congestion algorithm */
struct quic_cc_algo *quic_cc_algo; /* QUIC control congestion algorithm */
size_t max_cwnd; /* QUIC maximum congestion control window size (kB) */
enum quic_sock_mode quic_mode; /* QUIC socket allocation strategy */
#endif
@ -204,7 +204,6 @@ struct bind_conf {
unsigned int backlog; /* if set, listen backlog */
int maxconn; /* maximum connections allowed on this listener */
int (*accept)(struct connection *conn); /* upper layer's accept() */
int tcp_ss; /* for TCP, Save SYN */
int level; /* stats access level (ACCESS_LVL_*) */
int severity_output; /* default severity output format in cli feedback messages */
short int nice; /* nice value to assign to the instantiated tasks */
@ -310,7 +309,7 @@ struct bind_kw_list {
struct accept_queue_ring {
uint32_t idx; /* (head << 16) | tail */
struct tasklet *tasklet; /* tasklet of the thread owning this ring */
struct connection *entry[ACCEPT_QUEUE_SIZE] THREAD_ALIGNED();
struct connection *entry[ACCEPT_QUEUE_SIZE] __attribute((aligned(64)));
};
View File
@ -231,7 +231,7 @@ const char *listener_state_str(const struct listener *l);
struct task *accept_queue_process(struct task *t, void *context, unsigned int state);
struct task *manage_global_listener_queue(struct task *t, void *context, unsigned int state);
extern struct accept_queue_ring accept_queue_rings[MAX_THREADS] THREAD_ALIGNED();
extern struct accept_queue_ring accept_queue_rings[MAX_THREADS] __attribute__((aligned(64)));
extern const char* li_status_st[LI_STATE_COUNT];
enum li_status get_li_status(struct listener *l);
View File
@ -107,34 +107,20 @@ struct pat_ref {
struct list list; /* Used to chain refs. */
char *reference; /* The reference name. */
char *display; /* String displayed to identify the pattern origin. */
struct ceb_root *gen_root; /* The tree mapping generation IDs to pattern reference elements */
struct list head; /* The head of the list of struct pat_ref_elt. */
struct ceb_root *ceb_root; /* The tree where pattern reference elements are attached. */
struct list pat; /* The head of the list of struct pattern_expr. */
unsigned int flags; /* flags PAT_REF_*. */
unsigned int curr_gen; /* current generation number (anything below can be removed) */
unsigned int next_gen; /* next generation number (insertions use this one) */
/* We keep a cached pointer to the current generation for performance. */
struct {
struct pat_ref_gen *data;
unsigned int id;
} cached_gen;
int unique_id; /* Each pattern reference have unique id. */
unsigned long long revision; /* updated for each update */
unsigned long long entry_cnt; /* the total number of entries */
THREAD_ALIGN();
THREAD_ALIGN(64);
__decl_thread(HA_RWLOCK_T lock); /* Lock used to protect pat ref elements */
event_hdl_sub_list e_subs; /* event_hdl: pat_ref's subscribers list (atomically updated) */
};
/* This struct represents all the elements in a pattern reference generation. The tree
* is used most of the time, but we also maintain a list for when order matters.
*/
struct pat_ref_gen {
struct list head; /* The head of the list of struct pat_ref_elt. */
struct ceb_root *elt_root; /* The tree where pattern reference elements are attached. */
struct ceb_node gen_node; /* Linkage for the gen_root cebtree in struct pat_ref */
unsigned int gen_id;
};
/* This is a part of struct pat_ref. Each entry contains one pattern and one
* associated value as original string. All derivative forms (via exprs) are
* accessed from list_head or tree_head. Be careful, it's variable-sized!
@ -147,7 +133,7 @@ struct pat_ref_elt {
char *sample;
unsigned int gen_id; /* generation of pat_ref this was made for */
int line;
struct ceb_node node; /* Node to attach this element to its <pat_ref_gen> cebtree. */
struct ceb_node node; /* Node to attach this element to its <pat_ref> ebtree. */
const char pattern[0]; // const only to make sure nobody tries to free it.
};
View File
@ -189,10 +189,8 @@ struct pat_ref *pat_ref_new(const char *reference, const char *display, unsigned
struct pat_ref *pat_ref_newid(int unique_id, const char *display, unsigned int flags);
struct pat_ref_elt *pat_ref_find_elt(struct pat_ref *ref, const char *key);
struct pat_ref_elt *pat_ref_gen_find_elt(struct pat_ref *ref, unsigned int gen_id, const char *key);
struct pat_ref_elt *pat_ref_append(struct pat_ref *ref, unsigned int gen, const char *pattern, const char *sample, int line);
struct pat_ref_elt *pat_ref_append(struct pat_ref *ref, const char *pattern, const char *sample, int line);
struct pat_ref_elt *pat_ref_load(struct pat_ref *ref, unsigned int gen, const char *pattern, const char *sample, int line, char **err);
struct pat_ref_gen *pat_ref_gen_new(struct pat_ref *ref, unsigned int gen_id);
struct pat_ref_gen *pat_ref_gen_get(struct pat_ref *ref, unsigned int gen_id);
int pat_ref_push(struct pat_ref_elt *elt, struct pattern_expr *expr, int patflags, char **err);
int pat_ref_add(struct pat_ref *ref, const char *pattern, const char *sample, char **err);
int pat_ref_set(struct pat_ref *ref, const char *pattern, const char *sample, char **err);
View File
@ -63,7 +63,7 @@ struct pool_cache_head {
unsigned int tid; /* thread id, for debugging only */
struct pool_head *pool; /* assigned pool, for debugging only */
ulong fill_pattern; /* pattern used to fill the area on free */
} THREAD_ALIGNED();
} THREAD_ALIGNED(64);
/* This describes a pool registration, which is what was passed to
* create_pool() and that might have been merged with an existing pool.
@ -139,7 +139,7 @@ struct pool_head {
struct list regs; /* registrations: alt names for this pool */
/* heavily read-write part */
THREAD_ALIGN();
THREAD_ALIGN(64);
/* these entries depend on the pointer value, they're used to reduce
* the contention on fast-changing values. The alignment here is
@ -148,7 +148,7 @@ struct pool_head {
* just meant to shard elements and there are no per-free_list stats.
*/
struct {
THREAD_ALIGN();
THREAD_ALIGN(64);
struct pool_item *free_list; /* list of free shared objects */
unsigned int allocated; /* how many chunks have been allocated */
unsigned int used; /* how many chunks are currently in use */
@ -156,8 +156,8 @@ struct pool_head {
unsigned int failed; /* failed allocations (indexed by hash of TID) */
} buckets[CONFIG_HAP_POOL_BUCKETS];
struct pool_cache_head cache[MAX_THREADS] THREAD_ALIGNED(); /* pool caches */
} THREAD_ALIGNED();
struct pool_cache_head cache[MAX_THREADS] THREAD_ALIGNED(64); /* pool caches */
} __attribute__((aligned(64)));
View File

@ -160,7 +160,6 @@ struct protocol {
/* default I/O handler */
void (*default_iocb)(int fd); /* generic I/O handler (typically accept callback) */
int (*get_info)(struct connection *conn, long long int *info, int info_num); /* Callback to get connection level statistical counters */
int (*get_opt)(const struct connection *conn, int level, int optname, void *buf, int size); /* getsockopt(level:optname) into buf:size */
uint flags; /* flags describing protocol support (PROTO_F_*) */
uint nb_receivers; /* number of receivers (under proto_lock) */
View File
@ -247,7 +247,6 @@ enum PR_SRV_STATE_FILE {
#define PR_FL_IMPLICIT_REF 0x10 /* The default proxy is implicitly referenced by another proxy */
#define PR_FL_PAUSED 0x20 /* The proxy was paused at run time (reversible) */
#define PR_FL_CHECKED 0x40 /* The proxy configuration was fully checked (including postparsing checks) */
#define PR_FL_BE_UNPUBLISHED 0x80 /* The proxy cannot be targetted by content switching rules */
struct stream;
@ -305,7 +304,7 @@ struct error_snapshot {
struct proxy_per_tgroup {
struct queue queue;
struct lbprm_per_tgrp lbprm;
} THREAD_ALIGNED();
} THREAD_ALIGNED(64);
struct proxy {
enum obj_type obj_type; /* object type == OBJ_TYPE_PROXY */
@ -506,7 +505,7 @@ struct proxy {
EXTRA_COUNTERS(extra_counters_fe);
EXTRA_COUNTERS(extra_counters_be);
THREAD_ALIGN();
THREAD_ALIGN(64);
unsigned int queueslength; /* Sum of the length of each queue */
int served; /* # of active sessions currently being served */
int totpend; /* total number of pending connections on this instance (for stats) */
View File
@ -166,12 +166,12 @@ static inline int proxy_abrt_close(const struct proxy *px)
/* increase the number of cumulated connections received on the designated frontend */
static inline void proxy_inc_fe_conn_ctr(struct listener *l, struct proxy *fe)
{
if (fe->fe_counters.shared.tg) {
if (fe->fe_counters.shared.tg[tgid - 1])
_HA_ATOMIC_INC(&fe->fe_counters.shared.tg[tgid - 1]->cum_conn);
update_freq_ctr(&fe->fe_counters.shared.tg[tgid - 1]->conn_per_sec, 1);
}
if (l && l->counters && l->counters->shared.tg)
if (l && l->counters && l->counters->shared.tg[tgid - 1])
_HA_ATOMIC_INC(&l->counters->shared.tg[tgid - 1]->cum_conn);
if (fe->fe_counters.shared.tg[tgid - 1])
update_freq_ctr(&fe->fe_counters.shared.tg[tgid - 1]->conn_per_sec, 1);
HA_ATOMIC_UPDATE_MAX(&fe->fe_counters.cps_max,
update_freq_ctr(&fe->fe_counters._conn_per_sec, 1));
}
@ -179,12 +179,12 @@ static inline void proxy_inc_fe_conn_ctr(struct listener *l, struct proxy *fe)
/* increase the number of cumulated connections accepted by the designated frontend */
static inline void proxy_inc_fe_sess_ctr(struct listener *l, struct proxy *fe)
{
if (fe->fe_counters.shared.tg) {
if (fe->fe_counters.shared.tg[tgid - 1])
_HA_ATOMIC_INC(&fe->fe_counters.shared.tg[tgid - 1]->cum_sess);
update_freq_ctr(&fe->fe_counters.shared.tg[tgid - 1]->sess_per_sec, 1);
}
if (l && l->counters && l->counters->shared.tg)
if (l && l->counters && l->counters->shared.tg[tgid - 1])
_HA_ATOMIC_INC(&l->counters->shared.tg[tgid - 1]->cum_sess);
if (fe->fe_counters.shared.tg[tgid - 1])
update_freq_ctr(&fe->fe_counters.shared.tg[tgid - 1]->sess_per_sec, 1);
HA_ATOMIC_UPDATE_MAX(&fe->fe_counters.sps_max,
update_freq_ctr(&fe->fe_counters._sess_per_sec, 1));
}
@ -199,19 +199,19 @@ static inline void proxy_inc_fe_cum_sess_ver_ctr(struct listener *l, struct prox
http_ver > sizeof(fe->fe_counters.shared.tg[tgid - 1]->cum_sess_ver) / sizeof(*fe->fe_counters.shared.tg[tgid - 1]->cum_sess_ver))
return;
if (fe->fe_counters.shared.tg)
if (fe->fe_counters.shared.tg[tgid - 1])
_HA_ATOMIC_INC(&fe->fe_counters.shared.tg[tgid - 1]->cum_sess_ver[http_ver - 1]);
if (l && l->counters && l->counters->shared.tg && l->counters->shared.tg[tgid - 1])
if (l && l->counters && l->counters->shared.tg[tgid - 1])
_HA_ATOMIC_INC(&l->counters->shared.tg[tgid - 1]->cum_sess_ver[http_ver - 1]);
}
/* increase the number of cumulated streams on the designated backend */
static inline void proxy_inc_be_ctr(struct proxy *be)
{
if (be->be_counters.shared.tg) {
if (be->be_counters.shared.tg[tgid - 1])
_HA_ATOMIC_INC(&be->be_counters.shared.tg[tgid - 1]->cum_sess);
if (be->be_counters.shared.tg[tgid - 1])
update_freq_ctr(&be->be_counters.shared.tg[tgid - 1]->sess_per_sec, 1);
}
HA_ATOMIC_UPDATE_MAX(&be->be_counters.sps_max,
update_freq_ctr(&be->be_counters._sess_per_sec, 1));
}
@ -226,12 +226,12 @@ static inline void proxy_inc_fe_req_ctr(struct listener *l, struct proxy *fe,
if (http_ver >= sizeof(fe->fe_counters.shared.tg[tgid - 1]->p.http.cum_req) / sizeof(*fe->fe_counters.shared.tg[tgid - 1]->p.http.cum_req))
return;
if (fe->fe_counters.shared.tg) {
if (fe->fe_counters.shared.tg[tgid - 1])
_HA_ATOMIC_INC(&fe->fe_counters.shared.tg[tgid - 1]->p.http.cum_req[http_ver]);
update_freq_ctr(&fe->fe_counters.shared.tg[tgid - 1]->req_per_sec, 1);
}
if (l && l->counters && l->counters->shared.tg)
if (l && l->counters && l->counters->shared.tg[tgid - 1])
_HA_ATOMIC_INC(&l->counters->shared.tg[tgid - 1]->p.http.cum_req[http_ver]);
if (fe->fe_counters.shared.tg[tgid - 1])
update_freq_ctr(&fe->fe_counters.shared.tg[tgid - 1]->req_per_sec, 1);
HA_ATOMIC_UPDATE_MAX(&fe->fe_counters.p.http.rps_max,
update_freq_ctr(&fe->fe_counters.p.http._req_per_sec, 1));
}
View File
@ -35,13 +35,13 @@
#define QUIC_CC_INFINITE_SSTHESH ((uint32_t)-1)
extern const struct quic_cc_algo quic_cc_algo_nr;
extern const struct quic_cc_algo quic_cc_algo_cubic;
extern const struct quic_cc_algo quic_cc_algo_bbr;
extern const struct quic_cc_algo *default_quic_cc_algo;
extern struct quic_cc_algo quic_cc_algo_nr;
extern struct quic_cc_algo quic_cc_algo_cubic;
extern struct quic_cc_algo quic_cc_algo_bbr;
extern struct quic_cc_algo *default_quic_cc_algo;
/* Fake algorithm with its fixed window */
extern const struct quic_cc_algo quic_cc_algo_nocc;
extern struct quic_cc_algo quic_cc_algo_nocc;
extern unsigned long long last_ts;
@ -90,7 +90,7 @@ enum quic_cc_algo_type {
struct quic_cc {
/* <conn> is there only for debugging purpose. */
struct quic_conn *qc;
const struct quic_cc_algo *algo;
struct quic_cc_algo *algo;
uint32_t priv[144];
};
View File
@ -35,7 +35,7 @@
#include <haproxy/quic_loss.h>
#include <haproxy/thread.h>
void quic_cc_init(struct quic_cc *cc, const struct quic_cc_algo *algo, struct quic_conn *qc);
void quic_cc_init(struct quic_cc *cc, struct quic_cc_algo *algo, struct quic_conn *qc);
void quic_cc_event(struct quic_cc *cc, struct quic_cc_event *ev);
void quic_cc_state_trace(struct buffer *buf, const struct quic_cc *cc);
@ -83,7 +83,7 @@ static inline void *quic_cc_priv(const struct quic_cc *cc)
* which is true for an IPv4 path, if not false for an IPv6 path.
*/
static inline void quic_cc_path_init(struct quic_cc_path *path, int ipv4, unsigned long max_cwnd,
const struct quic_cc_algo *algo,
struct quic_cc_algo *algo,
struct quic_conn *qc)
{
unsigned int max_dgram_sz;
View File
@ -24,12 +24,6 @@ struct quic_cid {
unsigned char len; /* size of QUIC CID */
};
/* Determines whether a CID is used for frontend or backend connections. */
enum quic_cid_side {
QUIC_CID_SIDE_FE,
QUIC_CID_SIDE_BE
};
/* QUIC connection id attached to a QUIC connection.
*
* This structure is used to match received packets DCIDs with the
@ -40,12 +34,11 @@ struct quic_connection_id {
uint64_t retire_prior_to;
unsigned char stateless_reset_token[QUIC_STATELESS_RESET_TOKEN_LEN];
struct ebmb_node node; /* node for receiver tree, cid.data as key */
struct quic_cid cid; /* CID data */
struct ebmb_node node; /* node for receiver tree, cid.data as key */
struct quic_cid cid; /* CID data */
struct quic_conn *qc; /* QUIC connection using this CID */
uint tid; /* Attached Thread ID for the connection. */
enum quic_cid_side side; /* side where this CID is used */
struct quic_conn *qc; /* QUIC connection using this CID */
uint tid; /* Attached Thread ID for the connection. */
};
#endif /* _HAPROXY_QUIC_CID_T_H */
View File
@ -15,10 +15,9 @@
#include <haproxy/quic_rx-t.h>
#include <haproxy/proto_quic.h>
extern struct quic_cid_tree *quic_fe_cid_trees;
extern struct quic_cid_tree *quic_be_cid_trees;
extern struct quic_cid_tree *quic_cid_trees;
struct quic_connection_id *quic_cid_alloc(enum quic_cid_side side);
struct quic_connection_id *quic_cid_alloc(void);
int quic_cid_generate_random(struct quic_connection_id *conn_id);
int quic_cid_generate_from_hash(struct quic_connection_id *conn_id, uint64_t hash64);
@ -82,18 +81,11 @@ static inline uchar quic_cid_tree_idx(const struct quic_cid *cid)
return _quic_cid_tree_idx(cid->data);
}
/* Returns the tree instance responsible for <conn_id> storage. */
static inline struct quic_cid_tree *quic_cid_get_tree(const struct quic_connection_id *conn_id)
{
const int tree_idx = quic_cid_tree_idx(&conn_id->cid);
return conn_id->side == QUIC_CID_SIDE_FE ?
&quic_fe_cid_trees[tree_idx] : &quic_be_cid_trees[tree_idx];
}
/* Remove <conn_id> from global CID tree as a thread-safe operation. */
static inline void quic_cid_delete(struct quic_connection_id *conn_id)
{
struct quic_cid_tree __maybe_unused *tree = quic_cid_get_tree(conn_id);
const uchar idx = quic_cid_tree_idx(&conn_id->cid);
struct quic_cid_tree __maybe_unused *tree = &quic_cid_trees[idx];
HA_RWLOCK_WRLOCK(QC_CID_LOCK, &tree->lock);
ebmb_delete(&conn_id->node);
View File
@ -434,7 +434,7 @@ struct quic_conn_closed {
#define QUIC_FL_CONN_NEED_POST_HANDSHAKE_FRMS (1U << 2) /* HANDSHAKE_DONE must be sent */
#define QUIC_FL_CONN_IS_BACK (1U << 3) /* conn used on backend side */
#define QUIC_FL_CONN_ACCEPT_REGISTERED (1U << 4)
#define QUIC_FL_CONN_UDP_GSO_EIO (1U << 5) /* GSO disabled due to a EIO occurred on same listener */
#define QUIC_FL_CONN_UDP_GSO_EIO (1U << 5) /* GSO disabled due to a EIO occured on same listener */
#define QUIC_FL_CONN_IDLE_TIMER_RESTARTED_AFTER_READ (1U << 6)
#define QUIC_FL_CONN_RETRANS_NEEDED (1U << 7)
#define QUIC_FL_CONN_RETRANS_OLD_DATA (1U << 8) /* retransmission in progress for probing with already sent data */
View File
@ -67,7 +67,6 @@ int qc_h3_request_reject(struct quic_conn *qc, uint64_t id);
struct quic_conn *qc_new_conn(void *target,
const struct quic_rx_packet *initial_pkt,
const struct quic_cid *token_odcid,
struct connection *connection,
struct quic_connection_id *conn_id,
struct sockaddr_storage *local_addr,
struct sockaddr_storage *peer_addr);
@ -92,12 +91,6 @@ static inline int qc_is_back(const struct quic_conn *qc)
return qc->flags & QUIC_FL_CONN_IS_BACK;
}
static inline enum quic_cid_side qc_cid_side(const struct quic_conn *qc)
{
return !(qc->flags & QUIC_FL_CONN_IS_BACK) ?
QUIC_CID_SIDE_FE : QUIC_CID_SIDE_BE;
}
/* Free the CIDs attached to <conn> QUIC connection. */
static inline void free_quic_conn_cids(struct quic_conn *conn)
{
View File
@ -17,7 +17,5 @@
#include <haproxy/pool-t.h>
extern struct pool_head *pool_head_quic_ssl_sock_ctx;
extern const char *default_quic_ciphersuites;
extern const char *default_quic_curves;
#endif /* _HAPROXY_QUIC_SSL_T_H */
View File
@ -5,7 +5,7 @@
#include <haproxy/api-t.h>
/* Counter which can be used to measure data amount across several buffers. */
/* Counter which can be used to measure data amount accross several buffers. */
struct bdata_ctr {
uint64_t tot; /* sum of data present in all underlying buffers */
uint8_t bcnt; /* current number of allocated underlying buffers */
View File
@ -33,12 +33,11 @@
/* Bit values for receiver->flags */
#define RX_F_BOUND 0x00000001 /* receiver already bound */
#define RX_F_INHERITED_FD 0x00000002 /* inherited FD from the parent process (fd@) */
#define RX_F_INHERITED 0x00000002 /* inherited FD from the parent process (fd@) or duped from another local receiver */
#define RX_F_MWORKER 0x00000004 /* keep the FD open in the master but close it in the children */
#define RX_F_MUST_DUP 0x00000008 /* this receiver's fd must be dup() from a reference; ignore socket-level ops here */
#define RX_F_NON_SUSPENDABLE 0x00000010 /* this socket cannot be suspended hence must always be unbound */
#define RX_F_PASS_PKTINFO 0x00000020 /* pass pktinfo in received messages */
#define RX_F_INHERITED_SOCK 0x00000040 /* inherited sock that could be duped from another local receiver */
/* Bit values for rx_settings->options */
#define RX_O_FOREIGN 0x00000001 /* receives on foreign addresses */
@ -64,8 +63,9 @@ struct rx_settings {
struct shard_info {
uint nbgroups; /* number of groups in this shard (=#rx); Zero = unused. */
uint nbthreads; /* number of threads in this shard (>=nbgroups) */
ulong tgroup_mask; /* bitmask of thread groups having a member here */
struct receiver *ref; /* first one, reference for FDs to duplicate */
struct receiver **members; /* all members of the shard (one per thread group) */
struct receiver *members[MAX_TGROUPS]; /* all members of the shard (one per thread group) */
};
/* This describes a receiver with all its characteristics (address, options, etc) */
View File
@ -130,11 +130,11 @@ struct ring_wait_cell {
struct ring_storage {
size_t size; // storage size
size_t rsvd; // header length (used for file-backed maps)
THREAD_ALIGN();
THREAD_ALIGN(64);
size_t tail; // storage tail
THREAD_ALIGN();
THREAD_ALIGN(64);
size_t head; // storage head
THREAD_ALIGN();
THREAD_ALIGN(64);
char area[0]; // storage area begins immediately here
};
@ -149,7 +149,7 @@ struct ring {
/* keep the queue in a separate cache line below */
struct {
THREAD_ALIGN();
THREAD_ALIGN(64);
struct ring_wait_cell *ptr;
} queue[RING_WAIT_QUEUES + 1]; // wait queue + 1 spacer
};
View File
@ -63,7 +63,6 @@ int smp_expr_output_type(struct sample_expr *expr);
int c_none(struct sample *smp);
int c_pseudo(struct sample *smp);
int smp_dup(struct sample *smp);
int sample_check_arg_base64(struct arg *arg, char **err);
/*
* This function just applies a cast on sample. It returns 0 if the cast is not
View File
@ -294,7 +294,7 @@ struct srv_per_tgroup {
struct eb_root *lb_tree; /* For LB algos with split between thread groups, the tree to be used, for each group */
unsigned npos, lpos; /* next and last positions in the LB tree, protected by LB lock */
unsigned rweight; /* remainder of weight in the current LB tree */
} THREAD_ALIGNED();
} THREAD_ALIGNED(64);
/* Configure the protocol selection for websocket */
enum __attribute__((__packed__)) srv_ws_mode {
@ -396,7 +396,7 @@ struct server {
/* The elements below may be changed on every single request by any
* thread, and generally at the same time.
*/
THREAD_ALIGN();
THREAD_ALIGN(64);
struct eb32_node idle_node; /* When to next do cleanup in the idle connections */
unsigned int curr_idle_conns; /* Current number of orphan idling connections, both the idle and the safe lists */
unsigned int curr_idle_nb; /* Current number of connections in the idle list */
@ -414,7 +414,7 @@ struct server {
/* Element below are usd by LB algorithms and must be doable in
* parallel to other threads reusing connections above.
*/
THREAD_ALIGN();
THREAD_ALIGN(64);
__decl_thread(HA_SPINLOCK_T lock); /* may enclose the proxy's lock, must not be taken under */
union {
struct eb32_node lb_node; /* node used for tree-based load balancing */
@ -428,7 +428,7 @@ struct server {
};
/* usually atomically updated by any thread during parsing or on end of request */
THREAD_ALIGN();
THREAD_ALIGN(64);
int cur_sess; /* number of currently active sessions (including syn_sent) */
int served; /* # of active sessions currently being served (ie not pending) */
int consecutive_errors; /* current number of consecutive errors */
@ -436,7 +436,7 @@ struct server {
struct be_counters counters; /* statistics counters */
/* Below are some relatively stable settings, only changed under the lock */
THREAD_ALIGN();
THREAD_ALIGN(64);
struct eb_root *lb_tree; /* we want to know in what tree the server is */
struct tree_occ *lb_nodes; /* lb_nodes_tot * struct tree_occ */
@ -485,7 +485,7 @@ struct server {
unsigned char *ptr;
int size;
int allocated_size;
uint64_t sni_hash; /* Hash of the SNI used for the session */
char *sni; /* SNI used for the session */
__decl_thread(HA_RWLOCK_T sess_lock);
} * reused_sess;
@ -514,8 +514,6 @@ struct server {
} ssl_ctx;
#ifdef USE_QUIC
struct quic_transport_params quic_params; /* QUIC transport parameters */
const struct quic_cc_algo *quic_cc_algo; /* QUIC control congestion algorithm */
size_t quic_max_cwnd; /* QUIC maximum congestion control window size (kB) */
#endif
struct path_parameters path_params; /* Connection parameters for that server */
struct resolv_srvrq *srvrq; /* Pointer representing the DNS SRV requeest, if any */
View File
@ -207,7 +207,7 @@ static inline void server_index_id(struct proxy *px, struct server *srv)
/* increase the number of cumulated streams on the designated server */
static inline void srv_inc_sess_ctr(struct server *s)
{
if (s->counters.shared.tg) {
if (s->counters.shared.tg[tgid - 1]) {
_HA_ATOMIC_INC(&s->counters.shared.tg[tgid - 1]->cum_sess);
update_freq_ctr(&s->counters.shared.tg[tgid - 1]->sess_per_sec, 1);
}
@ -218,7 +218,7 @@ static inline void srv_inc_sess_ctr(struct server *s)
/* set the time of last session on the designated server */
static inline void srv_set_sess_last(struct server *s)
{
if (s->counters.shared.tg)
if (s->counters.shared.tg[tgid - 1])
HA_ATOMIC_STORE(&s->counters.shared.tg[tgid - 1]->last_sess, ns_to_sec(now_ns));
}
View File
@ -46,7 +46,6 @@ struct connection *sock_accept_conn(struct listener *l, int *status);
void sock_accept_iocb(int fd);
void sock_conn_ctrl_init(struct connection *conn);
void sock_conn_ctrl_close(struct connection *conn);
int sock_conn_get_opt(const struct connection *conn, int level, int optname, void *buf, int size);
void sock_conn_iocb(int fd);
int sock_conn_check(struct connection *conn);
int sock_drain(struct connection *conn);
View File
@ -254,7 +254,7 @@ struct ssl_keylog {
#define SSL_SOCK_F_KTLS_SEND (1 << 2) /* kTLS send is configured on that socket */
#define SSL_SOCK_F_KTLS_RECV (1 << 3) /* kTLS receive is configure on that socket */
#define SSL_SOCK_F_CTRL_SEND (1 << 4) /* We want to send a kTLS control message for that socket */
#define SSL_SOCK_F_HAS_ALPN (1 << 5) /* An ALPN has been negotiated */
#define SSL_SOCK_F_HAS_ALPN (1 << 5) /* An ALPN has been negociated */
struct ssl_sock_ctx {
struct connection *conn;
View File
@ -30,7 +30,6 @@
#include <haproxy/proxy-t.h>
#include <haproxy/quic_conn-t.h>
#include <haproxy/ssl_sock-t.h>
#include <haproxy/stats.h>
#include <haproxy/thread.h>
extern struct list tlskeys_reference;
@ -58,7 +57,6 @@ extern struct pool_head *pool_head_ssl_keylog_str;
extern struct list openssl_providers;
extern struct stats_module ssl_stats_module;
uint64_t ssl_sock_sni_hash(const struct ist sni);
int ssl_sock_prep_ctx_and_inst(struct bind_conf *bind_conf, struct ssl_bind_conf *ssl_conf,
SSL_CTX *ctx, struct ckch_inst *ckch_inst, char **err);
int ssl_sock_prep_srv_ctx_and_inst(const struct server *srv, SSL_CTX *ctx,
@ -91,7 +89,6 @@ unsigned int ssl_sock_get_verify_result(struct connection *conn);
void ssl_sock_update_counters(SSL *ssl,
struct ssl_counters *counters,
struct ssl_counters *counters_px, int backend);
void ssl_sock_handle_hs_error(struct connection *conn);
#if (defined SSL_CTRL_SET_TLSEXT_TICKET_KEY_CB && TLS_TICKETS_NO > 0)
int ssl_sock_update_tlskey_ref(struct tls_keys_ref *ref,
struct buffer *tlskey);
@ -244,30 +241,6 @@ static inline struct connection *ssl_sock_get_conn(const SSL *s, struct ssl_sock
return ret;
}
/* Set at <counters> and <counters_px> addresses the SSL statistical counters */
static inline void ssl_sock_get_stats_counters(struct connection *conn,
struct ssl_counters **counters,
struct ssl_counters **counters_px)
{
switch (obj_type(conn->target)) {
case OBJ_TYPE_LISTENER: {
struct listener *li = __objt_listener(conn->target);
*counters = EXTRA_COUNTERS_GET(li->extra_counters, &ssl_stats_module);
*counters_px = EXTRA_COUNTERS_GET(li->bind_conf->frontend->extra_counters_fe,
&ssl_stats_module);
break;
}
case OBJ_TYPE_SERVER: {
struct server *srv = __objt_server(conn->target);
*counters = EXTRA_COUNTERS_GET(srv->extra_counters, &ssl_stats_module);
*counters_px = EXTRA_COUNTERS_GET(srv->proxy->extra_counters_be,
&ssl_stats_module);
break;
}
default:
break;
}
}
#endif /* USE_OPENSSL */
#endif /* _HAPROXY_SSL_SOCK_H */
View File
@ -57,9 +57,6 @@ const char *nid2nist(int nid);
const char *sigalg2str(int sigalg);
const char *curveid2str(int curve_id);
int aes_process(struct buffer *data, struct buffer *nonce, struct buffer *key, int key_size,
struct buffer *aead_tag, struct buffer *aad, struct buffer *out, int decrypt, int gcm);
#endif /* _HAPROXY_SSL_UTILS_H */
#endif /* USE_OPENSSL */

View File

@ -15,7 +15,7 @@ enum stfile_domain {
};
#define SHM_STATS_FILE_VER_MAJOR 1
#define SHM_STATS_FILE_VER_MINOR 2
#define SHM_STATS_FILE_VER_MINOR 1
#define SHM_STATS_FILE_HEARTBEAT_TIMEOUT 60 /* passed this delay (seconds) process which has not
* sent heartbeat will be considered down
@ -64,9 +64,9 @@ struct shm_stats_file_hdr {
*/
struct shm_stats_file_object {
char guid[GUID_MAX_LEN + 1];
uint16_t tgid; // thread group ID
uint8_t tgid; // thread group ID from 1 to 64
uint8_t type; // SHM_STATS_FILE_OBJECT_TYPE_* to know how to handle object.data
ALWAYS_PAD(5); // 5 bytes hole, ensure it remains the same size 32 vs 64 bits arch
ALWAYS_PAD(6); // 6 bytes hole, ensure it remains the same size 32 vs 64 bits arch
uint64_t users; // bitfield that corresponds to users of the object (see shm_stats_file_hdr slots)
/* as the struct may hold any of the types described here, let's make it
* so it may store up to the heaviest one using an union
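
Side note, purely illustrative: the tgid width and the ALWAYS_PAD value move together so the fixed part of the object keeps an identical layout on 32-bit and 64-bit builds. A hypothetical reduced layout with a compile-time check (field sizes here are assumptions, not the real ones):

    #include <stdint.h>
    #include <assert.h>

    struct obj_fixed_part {
        char     guid[128];   /* placeholder size */
        uint8_t  tgid;        /* thread group ID */
        uint8_t  type;        /* object type */
        uint8_t  pad[6];      /* explicit hole: keeps 'users' 8-byte aligned everywhere */
        uint64_t users;       /* bitfield of processes using the object */
    };

    /* same size whether longs/pointers are 32 or 64 bits */
    static_assert(sizeof(struct obj_fixed_part) == 144, "layout drift");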

View File

@ -313,8 +313,8 @@ struct se_abort_info {
*
* <kip> is the known input payload length. It is set by the stream endpoint
* that produce data and decremented once consumed by the app
* layer. Depending on the endpoint, this value may be unset. It may be set
* only once if the payload length is fully known from the beginning (a
* loyer. Depending on the enpoint, this value may be unset. It may be set
* only once if the payload lenght is fully known from the begining (a
* HTTP message with a content-length for instance), or incremented
* periodically when more data are expected (a chunk-encoded HTTP message
* for instance). On the app side, this value is decremented when data are

View File

@ -206,7 +206,7 @@ struct stktable {
void *ptr; /* generic ptr to check if set or not */
} write_to; /* updates received on the source table will also update write_to */
THREAD_ALIGN();
THREAD_ALIGN(64);
struct {
struct eb_root keys; /* head of sticky session tree */
@ -221,7 +221,7 @@ struct stktable {
unsigned int refcnt; /* number of local peer over all peers sections
attached to this table */
unsigned int current; /* number of sticky sessions currently in table */
THREAD_ALIGN();
THREAD_ALIGN(64);
struct eb_root updates; /* head of sticky updates sequence tree, uses updt_lock */
struct mt_list *pend_updts; /* list of updates to be added to the update sequence tree, one per thread-group */
@ -229,7 +229,7 @@ struct stktable {
unsigned int localupdate; /* uses updt_lock */
struct tasklet *updt_task;/* tasklet responsible for pushing the pending updates into the tree */
THREAD_ALIGN();
THREAD_ALIGN(64);
/* this lock is heavily used and must be on its own cache line */
__decl_thread(HA_RWLOCK_T updt_lock); /* lock protecting the updates part */

View File

@ -91,7 +91,7 @@ extern struct pool_head *pool_head_task;
extern struct pool_head *pool_head_tasklet;
extern struct pool_head *pool_head_notification;
__decl_thread(extern HA_RWLOCK_T wq_lock THREAD_ALIGNED());
__decl_thread(extern HA_RWLOCK_T wq_lock THREAD_ALIGNED(64));
void __tasklet_wakeup_on(struct tasklet *tl, int thr);
struct list *__tasklet_wakeup_after(struct list *head, struct tasklet *tl);

View File

@ -51,7 +51,7 @@
/* declare a self-initializing spinlock, aligned on a cache line */
#define __decl_aligned_spinlock(lock) \
HA_SPINLOCK_T (lock) ALIGNED(64) = 0;
HA_SPINLOCK_T (lock) __attribute__((aligned(64))) = 0;
/* declare a self-initializing rwlock */
#define __decl_rwlock(lock) \
@ -59,7 +59,7 @@
/* declare a self-initializing rwlock, aligned on a cache line */
#define __decl_aligned_rwlock(lock) \
HA_RWLOCK_T (lock) ALIGNED(64) = 0;
HA_RWLOCK_T (lock) __attribute__((aligned(64))) = 0;
#else /* !USE_THREAD */
@ -72,7 +72,7 @@
/* declare a self-initializing spinlock, aligned on a cache line */
#define __decl_aligned_spinlock(lock) \
HA_SPINLOCK_T (lock) THREAD_ALIGNED(); \
HA_SPINLOCK_T (lock) __attribute__((aligned(64))); \
INITCALL1(STG_LOCK, ha_spin_init, &(lock))
/* declare a self-initializing rwlock */
@ -82,7 +82,7 @@
/* declare a self-initializing rwlock, aligned on a cache line */
#define __decl_aligned_rwlock(lock) \
HA_RWLOCK_T (lock) THREAD_ALIGNED(); \
HA_RWLOCK_T (lock) __attribute__((aligned(64))); \
INITCALL1(STG_LOCK, ha_rwlock_init, &(lock))
#endif /* USE_THREAD */
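
A small illustration (not from the diff): both spellings force the lock onto its own 64-byte cache line so that a heavily written lock does not false-share a line with neighbouring globals. A hypothetical self-initializing equivalent using plain pthreads:

    #include <pthread.h>

    /* hypothetical macro, not the HAProxy one: 64-byte alignment plus an
     * init call run before main() via a constructor, mimicking the
     * "declare + INITCALL" pattern above.
     */
    #define DECL_ALIGNED_MUTEX(name)                                   \
        static pthread_mutex_t name __attribute__((aligned(64)));      \
        __attribute__((constructor)) static void name##_init(void)     \
        { pthread_mutex_init(&(name), NULL); }

    DECL_ALIGNED_MUTEX(demo_lock);   /* sits alone on its cache line */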

View File

@ -60,6 +60,7 @@ extern int thread_cpus_enabled_at_boot;
/* Only way found to replace variables with constants that are optimized away
* at build time.
*/
enum { all_tgroups_mask = 1UL };
enum { tid_bit = 1UL };
enum { tid = 0 };
enum { tgid = 1 };
@ -207,6 +208,7 @@ void wait_for_threads_completion();
void set_thread_cpu_affinity();
unsigned long long ha_get_pthread_id(unsigned int thr);
extern volatile unsigned long all_tgroups_mask;
extern volatile unsigned int rdv_requests;
extern volatile unsigned int isolated_thread;
extern THREAD_LOCAL unsigned int tid; /* The thread id */

View File

@ -42,7 +42,7 @@ struct thread_set {
ulong abs[(MAX_THREADS + LONGBITS - 1) / LONGBITS];
ulong rel[MAX_TGROUPS];
};
ulong nbgrps; /* Number of thread groups, 0 for abs */
ulong grps; /* bit field of all non-empty groups, 0 for abs */
};
/* tasklet classes */
@ -86,7 +86,7 @@ struct tgroup_info {
/* pad to cache line (64B) */
char __pad[0]; /* unused except to check remaining room */
char __end[0] THREAD_ALIGNED();
char __end[0] __attribute__((aligned(64)));
};
/* This structure describes the group-specific context (e.g. active threads
@ -103,7 +103,7 @@ struct tgroup_ctx {
/* pad to cache line (64B) */
char __pad[0]; /* unused except to check remaining room */
char __end[0] THREAD_ALIGNED();
char __end[0] __attribute__((aligned(64)));
};
/* This structure describes all the per-thread info we need. When threads are
@ -124,7 +124,7 @@ struct thread_info {
/* pad to cache line (64B) */
char __pad[0]; /* unused except to check remaining room */
char __end[0] THREAD_ALIGNED();
char __end[0] __attribute__((aligned(64)));
};
/* This structure describes all the per-thread context we need. This is
@ -150,8 +150,7 @@ struct thread_ctx {
struct list buffer_wq[DYNBUF_NBQ]; /* buffer waiters, 4 criticality-based queues */
struct list pool_lru_head; /* oldest objects in thread-local pool caches */
struct list streams; /* list of streams attached to this thread */
struct list quic_conns_fe; /* list of active FE quic-conns attached to this thread */
struct list quic_conns_be; /* list of active BE quic-conns attached to this thread */
struct list quic_conns; /* list of active quic-conns attached to this thread */
struct list quic_conns_clo; /* list of closing quic-conns attached to this thread */
struct list queued_checks; /* checks waiting for a connection slot */
struct list tasklets[TL_CLASSES]; /* tasklets (and/or tasks) to run, by class */

View File

@ -77,7 +77,7 @@ static inline int thread_set_nth_group(const struct thread_set *ts, int n)
{
int i;
if (ts->nbgrps) {
if (ts->grps) {
for (i = 0; i < MAX_TGROUPS; i++)
if (ts->rel[i] && !n--)
return i + 1;
@ -95,7 +95,7 @@ static inline ulong thread_set_nth_tmask(const struct thread_set *ts, int n)
{
int i;
if (ts->nbgrps) {
if (ts->grps) {
for (i = 0; i < MAX_TGROUPS; i++)
if (ts->rel[i] && !n--)
return ts->rel[i];
@ -111,7 +111,7 @@ static inline void thread_set_pin_grp1(struct thread_set *ts, ulong mask)
{
int i;
ts->nbgrps = 1;
ts->grps = 1;
ts->rel[0] = mask;
for (i = 1; i < MAX_TGROUPS; i++)
ts->rel[i] = 0;
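
Purely as an aside: whether the field is a group count (nbgrps) or a bit field of non-empty groups (grps), the guard only needs to know that at least one relative group mask is populated; the lookup itself walks rel[]. A simplified sketch (MAX_TGROUPS value is an assumption):

    #define MAX_TGROUPS 16

    struct tset {
        unsigned long grps;               /* bit i set => rel[i] is non-empty */
        unsigned long rel[MAX_TGROUPS];   /* per-group relative thread masks */
    };

    /* return the thread mask of the n-th non-empty group, or 0 if none */
    static unsigned long nth_group_mask(const struct tset *ts, int n)
    {
        if (ts->grps) {
            for (int i = 0; i < MAX_TGROUPS; i++)
                if (ts->rel[i] && !n--)
                    return ts->rel[i];
        }
        return 0;
    }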

View File

@ -47,6 +47,23 @@
/* return the largest possible integer of type <ret>, with all bits set */
#define MAX_RANGE(ret) (~(typeof(ret))0)
/* DEFVAL() returns either the second argument as-is, or <def> if absent. This
* is for use in macros arguments.
*/
#define DEFVAL(_def,...) _FIRST_ARG(NULL, ##__VA_ARGS__, (_def))
/* DEFNULL() returns either the argument as-is, or NULL if absent. This is for
* use in macros arguments.
*/
#define DEFNULL(...) DEFVAL(NULL, ##__VA_ARGS__)
/* DEFZERO() returns either the argument as-is, or 0 if absent. This is for
* use in macros arguments.
*/
#define DEFZERO(...) DEFVAL(0, ##__VA_ARGS__)
#define _FIRST_ARG(a, b, ...) b
/* options flags for parse_line() */
#define PARSE_OPT_SHARP 0x00000001 // '#' ends the line
#define PARSE_OPT_BKSLASH 0x00000002 // '\' escapes chars
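
Illustrative aside, not part of the diff: the DEFVAL/DEFNULL/DEFZERO helpers shown above let a variadic macro supply a default for an optional trailing argument. A hypothetical use (wrapper and function names are made up):

    /* repeated from the hunk above so the sketch is self-contained */
    #define DEFVAL(_def,...)  _FIRST_ARG(NULL, ##__VA_ARGS__, (_def))
    #define _FIRST_ARG(a, b, ...) b

    void set_timeout(const char *name, int ms);

    /* optional timeout argument, defaulting to 1000 when omitted */
    #define SET_TIMEOUT(name, ...)  set_timeout((name), DEFVAL(1000, ##__VA_ARGS__))

    void configure(void)
    {
        SET_TIMEOUT("client");        /* -> set_timeout("client", (1000)) */
        SET_TIMEOUT("client", 5000);  /* -> set_timeout("client", 5000)   */
    }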

View File

@ -1490,6 +1490,4 @@ int path_base(const char *path, const char *base, char *dst, char **err);
void ha_freearray(char ***array);
void ha_memset_s(void *s, int c, size_t n);
#endif /* _HAPROXY_TOOLS_H */

View File

@ -33,7 +33,7 @@
#ifdef CONFIG_PRODUCT_BRANCH
#define PRODUCT_BRANCH CONFIG_PRODUCT_BRANCH
#else
#define PRODUCT_BRANCH "3.4"
#define PRODUCT_BRANCH "3.3"
#endif
#ifdef CONFIG_PRODUCT_STATUS

View File

@ -63,7 +63,7 @@
* the same split bit as its parent node, it is necessary its associated leaf
*
* When descending along the tree, it is possible to know that a search key is
* not present, because its XOR with both of the branches is strictly higher
* not present, because its XOR with both of the branches is stricly higher
* than the inter-branch XOR. The reason is simple : the inter-branch XOR will
* have its highest bit set indicating the split bit. Since it's the bit that
* differs between the two branches, the key cannot have it both set and
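
As an illustration of the early-exit property this comment describes (made-up 8-bit keys, not from the tree code):

    /* branches b1 and b2 differ only from their split bit downward; a key
     * whose XOR with both branches exceeds b1 ^ b2 differs above the split
     * bit, so it cannot be stored below this node and the descent can stop.
     */
    static inline int key_cannot_be_below(unsigned key, unsigned b1, unsigned b2)
    {
        unsigned split_xor = b1 ^ b2;
        return ((key ^ b1) > split_xor) && ((key ^ b2) > split_xor);
    }

    /* example: b1 = 0x28, b2 = 0x2C -> split_xor = 0x04;
     * key = 0x50 gives 0x78 and 0x7C, both > 0x04, so the key is absent here.
     */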

View File

@ -1 +0,0 @@
../ssl/certs/

reg-tests/checks/common.pem Symbolic link
View File

@ -0,0 +1 @@
../ssl/common.pem

View File

@ -39,7 +39,7 @@ haproxy htst -conf {
timeout connect "${HAPROXY_TEST_TIMEOUT-5s}"
frontend fe1
bind "fd@${fe1}" ssl crt ${testdir}/certs/common.pem
bind "fd@${fe1}" ssl crt ${testdir}/common.pem
frontend fe2
bind "fd@${fe2}"

View File

@ -45,10 +45,10 @@ haproxy htst -conf {
server fe1 ${htst_fe1_addr}:${htst_fe1_port}
frontend fe1
bind "fd@${fe1}" ssl crt ${testdir}/certs/common.pem curves P-256:P-384
bind "fd@${fe1}" ssl crt ${testdir}/common.pem curves P-256:P-384
frontend fe3
bind "fd@${fe3}" ssl crt ${testdir}/certs/common.pem
bind "fd@${fe3}" ssl crt ${testdir}/common.pem
} -start
haproxy h1 -conf {

View File

@ -62,7 +62,7 @@ haproxy htst -conf {
server fe1 ${htst_fe1_addr}:${htst_fe1_port}
frontend fe1
bind "fd@${fe1}" ssl crt ${testdir}/certs/common.pem
bind "fd@${fe1}" ssl crt ${testdir}/common.pem
} -start

View File

@ -60,15 +60,15 @@ haproxy h1 -conf {
frontend fe1
option httplog
log ${S1_addr}:${S1_port} len 2048 local0 debug err
bind "fd@${fe1}" ssl crt ${testdir}/certs/common.pem
bind "fd@${fe1}" ssl crt ${testdir}/common.pem
use_backend be1
frontend fe2
bind "fd@${fe2}" ssl crt ${testdir}/certs/common.pem
bind "fd@${fe2}" ssl crt ${testdir}/common.pem
use_backend be2
frontend fe3
bind "fd@${fe3}" ssl crt ${testdir}/certs/common.pem
bind "fd@${fe3}" ssl crt ${testdir}/common.pem
use_backend be3
} -start
@ -108,19 +108,19 @@ haproxy h2 -conf {
option httpchk OPTIONS * HTTP/1.1
http-check send hdr Host www
log ${S2_addr}:${S2_port} daemon
server srv1 ${h1_fe1_addr}:${h1_fe1_port} ssl crt ${testdir}/certs/common.pem verify none check
server srv1 ${h1_fe1_addr}:${h1_fe1_port} ssl crt ${testdir}/common.pem verify none check
backend be4
option log-health-checks
log ${S4_addr}:${S4_port} daemon
server srv2 ${h1_fe2_addr}:${h1_fe2_port} ssl crt ${testdir}/certs/common.pem verify none check-ssl check
server srv2 ${h1_fe2_addr}:${h1_fe2_port} ssl crt ${testdir}/common.pem verify none check-ssl check
backend be6
option log-health-checks
option httpchk OPTIONS * HTTP/1.1
http-check send hdr Host www
log ${S6_addr}:${S6_port} daemon
server srv3 127.0.0.1:80 crt ${testdir}/certs/common.pem verify none check check-ssl port ${h1_fe3_port} addr ${h1_fe3_addr}:80
server srv3 127.0.0.1:80 crt ${testdir}/common.pem verify none check check-ssl port ${h1_fe3_port} addr ${h1_fe3_addr}:80
} -start
syslog S1 -wait

View File

@ -1 +0,0 @@
../ssl/certs/

View File

@ -0,0 +1 @@
../ssl/common.pem

View File

@ -22,7 +22,7 @@ defaults
mode http
frontend main-https
bind "fd@${fe1}" ssl crt ${testdir}/certs/common.pem
bind "fd@${fe1}" ssl crt ${testdir}/common.pem
compression algo gzip
compression type text/html text/plain application/json application/javascript
compression offload

View File

@ -0,0 +1 @@
../ssl/ca-auth.crt

View File

@ -1 +0,0 @@
../ssl/certs/

View File

@ -0,0 +1 @@
../ssl/client1.pem

View File

@ -0,0 +1 @@
../ssl/common.pem

View File

@ -47,7 +47,7 @@ haproxy h1 -conf {
listen receiver
bind "fd@${feR}"
bind "fd@${feR_ssl}" ssl crt ${testdir}/certs/common.pem
bind "fd@${feR_ssl}" ssl crt ${testdir}/common.pem
bind "fd@${feR_proxy}" accept-proxy
http-request return status 200
http-after-response set-header http_first_request %[http_first_req]

View File

@ -24,7 +24,7 @@ haproxy h1 -conf {
server example ${h1_feR_addr}:${h1_feR_port} send-proxy-v2 proxy-v2-options unique-id ssl alpn XXX verify none
listen receiver
bind "fd@${feR}" ssl crt ${testdir}/certs/common.pem accept-proxy
bind "fd@${feR}" ssl crt ${testdir}/common.pem accept-proxy
http-request set-var(txn.proxy_unique_id) fc_pp_unique_id
http-after-response set-header proxy_unique_id %[var(txn.proxy_unique_id)]

View File

@ -29,7 +29,7 @@ backend be-reverse
server dev rhttp@ ssl sni hdr(x-name) verify none
frontend priv
bind "fd@${priv}" ssl crt ${testdir}/certs/common.pem verify required ca-verify-file ${testdir}/certs/ca-auth.crt alpn h2
bind "fd@${priv}" ssl crt ${testdir}/common.pem verify required ca-verify-file ${testdir}/ca-auth.crt alpn h2
tcp-request session attach-srv be-reverse/dev name ssl_c_s_dn(CN)
} -start
@ -45,7 +45,7 @@ defaults
listen li
bind "fd@${li}"
server h_edge "${h_edge_priv_addr}:${h_edge_priv_port}" ssl crt ${testdir}/certs/client1.pem verify none alpn h2
server h_edge "${h_edge_priv_addr}:${h_edge_priv_port}" ssl crt ${testdir}/client1.pem verify none alpn h2
} -start
# Run a client through private endpoint

View File

@ -1,85 +0,0 @@
varnishtest "aes_cbc converter Test"
feature cmd "$HAPROXY_PROGRAM -cc 'feature(OPENSSL)'"
feature cmd "$HAPROXY_PROGRAM -cc 'version_atleast(3.4-dev2)'"
feature ignore_unknown_macro
server s1 {
rxreq
txresp -hdr "Connection: close"
} -repeat 2 -start
haproxy h1 -conf {
global
.if feature(THREAD)
thread-groups 1
.endif
# WT: limit false-positives causing "HTTP header incomplete" due to
# idle server connections being randomly used and randomly expiring
# under us.
tune.idle-pool.shared off
defaults
mode http
timeout connect "${HAPROXY_TEST_TIMEOUT-5s}"
timeout client "${HAPROXY_TEST_TIMEOUT-5s}"
timeout server "${HAPROXY_TEST_TIMEOUT-5s}"
frontend fe
bind "fd@${fe}"
http-request set-var(txn.plain) str("Hello from HAProxy AES-CBC")
http-request set-var(txn.short_nonce) str("MTIzNDU2Nzg5MDEy")
http-request set-var(txn.nonce) str("MTIzNDU2Nzg5MDEyMzQ1Ng==")
http-request set-var(txn.key) str("Zm9vb2Zvb29mb29vb29vbw==")
# AES-CBC enc with vars + dec with strings
http-request set-var(txn.encrypted1) var(txn.plain),aes_cbc_enc(128,txn.nonce,txn.key),base64
http-after-response set-header X-Encrypted1 %[var(txn.encrypted1)]
http-request set-var(txn.decrypted1) var(txn.encrypted1),b64dec,aes_cbc_dec(128,"MTIzNDU2Nzg5MDEyMzQ1Ng==","Zm9vb2Zvb29mb29vb29vbw==")
http-after-response set-header X-Decrypted1 %[var(txn.decrypted1)]
# AES-CBC enc with strings + dec with vars
http-request set-var(txn.encrypted2) var(txn.plain),aes_cbc_enc(128,"MTIzNDU2Nzg5MDEyMzQ1Ng==","Zm9vb2Zvb29mb29vb29vbw=="),base64
http-after-response set-header X-Encrypted2 %[var(txn.encrypted2)]
http-request set-var(txn.decrypted2) var(txn.encrypted2),b64dec,aes_cbc_dec(128,txn.nonce,txn.key)
http-after-response set-header X-Decrypted2 %[var(txn.decrypted2)]
# AES-CBC + AAD enc with vars + dec with strings
http-request set-var(txn.aad) str("dGVzdAo=")
http-request set-var(txn.encrypted3) var(txn.plain),aes_cbc_enc(128,txn.nonce,txn.key,txn.aad),base64
http-after-response set-header X-Encrypted3 %[var(txn.encrypted3)]
http-request set-var(txn.decrypted3) var(txn.encrypted3),b64dec,aes_cbc_dec(128,"MTIzNDU2Nzg5MDEyMzQ1Ng==","Zm9vb2Zvb29mb29vb29vbw==","dGVzdAo=")
http-after-response set-header X-Decrypted3 %[var(txn.decrypted3)]
# AES-CBC + AAD enc with strings + enc with strings
http-request set-var(txn.encrypted4) var(txn.plain),aes_cbc_enc(128,"MTIzNDU2Nzg5MDEyMzQ1Ng==","Zm9vb2Zvb29mb29vb29vbw==","dGVzdAo="),base64
http-after-response set-header X-Encrypted4 %[var(txn.encrypted4)]
http-request set-var(txn.decrypted4) var(txn.encrypted4),b64dec,aes_cbc_dec(128,txn.nonce,txn.key,txn.aad)
http-after-response set-header X-Decrypted4 %[var(txn.decrypted4)]
# AES-CBC enc with short nonce (var) + dec with short nonce (string)
http-request set-var(txn.encrypted5) var(txn.plain),aes_cbc_enc(128,txn.short_nonce,txn.key),base64
http-after-response set-header X-Encrypted5 %[var(txn.encrypted5)]
http-request set-var(txn.decrypted5) var(txn.encrypted5),b64dec,aes_cbc_dec(128,"MTIzNDU2Nzg5MDEy","Zm9vb2Zvb29mb29vb29vbw==")
http-after-response set-header X-Decrypted5 %[var(txn.decrypted5)]
default_backend be
backend be
server s1 ${s1_addr}:${s1_port}
} -start
client c1 -connect ${h1_fe_sock} {
txreq
rxresp
expect resp.http.x-decrypted1 == "Hello from HAProxy AES-CBC"
expect resp.http.x-decrypted2 == "Hello from HAProxy AES-CBC"
expect resp.http.x-decrypted3 == "Hello from HAProxy AES-CBC"
expect resp.http.x-decrypted4 == "Hello from HAProxy AES-CBC"
expect resp.http.x-decrypted5 == "Hello from HAProxy AES-CBC"
} -run

View File

@ -1 +0,0 @@
../ssl/certs/

View File

@ -0,0 +1 @@
../ssl/common.pem

View File

@ -22,7 +22,7 @@ haproxy hapsrv -conf {
frontend fe
bind "fd@${fe}"
bind "fd@${fessl}" ssl crt ${testdir}/certs/common.pem alpn h2,http/1.1
bind "fd@${fessl}" ssl crt ${testdir}/common.pem alpn h2,http/1.1
capture request header sec-websocket-key len 128
http-request set-var(txn.ver) req.ver
use_backend be

View File

@ -1,201 +0,0 @@
#REGTEST_TYPE=devel
# This reg-test checks the behaviour of the jwt_decrypt_secret and
# jwt_decrypt_cert converters that decode a JSON Web Encryption (JWE) token,
# checks its signature and decrypt its content (RFC 7516).
# The tokens have two tiers of encryption, one that is used to encrypt a secret
# ("alg" field of the JOSE header) and this secret is then used to
# encrypt/decrypt the data contained in the token ("enc" field of the JOSE
# header).
# This reg-test tests a subset of alg/enc combination.
#
# AWS-LC does not support A128KW algorithm so for tests that use it, we will
# have a hardcoded "AWS-LC UNMANAGED" value put in the response header instead
# of the decrypted contents.
varnishtest "Test the 'jwt_decrypt' functionalities"
feature cmd "$HAPROXY_PROGRAM -cc 'version_atleast(3.4-dev2)'"
feature cmd "$HAPROXY_PROGRAM -cc 'feature(OPENSSL) && openssl_version_atleast(1.1.1)'"
feature ignore_unknown_macro
server s1 -repeat 10 {
rxreq
txresp
} -start
haproxy h1 -conf {
global
.if feature(THREAD)
thread-groups 1
.endif
.if !ssllib_name_startswith(AWS-LC)
tune.ssl.default-dh-param 2048
.endif
tune.ssl.capture-buffer-size 1
stats socket "${tmpdir}/h1/stats" level admin
crt-base "${testdir}"
key-base "${testdir}"
defaults
mode http
timeout connect "${HAPROXY_TEST_TIMEOUT-5s}"
timeout client "${HAPROXY_TEST_TIMEOUT-5s}"
timeout server "${HAPROXY_TEST_TIMEOUT-5s}"
crt-store
# Private key built out of following JWK:
# { "kty": "RSA", "e": "AQAB", "n": "wsqJbopx18NQFYLYOq4ZeMSE89yGiEankUpf25yV8QqroKUGrASj_OeqTWUjwPGKTN1vGFFuHYxiJeAUQH2qQPmg9Oqk6-ATBEKn9COKYniQ5459UxCwmZA2RL6ufhrNyq0JF3GfXkjLDBfhU9zJJEOhknsA0L_c-X4AI3d_NbFdMqxNe1V_UWAlLcbKdwO6iC9fAvwUmDQxgy6R0DC1CMouQpenMRcALaSHar1cm4K-syoNobv3HEuqgZ3s6-hOOSqauqAO0GUozPpaIA7OeruyRl5sTWT0r-iz39bchID2bIKtcqLiFcSYPLBcxmsaQCqRlGhmv6stjTCLV1yT9w", "kid": "ff3c5c96-392e-46ef-a839-6ff16027af78", "d": "b9hXfQ8lOtw8mX1dpqPcoElGhbczz_-xq2znCXQpbBPSZBUddZvchRSH5pSSKPEHlgb3CSGIdpLqsBCv0C_XmCM9ViN8uqsYgDO9uCLIDK5plWttbkqA_EufvW03R9UgIKWmOL3W4g4t-C2mBb8aByaGGVNjLnlb6i186uBsPGkvaeLHbQcRQKAvhOUTeNiyiiCbUGJwCm4avMiZrsz1r81Y1Z5izo0ERxdZymxM3FRZ9vjTB-6DtitvTXXnaAm1JTu6TIpj38u2mnNLkGMbflOpgelMNKBZVxSmfobIbFN8CHVc1UqLK2ElsZ9RCQANgkMHlMkOMj-XT0wHa3VBUQ", "p": "8mgriveKJAp1S7SHqirQAfZafxVuAK_A2QBYPsAUhikfBOvN0HtZjgurPXSJSdgR8KbWV7ZjdJM_eOivIb_XiuAaUdIOXbLRet7t9a_NJtmX9iybhoa9VOJFMBq_rbnbbte2kq0-FnXmv3cukbC2LaEw3aEcDgyURLCgWFqt7M0", "q": "zbbTv5421GowOfKVEuVoA35CEWgl8mdasnEZac2LWxMwKExikKU5LLacLQlcOt7A6n1ZGUC2wyH8mstO5tV34Eug3fnNrbnxFUEE_ZB_njs_rtZnwz57AoUXOXVnd194seIZF9PjdzZcuwXwXbrZ2RSVW8if_ZH5OVYEM1EsA9M", "dp": "1BaIYmIKn1X3InGlcSFcNRtSOnaJdFhRpotCqkRssKUx2qBlxs7ln_5dqLtZkx5VM_UE_GE7yzc6BZOwBxtOftdsr8HVh-14ksSR9rAGEsO2zVBiEuW4qZf_aQM-ScWfU--wcczZ0dT-Ou8P87Bk9K9fjcn0PeaLoz3WTPepzNE", "dq": "kYw2u4_UmWvcXVOeV_VKJ5aQZkJ6_sxTpodRBMPyQmkMHKcW4eKU1mcJju_deqWadw5jGPPpm5yTXm5UkAwfOeookoWpGa7CvVf4kPNI6Aphn3GBjunJHNpPuU6w-wvomGsxd-NqQDGNYKHuFFMcyXO_zWXglQdP_1o1tJ1M-BM", "qi": "j94Ens784M8zsfwWoJhYq9prcSZOGgNbtFWQZO8HP8pcNM9ls7YA4snTtAS_B4peWWFAFZ0LSKPCxAvJnrq69ocmEKEk7ss1Jo062f9pLTQ6cnhMjev3IqLocIFt5Vbsg_PWYpFSR7re6FRbF9EYOM7F2-HRv1idxKCWoyQfBqk" }
load crt rsa1_5.pem key rsa1_5.key jwt on
# Private key built out of following JWK:
# { "kty": "RSA", "e": "AQAB", "n": "wsqJbopx18NQFYLYOq4ZeMSE89yGiEankUpf25yV8QqroKUGrASj_OeqTWUjwPGKTN1vGFFuHYxiJeAUQH2qQPmg9Oqk6-ATBEKn9COKYniQ5459UxCwmZA2RL6ufhrNyq0JF3GfXkjLDBfhU9zJJEOhknsA0L_c-X4AI3d_NbFdMqxNe1V_UWAlLcbKdwO6iC9fAvwUmDQxgy6R0DC1CMouQpenMRcALaSHar1cm4K-syoNobv3HEuqgZ3s6-hOOSqauqAO0GUozPpaIA7OeruyRl5sTWT0r-iz39bchID2bIKtcqLiFcSYPLBcxmsaQCqRlGhmv6stjTCLV1yT9w", "kid": "ff3c5c96-392e-46ef-a839-6ff16027af78", "d": "b9hXfQ8lOtw8mX1dpqPcoElGhbczz_-xq2znCXQpbBPSZBUddZvchRSH5pSSKPEHlgb3CSGIdpLqsBCv0C_XmCM9ViN8uqsYgDO9uCLIDK5plWttbkqA_EufvW03R9UgIKWmOL3W4g4t-C2mBb8aByaGGVNjLnlb6i186uBsPGkvaeLHbQcRQKAvhOUTeNiyiiCbUGJwCm4avMiZrsz1r81Y1Z5izo0ERxdZymxM3FRZ9vjTB-6DtitvTXXnaAm1JTu6TIpj38u2mnNLkGMbflOpgelMNKBZVxSmfobIbFN8CHVc1UqLK2ElsZ9RCQANgkMHlMkOMj-XT0wHa3VBUQ", "p": "8mgriveKJAp1S7SHqirQAfZafxVuAK_A2QBYPsAUhikfBOvN0HtZjgurPXSJSdgR8KbWV7ZjdJM_eOivIb_XiuAaUdIOXbLRet7t9a_NJtmX9iybhoa9VOJFMBq_rbnbbte2kq0-FnXmv3cukbC2LaEw3aEcDgyURLCgWFqt7M0", "q": "zbbTv5421GowOfKVEuVoA35CEWgl8mdasnEZac2LWxMwKExikKU5LLacLQlcOt7A6n1ZGUC2wyH8mstO5tV34Eug3fnNrbnxFUEE_ZB_njs_rtZnwz57AoUXOXVnd194seIZF9PjdzZcuwXwXbrZ2RSVW8if_ZH5OVYEM1EsA9M", "dp": "1BaIYmIKn1X3InGlcSFcNRtSOnaJdFhRpotCqkRssKUx2qBlxs7ln_5dqLtZkx5VM_UE_GE7yzc6BZOwBxtOftdsr8HVh-14ksSR9rAGEsO2zVBiEuW4qZf_aQM-ScWfU--wcczZ0dT-Ou8P87Bk9K9fjcn0PeaLoz3WTPepzNE", "dq": "kYw2u4_UmWvcXVOeV_VKJ5aQZkJ6_sxTpodRBMPyQmkMHKcW4eKU1mcJju_deqWadw5jGPPpm5yTXm5UkAwfOeookoWpGa7CvVf4kPNI6Aphn3GBjunJHNpPuU6w-wvomGsxd-NqQDGNYKHuFFMcyXO_zWXglQdP_1o1tJ1M-BM", "qi": "j94Ens784M8zsfwWoJhYq9prcSZOGgNbtFWQZO8HP8pcNM9ls7YA4snTtAS_B4peWWFAFZ0LSKPCxAvJnrq69ocmEKEk7ss1Jo062f9pLTQ6cnhMjev3IqLocIFt5Vbsg_PWYpFSR7re6FRbF9EYOM7F2-HRv1idxKCWoyQfBqk" }
load crt rsa_oeap.pem key rsa_oeap.key jwt on
listen main-fe
bind "fd@${mainfe}"
use_backend secret_based_alg if { path_beg /secret }
use_backend pem_based_alg if { path_beg /pem }
default_backend dflt
backend secret_based_alg
http-request set-var(txn.jwe) http_auth_bearer
http-request set-var(txn.secret) hdr(X-Secret),ub64dec,base64
http-request set-var(txn.decrypted) var(txn.jwe),jwt_decrypt_secret(txn.secret)
.if ssllib_name_startswith(AWS-LC)
acl aws_unmanaged var(txn.jwe),jwt_header_query('$.alg') -m str "A128KW"
http-request set-var(txn.decrypted) str("AWS-LC UNMANAGED") if aws_unmanaged
.endif
http-response set-header X-Decrypted %[var(txn.decrypted)]
server s1 ${s1_addr}:${s1_port}
backend pem_based_alg
http-request set-var(txn.jwe) http_auth_bearer
http-request set-var(txn.pem) hdr(X-PEM)
http-request set-var(txn.decrypted) var(txn.jwe),jwt_decrypt_cert(txn.pem)
http-after-response set-header X-Decrypted %[var(txn.decrypted)]
server s1 ${s1_addr}:${s1_port}
backend dflt
server s1 ${s1_addr}:${s1_port}
} -start
#ALG: dir
#ENC: A256GCM
#KEY: {"kty":"oct", "k":"ZMpktzGq1g6_r4fKVdnx9OaYr4HjxPjIs7l7SwAsgsg"}
client c1_1 -connect ${h1_mainfe_sock} {
txreq -url "/secret" -hdr "Authorization: Bearer eyJhbGciOiAiZGlyIiwgImVuYyI6ICJBMjU2R0NNIn0..hxCk0nP4aVNpgfb7.inlyAZtUzDCTpD_9iuWx.Pyu90cmgkXenMIVu9RUp8w" -hdr "X-Secret: ZMpktzGq1g6_r4fKVdnx9OaYr4HjxPjIs7l7SwAsgsg"
rxresp
expect resp.http.x-decrypted == "Setec Astronomy"
} -run
#ALG: dir
#ENC: A256GCM
#KEY: {"kty":"oct", "k":"ZMpktzGq1g6_r4fKVdnx9OaYr4HjxPjIs7l7SwAsgsg"}
# Token is modified to have an invalid tag
client c1_2 -connect ${h1_mainfe_sock} {
txreq -url "/secret" -hdr "Authorization: Bearer eyJhbGciOiAiZGlyIiwgImVuYyI6ICJBMjU2R0NNIn0..hxCk0nP4aVNpgfb7.inlyAZtUzDCTpD_9iuWx.Pyu90cmgkXenMIVu9RUp8v" -hdr "X-Secret: ZMpktzGq1g6_r4fKVdnx9OaYr4HjxPjIs7l7SwAsgsg"
rxresp
expect resp.http.x-decrypted == ""
} -run
#ALG: dir
#ENC: A256GCM
#KEY: {"kty":"oct", "k":"ZMpktzGq1g6_r4fKVdnx9OaYr4HjxPjIs7l7SwAsgsg"}
# Wrong secret
client c1_3 -connect ${h1_mainfe_sock} {
txreq -url "/secret" -hdr "Authorization: Bearer eyJhbGciOiAiZGlyIiwgImVuYyI6ICJBMjU2R0NNIn0..hxCk0nP4aVNpgfb7.inlyAZtUzDCTpD_9iuWx.Pyu90cmgkXenMIVu9RUp8w" -hdr "X-Secret: zMpktzGq1g6_r4fKVdnx9OaYr4HjxPjIs7l7SwAsgsg"
rxresp
expect resp.http.x-decrypted == ""
} -run
#ALG: A128KW
#ENC: A128CBC-HS256
#KEY: {"kty":"oct", "k":"3921VrO5TrLvPQ-NFLlghQ"}
client c2_1 -connect ${h1_mainfe_sock} {
txreq -url "/secret" -hdr "Authorization: Bearer eyJhbGciOiAiQTEyOEtXIiwgImVuYyI6ICJBMTI4Q0JDLUhTMjU2In0.AaOyP1zNjsywJOoQ941JJWT4LQIDlpy3UibM_48HrsoCJ5ENpQhfbQ.h2ZBUiy9ofvcDZOwV2iVJA.K0FhK6ri44ZWmtFUtJRpiZSeT8feKX5grFpU8xG5026bGXAdZADO4ZkQ8DRvSEE9DwNIlK6cIEoSavm12gSzQVXajz3MWv5U6VbK5gPFCeFjJfMPmdQ9THIi-hapcueSxYz2rkcGxo3iP3ixE_bww8UB_XlQvnokhFxtf8NushMkjef4RDrW5vQu4j_qPbqG334msDKmFi8Klprs6JktrADeEJ0bPGN80NKEWp7XPcCbfmcwYe-9z_tPw_KJcQhLpQevfPLfVI4WjPgPxYNGw03qKYnLD7oTjr9qCrQmzUVXutlhxfpD3UQr11SJu8q19Ug82bON-GRd2CjpSrErQq42dd0_mWjG9iDqjqpYFBK9DV_qawy2dxFbfIcCsnb6ewifjoJLiFg2OT7-YdTaC7kqaXeE1JpA-OtMXN72FUDrnQ8r9ifj_VpMNvBf_36dbOCT-cGwIOI8Pf6HH2smXULhtBv9q-qO2zyScpmliqZDXUqmvQ8rxi-xYI2hijV80jo14teZgIotWsZE2FrMPJTkegDmh4cG5UzoUsQxzPhXqHvkss6Hv7h-_fmvXvXY1AZ8T8bL1qM4bS8mKpewmGtjmU6S220tL60ieT2QL0vmTFlJkOE8uFreWlPnxNKBix_zj4Smhg1zS_sl7GoXhp5Q_QY3MOMM5-gCAALY0crqLLWtHswElVOiJSyd64T9HFyXm7Rleqq2kLXmTvDhOR6lzMnA0rcGP7lQGYlLZgFiicsMY722XlKI3v1-cJYvj2RZMPe1ijBLFFTqyPeCBkbsDC3XCpWhMByNHSHKN3t-NJmQBIC-89ZeOMU-WBtqrDDi_CMnaz9mwkyt3P7ja_fVskc4KKBBlMVYDZ3DJeJw3Kg9Pie0XlqHkD6W1vyAWjOM2z76Rh_3553dLAH1HxNRwidLjq3SvoaX3TOU5O2_omFGPBek7QdzhNBGLgv6Zlul_XxZq9UGiVo1jrnkd40_vAZQRL6NyMxGBEij_b8F_wDMz5njrL-a0c2Y5mMno-q8gmM4sFKI1BS5HsrUAw.PFFSFlDslALnebAdaqS_MA" -hdr "X-Secret: 3921VrO5TrLvPQ-NFLlghQ"
rxresp
expect resp.http.x-decrypted ~ "(Sed ut perspiciatis unde omnis iste natus error sit voluptatem doloremque laudantium, totam rem aperiam, eaque ipsa quae ab illo veritatis et quasi architecto beatae vitae dicta sunt explicabo\\. Nemo ipsam voluptatem quia voluptas sit aspernatur aut odit aut fugit, sed consequuntur magni dolores eos qui ratione voluptatem sequi nesciunt\\. porro quisquam est, qui dolorem ipsum quia dolor sit amet, adipisci velit, sed quia non numquam eius modi tempora incidunt ut dolore magnam aliquam quaerat voluptatem\\. Ut enim ad minima veniam, nostrum exercitationem ullam corporis suscipit laboriosam, nisi ut ea commodi consequatur\\? Quis autem vel eum iure reprehenderit qui in voluptate velit esse quam nihil molestiae consequatur, vel illum qui eum fugiat quo voluptas nulla pariatur\\?|AWS-LC UNMANAGED)"
} -run
#ALG: A128KW
#ENC: A128CBC-HS256
#KEY: {"kty":"oct", "k":"3921VrO5TrLvPQ-NFLlghQ"}
# Token is modified to have an invalid tag
client c2_2 -connect ${h1_mainfe_sock} {
txreq -url "/secret" -hdr "Authorization: Bearer eyJhbGciOiAiQTEyOEtXIiwgImVuYyI6ICJBMTI4Q0JDLUhTMjU2In0.AaOyP1zNjsywJOoQ941JJWT4LQIDlpy3UibM_48HrsoCJ5ENpQhfbQ.h2ZBUiy9ofvcDZOwV2iVJA.K0FhK6ri44ZWmtFUtJRpiZSeT8feKX5grFpU8xG5026bGXAdZADO4ZkQ8DRvSEE9DwNIlK6cIEoSavm12gSzQVXajz3MWv5U6VbK5gPFCeFjJfMPmdQ9THIi-hapcueSxYz2rkcGxo3iP3ixE_bww8UB_XlQvnokhFxtf8NushMkjef4RDrW5vQu4j_qPbqG334msDKmFi8Klprs6JktrADeEJ0bPGN80NKEWp7XPcCbfmcwYe-9z_tPw_KJcQhLpQevfPLfVI4WjPgPxYNGw03qKYnLD7oTjr9qCrQmzUVXutlhxfpD3UQr11SJu8q19Ug82bON-GRd2CjpSrErQq42dd0_mWjG9iDqjqpYFBK9DV_qawy2dxFbfIcCsnb6ewifjoJLiFg2OT7-YdTaC7kqaXeE1JpA-OtMXN72FUDrnQ8r9ifj_VpMNvBf_36dbOCT-cGwIOI8Pf6HH2smXULhtBv9q-qO2zyScpmliqZDXUqmvQ8rxi-xYI2hijV80jo14teZgIotWsZE2FrMPJTkegDmh4cG5UzoUsQxzPhXqHvkss6Hv7h-_fmvXvXY1AZ8T8bL1qM4bS8mKpewmGtjmU6S220tL60ieT2QL0vmTFlJkOE8uFreWlPnxNKBix_zj4Smhg1zS_sl7GoXhp5Q_QY3MOMM5-gCAALY0crqLLWtHswElVOiJSyd64T9HFyXm7Rleqq2kLXmTvDhOR6lzMnA0rcGP7lQGYlLZgFiicsMY722XlKI3v1-cJYvj2RZMPe1ijBLFFTqyPeCBkbsDC3XCpWhMByNHSHKN3t-NJmQBIC-89ZeOMU-WBtqrDDi_CMnaz9mwkyt3P7ja_fVskc4KKBBlMVYDZ3DJeJw3Kg9Pie0XlqHkD6W1vyAWjOM2z76Rh_3553dLAH1HxNRwidLjq3SvoaX3TOU5O2_omFGPBek7QdzhNBGLgv6Zlul_XxZq9UGiVo1jrnkd40_vAZQRL6NyMxGBEij_b8F_wDMz5njrL-a0c2Y5mMno-q8gmM4sFKI1BS5HsrUAw.PFFSFlDslALnebAdaqS_Ma" -hdr "X-Secret: 3921VrO5TrLvPQ-NFLlghQ"
rxresp
expect resp.http.x-decrypted ~ "(|AWS-LC UNMANAGED)"
} -run
#ALG: A256GCMKW
#ENC: A256CBC-HS512
#KEY: {"k":"vof8hNUaHiMw_0o3EGVPtBOPDDWJ62b8kQWE2ufSjIE","kty":"oct"}
client c3 -connect ${h1_mainfe_sock} {
txreq -url "/secret" -hdr "Authorization: Bearer eyJhbGciOiJBMjU2R0NNS1ciLCJlbmMiOiJBMjU2Q0JDLUhTNTEyIiwiaXYiOiJRclluZUNxVmVldExzN1FKIiwidGFnIjoieFEyeFI2SHdBUngzeDJUdFg5UFVSZyJ9.wk4eJtdTKOPsic4IBtVcppO6Sp6LfXmxHzBvHZtU0Sk7JCVqhAghkeAw0qWJ5XsdwSneIlZ4rGygtnafFl4Thw.ylzjPBsgJ4qefDQZ_jUVpA.xX0XhdL4KTSZfRvHuZD1_Dh-XrfZogRsBHpgxkDZdYk.w8LPVak5maNeQpSWgCIGGsj26SLQZTx6nAmkvDQKFIA" -hdr "X-Secret: vof8hNUaHiMw_0o3EGVPtBOPDDWJ62b8kQWE2ufSjIE"
rxresp
expect resp.http.x-decrypted == "My Encrypted message"
} -run
# RFC7516 JWE
# https://datatracker.ietf.org/doc/html/rfc7516#appendix-A.3
#ALG: A128KW
#ENC: A128CBC-HS256
#KEY: {"kty":"oct", "k":"GawgguFyGrWKav7AX4VKUg" }
client c4 -connect ${h1_mainfe_sock} {
txreq -url "/secret" -hdr "Authorization: Bearer eyJhbGciOiJBMTI4S1ciLCJlbmMiOiJBMTI4Q0JDLUhTMjU2In0.6KB707dM9YTIgHtLvtgWQ8mKwboJW3of9locizkDTHzBC2IlrT1oOQ.AxY8DCtDaGlsbGljb3RoZQ.KDlTtXchhZTGufMYmOYGS4HffxPSUrfmqCHXaI9wOGY.U0m_YmjN04DJvceFICbCVQ" -hdr "X-Secret: GawgguFyGrWKav7AX4VKUg"
rxresp
expect resp.http.x-decrypted ~ "(Live long and prosper\\.|AWS-LC UNMANAGED)"
} -run
#ALG: A256GCMKW
#ENC: A192CBC-HS384
#KEY: {"k":"vprpatiNyI-biJY57qr8Gg4--4Rycgb2G5yO1_myYAw","kty":"oct"}
client c5 -connect ${h1_mainfe_sock} {
txreq -url "/secret" -hdr "Authorization: Bearer eyJhbGciOiJBMjU2R0NNS1ciLCJlbmMiOiJBMTkyQ0JDLUhTMzg0IiwiaXYiOiJzVE81QjlPRXFuaUhCX3dYIiwidGFnIjoid2M1ZnRpYUFnNGNOR1JkZzNWQ3FXdyJ9.2zqnM9zeNU-eAMp5h2uFJyxbHHKsZs9YAYKzOcIF3d3Q9uq1TMQAvqOIuXw3kU9o.hh5aObIoIMR6Ke0rXm6V1A.R7U-4OlqOR6f2C1b3nI5bFqZBIGNBgza7FfoPEgrQT8.asJCzUAHCuxS7o8Ut4ENfaY5RluLB35F" -hdr "X-Secret: vprpatiNyI-biJY57qr8Gg4--4Rycgb2G5yO1_myYAw"
rxresp
expect resp.http.x-decrypted == "My Encrypted message"
} -run
#ALG: RSA1_5
#ENC: A256GCM
client c6 -connect ${h1_mainfe_sock} {
txreq -url "/pem" -hdr "Authorization: Bearer eyJhbGciOiAiUlNBMV81IiwgImVuYyI6ICJBMjU2R0NNIn0.ew8AbprGcd_J73-CZPIsE1YonD9rtcL7VCuOOuVkrpS_9UzA9_kMh1yw20u-b5rKJAhmFMCQPXl44ro6IzOeHu8E2X_NlPEnQfyNVQ4R1HB_E9sSk5BLxOH3aHkVUh0I-e2eDDj-pdI3OrdjZtnZEBeQ7tpMcoBEbn1VGg7Pmw4qtdS-0qnDSs-PttU-cejjgPUNLRU8UdoRVC9uJKacJms110QugDuFuMYTTSU2nbIYh0deCMRAuKGWt0Ii6EMYW2JaJ7JfXag59Ar1uylQPyEVrocnOsDuB9xnp2jd796qCPdKxBK9yKUnwjal4SQpYbutr40QzG1S4MsKaUorLg.0el2ruY0mm2s7LUR.X5RI6dF06Y_dbAr8meb-6SG5enj5noto9nzgQU5HDrYdiUofPptIf6E-FikKUM9QR4pY9SyphqbPYeAN1ZYVxBrR8tUf4Do2kw1biuuRAmuIyytpmxwvY946T3ctu1Zw3Ymwe-jWXX08EngzssvzFOGT66gkdufrTkC45Fkr0RBOmWa5OVVg_VR6LwcivtQMmlArlrwbaDmmLqt_2p7afT0UksEz4loq0sskw-p7GbhB2lpzXoDnijdHrQkftRbVCiDbK4-qGr7IRFb0YOHvyVFr-kmDoJv2Zsg_rPKV1LkYmPJUbVDo9T3RAcLinlKPK4ZPC_2bWj3M9BvfOq1HeuyVWzX2Cb1mHFdxXFGqaLPfsE0VOfn0GqL7oHVbuczYYw2eKdmiw5LEMwuuJEdYDE9IIFEe8oRB4hNZ0XMYB6oqqZejD0Fh6nqlj5QUrTYpTSE-3LkgK2zRJ0oZFXZyHCB426bmViuE0mXF7twkQep09g0U35-jFBZcSYBDvZZL1t5d_YEQ0QtO0mEeEpGb0Pvk_EsSMFib7NxClz4_rdtwWCFuM4uFOS5vrQMiMqi_TadhLxrugRFhJpsibuScCiJ7eNDrUvwSWEwv1U593MUX3guDq_ONOo_49EOJSyRJtQCNC6FW6GLWSz9TCo6g5LCnXt-pqwu0Iymr7ZTQ3MTsdq2G55JM2e6SdG43iET8r235hynmXHKPUYHlSjsC2AEAY_pGDO0akIhf4wDVIM5rytn-rjQf-29ZJp05g6KPe-EaN1C-X7aBGhgAEgnX-iaXXbotpGeKRTNj2jAG1UrkYi6BGHxluiXJ8jH_LjHuxKyzIObqK8p28ePDKRL-jyNTrvGW2uorgb_u7HGmWYIWLTI7obnZ5vw3MbkjcwEd4bX5JXUj2rRsUWMlZSSFVO9Wgf7MBvcLsyF0Yqun3p0bi__edmcqNF_uuYZT-8jkUlMborqIDDCYYqIolgi5R1Bmut-gFYq6xyfEncxOi50xmYon50UulVnAH-up_RELGtCjmAivaJb8.upVY733IMAT8YbMab2PZnw" -hdr "X-PEM: ${testdir}/rsa1_5.pem"
rxresp
expect resp.http.x-decrypted == "Sed ut perspiciatis unde omnis iste natus error sit voluptatem doloremque laudantium, totam rem aperiam, eaque ipsa quae ab illo veritatis et quasi architecto beatae vitae dicta sunt explicabo. Nemo ipsam voluptatem quia voluptas sit aspernatur aut odit aut fugit, sed consequuntur magni dolores eos qui ratione voluptatem sequi nesciunt. porro quisquam est, qui dolorem ipsum quia dolor sit amet, adipisci velit, sed quia non numquam eius modi tempora incidunt ut dolore magnam aliquam quaerat voluptatem. Ut enim ad minima veniam, nostrum exercitationem ullam corporis suscipit laboriosam, nisi ut ea commodi consequatur? Quis autem vel eum iure reprehenderit qui in voluptate velit esse quam nihil molestiae consequatur, vel illum qui eum fugiat quo voluptas nulla pariatur?"
} -run
#ALG: RSA-OAEP
#ENC: A256GCM
client c7 -connect ${h1_mainfe_sock} {
txreq -url "/pem" -hdr "Authorization: Bearer eyJhbGciOiAiUlNBLU9BRVAiLCAiZW5jIjogIkEyNTZHQ00ifQ.Os33U1HEY92lrpup2E-HNttBW26shGSCafqNbVfs1rwWB__B-0dRAiKg4OtIrIXVCN7oQMqLr9RFRO6Gb-OAPIr-59FETLSXP8K_3uNcy-jdKrpKLbv8wgisEYqBJj4BysZQjuWgUgJ7Dvx28_zIUg0FJGOwxtpX2SUWxEgw5CPRgRrENJDJ2EYA6wuX9SbfarhQR4uPN7pdRKZ0ZQN6_5H3H9pWJ4WNnsQ0wjChKTsdR3kHOvygiUmdYSEWGe6LBQLSBQCnQim1pr--GBOHvDf2g4Je9EDFrrO1icFDbBdJ8I4ol4ixglLEnBCTHdhYd_lVe0i5JcxxHF8hmemAYQ.IOphaFIcCosKyXcN.KEjWfV2yBKLuMLX20mtEvrQ-P_oKWkdgZabx0FgRLqjSorD7DS3aIXLMEmyrOYd4kGHKCMg2Fvg61xKvI2FsQviA5LgHtx0QKmFARacP8kBl8vFPMEg2WtW0rIImTc1tj4C0PM9A0TbyDohtcoN9UYosrw5GyPOlHwIFwWosLA9WHqp00MAfAu3JOa4CwuMXsORGzeIyb7X-jg_bbG_9xkVUsgZpaCUX447a3QmKLJVBfQpeEO_PuYbds-MvIU9m4uYzWplNeHnf3B1dh9p6o4Ml6OEp-0G_4Nd4UmMz_g9A-TatH-A__MAC9Mx1Wj1cDn5M3upcrAyu2JLQ48A-Qa2ocElhQ4ODzwbgbC5PS34Mlm_x18zqL-0Fw3ckhzgoAyDBoRO6SaNmsKb1wQ6QGbwBJx1jC51hpzBHRv3pUlegsHXgq7OWN1x1tDJvRc_DHMa23Mheg-aKJcliP846Dduq2_Hve3md30C0hbrP1OMF5ZJSVu4kUo7UFaZA_6hhcoGvvyEGDMnPH5SznrrsyHGIre-WOdXCObZNkDV6Qn0sqAP_vkj_6Dj965W8ksCKk6ye409cB4mnqfLv3dUtGLV8o8VtCLIEs2G62lwaDGrX4HB-pZ6jea2qH6UvgwK5WT-VzrypSQcVoWCKopln2gtO1JROKmbOiL9f8dfbLKqYSRB6ppMxh5Euddx_eNikZfLEcXfq2Grwyrj0NLP82AFSxSYf3BpYqpOhSxca0gx0psb8tCwq3sqmh5Bp_qmKIOthXb6k-9R_Ng6cRTp132OnDEXEDtvDv59WJWHuo4qACyrg7jUlrh4dAYwYke1yBgVcqK5JwVnmKDjnx9vRGFSD9esrL8MpGiP6uUeN3AXiv7OSb83hDdwTTQU5nvitHWKS72Mb1FRPdDXUxooiyShAkV5Spo3YNl4EHkm6lnlJ-kC3BFlxYqYd5a_vtqA-ywR7ozWo1GtMBjYycq2s9Kp8FnqI2cTWobOCjMxaej4CXaRA4IwhjC1u6OTCvxP70MWYT0pJPjUS.k9i0Lw9MfJs4Rp-_uwIEeA" -hdr "X-PEM: ${testdir}/rsa_oeap.pem"
rxresp
expect resp.http.x-decrypted == "Sed ut perspiciatis unde omnis iste natus error sit voluptatem doloremque laudantium, totam rem aperiam, eaque ipsa quae ab illo veritatis et quasi architecto beatae vitae dicta sunt explicabo. Nemo ipsam voluptatem quia voluptas sit aspernatur aut odit aut fugit, sed consequuntur magni dolores eos qui ratione voluptatem sequi nesciunt. porro quisquam est, qui dolorem ipsum quia dolor sit amet, adipisci velit, sed quia non numquam eius modi tempora incidunt ut dolore magnam aliquam quaerat voluptatem. Ut enim ad minima veniam, nostrum exercitationem ullam corporis suscipit laboriosam, nisi ut ea commodi consequatur? Quis autem vel eum iure reprehenderit qui in voluptate velit esse quam nihil molestiae consequatur, vel illum qui eum fugiat quo voluptas nulla pariatur?"
} -run

View File

@ -1,27 +0,0 @@
-----BEGIN RSA PRIVATE KEY-----
MIIEpQIBAAKCAQEAwsqJbopx18NQFYLYOq4ZeMSE89yGiEankUpf25yV8QqroKUG
rASj/OeqTWUjwPGKTN1vGFFuHYxiJeAUQH2qQPmg9Oqk6+ATBEKn9COKYniQ5459
UxCwmZA2RL6ufhrNyq0JF3GfXkjLDBfhU9zJJEOhknsA0L/c+X4AI3d/NbFdMqxN
e1V/UWAlLcbKdwO6iC9fAvwUmDQxgy6R0DC1CMouQpenMRcALaSHar1cm4K+syoN
obv3HEuqgZ3s6+hOOSqauqAO0GUozPpaIA7OeruyRl5sTWT0r+iz39bchID2bIKt
cqLiFcSYPLBcxmsaQCqRlGhmv6stjTCLV1yT9wIDAQABAoIBAG/YV30PJTrcPJl9
Xaaj3KBJRoW3M8//sats5wl0KWwT0mQVHXWb3IUUh+aUkijxB5YG9wkhiHaS6rAQ
r9Av15gjPVYjfLqrGIAzvbgiyAyuaZVrbW5KgPxLn71tN0fVICClpji91uIOLfgt
pgW/GgcmhhlTYy55W+otfOrgbDxpL2nix20HEUCgL4TlE3jYsoogm1BicApuGrzI
ma7M9a/NWNWeYs6NBEcXWcpsTNxUWfb40wfug7Yrb01152gJtSU7ukyKY9/Ltppz
S5BjG35TqYHpTDSgWVcUpn6GyGxTfAh1XNVKiythJbGfUQkADYJDB5TJDjI/l09M
B2t1QVECgYEA8mgriveKJAp1S7SHqirQAfZafxVuAK/A2QBYPsAUhikfBOvN0HtZ
jgurPXSJSdgR8KbWV7ZjdJM/eOivIb/XiuAaUdIOXbLRet7t9a/NJtmX9iybhoa9
VOJFMBq/rbnbbte2kq0+FnXmv3cukbC2LaEw3aEcDgyURLCgWFqt7M0CgYEAzbbT
v5421GowOfKVEuVoA35CEWgl8mdasnEZac2LWxMwKExikKU5LLacLQlcOt7A6n1Z
GUC2wyH8mstO5tV34Eug3fnNrbnxFUEE/ZB/njs/rtZnwz57AoUXOXVnd194seIZ
F9PjdzZcuwXwXbrZ2RSVW8if/ZH5OVYEM1EsA9MCgYEA1BaIYmIKn1X3InGlcSFc
NRtSOnaJdFhRpotCqkRssKUx2qBlxs7ln/5dqLtZkx5VM/UE/GE7yzc6BZOwBxtO
ftdsr8HVh+14ksSR9rAGEsO2zVBiEuW4qZf/aQM+ScWfU++wcczZ0dT+Ou8P87Bk
9K9fjcn0PeaLoz3WTPepzNECgYEAkYw2u4/UmWvcXVOeV/VKJ5aQZkJ6/sxTpodR
BMPyQmkMHKcW4eKU1mcJju/deqWadw5jGPPpm5yTXm5UkAwfOeookoWpGa7CvVf4
kPNI6Aphn3GBjunJHNpPuU6w+wvomGsxd+NqQDGNYKHuFFMcyXO/zWXglQdP/1o1
tJ1M+BMCgYEAj94Ens784M8zsfwWoJhYq9prcSZOGgNbtFWQZO8HP8pcNM9ls7YA
4snTtAS/B4peWWFAFZ0LSKPCxAvJnrq69ocmEKEk7ss1Jo062f9pLTQ6cnhMjev3
IqLocIFt5Vbsg/PWYpFSR7re6FRbF9EYOM7F2+HRv1idxKCWoyQfBqk=
-----END RSA PRIVATE KEY-----

View File

@ -1,21 +0,0 @@
-----BEGIN CERTIFICATE-----
MIIDizCCAnOgAwIBAgIUWKLX2P4KNDw9kBROSjFXWa/kjtowDQYJKoZIhvcNAQEL
BQAwVTELMAkGA1UEBhMCQVUxEzARBgNVBAgMClNvbWUtU3RhdGUxITAfBgNVBAoM
GEludGVybmV0IFdpZGdpdHMgUHR5IEx0ZDEOMAwGA1UEAwwFYWEuYmIwHhcNMjUx
MjA0MTYyMTE2WhcNMjYxMjA0MTYyMTE2WjBVMQswCQYDVQQGEwJBVTETMBEGA1UE
CAwKU29tZS1TdGF0ZTEhMB8GA1UECgwYSW50ZXJuZXQgV2lkZ2l0cyBQdHkgTHRk
MQ4wDAYDVQQDDAVhYS5iYjCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEB
AMLKiW6KcdfDUBWC2DquGXjEhPPchohGp5FKX9uclfEKq6ClBqwEo/znqk1lI8Dx
ikzdbxhRbh2MYiXgFEB9qkD5oPTqpOvgEwRCp/QjimJ4kOeOfVMQsJmQNkS+rn4a
zcqtCRdxn15IywwX4VPcySRDoZJ7ANC/3Pl+ACN3fzWxXTKsTXtVf1FgJS3GyncD
uogvXwL8FJg0MYMukdAwtQjKLkKXpzEXAC2kh2q9XJuCvrMqDaG79xxLqoGd7Ovo
TjkqmrqgDtBlKMz6WiAOznq7skZebE1k9K/os9/W3ISA9myCrXKi4hXEmDywXMZr
GkAqkZRoZr+rLY0wi1dck/cCAwEAAaNTMFEwHQYDVR0OBBYEFD+wduQlsKCoxfO5
U1W7Urqs+oTbMB8GA1UdIwQYMBaAFD+wduQlsKCoxfO5U1W7Urqs+oTbMA8GA1Ud
EwEB/wQFMAMBAf8wDQYJKoZIhvcNAQELBQADggEBAANfh6jY8+3XQ16SH7Pa07MK
ncnQuZqMemYUQzieBL15zftdpd0vYjOfaN5UAQ7ODVAb/iTF4nnADl0VwOocqEiR
vfaqwJTmKiNDjyIp1SJjhkRcYu3hmDXTZOzhuFxoZALe7OzWFgSjf3fX2IOOBfH+
HBqviTuMi53oURWv/ISPXk+Dr7LaCmm1rEjRq8PINJ2Ni6cN90UvHOrHdl+ty2o/
C3cQWIZrsNM6agUfiNiPCWz6x+Z4t+zP7+EorCM7CKKLGnycPUJE2I6H8bJmIHHS
ITNmUO5juLawQ5h2m5Wu/BCY3rlLU9SLrmWAAHm6lFJb0XzFgqhiCz7lxYofj8c=
-----END CERTIFICATE-----

View File

@ -1,28 +0,0 @@
-----BEGIN RSA PRIVATE KEY-----
MIIEpQIBAAKCAQEAwsqJbopx18NQFYLYOq4ZeMSE89yGiEankUpf25yV8QqroKUG
rASj/OeqTWUjwPGKTN1vGFFuHYxiJeAUQH2qQPmg9Oqk6+ATBEKn9COKYniQ5459
UxCwmZA2RL6ufhrNyq0JF3GfXkjLDBfhU9zJJEOhknsA0L/c+X4AI3d/NbFdMqxN
e1V/UWAlLcbKdwO6iC9fAvwUmDQxgy6R0DC1CMouQpenMRcALaSHar1cm4K+syoN
obv3HEuqgZ3s6+hOOSqauqAO0GUozPpaIA7OeruyRl5sTWT0r+iz39bchID2bIKt
cqLiFcSYPLBcxmsaQCqRlGhmv6stjTCLV1yT9wIDAQABAoIBAG/YV30PJTrcPJl9
Xaaj3KBJRoW3M8//sats5wl0KWwT0mQVHXWb3IUUh+aUkijxB5YG9wkhiHaS6rAQ
r9Av15gjPVYjfLqrGIAzvbgiyAyuaZVrbW5KgPxLn71tN0fVICClpji91uIOLfgt
pgW/GgcmhhlTYy55W+otfOrgbDxpL2nix20HEUCgL4TlE3jYsoogm1BicApuGrzI
ma7M9a/NWNWeYs6NBEcXWcpsTNxUWfb40wfug7Yrb01152gJtSU7ukyKY9/Ltppz
S5BjG35TqYHpTDSgWVcUpn6GyGxTfAh1XNVKiythJbGfUQkADYJDB5TJDjI/l09M
B2t1QVECgYEA8mgriveKJAp1S7SHqirQAfZafxVuAK/A2QBYPsAUhikfBOvN0HtZ
jgurPXSJSdgR8KbWV7ZjdJM/eOivIb/XiuAaUdIOXbLRet7t9a/NJtmX9iybhoa9
VOJFMBq/rbnbbte2kq0+FnXmv3cukbC2LaEw3aEcDgyURLCgWFqt7M0CgYEAzbbT
v5421GowOfKVEuVoA35CEWgl8mdasnEZac2LWxMwKExikKU5LLacLQlcOt7A6n1Z
GUC2wyH8mstO5tV34Eug3fnNrbnxFUEE/ZB/njs/rtZnwz57AoUXOXVnd194seIZ
F9PjdzZcuwXwXbrZ2RSVW8if/ZH5OVYEM1EsA9MCgYEA1BaIYmIKn1X3InGlcSFc
NRtSOnaJdFhRpotCqkRssKUx2qBlxs7ln/5dqLtZkx5VM/UE/GE7yzc6BZOwBxtO
ftdsr8HVh+14ksSR9rAGEsO2zVBiEuW4qZf/aQM+ScWfU++wcczZ0dT+Ou8P87Bk
9K9fjcn0PeaLoz3WTPepzNECgYEAkYw2u4/UmWvcXVOeV/VKJ5aQZkJ6/sxTpodR
BMPyQmkMHKcW4eKU1mcJju/deqWadw5jGPPpm5yTXm5UkAwfOeookoWpGa7CvVf4
kPNI6Aphn3GBjunJHNpPuU6w+wvomGsxd+NqQDGNYKHuFFMcyXO/zWXglQdP/1o1
tJ1M+BMCgYEAj94Ens784M8zsfwWoJhYq9prcSZOGgNbtFWQZO8HP8pcNM9ls7YA
4snTtAS/B4peWWFAFZ0LSKPCxAvJnrq69ocmEKEk7ss1Jo062f9pLTQ6cnhMjev3
IqLocIFt5Vbsg/PWYpFSR7re6FRbF9EYOM7F2+HRv1idxKCWoyQfBqk=
-----END RSA PRIVATE KEY-----

View File

@ -1,22 +0,0 @@
-----BEGIN CERTIFICATE-----
MIIDjTCCAnWgAwIBAgIUHGhD07tC9adNLCkSBNrfrhFUX9IwDQYJKoZIhvcNAQEL
BQAwVTELMAkGA1UEBhMCQVUxEzARBgNVBAgMClNvbWUtU3RhdGUxITAfBgNVBAoM
GEludGVybmV0IFdpZGdpdHMgUHR5IEx0ZDEOMAwGA1UEAwwFYWEuYmIwIBcNMjUx
MjA1MTMxOTQ0WhgPMjA1MzA0MjIxMzE5NDRaMFUxCzAJBgNVBAYTAkFVMRMwEQYD
VQQIDApTb21lLVN0YXRlMSEwHwYDVQQKDBhJbnRlcm5ldCBXaWRnaXRzIFB0eSBM
dGQxDjAMBgNVBAMMBWFhLmJiMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKC
AQEAwsqJbopx18NQFYLYOq4ZeMSE89yGiEankUpf25yV8QqroKUGrASj/OeqTWUj
wPGKTN1vGFFuHYxiJeAUQH2qQPmg9Oqk6+ATBEKn9COKYniQ5459UxCwmZA2RL6u
fhrNyq0JF3GfXkjLDBfhU9zJJEOhknsA0L/c+X4AI3d/NbFdMqxNe1V/UWAlLcbK
dwO6iC9fAvwUmDQxgy6R0DC1CMouQpenMRcALaSHar1cm4K+syoNobv3HEuqgZ3s
6+hOOSqauqAO0GUozPpaIA7OeruyRl5sTWT0r+iz39bchID2bIKtcqLiFcSYPLBc
xmsaQCqRlGhmv6stjTCLV1yT9wIDAQABo1MwUTAdBgNVHQ4EFgQUP7B25CWwoKjF
87lTVbtSuqz6hNswHwYDVR0jBBgwFoAUP7B25CWwoKjF87lTVbtSuqz6hNswDwYD
VR0TAQH/BAUwAwEB/zANBgkqhkiG9w0BAQsFAAOCAQEArDl4gSwqpriAFjWcAtWE
sTLTxNgbnkARDeyhQ1dj6rj9xCccBU6WN07r639c9S0lsMb+jeQU9EJFoVtX91jM
fymumOWMDY/CYm41PkHqcF6hEup5dfAeDnN/OoDjXwgTU74Y3lF/sldeS06KorCp
O9ROyq3mM9n4EtFAAEEN2Esyy1d1CJiMYKHdYRKycMwgcu1pm9n1up4ivdgLY+BH
XhnJPuKmmU3FauYlXzfcijUPAAuJdm3PZ+i4SNGsTa49tXOkHMED31EOjaAEzuX0
rWij715QkL/RIp8lPxeAvHqxavQIDtfjojFD21Cx+jIGuNcfrGNkzNjfS7AF+1+W
jA==
-----END CERTIFICATE-----

View File

@ -1 +0,0 @@
../ssl/certs/

reg-tests/lua/common.pem Symbolic link
View File

@ -0,0 +1 @@
../ssl/common.pem

View File

@ -32,7 +32,7 @@ haproxy h1 -conf {
frontend fe2
mode http
bind ":8443" ssl crt ${testdir}/certs/common.pem
bind ":8443" ssl crt ${testdir}/common.pem
stats enable
stats uri /

View File

@ -26,7 +26,7 @@ haproxy h1 -conf {
frontend fe2
mode http
bind ":8443" ssl crt ${testdir}/certs/common.pem
bind ":8443" ssl crt ${testdir}/common.pem
stats enable
stats uri /

View File

@ -1 +0,0 @@
../ssl/certs

reg-tests/peers/common.pem Symbolic link
View File

@ -0,0 +1 @@
../ssl/common.pem

Some files were not shown because too many files have changed in this diff.